diff --git a/.config/nextest.toml b/.config/nextest.toml index b701259fc2..1ef771b3d9 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -20,7 +20,7 @@ retries = 0 # The number of threads to run tests with. Supported values are either an integer or # the string "num-cpus". Can be overridden through the `--test-threads` option. -test-threads = "num-cpus" +test-threads = 8 # The number of threads required for each test. This is generally used in overrides to # mark certain tests as heavier than others. However, it can also be set as a global parameter. diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8145503e70..007070dbb5 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -5,7 +5,6 @@ on: branches: - unstable - stable - - deneb-free-blobs tags: - v* @@ -41,11 +40,6 @@ jobs: run: | echo "VERSION=latest" >> $GITHUB_ENV echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV - - name: Extract version (if deneb) - if: github.event.ref == 'refs/heads/deneb-free-blobs' - run: | - echo "VERSION=deneb" >> $GITHUB_ENV - echo "VERSION_SUFFIX=" >> $GITHUB_ENV - name: Extract version (if tagged release) if: startsWith(github.event.ref, 'refs/tags') run: | diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 1d80feaddf..f0811257ae 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -317,10 +317,11 @@ jobs: ./doppelganger_protection.sh success genesis.json execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust + if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -328,6 +329,9 @@ jobs: cache: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: 
Add go compiler to $PATH + if: env.SELF_HOSTED_RUNNERS == 'true' + run: echo "/usr/local/go/bin" >> $GITHUB_PATH - name: Run exec engine integration tests in release run: make test-exec-engine check-code: diff --git a/.gitignore b/.gitignore index bbae314541..e63e218a3b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ target/ +vendor/ **/*.rs.bk *.pk *.sk diff --git a/Cargo.lock b/Cargo.lock index 157c4aad91..6216901420 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -53,7 +53,6 @@ dependencies = [ "regex", "rpassword", "serde", - "serde_derive", "serde_yaml", "slog", "types", @@ -153,9 +152,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] @@ -296,7 +295,7 @@ dependencies = [ "log", "parking", "polling", - "rustix 0.37.23", + "rustix 0.37.24", "slab", "socket2 0.4.9", "waker-fn", @@ -319,7 +318,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -666,7 +665,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.37", + "syn 2.0.38", "which", ] @@ -755,7 +754,6 @@ dependencies = [ "milagro_bls", "rand 0.8.5", "serde", - "serde_derive", "tree_hash", "zeroize", ] @@ -796,7 +794,6 @@ dependencies = [ "log", "logging", "serde", - "serde_derive", "serde_json", "serde_yaml", "slog", @@ -849,9 +846,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = 
"1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -937,9 +934,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" +checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" dependencies = [ "serde", ] @@ -1115,7 +1112,6 @@ dependencies = [ "parking_lot 0.12.1", "sensitive_url", "serde", - "serde_derive", "serde_yaml", "slasher", "slasher_service", @@ -1375,9 +1371,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", @@ -1387,9 +1383,9 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] @@ -1451,7 +1447,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1645,7 +1641,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1663,9 +1659,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53c8a2cb22327206568569e5a45bb5a2c946455efdd76e24d15b7e82171af95e" +checksum = 
"2268a214a6f118fce1838edba3d1561cf0e78d8de785475957a580a7f8c69d33" dependencies = [ "bitflags 2.4.0", "byteorder", @@ -1684,7 +1680,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1704,7 +1700,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1819,7 +1815,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1854,7 +1850,7 @@ checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ "der 0.7.8", "digest 0.10.7", - "elliptic-curve 0.13.5", + "elliptic-curve 0.13.6", "rfc6979 0.4.0", "signature 2.1.0", "spki 0.7.2", @@ -1880,7 +1876,7 @@ dependencies = [ "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.7", + "sha2 0.10.8", "zeroize", ] @@ -1907,7 +1903,6 @@ dependencies = [ "logging", "rayon", "serde", - "serde_derive", "serde_json", "serde_repr", "serde_yaml", @@ -1947,9 +1942,9 @@ dependencies = [ [[package]] name = "elliptic-curve" -version = "0.13.5" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" +checksum = "d97ca172ae9dc9f9b779a6e3a65d308f2af74e5b8c921299075bdb4a0370e914" dependencies = [ "base16ct 0.2.0", "crypto-bigint 0.5.3", @@ -2039,7 +2034,6 @@ dependencies = [ "futures", "logging", "serde", - "serde_derive", "slog", "slog-async", "slog-json", @@ -2067,25 +2061,14 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" +checksum = 
"ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "errno-dragonfly", "libc", "windows-sys 0.48.0", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "error-chain" version = "0.12.4" @@ -2194,7 +2177,6 @@ dependencies = [ "lazy_static", "num-bigint", "serde", - "serde_derive", "serde_yaml", ] @@ -2366,7 +2348,7 @@ dependencies = [ "impl-codec 0.6.0", "impl-rlp", "impl-serde 0.4.0", - "primitive-types 0.12.1", + "primitive-types 0.12.2", "scale-info", "uint", ] @@ -2380,7 +2362,7 @@ dependencies = [ "cpufeatures", "lazy_static", "ring", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -2582,6 +2564,7 @@ dependencies = [ "bytes", "environment", "eth2", + "eth2_network_config", "ethereum_serde_utils", "ethereum_ssz", "ethers-core", @@ -2905,7 +2888,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3165,9 +3148,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" dependencies = [ "ahash 0.8.3", "allocator-api2", @@ -3197,7 +3180,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.1", ] [[package]] @@ -3672,12 +3655,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.1", ] [[package]] @@ -3796,9 +3779,9 @@ dependencies = [ [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" dependencies = [ "libc", ] @@ -3835,7 +3818,7 @@ dependencies = [ "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.7", + "sha2 0.10.8", "sha3 0.10.8", ] @@ -3847,9 +3830,9 @@ checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ "cfg-if", "ecdsa 0.16.8", - "elliptic-curve 0.13.5", + "elliptic-curve 0.13.6", "once_cell", - "sha2 0.10.7", + "sha2 0.10.8", "signature 2.1.0", ] @@ -3868,7 +3851,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b286e6b663fb926e1eeb68528e69cb70ed46c6d65871a21b2215ae8154c6d3c" dependencies = [ - "primitive-types 0.12.1", + "primitive-types 0.12.2", "tiny-keccak", ] @@ -3886,7 +3869,6 @@ dependencies = [ "ethereum_ssz_derive", "hex", "serde", - "serde_derive", "tree_hash", ] @@ -3969,9 +3951,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.148" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libflate" @@ -4005,9 +3987,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libmdbx" @@ -4147,7 +4129,7 @@ dependencies = [ "quick-protobuf-codec", "rand 0.8.5", "regex", - "sha2 0.10.7", + "sha2 0.10.8", "smallvec", "unsigned-varint 0.7.2", "void", @@ -4177,13 +4159,14 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686e73aff5e23efbb99bc85340ea6fd8686986aa7b283a881ba182cfca535ca9" +checksum = "57bf6e730ec5e7022958da53ffb03b326e681b7316939012ae9b3c7449a812d4" dependencies = [ "asn1_der", "bs58 0.5.0", "ed25519-dalek", + "hkdf", "libsecp256k1", "log", "multihash", @@ -4191,7 +4174,7 @@ dependencies = [ "quick-protobuf", "rand 0.8.5", "sec1 0.7.3", - "sha2 0.10.7", + "sha2 0.10.8", "thiserror", "void", "zeroize", @@ -4270,7 +4253,7 @@ dependencies = [ "once_cell", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "snow", "static_assertions", "thiserror", @@ -4319,9 +4302,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.43.4" +version = "0.43.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0cf749abdc5ca1dce6296dc8ea0f012464dfcfd3ddd67ffc0cabd8241c4e1da" +checksum = "ab94183f8fc2325817835b57946deb44340c99362cd4606c0a5717299b2ba369" dependencies = [ "either", "fnv", @@ -4350,7 +4333,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4554,7 +4537,6 @@ dependencies = [ "rand 0.8.5", "regex", "serde", - "serde_derive", "sha2 0.9.9", "slog", "slog-async", @@ -4607,9 +4589,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.7" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lmdb-rkv" @@ -4792,9 +4774,9 @@ checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" [[package]] name = "memchr" -version = "2.6.3" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memoffset" @@ -4964,7 +4946,6 @@ dependencies = [ "reqwest", "sensitive_url", "serde", - "serde_derive", "serde_json", "slog", "store", @@ -5304,9 +5285,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg 1.1.0", ] @@ -5421,7 +5402,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -5469,7 +5450,6 @@ dependencies = [ "rand 0.8.5", "rayon", "serde", - "serde_derive", "state_processing", "store", "tokio", @@ -5489,9 +5469,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ "ecdsa 0.16.8", - "elliptic-curve 0.13.5", + "elliptic-curve 0.13.6", "primeorder", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -5635,7 +5615,7 @@ dependencies = [ "digest 0.10.7", "hmac 0.12.1", "password-hash", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -5713,7 +5693,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -5847,7 +5827,7 @@ dependencies = [ "md-5", "memchr", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "stringprep", ] @@ -5892,7 +5872,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -5901,7 +5881,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c2fcef82c0ec6eefcc179b978446c399b3cdf73c392c35604e399eee6df1ee3" dependencies = [ - "elliptic-curve 0.13.5", + "elliptic-curve 0.13.6", ] [[package]] @@ -5919,9 +5899,9 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash 0.8.0", "impl-codec 0.6.0", @@ -5933,12 +5913,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ - "thiserror", - "toml 0.5.11", + "once_cell", + "toml_edit", ] [[package]] @@ -5979,14 +5959,14 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ 
"unicode-ident", ] @@ -6041,7 +6021,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6052,7 +6032,6 @@ dependencies = [ "ethereum_ssz_derive", "safe_arith", "serde", - "serde_derive", "serde_yaml", "superstruct", "types", @@ -6449,14 +6428,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.8", - "regex-syntax 0.7.5", + "regex-automata 0.4.1", + "regex-syntax 0.8.1", ] [[package]] @@ -6470,13 +6449,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.5", + "regex-syntax 0.8.1", ] [[package]] @@ -6487,15 +6466,15 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "56d84fdd47036b038fc80dd333d10b6aab10d5d31f4a366e20014def75328d33" [[package]] name = "reqwest" -version = "0.11.20" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "base64 
0.21.4", "bytes", @@ -6521,6 +6500,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-native-tls", "tokio-rustls", @@ -6708,9 +6688,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.23" +version = "0.37.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" +checksum = "4279d76516df406a8bd37e7dff53fd37d1a093f997a3c34a5c21658c126db06d" dependencies = [ "bitflags 1.3.2", "errno", @@ -6722,14 +6702,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.14" +version = "0.38.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f" +checksum = "5a74ee2d7c2581cd139b42447d7d9389b889bdaad3a73f1ebb16f2a3237bb19c" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.7", + "linux-raw-sys 0.4.10", "windows-sys 0.48.0", ] @@ -6938,9 +6918,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" dependencies = [ "serde", ] @@ -6996,7 +6976,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7028,7 +7008,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7112,9 +7092,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum 
= "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -7145,9 +7125,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] @@ -7255,7 +7235,6 @@ dependencies = [ "rayon", "safe_arith", "serde", - "serde_derive", "slog", "sloggers", "strum", @@ -7295,7 +7274,6 @@ dependencies = [ "rayon", "rusqlite", "serde", - "serde_derive", "serde_json", "tempfile", "types", @@ -7438,7 +7416,7 @@ dependencies = [ "rand_core 0.6.4", "ring", "rustc_version", - "sha2 0.10.7", + "sha2 0.10.8", "subtle", ] @@ -7577,7 +7555,6 @@ dependencies = [ "parking_lot 0.12.1", "safe_arith", "serde", - "serde_derive", "slog", "sloggers", "smallvec", @@ -7676,9 +7653,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.37" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -7746,7 +7723,6 @@ dependencies = [ "lighthouse_network", "parking_lot 0.12.1", "serde", - "serde_derive", "serde_json", "sysinfo", "types", @@ -7800,7 +7776,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.1", "redox_syscall 0.3.5", - "rustix 0.38.14", + "rustix 0.38.18", "windows-sys 0.48.0", ] @@ -7853,7 +7829,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -7867,22 +7843,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -7957,7 +7933,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand 0.8.5", "rustc-hash", - "sha2 0.10.7", + "sha2 0.10.8", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -8000,9 +7976,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ "backtrace", "bytes", @@ -8034,7 +8010,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -8162,7 +8138,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.0.2", "serde", "serde_spanned", "toml_datetime", @@ -8218,7 +8194,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -8427,7 +8403,6 @@ dependencies = [ "rusqlite", "safe_arith", "serde", - "serde_derive", "serde_json", "serde_yaml", "slog", @@ -8608,7 +8583,6 @@ dependencies = [ 
"safe_arith", "sensitive_url", "serde", - "serde_derive", "serde_json", "slashing_protection", "slog", @@ -8727,8 +8701,8 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.5" -source = "git+https://github.com/seanmonstar/warp.git#5ad8a9cb155f6485d13d591a564d8c70053a388a" +version = "0.3.6" +source = "git+https://github.com/seanmonstar/warp.git#efe8548a19172e69918396d0fdbc369df9d0eb17" dependencies = [ "bytes", "futures-channel", @@ -8805,7 +8779,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -8839,7 +8813,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8964,7 +8938,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.14", + "rustix 0.38.18", ] [[package]] @@ -9224,9 +9198,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.15" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = "037711d82167854aff2018dfd193aa0fef5370f456732f0d5a0c59b0f1b4b907" dependencies = [ "memchr", ] @@ -9382,7 +9356,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -9426,11 +9400,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" +version = "2.0.9+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" dependencies = [ "cc", - "libc", "pkg-config", ] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index e047cce5cd..41f609eb86 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -69,7 +69,7 @@ use crate::{ kzg_utils, metrics, AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead, }; -use eth2::types::{EventKind, SseBlock, SseExtendedPayloadAttributes, SyncDuty}; +use eth2::types::{EventKind, SseBlobSidecar, SseBlock, SseExtendedPayloadAttributes, SyncDuty}; use execution_layer::{ BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, @@ -2779,6 +2779,14 @@ impl BeaconChain { return Err(BlockError::BlockIsAlreadyKnown); } + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_blob_sidecar_subscribers() { + event_handler.register(EventKind::BlobSidecar(SseBlobSidecar::from_blob_sidecar( + blob.as_blob(), + ))); + } + } + self.data_availability_checker .notify_gossip_blob(blob.as_blob().slot, block_root, &blob); let r = self.check_gossip_blob_availability_and_import(blob).await; @@ -2803,6 +2811,16 @@ impl BeaconChain { return Err(BlockError::BlockIsAlreadyKnown); } + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_blob_sidecar_subscribers() { + for blob in blobs.iter().filter_map(|maybe_blob| maybe_blob.as_ref()) { + event_handler.register(EventKind::BlobSidecar( + SseBlobSidecar::from_blob_sidecar(blob), + )); + } + } + } + self.data_availability_checker .notify_rpc_blobs(slot, block_root, &blobs); let r = self @@ -3913,6 +3931,9 @@ impl BeaconChain { validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); + let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); + // Part 1/2 (blocking) // // Load the parent state from disk. 
@@ -3947,9 +3968,6 @@ impl BeaconChain { self: &Arc, slot: Slot, ) -> Result<(BeaconState, Option), BlockProductionError> { - metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); - let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); - let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES); self.wait_for_fork_choice_before_block_production(slot)?; drop(fork_choice_timer); @@ -5031,7 +5049,7 @@ impl BeaconChain { kzg_utils::validate_blobs::( kzg, expected_kzg_commitments, - blobs, + blobs.iter().collect(), &kzg_proofs, ) .map_err(BlockProductionError::KzgError)?; diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 23d1601855..0cc586553b 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -7,7 +7,7 @@ use crate::block_verification::cheap_state_advance_to_obtain_committees; use crate::data_availability_checker::AvailabilityCheckError; use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::{metrics, BeaconChainError}; -use kzg::Kzg; +use kzg::{Kzg, KzgCommitment}; use slog::debug; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -178,6 +178,12 @@ impl GossipVerifiedBlob { pub fn slot(&self) -> Slot { self.blob.message.slot } + pub fn index(&self) -> u64 { + self.blob.message.index + } + pub fn kzg_commitment(&self) -> KzgCommitment { + self.blob.message.kzg_commitment + } pub fn proposer_index(&self) -> u64 { self.blob.message.proposer_index } @@ -433,8 +439,7 @@ pub fn verify_kzg_for_blob( kzg: &Kzg, ) -> Result, AvailabilityCheckError> { let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); - //TODO(sean) remove clone - if validate_blob::(kzg, blob.blob.clone(), blob.kzg_commitment, blob.kzg_proof) + if validate_blob::(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof) 
.map_err(AvailabilityCheckError::Kzg)? { Ok(KzgVerifiedBlob { blob }) @@ -455,15 +460,10 @@ pub fn verify_kzg_for_blob_list( let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list .iter() - .map(|blob| (blob.blob.clone(), (blob.kzg_commitment, blob.kzg_proof))) + .map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof))) .unzip(); - if validate_blobs::( - kzg, - commitments.as_slice(), - blobs.as_slice(), - proofs.as_slice(), - ) - .map_err(AvailabilityCheckError::Kzg)? + if validate_blobs::(kzg, commitments.as_slice(), blobs, proofs.as_slice()) + .map_err(AvailabilityCheckError::Kzg)? { Ok(()) } else { diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 7dae9d6cbe..3dfd45b007 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -5,15 +5,10 @@ pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome}; use derivative::Derivative; -use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use state_processing::ConsensusContext; use std::sync::Arc; -use types::blob_sidecar::FixedBlobSidecarList; -use types::{ - blob_sidecar::BlobIdentifier, ssz_tagged_beacon_state, ssz_tagged_signed_beacon_block, - ssz_tagged_signed_beacon_block_arc, -}; +use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; use types::{ BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, @@ -251,9 +246,7 @@ impl AvailableExecutedBlock { /// A block that has completed all pre-deneb block processing checks, verification /// by an EL client but does not have all requisite 
blob data to get imported into /// fork choice. -#[derive(Encode, Decode, Clone)] pub struct AvailabilityPendingExecutedBlock { - #[ssz(with = "ssz_tagged_signed_beacon_block_arc")] pub block: Arc>, pub import_data: BlockImportData, pub payload_verification_outcome: PayloadVerificationOutcome, @@ -285,14 +278,10 @@ impl AvailabilityPendingExecutedBlock { } } -#[derive(Debug, PartialEq, Encode, Decode, Clone)] -// TODO (mark): investigate using an Arc / Arc -// here to make this cheaper to clone +#[derive(Debug, PartialEq)] pub struct BlockImportData { pub block_root: Hash256, - #[ssz(with = "ssz_tagged_beacon_state")] pub state: BeaconState, - #[ssz(with = "ssz_tagged_signed_beacon_block")] pub parent_block: SignedBeaconBlock>, pub parent_eth1_finalization_data: Eth1FinalizationData, pub confirmed_state_roots: Vec, diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index be427ae9f6..e1024da46c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -10,17 +10,14 @@ use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache; use crate::data_availability_checker::processing_cache::ProcessingCache; use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; use kzg::Kzg; -use kzg::{Error as KzgError, KzgCommitment}; use parking_lot::RwLock; pub use processing_cache::ProcessingComponents; use slasher::test_utils::E; use slog::{debug, error, Logger}; use slot_clock::SlotClock; -use ssz_types::Error; use std::fmt; use std::fmt::Debug; use std::sync::Arc; -use strum::IntoStaticStr; use task_executor::TaskExecutor; use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; @@ -29,8 +26,12 @@ use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlo mod availability_view; mod child_components; 
+mod error; mod overflow_lru_cache; mod processing_cache; +mod state_lru_cache; + +pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; /// The LRU Cache stores `PendingComponents` which can store up to /// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So @@ -38,45 +39,8 @@ mod processing_cache; /// to 1024 means the maximum size of the cache is ~ 0.8 GB. But the cache /// will target a size of less than 75% of capacity. pub const OVERFLOW_LRU_CAPACITY: usize = 1024; - -#[derive(Debug, IntoStaticStr)] -pub enum AvailabilityCheckError { - Kzg(KzgError), - KzgNotInitialized, - KzgVerificationFailed, - KzgCommitmentMismatch { - blob_commitment: KzgCommitment, - block_commitment: KzgCommitment, - }, - Unexpected, - SszTypes(ssz_types::Error), - MissingBlobs, - BlobIndexInvalid(u64), - StoreError(store::Error), - DecodeError(ssz::DecodeError), - InconsistentBlobBlockRoots { - block_root: Hash256, - blob_block_root: Hash256, - }, -} - -impl From for AvailabilityCheckError { - fn from(value: Error) -> Self { - Self::SszTypes(value) - } -} - -impl From for AvailabilityCheckError { - fn from(value: store::Error) -> Self { - Self::StoreError(value) - } -} - -impl From for AvailabilityCheckError { - fn from(value: ssz::DecodeError) -> Self { - Self::DecodeError(value) - } -} +/// Until tree-states is implemented, we can't store very many states in memory :( +pub const STATE_LRU_CAPACITY: usize = 2; /// This includes a cache for any blocks or blobs that have been received over gossip or RPC /// and are awaiting more components before they can be imported. 
Additionally the @@ -120,7 +84,7 @@ impl DataAvailabilityChecker { log: &Logger, spec: ChainSpec, ) -> Result { - let overflow_cache = OverflowLRUCache::new(OVERFLOW_LRU_CAPACITY, store)?; + let overflow_cache = OverflowLRUCache::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?; Ok(Self { processing_cache: <_>::default(), availability_cache: Arc::new(overflow_cache), diff --git a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs index eb1f23d48f..f013cf649a 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs @@ -1,9 +1,9 @@ use super::child_components::ChildComponents; +use super::state_lru_cache::DietAvailabilityPendingExecutedBlock; use crate::blob_verification::KzgVerifiedBlob; use crate::block_verification_types::AsBlock; use crate::data_availability_checker::overflow_lru_cache::PendingComponents; use crate::data_availability_checker::ProcessingComponents; -use crate::AvailabilityPendingExecutedBlock; use kzg::KzgCommitment; use ssz_types::FixedVector; use std::sync::Arc; @@ -190,7 +190,7 @@ impl_availability_view!( impl_availability_view!( PendingComponents, - AvailabilityPendingExecutedBlock, + DietAvailabilityPendingExecutedBlock, KzgVerifiedBlob, executed_block, verified_blobs @@ -225,7 +225,7 @@ impl GetCommitment for KzgCommitment { } // These implementations are required to implement `AvailabilityView` for `PendingComponents`. 
-impl GetCommitments for AvailabilityPendingExecutedBlock { +impl GetCommitments for DietAvailabilityPendingExecutedBlock { fn get_commitments(&self) -> KzgCommitments { self.as_block() .message() @@ -235,6 +235,7 @@ impl GetCommitments for AvailabilityPendingExecutedBlock { .unwrap_or_default() } } + impl GetCommitment for KzgVerifiedBlob { fn get_commitment(&self) -> &KzgCommitment { &self.as_blob().kzg_commitment @@ -264,10 +265,9 @@ pub mod tests { use crate::block_verification_types::BlockImportData; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs}; + use crate::AvailabilityPendingExecutedBlock; use crate::PayloadVerificationOutcome; - use eth2_network_config::get_trusted_setup; use fork_choice::PayloadVerificationStatus; - use kzg::{Kzg, TrustedSetup}; use rand::rngs::StdRng; use rand::SeedableRng; use state_processing::ConsensusContext; @@ -283,13 +283,9 @@ pub mod tests { ); pub fn pre_setup() -> Setup { - let trusted_setup: TrustedSetup = - serde_json::from_reader(get_trusted_setup::<::Kzg>()).unwrap(); - let kzg = Kzg::new_from_trusted_setup(trusted_setup).unwrap(); - let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let (block, blobs_vec) = - generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &kzg, &mut rng); + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); let mut blobs: FixedVector<_, ::MaxBlobsPerBlock> = FixedVector::default(); for blob in blobs_vec { @@ -346,7 +342,7 @@ pub mod tests { } type PendingComponentsSetup = ( - AvailabilityPendingExecutedBlock, + DietAvailabilityPendingExecutedBlock, FixedVector>, ::MaxBlobsPerBlock>, FixedVector>, ::MaxBlobsPerBlock>, ); @@ -395,7 +391,7 @@ pub mod tests { is_valid_merge_transition_block: false, }, }; - (block, blobs, invalid_blobs) + (block.into(), blobs, invalid_blobs) } type ChildComponentsSetup = ( diff --git 
a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs new file mode 100644 index 0000000000..5415d1f958 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -0,0 +1,79 @@ +use kzg::{Error as KzgError, KzgCommitment}; +use strum::IntoStaticStr; +use types::{BeaconStateError, Hash256}; + +#[derive(Debug, IntoStaticStr)] +pub enum Error { + Kzg(KzgError), + KzgNotInitialized, + KzgVerificationFailed, + KzgCommitmentMismatch { + blob_commitment: KzgCommitment, + block_commitment: KzgCommitment, + }, + Unexpected, + SszTypes(ssz_types::Error), + MissingBlobs, + BlobIndexInvalid(u64), + StoreError(store::Error), + DecodeError(ssz::DecodeError), + InconsistentBlobBlockRoots { + block_root: Hash256, + blob_block_root: Hash256, + }, + ParentStateMissing(Hash256), + BlockReplayError(state_processing::BlockReplayError), + RebuildingStateCaches(BeaconStateError), +} + +pub enum ErrorCategory { + /// Internal Errors (not caused by peers) + Internal, + /// Errors caused by faulty / malicious peers + Malicious, +} + +impl Error { + pub fn category(&self) -> ErrorCategory { + match self { + Error::KzgNotInitialized + | Error::SszTypes(_) + | Error::MissingBlobs + | Error::StoreError(_) + | Error::DecodeError(_) + | Error::Unexpected + | Error::ParentStateMissing(_) + | Error::BlockReplayError(_) + | Error::RebuildingStateCaches(_) => ErrorCategory::Internal, + Error::Kzg(_) + | Error::BlobIndexInvalid(_) + | Error::KzgCommitmentMismatch { .. } + | Error::KzgVerificationFailed + | Error::InconsistentBlobBlockRoots { .. 
} => ErrorCategory::Malicious, + } + } +} + +impl From for Error { + fn from(value: ssz_types::Error) -> Self { + Self::SszTypes(value) + } +} + +impl From for Error { + fn from(value: store::Error) -> Self { + Self::StoreError(value) + } +} + +impl From for Error { + fn from(value: ssz::DecodeError) -> Self { + Self::DecodeError(value) + } +} + +impl From for Error { + fn from(value: state_processing::BlockReplayError) -> Self { + Self::BlockReplayError(value) + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index d7a5130df1..691253e600 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -27,10 +27,11 @@ //! On startup, the keys of these components are stored in memory and will be loaded in //! the cache when they are accessed. +use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache}; use crate::beacon_chain::BeaconStore; use crate::blob_verification::KzgVerifiedBlob; use crate::block_verification_types::{ - AsBlock, AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock, + AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock, }; use crate::data_availability_checker::availability_view::AvailabilityView; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; @@ -43,7 +44,7 @@ use ssz_derive::{Decode, Encode}; use ssz_types::{FixedVector, VariableList}; use std::{collections::HashSet, sync::Arc}; use types::blob_sidecar::BlobIdentifier; -use types::{BlobSidecar, Epoch, EthSpec, Hash256}; +use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256}; /// This represents the components of a partially available block /// @@ -53,7 +54,7 @@ use types::{BlobSidecar, Epoch, EthSpec, Hash256}; pub struct PendingComponents { pub block_root: 
Hash256, pub verified_blobs: FixedVector>, T::MaxBlobsPerBlock>, - pub executed_block: Option>, + pub executed_block: Option>, } impl PendingComponents { @@ -68,17 +69,25 @@ impl PendingComponents { /// Verifies an `SignedBeaconBlock` against a set of KZG verified blobs. /// This does not check whether a block *should* have blobs, these checks should have been /// completed when producing the `AvailabilityPendingBlock`. - pub fn make_available(self) -> Result, AvailabilityCheckError> { + /// + /// WARNING: This function can potentially take a lot of time if the state needs to be + /// reconstructed from disk. Ensure you are not holding any write locks while calling this. + pub fn make_available(self, recover: R) -> Result, AvailabilityCheckError> + where + R: FnOnce( + DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError>, + { let Self { block_root, verified_blobs, executed_block, } = self; - let Some(executed_block) = executed_block else { + let Some(diet_executed_block) = executed_block else { return Err(AvailabilityCheckError::Unexpected); }; - let num_blobs_expected = executed_block.num_blobs_expected(); + let num_blobs_expected = diet_executed_block.num_blobs_expected(); let Some(verified_blobs) = verified_blobs .into_iter() .cloned() @@ -90,6 +99,8 @@ impl PendingComponents { }; let verified_blobs = VariableList::new(verified_blobs)?; + let executed_block = recover(diet_executed_block)?; + let AvailabilityPendingExecutedBlock { block, import_data, @@ -109,7 +120,7 @@ impl PendingComponents { pub fn epoch(&self) -> Option { self.executed_block .as_ref() - .map(|pending_block| pending_block.block.epoch()) + .map(|pending_block| pending_block.as_block().epoch()) .or_else(|| { for maybe_blob in self.verified_blobs.iter() { if maybe_blob.is_some() { @@ -208,9 +219,10 @@ impl OverflowStore { OverflowKey::Block(_) => { maybe_pending_components .get_or_insert_with(|| PendingComponents::empty(block_root)) - .executed_block = 
Some(AvailabilityPendingExecutedBlock::from_ssz_bytes( - value_bytes.as_slice(), - )?); + .executed_block = + Some(DietAvailabilityPendingExecutedBlock::from_ssz_bytes( + value_bytes.as_slice(), + )?); } OverflowKey::Blob(_, index) => { *maybe_pending_components @@ -356,6 +368,9 @@ pub struct OverflowLRUCache { critical: RwLock>, /// This is how we read and write components to the disk overflow_store: OverflowStore, + /// This cache holds a limited number of states in memory and reconstructs them + /// from disk when necessary. This is necessary until we merge tree-states + state_cache: StateLRUCache, /// Mutex to guard maintenance methods which move data between disk and memory maintenance_lock: Mutex<()>, /// The capacity of the LRU cache @@ -366,13 +381,15 @@ impl OverflowLRUCache { pub fn new( capacity: usize, beacon_store: BeaconStore, + spec: ChainSpec, ) -> Result { - let overflow_store = OverflowStore(beacon_store); + let overflow_store = OverflowStore(beacon_store.clone()); let mut critical = Critical::new(capacity); critical.reload_store_keys(&overflow_store)?; Ok(Self { critical: RwLock::new(critical), overflow_store, + state_cache: StateLRUCache::new(beacon_store, spec), maintenance_lock: Mutex::new(()), capacity, }) @@ -426,7 +443,11 @@ impl OverflowLRUCache { pending_components.merge_blobs(fixed_blobs); if pending_components.is_available() { - pending_components.make_available() + // No need to hold the write lock anymore + drop(write_lock); + pending_components.make_available(|diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) } else { write_lock.put_pending_components( block_root, @@ -446,17 +467,26 @@ impl OverflowLRUCache { let mut write_lock = self.critical.write(); let block_root = executed_block.import_data.block_root; + // register the block to get the diet block + let diet_executed_block = self + .state_cache + .register_pending_executed_block(executed_block); + // Grab existing entry or create a new entry. 
let mut pending_components = write_lock .pop_pending_components(block_root, &self.overflow_store)? .unwrap_or_else(|| PendingComponents::empty(block_root)); // Merge in the block. - pending_components.merge_block(executed_block); + pending_components.merge_block(diet_executed_block); // Check if we have all components and entire set is consistent. if pending_components.is_available() { - pending_components.make_available() + // No need to hold the write lock anymore + drop(write_lock); + pending_components.make_available(|diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) } else { write_lock.put_pending_components( block_root, @@ -493,6 +523,8 @@ impl OverflowLRUCache { self.maintain_threshold(threshold, cutoff_epoch)?; // clean up any keys on the disk that shouldn't be there self.prune_disk(cutoff_epoch)?; + // clean up any lingering states in the state cache + self.state_cache.do_maintenance(cutoff_epoch); Ok(()) } @@ -612,10 +644,10 @@ impl OverflowLRUCache { delete_if_outdated(self, current_block_data)?; let current_epoch = match &overflow_key { OverflowKey::Block(_) => { - AvailabilityPendingExecutedBlock::::from_ssz_bytes( + DietAvailabilityPendingExecutedBlock::::from_ssz_bytes( value_bytes.as_slice(), )? 
- .block + .as_block() .epoch() } OverflowKey::Blob(_, _) => { @@ -639,6 +671,12 @@ impl OverflowLRUCache { drop(maintenance_lock); Ok(()) } + + #[cfg(test)] + /// get the state cache for inspection (used only for tests) + pub fn state_lru_cache(&self) -> &StateLRUCache { + &self.state_cache + } } impl ssz::Encode for OverflowKey { @@ -711,11 +749,11 @@ mod test { validate_blob_sidecar_for_gossip, verify_kzg_for_blob, GossipVerifiedBlob, }, block_verification::PayloadVerificationOutcome, - block_verification_types::BlockImportData, + block_verification_types::{AsBlock, BlockImportData}, + data_availability_checker::STATE_LRU_CAPACITY, eth1_finalization_cache::Eth1FinalizationData, test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, }; - use execution_layer::test_utils::DEFAULT_TERMINAL_BLOCK; use fork_choice::PayloadVerificationStatus; use logging::test_logger; use slog::{info, Logger}; @@ -724,7 +762,6 @@ mod test { use std::ops::AddAssign; use store::{HotColdDB, ItemStore, LevelDB, StoreConfig}; use tempfile::{tempdir, TempDir}; - use types::beacon_state::ssz_tagged_beacon_state; use types::{ChainSpec, ExecPayload, MinimalEthSpec}; const LOW_VALIDATOR_COUNT: usize = 32; @@ -754,7 +791,7 @@ mod test { async fn get_deneb_chain( log: Logger, db_path: &TempDir, - ) -> BeaconChainHarness, LevelDB>> { + ) -> BeaconChainHarness> { let altair_fork_epoch = Epoch::new(1); let bellatrix_fork_epoch = Epoch::new(2); let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); @@ -837,91 +874,8 @@ mod test { } } - #[tokio::test] - async fn ssz_tagged_beacon_state_encode_decode_equality() { - type E = MinimalEthSpec; - let altair_fork_epoch = Epoch::new(1); - let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); - let bellatrix_fork_epoch = Epoch::new(2); - let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); - let capella_fork_epoch = Epoch::new(3); - let capella_fork_slot = 
capella_fork_epoch.start_slot(E::slots_per_epoch()); - let deneb_fork_epoch = Epoch::new(4); - let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); - - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - spec.capella_fork_epoch = Some(capella_fork_epoch); - spec.deneb_fork_epoch = Some(deneb_fork_epoch); - let genesis_block = execution_layer::test_utils::generate_genesis_block( - spec.terminal_total_difficulty, - DEFAULT_TERMINAL_BLOCK, - ) - .unwrap(); - spec.terminal_block_hash = genesis_block.block_hash; - spec.terminal_block_hash_activation_epoch = bellatrix_fork_epoch; - - let harness = BeaconChainHarness::builder(E::default()) - .spec(spec) - .logger(logging::test_logger()) - .deterministic_keypairs(LOW_VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); - - let mut state = harness.get_current_state(); - assert!(state.as_base().is_ok()); - let encoded = ssz_tagged_beacon_state::encode::as_ssz_bytes(&state); - let decoded = - ssz_tagged_beacon_state::decode::from_ssz_bytes(&encoded).expect("should decode"); - state.drop_all_caches().expect("should drop caches"); - assert_eq!(state, decoded, "Encoded and decoded states should be equal"); - - harness.extend_to_slot(altair_fork_slot).await; - - let mut state = harness.get_current_state(); - assert!(state.as_altair().is_ok()); - let encoded = ssz_tagged_beacon_state::encode::as_ssz_bytes(&state); - let decoded = - ssz_tagged_beacon_state::decode::from_ssz_bytes(&encoded).expect("should decode"); - state.drop_all_caches().expect("should drop caches"); - assert_eq!(state, decoded, "Encoded and decoded states should be equal"); - - harness.extend_to_slot(merge_fork_slot).await; - - let mut state = harness.get_current_state(); - assert!(state.as_merge().is_ok()); - let encoded = ssz_tagged_beacon_state::encode::as_ssz_bytes(&state); - let decoded = - 
ssz_tagged_beacon_state::decode::from_ssz_bytes(&encoded).expect("should decode"); - state.drop_all_caches().expect("should drop caches"); - assert_eq!(state, decoded, "Encoded and decoded states should be equal"); - - harness.extend_to_slot(capella_fork_slot).await; - - let mut state = harness.get_current_state(); - assert!(state.as_capella().is_ok()); - let encoded = ssz_tagged_beacon_state::encode::as_ssz_bytes(&state); - let decoded = - ssz_tagged_beacon_state::decode::from_ssz_bytes(&encoded).expect("should decode"); - state.drop_all_caches().expect("should drop caches"); - assert_eq!(state, decoded, "Encoded and decoded states should be equal"); - - harness.extend_to_slot(deneb_fork_slot).await; - - let mut state = harness.get_current_state(); - assert!(state.as_deneb().is_ok()); - let encoded = ssz_tagged_beacon_state::encode::as_ssz_bytes(&state); - let decoded = - ssz_tagged_beacon_state::decode::from_ssz_bytes(&encoded).expect("should decode"); - state.drop_all_caches().expect("should drop caches"); - assert_eq!(state, decoded, "Encoded and decoded states should be equal"); - } - async fn availability_pending_block( harness: &BeaconChainHarness>, - log: Logger, ) -> ( AvailabilityPendingExecutedBlock, Vec>>, @@ -932,6 +886,7 @@ mod test { Cold: ItemStore, { let chain = &harness.chain; + let log = chain.log.clone(); let head = chain.head_snapshot(); let parent_state = head.beacon_state.clone(); @@ -1010,22 +965,36 @@ mod test { (availability_pending_block, gossip_verified_blobs) } + async fn setup_harness_and_cache( + capacity: usize, + ) -> ( + BeaconChainHarness>, + Arc>, + ) + where + E: EthSpec, + T: BeaconChainTypes, ColdStore = LevelDB, EthSpec = E>, + { + let log = test_logger(); + let chain_db_path = tempdir().expect("should get temp dir"); + let harness = get_deneb_chain(log.clone(), &chain_db_path).await; + let spec = harness.spec.clone(); + let test_store = harness.chain.store.clone(); + let cache = Arc::new( + OverflowLRUCache::::new(capacity, 
test_store, spec.clone()) + .expect("should create cache"), + ); + (harness, cache) + } + #[tokio::test] async fn overflow_cache_test_insert_components() { type E = MinimalEthSpec; type T = DiskHarnessType; - let log = test_logger(); - let chain_db_path = tempdir().expect("should get temp dir"); - let harness: BeaconChainHarness = get_deneb_chain(log.clone(), &chain_db_path).await; - let spec = harness.spec.clone(); let capacity = 4; - let db_path = tempdir().expect("should get temp dir"); - let test_store = get_store_with_spec::(&db_path, spec.clone(), log.clone()); - let cache = Arc::new( - OverflowLRUCache::::new(capacity, test_store).expect("should create cache"), - ); + let (harness, cache) = setup_harness_and_cache::(capacity).await; - let (pending_block, blobs) = availability_pending_block(&harness, log.clone()).await; + let (pending_block, blobs) = availability_pending_block(&harness).await; let root = pending_block.import_data.block_root; let blobs_expected = pending_block.num_blobs_expected(); @@ -1093,7 +1062,7 @@ mod test { "cache should be empty now that all components available" ); - let (pending_block, blobs) = availability_pending_block(&harness, log.clone()).await; + let (pending_block, blobs) = availability_pending_block(&harness).await; let blobs_expected = pending_block.num_blobs_expected(); assert_eq!( blobs.len(), @@ -1134,22 +1103,14 @@ mod test { async fn overflow_cache_test_overflow() { type E = MinimalEthSpec; type T = DiskHarnessType; - let log = test_logger(); - let chain_db_path = tempdir().expect("should get temp dir"); - let harness: BeaconChainHarness = get_deneb_chain(log.clone(), &chain_db_path).await; - let spec = harness.spec.clone(); let capacity = 4; - let db_path = tempdir().expect("should get temp dir"); - let test_store = get_store_with_spec::(&db_path, spec.clone(), log.clone()); - let cache = Arc::new( - OverflowLRUCache::::new(capacity, test_store).expect("should create cache"), - ); + let (harness, cache) = 
setup_harness_and_cache::(capacity).await; let mut pending_blocks = VecDeque::new(); let mut pending_blobs = VecDeque::new(); let mut roots = VecDeque::new(); while pending_blobs.len() < capacity + 1 { - let (pending_block, blobs) = availability_pending_block(&harness, log.clone()).await; + let (pending_block, blobs) = availability_pending_block(&harness).await; if pending_block.num_blobs_expected() == 0 { // we need blocks with blobs continue; @@ -1293,29 +1254,19 @@ mod test { async fn overflow_cache_test_maintenance() { type E = MinimalEthSpec; type T = DiskHarnessType; - let log = test_logger(); - let chain_db_path = tempdir().expect("should get temp dir"); - let harness: BeaconChainHarness = get_deneb_chain(log.clone(), &chain_db_path).await; - let spec = harness.spec.clone(); - let n_epochs = 4; let capacity = E::slots_per_epoch() as usize; - let db_path = tempdir().expect("should get temp dir"); - let test_store = get_store_with_spec::(&db_path, spec.clone(), log.clone()); - let cache = Arc::new( - OverflowLRUCache::::new(capacity, test_store).expect("should create cache"), - ); + let (harness, cache) = setup_harness_and_cache::(capacity).await; + let n_epochs = 4; let mut pending_blocks = VecDeque::new(); let mut pending_blobs = VecDeque::new(); - let mut roots = VecDeque::new(); let mut epoch_count = BTreeMap::new(); while pending_blobs.len() < n_epochs * capacity { - let (pending_block, blobs) = availability_pending_block(&harness, log.clone()).await; + let (pending_block, blobs) = availability_pending_block(&harness).await; if pending_block.num_blobs_expected() == 0 { // we need blocks with blobs continue; } - let root = pending_block.block.canonical_root(); let epoch = pending_block .block .as_block() @@ -1325,7 +1276,6 @@ mod test { pending_blocks.push_back(pending_block); pending_blobs.push_back(blobs); - roots.push_back(root); } let kzg = harness @@ -1424,7 +1374,7 @@ mod test { let mem_keys = cache.critical.read().in_memory.len(); expected_length -= 
count; info!( - log, + harness.chain.log, "EPOCH: {} DISK KEYS: {} MEM KEYS: {} TOTAL: {} EXPECTED: {}", epoch, disk_keys, @@ -1444,29 +1394,19 @@ mod test { async fn overflow_cache_test_persist_recover() { type E = MinimalEthSpec; type T = DiskHarnessType; - let log = test_logger(); - let chain_db_path = tempdir().expect("should get temp dir"); - let harness: BeaconChainHarness = get_deneb_chain(log.clone(), &chain_db_path).await; - let spec = harness.spec.clone(); - let n_epochs = 4; let capacity = E::slots_per_epoch() as usize; - let db_path = tempdir().expect("should get temp dir"); - let test_store = get_store_with_spec::(&db_path, spec.clone(), log.clone()); - let cache = Arc::new( - OverflowLRUCache::::new(capacity, test_store.clone()).expect("should create cache"), - ); + let (harness, cache) = setup_harness_and_cache::(capacity).await; + let n_epochs = 4; let mut pending_blocks = VecDeque::new(); let mut pending_blobs = VecDeque::new(); - let mut roots = VecDeque::new(); let mut epoch_count = BTreeMap::new(); while pending_blobs.len() < n_epochs * capacity { - let (pending_block, blobs) = availability_pending_block(&harness, log.clone()).await; + let (pending_block, blobs) = availability_pending_block(&harness).await; if pending_block.num_blobs_expected() == 0 { // we need blocks with blobs continue; } - let root = pending_block.block.as_block().canonical_root(); let epoch = pending_block .block .as_block() @@ -1476,7 +1416,6 @@ mod test { pending_blocks.push_back(pending_block); pending_blobs.push_back(blobs); - roots.push_back(root); } let kzg = harness @@ -1580,8 +1519,12 @@ mod test { drop(cache); // create a new cache with the same store - let recovered_cache = - OverflowLRUCache::::new(capacity, test_store).expect("should recover cache"); + let recovered_cache = OverflowLRUCache::::new( + capacity, + harness.chain.store.clone(), + harness.chain.spec.clone(), + ) + .expect("should recover cache"); // again, everything should be on disk assert_eq!( 
recovered_cache @@ -1622,4 +1565,133 @@ mod test { } } } + + #[tokio::test] + // ensure the state cache keeps memory usage low and that it can properly recover states + // THIS TEST CAN BE DELETED ONCE TREE STATES IS MERGED AND WE RIP OUT THE STATE CACHE + async fn overflow_cache_test_state_cache() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = STATE_LRU_CAPACITY * 2; + let (harness, cache) = setup_harness_and_cache::(capacity).await; + + let mut pending_blocks = VecDeque::new(); + let mut states = Vec::new(); + let mut state_roots = Vec::new(); + // Get enough blocks to fill the cache to capacity, ensuring all blocks have blobs + while pending_blocks.len() < capacity { + let (pending_block, _) = availability_pending_block(&harness).await; + if pending_block.num_blobs_expected() == 0 { + // we need blocks with blobs + continue; + } + let state_root = pending_block.import_data.state.canonical_root(); + states.push(pending_block.import_data.state.clone()); + pending_blocks.push_back(pending_block); + state_roots.push(state_root); + } + + let state_cache = cache.state_lru_cache().lru_cache(); + let mut pushed_diet_blocks = VecDeque::new(); + + for i in 0..capacity { + let pending_block = pending_blocks.pop_front().expect("should have block"); + let block_root = pending_block.as_block().canonical_root(); + + assert_eq!( + state_cache.read().len(), + std::cmp::min(i, STATE_LRU_CAPACITY), + "state cache should be empty at start" + ); + + if i >= STATE_LRU_CAPACITY { + let lru_root = state_roots[i - STATE_LRU_CAPACITY]; + assert_eq!( + state_cache.read().peek_lru().map(|(root, _)| root), + Some(&lru_root), + "lru block should be in cache" + ); + } + + // put the block in the cache + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + + // grab the diet block from the cache for later testing + let diet_block = cache + .critical + .read() + .in_memory + .peek(&block_root) + 
.map(|pending_components| { + pending_components + .executed_block + .clone() + .expect("should exist") + }) + .expect("should exist"); + pushed_diet_blocks.push_back(diet_block); + + // should be unavailable since we made sure all blocks had blobs + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should be pending blobs" + ); + + if i >= STATE_LRU_CAPACITY { + let evicted_index = i - STATE_LRU_CAPACITY; + let evicted_root = state_roots[evicted_index]; + assert!( + state_cache.read().peek(&evicted_root).is_none(), + "lru root should be evicted" + ); + // get the diet block via direct conversion (testing only) + let diet_block = pushed_diet_blocks.pop_front().expect("should have block"); + // reconstruct the pending block by replaying the block on the parent state + let recovered_pending_block = cache + .state_lru_cache() + .reconstruct_pending_executed_block(diet_block) + .expect("should reconstruct pending block"); + + // assert the recovered state is the same as the original + assert_eq!( + recovered_pending_block.import_data.state, states[evicted_index], + "recovered state should be the same as the original" + ); + } + } + + // now check the last block + let last_block = pushed_diet_blocks.pop_back().expect("should exist").clone(); + // the state should still be in the cache + assert!( + state_cache + .read() + .peek(&last_block.as_block().state_root()) + .is_some(), + "last block state should still be in cache" + ); + // get the diet block via direct conversion (testing only) + let diet_block = last_block.clone(); + // recover the pending block from the cache + let recovered_pending_block = cache + .state_lru_cache() + .recover_pending_executed_block(diet_block) + .expect("should reconstruct pending block"); + // assert the recovered state is the same as the original + assert_eq!( + Some(&recovered_pending_block.import_data.state), + states.last(), + "recovered state should be the same as the original" + ); + // the state should no 
longer be in the cache + assert!( + state_cache + .read() + .peek(&last_block.as_block().state_root()) + .is_none(), + "last block state should no longer be in cache" + ); + } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs new file mode 100644 index 0000000000..d3348b67fb --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -0,0 +1,230 @@ +use crate::block_verification_types::AsBlock; +use crate::{ + block_verification_types::BlockImportData, + data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY}, + eth1_finalization_cache::Eth1FinalizationData, + AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, +}; +use lru::LruCache; +use parking_lot::RwLock; +use ssz_derive::{Decode, Encode}; +use state_processing::{BlockReplayer, ConsensusContext, StateProcessingStrategy}; +use std::sync::Arc; +use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc}; +use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; + +/// This mirrors everything in the `AvailabilityPendingExecutedBlock`, except +/// that it is much smaller because it contains only a state root instead of +/// a full `BeaconState`. 
+#[derive(Encode, Decode, Clone)] +pub struct DietAvailabilityPendingExecutedBlock { + #[ssz(with = "ssz_tagged_signed_beacon_block_arc")] + block: Arc>, + state_root: Hash256, + #[ssz(with = "ssz_tagged_signed_beacon_block")] + parent_block: SignedBeaconBlock>, + parent_eth1_finalization_data: Eth1FinalizationData, + confirmed_state_roots: Vec, + consensus_context: ConsensusContext, + payload_verification_outcome: PayloadVerificationOutcome, +} + +/// just implementing the same methods as `AvailabilityPendingExecutedBlock` +impl DietAvailabilityPendingExecutedBlock { + pub fn as_block(&self) -> &SignedBeaconBlock { + &self.block + } + + pub fn num_blobs_expected(&self) -> usize { + self.block + .message() + .body() + .blob_kzg_commitments() + .map_or(0, |commitments| commitments.len()) + } +} + +/// This LRU cache holds BeaconStates used for block import. If the cache overflows, +/// the least recently used state will be dropped. If the dropped state is needed +/// later on, it will be recovered from the parent state and replaying the block. +/// +/// WARNING: This cache assumes the parent block of any `AvailabilityPendingExecutedBlock` +/// has already been imported into ForkChoice. If this is not the case, the cache +/// will fail to recover the state when the cache overflows because it can't load +/// the parent state! +pub struct StateLRUCache { + states: RwLock>>, + store: BeaconStore, + spec: ChainSpec, +} + +impl StateLRUCache { + pub fn new(store: BeaconStore, spec: ChainSpec) -> Self { + Self { + states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY)), + store, + spec, + } + } + + /// This will store the state in the LRU cache and return a + /// `DietAvailabilityPendingExecutedBlock` which is much cheaper to + /// keep around in memory. 
+ pub fn register_pending_executed_block( + &self, + executed_block: AvailabilityPendingExecutedBlock, + ) -> DietAvailabilityPendingExecutedBlock { + let state = executed_block.import_data.state; + let state_root = executed_block.block.state_root(); + self.states.write().put(state_root, state); + + DietAvailabilityPendingExecutedBlock { + block: executed_block.block, + state_root, + parent_block: executed_block.import_data.parent_block, + parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, + confirmed_state_roots: executed_block.import_data.confirmed_state_roots, + consensus_context: executed_block.import_data.consensus_context, + payload_verification_outcome: executed_block.payload_verification_outcome, + } + } + + /// Recover the `AvailabilityPendingExecutedBlock` from the diet version. + /// This method will first check the cache and if the state is not found + /// it will reconstruct the state by loading the parent state from disk and + /// replaying the block. 
+ pub fn recover_pending_executed_block( + &self, + diet_executed_block: DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let maybe_state = self.states.write().pop(&diet_executed_block.state_root); + if let Some(state) = maybe_state { + let block_root = diet_executed_block.block.canonical_root(); + Ok(AvailabilityPendingExecutedBlock { + block: diet_executed_block.block, + import_data: BlockImportData { + block_root, + state, + parent_block: diet_executed_block.parent_block, + parent_eth1_finalization_data: diet_executed_block + .parent_eth1_finalization_data, + confirmed_state_roots: diet_executed_block.confirmed_state_roots, + consensus_context: diet_executed_block.consensus_context, + }, + payload_verification_outcome: diet_executed_block.payload_verification_outcome, + }) + } else { + self.reconstruct_pending_executed_block(diet_executed_block) + } + } + + /// Reconstruct the `AvailabilityPendingExecutedBlock` by loading the parent + /// state from disk and replaying the block. This function does NOT check the + /// LRU cache. + pub fn reconstruct_pending_executed_block( + &self, + diet_executed_block: DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let block_root = diet_executed_block.block.canonical_root(); + let state = self.reconstruct_state(&diet_executed_block)?; + Ok(AvailabilityPendingExecutedBlock { + block: diet_executed_block.block, + import_data: BlockImportData { + block_root, + state, + parent_block: diet_executed_block.parent_block, + parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data, + confirmed_state_roots: diet_executed_block.confirmed_state_roots, + consensus_context: diet_executed_block.consensus_context, + }, + payload_verification_outcome: diet_executed_block.payload_verification_outcome, + }) + } + + /// Reconstruct the state by loading the parent state from disk and replaying + /// the block. 
+ fn reconstruct_state( + &self, + diet_executed_block: &DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let parent_block_root = diet_executed_block.parent_block.canonical_root(); + let parent_block_state_root = diet_executed_block.parent_block.state_root(); + let (parent_state_root, parent_state) = self + .store + .get_advanced_hot_state( + parent_block_root, + diet_executed_block.parent_block.slot(), + parent_block_state_root, + ) + .map_err(AvailabilityCheckError::StoreError)? + .ok_or(AvailabilityCheckError::ParentStateMissing( + parent_block_state_root, + ))?; + + let state_roots = vec![ + Ok((parent_state_root, diet_executed_block.parent_block.slot())), + Ok(( + diet_executed_block.state_root, + diet_executed_block.block.slot(), + )), + ]; + + let block_replayer: BlockReplayer<'_, T::EthSpec, AvailabilityCheckError, _> = + BlockReplayer::new(parent_state, &self.spec) + .no_signature_verification() + .state_processing_strategy(StateProcessingStrategy::Accurate) + .state_root_iter(state_roots.into_iter()) + .minimal_block_root_verification(); + + block_replayer + .apply_blocks(vec![diet_executed_block.block.clone_as_blinded()], None) + .map(|block_replayer| block_replayer.into_state()) + .and_then(|mut state| { + state + .build_exit_cache(&self.spec) + .map_err(AvailabilityCheckError::RebuildingStateCaches)?; + state + .update_tree_hash_cache() + .map_err(AvailabilityCheckError::RebuildingStateCaches)?; + Ok(state) + }) + } + + /// returns the state cache for inspection in tests + #[cfg(test)] + pub fn lru_cache(&self) -> &RwLock>> { + &self.states + } + + /// remove any states from the cache from before the given epoch + pub fn do_maintenance(&self, cutoff_epoch: Epoch) { + let mut write_lock = self.states.write(); + while let Some((_, state)) = write_lock.peek_lru() { + if state.slot().epoch(T::EthSpec::slots_per_epoch()) < cutoff_epoch { + write_lock.pop_lru(); + } else { + break; + } + } + } +} + +/// This can only be used 
during testing. The intended way to +/// obtain a `DietAvailabilityPendingExecutedBlock` is to call +/// `register_pending_executed_block` on the `StateLRUCache`. +#[cfg(test)] +impl From> + for DietAvailabilityPendingExecutedBlock +{ + fn from(value: AvailabilityPendingExecutedBlock) -> Self { + Self { + block: value.block, + state_root: value.import_data.state.canonical_root(), + parent_block: value.import_data.parent_block, + parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, + confirmed_state_roots: value.import_data.confirmed_state_roots, + consensus_context: value.import_data.consensus_context, + payload_verification_outcome: value.payload_verification_outcome, + } + } +} diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index b267cc853f..8d3a682794 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -9,6 +9,7 @@ const DEFAULT_CHANNEL_CAPACITY: usize = 16; pub struct ServerSentEventHandler { attestation_tx: Sender>, block_tx: Sender>, + blob_sidecar_tx: Sender>, finalized_tx: Sender>, head_tx: Sender>, exit_tx: Sender>, @@ -31,6 +32,7 @@ impl ServerSentEventHandler { pub fn new_with_capacity(log: Logger, capacity: usize) -> Self { let (attestation_tx, _) = broadcast::channel(capacity); let (block_tx, _) = broadcast::channel(capacity); + let (blob_sidecar_tx, _) = broadcast::channel(capacity); let (finalized_tx, _) = broadcast::channel(capacity); let (head_tx, _) = broadcast::channel(capacity); let (exit_tx, _) = broadcast::channel(capacity); @@ -43,6 +45,7 @@ impl ServerSentEventHandler { Self { attestation_tx, block_tx, + blob_sidecar_tx, finalized_tx, head_tx, exit_tx, @@ -73,6 +76,10 @@ impl ServerSentEventHandler { .block_tx .send(kind) .map(|count| log_count("block", count)), + EventKind::BlobSidecar(_) => self + .blob_sidecar_tx + .send(kind) + .map(|count| log_count("blob sidecar", count)), EventKind::FinalizedCheckpoint(_) => self 
.finalized_tx .send(kind) @@ -119,6 +126,10 @@ impl ServerSentEventHandler { self.block_tx.subscribe() } + pub fn subscribe_blob_sidecar(&self) -> Receiver> { + self.blob_sidecar_tx.subscribe() + } + pub fn subscribe_finalized(&self) -> Receiver> { self.finalized_tx.subscribe() } @@ -159,6 +170,10 @@ impl ServerSentEventHandler { self.block_tx.receiver_count() > 0 } + pub fn has_blob_sidecar_subscribers(&self) -> bool { + self.blob_sidecar_tx.receiver_count() > 0 + } + pub fn has_finalized_subscribers(&self) -> bool { self.finalized_tx.receiver_count() > 0 } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 144e213675..b4c70befa0 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -12,12 +12,12 @@ fn ssz_blob_to_crypto_blob( /// Validate a single blob-commitment-proof triplet from a `BlobSidecar`. pub fn validate_blob( kzg: &Kzg, - blob: Blob, + blob: &Blob, kzg_commitment: KzgCommitment, kzg_proof: KzgProof, ) -> Result { kzg.verify_blob_kzg_proof( - &ssz_blob_to_crypto_blob::(&blob)?, + &ssz_blob_to_crypto_blob::(blob)?, kzg_commitment, kzg_proof, ) @@ -27,12 +27,12 @@ pub fn validate_blob( pub fn validate_blobs( kzg: &Kzg, expected_kzg_commitments: &[KzgCommitment], - blobs: &[Blob], + blobs: Vec<&Blob>, kzg_proofs: &[KzgProof], ) -> Result { let blobs = blobs - .iter() - .map(|blob| ssz_blob_to_crypto_blob::(blob)) // Avoid this clone + .into_iter() + .map(|blob| ssz_blob_to_crypto_blob::(blob)) .collect::, KzgError>>()?; kzg.verify_blob_kzg_proof_batch(&blobs, expected_kzg_commitments, kzg_proofs) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d99329c703..1a2b4fa4b7 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2524,7 +2524,6 @@ pub enum NumBlobs { pub fn generate_rand_block_and_blobs( fork_name: ForkName, num_blobs: NumBlobs, - 
kzg: &Kzg, rng: &mut impl Rng, ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); @@ -2538,8 +2537,7 @@ pub fn generate_rand_block_and_blobs( NumBlobs::None => 0, }; let (bundle, transactions) = - execution_layer::test_utils::generate_random_blobs::(num_blobs, kzg, rng) - .unwrap(); + execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); payload.execution_payload.transactions = <_>::default(); for tx in Vec::from(transactions) { diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs new file mode 100644 index 0000000000..c48cf310a2 --- /dev/null +++ b/beacon_node/beacon_chain/tests/events.rs @@ -0,0 +1,99 @@ +use beacon_chain::blob_verification::GossipVerifiedBlob; +use beacon_chain::test_utils::BeaconChainHarness; +use bls::Signature; +use eth2::types::{EventKind, SseBlobSidecar}; +use rand::rngs::StdRng; +use rand::SeedableRng; +use std::marker::PhantomData; +use std::sync::Arc; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec, SignedBlobSidecar}; + +type E = MinimalEthSpec; + +/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. 
+#[tokio::test] +async fn blob_sidecar_event_on_process_gossip_blob() { + let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // subscribe to blob sidecar events + let event_handler = harness.chain.event_handler.as_ref().unwrap(); + let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); + + // build and process a gossip verified blob + let kzg = harness.chain.kzg.as_ref().unwrap(); + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let signed_sidecar = SignedBlobSidecar { + message: BlobSidecar::random_valid(&mut rng, kzg) + .map(Arc::new) + .unwrap(), + signature: Signature::empty(), + _phantom: PhantomData, + }; + let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(signed_sidecar); + let expected_sse_blobs = SseBlobSidecar::from_blob_sidecar(gossip_verified_blob.as_blob()); + + let _ = harness + .chain + .process_gossip_blob(gossip_verified_blob) + .await + .unwrap(); + + let sidecar_event = blob_event_receiver.try_recv().unwrap(); + assert_eq!(sidecar_event, EventKind::BlobSidecar(expected_sse_blobs)); +} + +/// Verifies that a blob event is emitted when blobs are received via RPC. 
+#[tokio::test] +async fn blob_sidecar_event_on_process_rpc_blobs() { + let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // subscribe to blob sidecar events + let event_handler = harness.chain.event_handler.as_ref().unwrap(); + let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); + + // build and process multiple rpc blobs + let kzg = harness.chain.kzg.as_ref().unwrap(); + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + + let blob_1 = BlobSidecar::random_valid(&mut rng, kzg) + .map(Arc::new) + .unwrap(); + let blob_2 = Arc::new(BlobSidecar { + index: 1, + ..BlobSidecar::random_valid(&mut rng, kzg).unwrap() + }); + let blobs = FixedBlobSidecarList::from(vec![Some(blob_1.clone()), Some(blob_2.clone())]); + let expected_sse_blobs = vec![ + SseBlobSidecar::from_blob_sidecar(blob_1.as_ref()), + SseBlobSidecar::from_blob_sidecar(blob_2.as_ref()), + ]; + + let _ = harness + .chain + .process_rpc_blobs(blob_1.slot, blob_1.block_root, blobs) + .await + .unwrap(); + + let mut sse_blobs: Vec = vec![]; + while let Ok(sidecar_event) = blob_event_receiver.try_recv() { + if let EventKind::BlobSidecar(sse_blob_sidecar) = sidecar_event { + sse_blobs.push(sse_blob_sidecar); + } else { + panic!("`BlobSidecar` event kind expected."); + } + } + assert_eq!(sse_blobs, expected_sse_blobs); +} diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index c81a547406..332f6a4829 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -2,6 +2,7 @@ mod attestation_production; mod attestation_verification; mod block_verification; mod capella; +mod events; mod merge; mod op_verification; mod payload_invalidation; diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 
b60748e30c..26c53154e3 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -22,7 +22,6 @@ types = { workspace = true } eth2_config = { workspace = true } slot_clock = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" error-chain = { workspace = true } slog = { workspace = true } tokio = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index d0cfb3825b..d1184cf75d 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -71,7 +71,6 @@ pub struct ClientBuilder { gossipsub_registry: Option, db_path: Option, freezer_db_path: Option, - blobs_db_path: Option, http_api_config: http_api::Config, http_metrics_config: http_metrics::Config, slasher: Option>>, @@ -106,7 +105,6 @@ where gossipsub_registry: None, db_path: None, freezer_db_path: None, - blobs_db_path: None, http_api_config: <_>::default(), http_metrics_config: <_>::default(), slasher: None, @@ -927,7 +925,6 @@ where self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); - self.blobs_db_path = blobs_path.clone(); let inner_spec = spec.clone(); let deposit_contract_deploy_block = context diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 282eacc15a..8b47d0fc62 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -5,7 +5,7 @@ use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; use network::NetworkConfig; use sensitive_url::SensitiveUrl; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; use std::time::Duration; diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 46c585bb05..7f65268980 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -50,3 +50,4 @@ triehash = "0.8.4" hash-db = "0.15.2" 
pretty_reqwest_error = { workspace = true } arc-swap = "1.6.0" +eth2_network_config = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c680617108..ac7dfa57e9 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -897,10 +897,9 @@ impl HttpJsonRpc { .await?; Ok(JsonGetPayloadResponse::V3(response).into()) } - _ => Err(Error::UnsupportedForkVariant(format!( - "called get_payload_v3 with {}", - fork_name - ))), + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Err( + Error::UnsupportedForkVariant(format!("called get_payload_v3 with {}", fork_name)), + ), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 9eb19bb2b7..034400487c 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -48,7 +48,7 @@ use types::{ AbstractExecPayload, BeaconStateError, BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, }; -use types::{ProposerPreparationData, PublicKeyBytes, Signature, Slot, Transaction}; +use types::{ProposerPreparationData, PublicKeyBytes, Signature, Slot}; mod block_hash; mod engine_api; @@ -2163,31 +2163,6 @@ fn timestamp_now() -> u64 { .as_secs() } -fn static_valid_tx() -> Result, String> { - // This is a real transaction hex encoded, but we don't care about the contents of the transaction. 
- let transaction: EthersTransaction = serde_json::from_str( - r#"{ - "blockHash":"0x1d59ff54b1eb26b013ce3cb5fc9dab3705b415a67127a003c3e61eb445bb8df2", - "blockNumber":"0x5daf3b", - "from":"0xa7d9ddbe1f17865597fbd27ec712455208b6b76d", - "gas":"0xc350", - "gasPrice":"0x4a817c800", - "hash":"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b", - "input":"0x68656c6c6f21", - "nonce":"0x15", - "to":"0xf02c1c8e6114b1dbe8937a39260b5b0a374432bb", - "transactionIndex":"0x41", - "value":"0xf3dbb76162000", - "v":"0x25", - "r":"0x1b5e176d927f8e9ab405058b2d2457392da3e20f328b16ddabcebc33eaac5fea", - "s":"0x4ba69724e8f69de52f0125ad8b3c5c2cef33019bac3249e2c0a2192766d1721c" - }"#, - ) - .unwrap(); - VariableList::new(transaction.rlp().to_vec()) - .map_err(|e| format!("Failed to convert transaction to SSZ: {:?}", e)) -} - fn noop( _: &ExecutionLayer, _: PayloadContentsRefTuple, diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index c191fb2a00..b91c9b51a0 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,5 @@ use crate::engines::ForkchoiceState; +use crate::EthersTransaction; use crate::{ engine_api::{ json_structures::{ @@ -6,24 +7,29 @@ use crate::{ }, ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, }, - static_valid_tx, ExecutionBlockWithTransactions, + ExecutionBlockWithTransactions, }; use eth2::types::BlobsBundle; -use kzg::Kzg; +use kzg::{Kzg, KzgCommitment, KzgProof}; use parking_lot::Mutex; use rand::{rngs::StdRng, Rng, SeedableRng}; use serde::{Deserialize, Serialize}; +use ssz_types::VariableList; use std::collections::HashMap; use std::sync::Arc; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ - BlobSidecar, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, 
ExecutionPayloadCapella, - ExecutionPayloadDeneb, ExecutionPayloadHeader, ExecutionPayloadMerge, ForkName, Hash256, - Transactions, Uint256, + Blob, ChainSpec, EthSpec, EthSpecId, ExecutionBlockHash, ExecutionPayload, + ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadHeader, ExecutionPayloadMerge, + ForkName, Hash256, Transaction, Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; +use ssz::Decode; + +const TEST_BLOB_BUNDLE_MAINNET: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); +const TEST_BLOB_BUNDLE_MINIMAL: &[u8] = include_bytes!("fixtures/minimal/test_blobs_bundle.ssz"); const GAS_LIMIT: u64 = 16384; const GAS_USED: u64 = GAS_LIMIT - 1; @@ -625,8 +631,7 @@ impl ExecutionBlockGenerator { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); let num_blobs = rng.gen::() % (T::max_blobs_per_block() + 1); - let kzg = self.kzg.as_ref().ok_or("kzg not initialized")?; - let (bundle, transactions) = generate_random_blobs(num_blobs, kzg, &mut *rng)?; + let (bundle, transactions) = generate_blobs(num_blobs)?; for tx in Vec::from(transactions) { execution_payload .transactions_mut() @@ -643,30 +648,51 @@ impl ExecutionBlockGenerator { } } -pub fn generate_random_blobs( +pub fn load_test_blobs_bundle() -> Result<(KzgCommitment, KzgProof, Blob), String> { + let blob_bundle_bytes = match E::spec_name() { + EthSpecId::Mainnet => TEST_BLOB_BUNDLE_MAINNET, + EthSpecId::Minimal => TEST_BLOB_BUNDLE_MINIMAL, + EthSpecId::Gnosis => { + return Err("Test blobs bundle not available for Gnosis preset".to_string()) + } + }; + + let BlobsBundle { + commitments, + proofs, + blobs, + } = BlobsBundle::::from_ssz_bytes(blob_bundle_bytes) + .map_err(|e| format!("Unable to decode SSZ: {:?}", e))?; + + Ok(( + commitments + .get(0) + .cloned() + .ok_or("commitment missing in test bundle")?, + proofs + .get(0) + .cloned() + .ok_or("proof missing in test bundle")?, + blobs.get(0).cloned().ok_or("blob missing in test bundle")?, + )) 
+} + +pub fn generate_blobs( n_blobs: usize, - kzg: &Kzg, - rng: &mut R, -) -> Result<(BlobsBundle, Transactions), String> { - let mut bundle = BlobsBundle::::default(); +) -> Result<(BlobsBundle, Transactions), String> { + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + + let mut bundle = BlobsBundle::::default(); let mut transactions = vec![]; + for blob_index in 0..n_blobs { - let random_valid_sidecar = BlobSidecar::::random_valid(rng, kzg)?; - - let BlobSidecar { - blob, - kzg_commitment, - kzg_proof, - .. - } = random_valid_sidecar; - - let tx = static_valid_tx::() + let tx = static_valid_tx::() .map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?; transactions.push(tx); bundle .blobs - .push(blob) + .push(blob.clone()) .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; bundle .commitments @@ -681,6 +707,31 @@ pub fn generate_random_blobs( Ok((bundle, transactions.into())) } +fn static_valid_tx() -> Result, String> { + // This is a real transaction hex encoded, but we don't care about the contents of the transaction. 
+ let transaction: EthersTransaction = serde_json::from_str( + r#"{ + "blockHash":"0x1d59ff54b1eb26b013ce3cb5fc9dab3705b415a67127a003c3e61eb445bb8df2", + "blockNumber":"0x5daf3b", + "from":"0xa7d9ddbe1f17865597fbd27ec712455208b6b76d", + "gas":"0xc350", + "gasPrice":"0x4a817c800", + "hash":"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b", + "input":"0x68656c6c6f21", + "nonce":"0x15", + "to":"0xf02c1c8e6114b1dbe8937a39260b5b0a374432bb", + "transactionIndex":"0x41", + "value":"0xf3dbb76162000", + "v":"0x25", + "r":"0x1b5e176d927f8e9ab405058b2d2457392da3e20f328b16ddabcebc33eaac5fea", + "s":"0x4ba69724e8f69de52f0125ad8b3c5c2cef33019bac3249e2c0a2192766d1721c" + }"#, + ) + .unwrap(); + VariableList::new(transaction.rlp().to_vec()) + .map_err(|e| format!("Failed to convert transaction to SSZ: {:?}", e)) +} + fn payload_id_from_u64(n: u64) -> PayloadId { n.to_le_bytes() } @@ -711,7 +762,7 @@ pub fn generate_genesis_header( Some(header) } ForkName::Deneb => { - let mut header = ExecutionPayloadHeader::Capella(<_>::default()); + let mut header = ExecutionPayloadHeader::Deneb(<_>::default()); *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); Some(header) } @@ -770,7 +821,8 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use types::MainnetEthSpec; + use kzg::TrustedSetup; + use types::{MainnetEthSpec, MinimalEthSpec}; #[test] fn pow_chain_only() { @@ -832,4 +884,33 @@ mod test { assert!(generator.block_by_number(next_i).is_none()); } } + + #[test] + fn valid_test_blobs() { + assert!( + validate_blob::().unwrap(), + "Mainnet preset test blobs bundle should contain valid proofs" + ); + assert!( + validate_blob::().unwrap(), + "Minimal preset test blobs bundle should contain valid proofs" + ); + } + + fn validate_blob() -> Result { + let kzg = load_kzg::()?; + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + let kzg_blob = E::blob_from_bytes(blob.as_ref()) + .map_err(|e| format!("Error converting blob 
to kzg blob: {e:?}"))?; + kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) + .map_err(|e| format!("Invalid blobs bundle: {e:?}")) + } + + fn load_kzg() -> Result, String> { + let trusted_setup: TrustedSetup = + serde_json::from_reader(eth2_network_config::get_trusted_setup::()) + .map_err(|e| format!("Unable to read trusted setup file: {e:?}"))?; + Kzg::new_from_trusted_setup(trusted_setup) + .map_err(|e| format!("Failed to load trusted setup: {e:?}")) + } } diff --git a/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz b/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz new file mode 100644 index 0000000000..8ef325a00c Binary files /dev/null and b/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz differ diff --git a/beacon_node/execution_layer/src/test_utils/fixtures/minimal/test_blobs_bundle.ssz b/beacon_node/execution_layer/src/test_utils/fixtures/minimal/test_blobs_bundle.ssz new file mode 100644 index 0000000000..366a467b2c Binary files /dev/null and b/beacon_node/execution_layer/src/test_utils/fixtures/minimal/test_blobs_bundle.ssz differ diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index e50e6f8d37..9dff1ac008 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -282,20 +282,6 @@ pub async fn handle_rpc( _ => unreachable!(), }), ENGINE_GET_PAYLOAD_V3 => Ok(match JsonExecutionPayload::from(response) { - JsonExecutionPayload::V1(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseV1 { - execution_payload, - block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), - }) - .unwrap() - } - JsonExecutionPayload::V2(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseV2 { - execution_payload, - block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), - }) - 
.unwrap() - } JsonExecutionPayload::V3(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV3 { execution_payload, @@ -310,6 +296,7 @@ pub async fn handle_rpc( }) .unwrap() } + _ => unreachable!(), }), _ => unreachable!(), } diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index f1bd89868c..2ba51bd67d 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -103,14 +103,8 @@ impl MockExecutionLayer { justified_hash: None, finalized_hash: None, }; - let payload_attributes = PayloadAttributes::new( - timestamp, - prev_randao, - Address::repeat_byte(42), - // FIXME: think about how to handle different forks here.. - None, - None, - ); + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None, None); // Insert a proposer to ensure the fork choice updated command works. let slot = Slot::new(0); @@ -146,7 +140,6 @@ impl MockExecutionLayer { &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: do we need to consider other forks somehow? ForkName::Merge, &self.spec, ) @@ -181,7 +174,6 @@ impl MockExecutionLayer { &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: do we need to consider other forks somehow? What about withdrawals? 
ForkName::Merge, &self.spec, ) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index f56a04b074..dc3f61815e 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -25,8 +25,8 @@ use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; pub use execution_block_generator::{ - generate_genesis_block, generate_genesis_header, generate_pow_block, generate_random_blobs, - Block, ExecutionBlockGenerator, + generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, Block, + ExecutionBlockGenerator, }; pub use hook::Hook; pub use mock_builder::{MockBuilder, Operation}; diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 545213ca81..45fc651f05 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -253,7 +253,7 @@ impl BlockId { } /// Return the `BlobSidecarList` identified by `self`. 
- pub async fn blob_sidecar_list( + pub fn blob_sidecar_list( &self, chain: &BeaconChain, ) -> Result, warp::Rejection> { @@ -263,12 +263,12 @@ impl BlockId { .map_err(warp_utils::reject::beacon_chain_error) } - pub async fn blob_sidecar_list_filtered( + pub fn blob_sidecar_list_filtered( &self, indices: BlobIndicesQuery, chain: &BeaconChain, ) -> Result, warp::Rejection> { - let blob_sidecar_list = self.blob_sidecar_list(chain).await?; + let blob_sidecar_list = self.blob_sidecar_list(chain)?; let blob_sidecar_list_filtered = match indices.indices { Some(vec) => { let list = blob_sidecar_list diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index eeb9c18eb5..a658346f57 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1727,16 +1727,18 @@ pub fn serve( .and(block_id_or_err) .and(warp::query::()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(warp::header::optional::("accept")) - .and_then( + .then( |block_id: BlockId, indices: api_types::BlobIndicesQuery, + task_spawner: TaskSpawner, chain: Arc>, accept_header: Option| { - async move { + task_spawner.blocking_response_task(Priority::P1, move || { let blob_sidecar_list_filtered = - block_id.blob_sidecar_list_filtered(indices, &chain).await?; + block_id.blob_sidecar_list_filtered(indices, &chain)?; match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) @@ -1753,7 +1755,7 @@ pub fn serve( )) .into_response()), } - } + }) }, ); @@ -4423,6 +4425,9 @@ pub fn serve( let receiver = match topic { api_types::EventTopic::Head => event_handler.subscribe_head(), api_types::EventTopic::Block => event_handler.subscribe_block(), + api_types::EventTopic::BlobSidecar => { + event_handler.subscribe_blob_sidecar() + } api_types::EventTopic::Attestation => { event_handler.subscribe_attestation() } diff --git a/beacon_node/http_api/src/publish_blocks.rs 
b/beacon_node/http_api/src/publish_blocks.rs index a0e63ec9e9..e68691ce8b 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -75,8 +75,7 @@ pub async fn publish_block block.slot(), "publish_delay" => ?publish_delay); - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. + match block.as_ref() { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) @@ -199,9 +198,17 @@ pub async fn publish_block &msg + ); + Err(warp_utils::reject::custom_bad_request(msg)) + }; } } } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 07674fb6dd..125bbe9bc2 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -10,7 +10,6 @@ unsigned-varint = { version = "0.6", features = ["codec"] } ssz_types = { workspace = true } types = { workspace = true } serde = { workspace = true } -serde_derive = "1" ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } tree_hash = { workspace = true } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 472f3ef75e..5d84753781 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -8,7 +8,7 @@ use directory::{ use discv5::{Discv5Config, Discv5ConfigBuilder}; use libp2p::gossipsub; use libp2p::Multiaddr; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use std::net::{Ipv4Addr, Ipv6Addr}; use std::num::NonZeroU16; @@ -468,8 +468,6 @@ pub fn gossipsub_config( ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); match fork_context.current_fork() { - // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub - // the derivation of the 
message-id remains the same in the merge and for eip 4844. ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Deneb => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 2fcb380bb6..ad96731141 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -6,7 +6,7 @@ use std::{ use super::{methods, rate_limiter::Quota, Protocol}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; /// Auxiliary struct to aid on configuration parsing. /// diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 1031b66e2d..0b57374e8b 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -2,7 +2,7 @@ use super::config::RateLimiterConfig; use crate::rpc::Protocol; use fnv::FnvHashMap; use libp2p::PeerId; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::convert::TryInto; use std::future::Future; use std::hash::Hash; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 53b6bcab37..5daa6557ea 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1602,7 +1602,14 @@ impl Network { SwarmEvent::ListenerClosed { addresses, reason, .. 
} => { - crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); + match reason { + Ok(_) => { + debug!(self.log, "Listener gracefuly closed"; "addresses" => ?addresses) + } + Err(reason) => { + crit!(self.log, "Listener abruptly closed"; "addresses" => ?addresses, "reason" => ?reason) + } + }; if Swarm::listeners(&self.swarm).count() == 0 { Some(NetworkEvent::ZeroListeners) } else { diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 4099949ac3..e7e771e1ad 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,5 +1,5 @@ use libp2p::gossipsub::{IdentTopic as Topic, TopicHash}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use strum::AsRefStr; use types::consts::deneb::BLOB_SIDECAR_SUBNET_COUNT; use types::{EthSpec, ForkName, SubnetId, SyncSubnetId}; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index fe70f3c1ba..9fe64d159f 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -11,7 +11,7 @@ use beacon_chain::block_verification_types::AsBlock; use beacon_chain::store::Error; use beacon_chain::{ attestation_verification::{self, Error as AttnError, VerifiedAttestation}, - data_availability_checker::AvailabilityCheckError, + data_availability_checker::AvailabilityCheckErrorCategory, light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, observed_operations::ObservationOutcome, @@ -1233,24 +1233,15 @@ impl NetworkBeaconProcessor { ); } Err(BlockError::AvailabilityCheck(err)) => { - match err { - AvailabilityCheckError::KzgNotInitialized - | 
AvailabilityCheckError::Unexpected - | AvailabilityCheckError::SszTypes(_) - | AvailabilityCheckError::MissingBlobs - | AvailabilityCheckError::StoreError(_) - | AvailabilityCheckError::DecodeError(_) => { + match err.category() { + AvailabilityCheckErrorCategory::Internal => { warn!( self.log, "Internal availability check error"; "error" => ?err, ); } - AvailabilityCheckError::Kzg(_) - | AvailabilityCheckError::KzgVerificationFailed - | AvailabilityCheckError::KzgCommitmentMismatch { .. } - | AvailabilityCheckError::BlobIndexInvalid(_) - | AvailabilityCheckError::InconsistentBlobBlockRoots { .. } => { + AvailabilityCheckErrorCategory::Malicious => { // Note: we cannot penalize the peer that sent us the block // over gossip here because these errors imply either an issue // with: diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index d2d589b35a..d6bb7421e8 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -260,13 +260,13 @@ impl NetworkBeaconProcessor { pub fn generate_rpc_blobs_process_fn( self: Arc, block_root: Hash256, - block: FixedBlobSidecarList, + blobs: FixedBlobSidecarList, seen_timestamp: Duration, process_type: BlockProcessType, ) -> AsyncFn { let process_fn = async move { self.clone() - .process_rpc_blobs(block_root, block, seen_timestamp, process_type) + .process_rpc_blobs(block_root, blobs, seen_timestamp, process_type) .await; }; Box::pin(process_fn) diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 7248147178..4df940a3b7 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -540,7 +540,7 @@ impl Router { seen_timestamp: timestamp_now(), }); } else { - debug!( + crit!( self.log, "All blobs by range responses should belong to sync" ); diff --git 
a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 9b865fdfee..3f1ecd129b 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -13,7 +13,9 @@ use crate::sync::block_lookups::single_block_lookup::{ use crate::sync::manager::{Id, SingleLookupReqId}; use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; pub use beacon_chain::data_availability_checker::ChildComponents; -use beacon_chain::data_availability_checker::{AvailabilityCheckError, DataAvailabilityChecker}; +use beacon_chain::data_availability_checker::{ + AvailabilityCheckErrorCategory, DataAvailabilityChecker, +}; use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; pub use common::Current; @@ -47,7 +49,7 @@ pub type DownloadedBlock = (Hash256, RpcBlock); const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; -/// This enum is used to track what a peer *should* be able to respond with respond based on +/// This enum is used to track what a peer *should* be able to respond with based on /// other messages we've seen from this peer on the network. This is useful for peer scoring. /// We expect a peer tracked by the `BlockAndBlobs` variant to be able to respond to all /// components of a block. This peer has either sent an attestation for the requested block @@ -447,7 +449,7 @@ impl BlockLookups { } } CachedChild::DownloadIncomplete => { - // If this was the result of a block request, we can't determined if the block peer + // If this was the result of a block request, we can't determine if the block peer // did anything wrong. If we already had both a block and blobs response processed, // we should penalize the blobs peer because they did not provide all blobs on the // initial request. 
@@ -893,39 +895,25 @@ impl BlockLookups { ); return Ok(None); } - BlockError::AvailabilityCheck(e) => { - match e { - // Internal error. - AvailabilityCheckError::KzgNotInitialized - | AvailabilityCheckError::SszTypes(_) - | AvailabilityCheckError::MissingBlobs - | AvailabilityCheckError::StoreError(_) - | AvailabilityCheckError::DecodeError(_) - | AvailabilityCheckError::Unexpected => { - warn!(self.log, "Internal availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); - lookup - .block_request_state - .state - .register_failure_downloading(); - lookup - .blob_request_state - .state - .register_failure_downloading(); - lookup.request_block_and_blobs(cx)? - } - - // Malicious errors. - AvailabilityCheckError::Kzg(_) - | AvailabilityCheckError::BlobIndexInvalid(_) - | AvailabilityCheckError::KzgCommitmentMismatch { .. } - | AvailabilityCheckError::KzgVerificationFailed - | AvailabilityCheckError::InconsistentBlobBlockRoots { .. } => { - warn!(self.log, "Availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); - lookup.handle_availability_check_failure(cx); - lookup.request_block_and_blobs(cx)? - } + BlockError::AvailabilityCheck(e) => match e.category() { + AvailabilityCheckErrorCategory::Internal => { + warn!(self.log, "Internal availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); + lookup + .block_request_state + .state + .register_failure_downloading(); + lookup + .blob_request_state + .state + .register_failure_downloading(); + lookup.request_block_and_blobs(cx)? } - } + AvailabilityCheckErrorCategory::Malicious => { + warn!(self.log, "Availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); + lookup.handle_availability_check_failure(cx); + lookup.request_block_and_blobs(cx)? 
+ } + }, other => { warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); if let Ok(block_peer) = lookup.block_request_state.state.processing_peer() { diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 59ac9c4338..bd1e72ee18 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -91,10 +91,8 @@ impl TestRig { fork_name: ForkName, num_blobs: NumBlobs, ) -> (SignedBeaconBlock, Vec>) { - let kzg = self.harness.chain.kzg.as_ref().unwrap(); let rng = &mut self.rng; - - generate_rand_block_and_blobs::(fork_name, num_blobs, kzg.as_ref(), rng) + generate_rand_block_and_blobs::(fork_name, num_blobs, rng) } #[track_caller] diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index afdbd7257a..dd8cd9c49e 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -16,7 +16,6 @@ ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } rayon = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" store = { workspace = true } bitvec = { workspace = true } rand = { workspace = true } diff --git a/beacon_node/operation_pool/src/attestation_id.rs b/beacon_node/operation_pool/src/attestation_id.rs index b65975787e..f0dc6536a5 100644 --- a/beacon_node/operation_pool/src/attestation_id.rs +++ b/beacon_node/operation_pool/src/attestation_id.rs @@ -1,4 +1,4 @@ -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; /// Serialized `AttestationData` augmented with a domain to encode the fork info. 
diff --git a/beacon_node/operation_pool/src/sync_aggregate_id.rs b/beacon_node/operation_pool/src/sync_aggregate_id.rs index 401e0c5f82..40d6e36490 100644 --- a/beacon_node/operation_pool/src/sync_aggregate_id.rs +++ b/beacon_node/operation_pool/src/sync_aggregate_id.rs @@ -1,4 +1,4 @@ -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use types::{Hash256, Slot}; diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index d912da0420..288d167b41 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -19,7 +19,6 @@ types = { workspace = true } state_processing = { workspace = true } slog = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } lru = { workspace = true } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 6582bc4810..2fbef2f709 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,6 +1,6 @@ use crate::hdiff::HierarchyConfig; use crate::{DBColumn, Error, StoreItem}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::io::Write; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 87720e1304..324c83d01a 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -25,7 +25,7 @@ use leveldb::iterator::LevelDBIterator; use lru::LruCache; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 
fc610b186e..5937c04a01 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -1,5 +1,5 @@ use crate::{DBColumn, Error, StoreItem}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 11a006493a..519ce57055 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -126,6 +126,22 @@ curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H " ``` You can replace `1` in the above command with the validator index that you would like to query. Other API query can be done similarly by changing the link according to the Beacon API. +### Events API +The [events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream) provides information such as the payload attributes that are of interest to block builders and relays. To query the payload attributes, it is necessary to run Lighthouse beacon node with the flag `--always-prepare-payload`. It is also recommended to add the flag `--prepare-payload-lookahead 8000` which configures the payload attributes to be sent at 4s into each slot (or 8s from the start of the next slot). 
An example of the command is: + +```bash +curl -X 'GET' \ +'http://localhost:5052/eth/v1/events?topics=payload_attributes' \ +-H 'accept: text/event-stream' +``` + +An example of response is: + +```json +data:{"version":"capella","data":{"proposal_slot":"11047","proposer_index":"336057","parent_block_root":"0x26f8999d270dd4677c2a1c815361707157a531f6c599f78fa942c98b545e1799","parent_block_number":"9259","parent_block_hash":"0x7fb788cd7afa814e578afa00a3edd250cdd4c8e35c22badd327d981b5bda33d2","payload_attributes":{"timestamp":"1696034964","prev_randao":"0xeee34d7a3f6b99ade6c6a881046c9c0e96baab2ed9469102d46eb8d6e4fde14c","suggested_fee_recipient":"0x0000000000000000000000000000000000000001","withdrawals":[{"index":"40705","validator_index":"360712","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1202941"},{"index":"40706","validator_index":"360713","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1201138"},{"index":"40707","validator_index":"360714","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1215255"},{"index":"40708","validator_index":"360715","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1161977"},{"index":"40709","validator_index":"360716","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1257278"},{"index":"40710","validator_index":"360717","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1247740"},{"index":"40711","validator_index":"360718","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1204337"},{"index":"40712","validator_index":"360719","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1183575"},{"index":"40713","validator_index":"360720","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1157785"},{"index":"40714","validator_index":"360721","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1143371"},{"index":"40715","validator_index":"360722","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde",
"amount":"1234787"},{"index":"40716","validator_index":"360723","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1286673"},{"index":"40717","validator_index":"360724","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1419241"},{"index":"40718","validator_index":"360725","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1231015"},{"index":"40719","validator_index":"360726","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1304321"},{"index":"40720","validator_index":"360727","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1236543"}]}}} +``` + + ## Serving the HTTP API over TLS > **Warning**: This feature is currently experimental. diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 552e1fd6f8..32c967c9e0 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -125,7 +125,7 @@ curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: ap ### `/lighthouse/ui/validator_metrics` -Re-exposes certain metrics from the validator monitor to the HTTP API. This API requires that the beacon node to have the flag `--validator-monitor-auto`. This API will only return metrics for the validators currently being monitored and present in the POST data, or the validators running in the validator client. +Re-exposes certain metrics from the validator monitor to the HTTP API. This API requires that the beacon node to have the flag `--validator-monitor-auto`. This API will only return metrics for the validators currently being monitored and present in the POST data, or the validators running in the validator client. ```bash curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq ``` @@ -356,7 +356,7 @@ health of the execution node that the beacon node is connected to. 
- `latest_cached_block_number` & `latest_cached_block_timestamp`: the block number and timestamp of the latest block we have in our block cache. - For correct execution client voting this timestamp should be later than the -`voting_target_timestamp`. +`voting_target_timestamp`. - `voting_target_timestamp`: The latest timestamp allowed for an execution layer block in this voting period. - `eth1_node_sync_status_percentage` (float): An estimate of how far the head of the @@ -480,9 +480,9 @@ curl -X GET "http://localhost:5052/lighthouse/beacon/states/0/ssz" | jq ### `/lighthouse/liveness` POST request that checks if any of the given validators have attested in the given epoch. Returns a list -of objects, each including the validator index, epoch, and `is_live` status of a requested validator. +of objects, each including the validator index, epoch, and `is_live` status of a requested validator. -This endpoint is used in doppelganger detection, and can only provide accurate information for the current, previous, or next epoch. +This endpoint is used in doppelganger detection, and can only provide accurate information for the current, previous, or next epoch. > Note that for this API, if you insert an arbitrary epoch other than the previous, current or next epoch of the network, it will return `"code:400"` and `BAD_REQUEST`. @@ -547,21 +547,6 @@ reconstruction has yet to be completed. For more information on the specific meanings of these fields see the docs on [Checkpoint Sync](./checkpoint-sync.md#reconstructing-states). -### `/lighthouse/database/reconstruct` - -Instruct Lighthouse to begin reconstructing historic states, see -[Reconstructing States](./checkpoint-sync.md#reconstructing-states). This is an alternative -to the `--reconstruct-historic-states` flag. - -``` -curl -X POST "http://localhost:5052/lighthouse/database/reconstruct" | jq -``` - -```json -"success" -``` - -The endpoint will return immediately. 
See the beacon node logs for an indication of progress. ### `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty, An example is shown below: @@ -714,7 +699,7 @@ The first few lines of the response would look like: ] } } -] +] ``` Caveats: @@ -811,4 +796,4 @@ An open port will return: ```json { "data": true -} \ No newline at end of file +} diff --git a/book/src/builders.md b/book/src/builders.md index 2be4841ddf..b0d6112431 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -258,6 +258,9 @@ used in place of one from the builder: INFO Reconstructing a full block using a local payload ``` +## Information for block builders and relays +Block builders and relays can query beacon node events from the [Events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream). An example of querying the payload attributes in the Events API is outlined in [Beacon node API - Events API](./api-bn.md#events-api) + [mev-rs]: https://github.com/ralexstokes/mev-rs [mev-boost]: https://github.com/flashbots/mev-boost [gas-limit-api]: https://ethereum.github.io/keymanager-APIs/#/Gas%20Limit diff --git a/book/src/developers.md b/book/src/developers.md index 2ba09bd341..ab12bed5b9 100644 --- a/book/src/developers.md +++ b/book/src/developers.md @@ -48,4 +48,5 @@ custom RPC error messages. 
| Code | Message | Description | | ---- | ---- | ---- | -| 139 | Rate Limited | The peer has been rate limited so we return this error as a response | \ No newline at end of file +| 139 | Rate Limited | The peer has been rate limited so we return this error as a response | +| 140 | Blobs Not Found For Block | We do not possess the blobs for the requested block | diff --git a/book/src/homebrew.md b/book/src/homebrew.md index 317dc0e0fa..486de371f8 100644 --- a/book/src/homebrew.md +++ b/book/src/homebrew.md @@ -31,6 +31,6 @@ Alternatively, you can find the `lighthouse` binary at: The [formula][] is kept up-to-date by the Homebrew community and a bot that lists for new releases. -The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/lighthouse.rb) repo. +The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repo. [formula]: https://formulae.brew.sh/formula/lighthouse diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index ef81b2b751..cd31d78d62 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -8,7 +8,7 @@ These endpoints are not stable or included in the Ethereum consensus standard AP they are subject to change or removal without a change in major release version. -In order to apply these APIs, you need to have historical states information in the database of your node. This means adding the flag `--reconstruct-historic-states` in the beacon node or using the [/lighthouse/database/reconstruct API](./api-lighthouse.md#lighthousedatabasereconstruct). Once the state reconstruction process is completed, you can apply these APIs to any epoch. +In order to apply these APIs, you need to have historical states information in the database of your node. This means adding the flag `--reconstruct-historic-states` in the beacon node. 
Once the state reconstruction process is completed, you can apply these APIs to any epoch. ## Endpoints diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 4df079d7ae..0040183c77 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -21,7 +21,6 @@ slog-scope = "4.3.0" slog-stdlog = "4.0.0" hex = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" serde_json = { workspace = true } serde_yaml = { workspace = true } eth2_network_config = { workspace = true } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index d435efc6f5..6cfa8f4cf7 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -7,7 +7,7 @@ use lighthouse_network::{ discovery::{load_enr_from_disk, use_or_load_enr}, load_private_key, CombinedKeyExt, NetworkConfig, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Encode; use std::net::{SocketAddrV4, SocketAddrV6}; use std::time::Duration; diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index 10113ab320..e66bf14233 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -13,7 +13,6 @@ eth2_keystore = { workspace = true } filesystem = { workspace = true } zeroize = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" serde_yaml = { workspace = true } slog = { workspace = true } types = { workspace = true } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index e566d7cdda..8707ae531f 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -8,7 +8,7 @@ use eth2_wallet::{ }; use filesystem::{create_with_600_perms, Error as FsError}; use rand::{distributions::Alphanumeric, Rng}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::fs::{self, File}; use std::io; use std::io::prelude::*; diff --git a/common/account_utils/src/validator_definitions.rs 
b/common/account_utils/src/validator_definitions.rs index c91e717d11..8dc0888e6e 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -9,7 +9,7 @@ use crate::{ use directory::ensure_dir_exists; use eth2_keystore::Keystore; use regex::Regex; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{error, Logger}; use std::collections::HashSet; use std::fs::{self, File}; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 11cd950965..3c1b7391c6 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -7,7 +7,7 @@ use mediatype::{names, MediaType, MediaTypeList}; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use ssz::{Decode, DecodeError}; -use ssz_derive::Encode; +use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::{self, Display}; use std::str::{from_utf8, FromStr}; @@ -888,6 +888,28 @@ pub struct SseBlock { pub execution_optimistic: bool, } +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseBlobSidecar { + pub block_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub slot: Slot, + pub kzg_commitment: KzgCommitment, + pub versioned_hash: VersionedHash, +} + +impl SseBlobSidecar { + pub fn from_blob_sidecar(blob_sidecar: &BlobSidecar) -> SseBlobSidecar { + SseBlobSidecar { + block_root: blob_sidecar.block_root, + index: blob_sidecar.index, + slot: blob_sidecar.slot, + kzg_commitment: blob_sidecar.kzg_commitment, + versioned_hash: blob_sidecar.kzg_commitment.calculate_versioned_hash(), + } + } +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseFinalizedCheckpoint { pub block: Hash256, @@ -1019,6 +1041,7 @@ impl ForkVersionDeserialize for SseExtendedPayloadAttributes { pub enum EventKind { Attestation(Box>), Block(SseBlock), + BlobSidecar(SseBlobSidecar), FinalizedCheckpoint(SseFinalizedCheckpoint), 
Head(SseHead), VoluntaryExit(SignedVoluntaryExit), @@ -1035,6 +1058,7 @@ impl EventKind { match self { EventKind::Head(_) => "head", EventKind::Block(_) => "block", + EventKind::BlobSidecar(_) => "blob_sidecar", EventKind::Attestation(_) => "attestation", EventKind::VoluntaryExit(_) => "voluntary_exit", EventKind::FinalizedCheckpoint(_) => "finalized_checkpoint", @@ -1072,6 +1096,9 @@ impl EventKind { "block" => Ok(EventKind::Block(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block: {:?}", e)), )?)), + "blob_sidecar" => Ok(EventKind::BlobSidecar(serde_json::from_str(data).map_err( + |e| ServerError::InvalidServerSentEvent(format!("Blob Sidecar: {:?}", e)), + )?)), "chain_reorg" => Ok(EventKind::ChainReorg(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Chain Reorg: {:?}", e)), )?)), @@ -1124,6 +1151,7 @@ pub struct EventQuery { pub enum EventTopic { Head, Block, + BlobSidecar, Attestation, VoluntaryExit, FinalizedCheckpoint, @@ -1142,6 +1170,7 @@ impl FromStr for EventTopic { match s { "head" => Ok(EventTopic::Head), "block" => Ok(EventTopic::Block), + "blob_sidecar" => Ok(EventTopic::BlobSidecar), "attestation" => Ok(EventTopic::Attestation), "voluntary_exit" => Ok(EventTopic::VoluntaryExit), "finalized_checkpoint" => Ok(EventTopic::FinalizedCheckpoint), @@ -1161,6 +1190,7 @@ impl fmt::Display for EventTopic { match self { EventTopic::Head => write!(f, "head"), EventTopic::Block => write!(f, "block"), + EventTopic::BlobSidecar => write!(f, "blob_sidecar"), EventTopic::Attestation => write!(f, "attestation"), EventTopic::VoluntaryExit => write!(f, "voluntary_exit"), EventTopic::FinalizedCheckpoint => write!(f, "finalized_checkpoint"), @@ -1983,7 +2013,7 @@ pub struct ExecutionPayloadAndBlobs { pub blobs_bundle: BlobsBundle, } -#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode)] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] 
#[serde(bound = "E: EthSpec")] pub struct BlobsBundle { pub commitments: KzgCommitments, diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index ded62653e5..6f92acc84a 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -13,7 +13,6 @@ ethereum_hashing = { workspace = true } hex = { workspace = true } serde_yaml = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" bls = { workspace = true } [dev-dependencies] diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index 7b5fa7a8e4..3d4ff02c38 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -22,7 +22,7 @@ extern crate lazy_static; use bls::{Keypair, PublicKey, SecretKey}; use ethereum_hashing::hash; use num_bigint::BigUint; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::convert::TryInto; use std::fs::File; use std::path::PathBuf; diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index e22f747bb1..3731229c39 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -12,7 +12,6 @@ task_executor = { workspace = true } tokio = { workspace = true } eth2 = { workspace = true } serde_json = { workspace = true } -serde_derive = "1.0.116" serde = { workspace = true } lighthouse_version = { workspace = true } lighthouse_metrics = { workspace = true } diff --git a/common/monitoring_api/src/types.rs b/common/monitoring_api/src/types.rs index 9765e34613..cf33ccb9c0 100644 --- a/common/monitoring_api/src/types.rs +++ b/common/monitoring_api/src/types.rs @@ -1,7 +1,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use eth2::lighthouse::{ProcessHealth, SystemHealth}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; pub const VERSION: u64 = 1; pub const CLIENT_NAME: &str = "lighthouse"; diff 
--git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index c02380c9d4..5f0de80d90 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -8,6 +8,5 @@ lighthouse_network = { workspace = true } types = { workspace = true } sysinfo = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" serde_json = { workspace = true } parking_lot = { workspace = true } diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index b30173eb7e..99f98cf545 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -13,7 +13,6 @@ types = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" serde_yaml = { workspace = true } safe_arith = { workspace = true } -superstruct = { workspace = true } \ No newline at end of file +superstruct = { workspace = true } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 98d43e4850..ebb639819d 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -5,7 +5,7 @@ mod votes; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 7b6afb94f5..6b88e5e426 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,6 +1,6 @@ use crate::error::InvalidBestNodeInfo; use crate::{error::Error, Block, ExecutionStatus, JustifiedBalances}; -use 
serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz::Encode; use ssz_derive::{Decode, Encode}; diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 5911e50fcd..6fc677073e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -7,7 +7,7 @@ use crate::{ ssz_container::SszContainer, JustifiedBalances, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::{ diff --git a/consensus/ssz_types/src/serde_utils/list_of_hex_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/list_of_hex_fixed_vec.rs deleted file mode 100644 index b93c869067..0000000000 --- a/consensus/ssz_types/src/serde_utils/list_of_hex_fixed_vec.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Serialize `VariableList, N>` as list of 0x-prefixed hex string. 
-use crate::{FixedVector, VariableList}; -use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -#[derive(Deserialize)] -#[serde(transparent)] -pub struct WrappedListOwned( - #[serde(with = "crate::serde_utils::hex_fixed_vec")] FixedVector, -); - -#[derive(Serialize)] -#[serde(transparent)] -pub struct WrappedListRef<'a, N: Unsigned>( - #[serde(with = "crate::serde_utils::hex_fixed_vec")] &'a FixedVector, -); - -pub fn serialize( - list: &VariableList, N>, - serializer: S, -) -> Result -where - S: Serializer, - M: Unsigned, - N: Unsigned, -{ - let mut seq = serializer.serialize_seq(Some(list.len()))?; - for bytes in list { - seq.serialize_element(&WrappedListRef(bytes))?; - } - seq.end() -} - -#[derive(Default)] -pub struct Visitor { - _phantom_m: PhantomData, - _phantom_n: PhantomData, -} - -impl<'a, M, N> serde::de::Visitor<'a> for Visitor -where - M: Unsigned, - N: Unsigned, -{ - type Value = VariableList, N>; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of 0x-prefixed hex bytes") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut list: VariableList, N> = <_>::default(); - - while let Some(val) = seq.next_element::>()? 
{ - list.push(val.0).map_err(|e| { - serde::de::Error::custom(format!("failed to push value to list: {:?}.", e)) - })?; - } - - Ok(list) - } -} - -pub fn deserialize<'de, D, M, N>( - deserializer: D, -) -> Result, N>, D::Error> -where - D: Deserializer<'de>, - M: Unsigned, - N: Unsigned, -{ - deserializer.deserialize_seq(Visitor::default()) -} diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index a520eee09e..fa0624bc36 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -24,7 +24,6 @@ rayon = { workspace = true } rand = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true, features = ["rc"] } -serde_derive = "1.0.116" slog = { workspace = true } ethereum_ssz = { workspace = true, features = ["arbitrary"] } ethereum_ssz_derive = { workspace = true } diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 20d66cd447..ac31e78cb7 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -3,7 +3,7 @@ use super::{ Signature, SignedRoot, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 5c333e0d45..ac4a583cbb 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -1,6 +1,6 @@ use derivative::Derivative; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 286502b449..7578981f51 100644 --- a/consensus/types/src/attestation_data.rs +++ 
b/consensus/types/src/attestation_data.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::{Checkpoint, Hash256, SignedRoot, Slot}; use crate::slot_data::SlotData; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index 93a4c147b6..22b03dda61 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -1,5 +1,5 @@ use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; #[derive(arbitrary::Arbitrary, Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index c563495074..c2bbea637e 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, EthSpec, IndexedAttestation}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 98a0fd204d..a8e013165a 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -6,7 +6,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::Signature; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index d6cfe7d345..ca7dcfe5dd 100644 --- 
a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::{FixedVector, VariableList}; use std::marker::PhantomData; diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index f2ef0a3dcc..689f1a28b0 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index ec63886cd4..415366d79a 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -10,7 +10,7 @@ use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use metastruct::{metastruct, NumFields}; pub use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector}; @@ -2300,80 +2300,3 @@ impl ForkVersionDeserialize for BeaconState { )) } } - -/// This module can be used to encode and decode a `BeaconState` the same way it -/// would be done if we had tagged the superstruct enum with -/// `#[ssz(enum_behaviour = "union")]` -/// This should _only_ be used for *some* cases to store these objects in the -/// database and _NEVER_ for encoding / decoding states sent over the network! 
-pub mod ssz_tagged_beacon_state { - use super::*; - pub mod encode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn ssz_bytes_len(state: &BeaconState) -> usize { - state - .ssz_bytes_len() - .checked_add(1) - .expect("encoded length must be less than usize::max") - } - - pub fn ssz_append(state: &BeaconState, buf: &mut Vec) { - let fork_name = state.fork_name_unchecked(); - fork_name.ssz_append(buf); - state.ssz_append(buf); - } - - pub fn as_ssz_bytes(state: &BeaconState) -> Vec { - let mut buf = vec![]; - ssz_append(state, &mut buf); - - buf - } - } - - pub mod decode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn from_ssz_bytes(bytes: &[u8]) -> Result, DecodeError> { - let fork_byte = bytes - .first() - .copied() - .ok_or(DecodeError::OutOfBoundsByte { i: 0 })?; - let body = bytes - .get(1..) - .ok_or(DecodeError::OutOfBoundsByte { i: 1 })?; - match ForkName::from_ssz_bytes(&[fork_byte])? 
{ - ForkName::Base => Ok(BeaconState::Base(BeaconStateBase::from_ssz_bytes(body)?)), - ForkName::Altair => Ok(BeaconState::Altair(BeaconStateAltair::from_ssz_bytes( - body, - )?)), - ForkName::Merge => Ok(BeaconState::Merge(BeaconStateMerge::from_ssz_bytes(body)?)), - ForkName::Capella => Ok(BeaconState::Capella(BeaconStateCapella::from_ssz_bytes( - body, - )?)), - ForkName::Deneb => Ok(BeaconState::Deneb(BeaconStateDeneb::from_ssz_bytes(body)?)), - } - } - } -} diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 170c1446fb..741fd89e48 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -5,7 +5,7 @@ use crate::*; use core::num::NonZeroUsize; use derivative::Derivative; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{four_byte_option_impl, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::ops::Range; diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs index c0a8c29052..d0e0010b93 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -8,7 +8,7 @@ use crate::{ }; use arbitrary::Arbitrary; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use strum::{Display, EnumString, EnumVariantNames}; /// This cache keeps track of the accumulated target attestation balance for the current & previous diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index e4da89ca2f..a5fbc3206a 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -3,7 +3,7 @@ use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot}; use derivative::Derivative; use 
kzg::{Kzg, KzgCommitment, KzgPreset, KzgProof, BYTES_PER_FIELD_ELEMENT}; use rand::Rng; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::{FixedVector, VariableList}; diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index 3ed9ee9255..baa65f5172 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::PublicKeyBytes; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index d621c38914..910ef97c71 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -6,8 +6,7 @@ use crate::{ }; use bls::PublicKeyBytes; use bls::Signature; -use serde::Deserializer; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::Encode; use superstruct::superstruct; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 9fb38fb2f4..e7c1490c33 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -2,8 +2,7 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::*; use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; -use serde::{Deserializer, Serialize, Serializer}; -use serde_derive::Deserialize; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_utils::quoted_u64::MaybeQuoted; use std::fs::File; use std::path::Path; @@ -1042,7 +1041,7 @@ pub struct Config { ejection_balance: u64, #[serde(with = 
"serde_utils::quoted_u64")] min_per_epoch_churn_limit: u64, - #[serde(default)] + #[serde(default = "default_max_per_epoch_activation_churn_limit")] #[serde(with = "serde_utils::quoted_u64")] max_per_epoch_activation_churn_limit: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -1129,6 +1128,10 @@ fn default_subnets_per_node() -> u8 { 2u8 } +const fn default_max_per_epoch_activation_churn_limit() -> u64 { + 8 +} + const fn default_gossip_max_size() -> u64 { 10485760 } diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/checkpoint.rs index e84798f6f7..044fc57f22 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/checkpoint.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::{Epoch, Hash256}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 911aa585d7..bd2efd3d9e 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -3,7 +3,7 @@ use crate::{ DenebPreset, EthSpec, ForkName, }; use maplit::hashmap; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use superstruct::superstruct; diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 7e757f89b1..aba98c92b7 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -3,7 +3,7 @@ use super::{ SyncSelectionProof, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/deposit.rs 
b/consensus/types/src/deposit.rs index af9f621152..eaad96af3b 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::U33, FixedVector}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index d75643f659..e074ffdfaa 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::{PublicKeyBytes, SignatureBytes}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index 1096cfaa28..e5c666df82 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::PublicKeyBytes; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index 12e81d0028..d4dcdb2eda 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -1,7 +1,7 @@ use crate::*; use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use int_to_bytes::int_to_bytes32; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use test_utils::TestRandom; diff --git a/consensus/types/src/enr_fork_id.rs 
b/consensus/types/src/enr_fork_id.rs index 409383c904..3ae7c39cfe 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::Epoch; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index d8f476b99b..e2c4e511ef 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -1,7 +1,7 @@ use super::Hash256; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index cde438e9e3..3ad6781941 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -2,7 +2,7 @@ use crate::*; use kzg::{BlobTrait, KzgPreset, MainnetKzgPreset, MinimalKzgPreset}; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U16, U16777216, U2, U2048, U256, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, U8192, diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index 8c4d677517..b2401f0c0f 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::Hash256; use derivative::Derivative; use rand::RngCore; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; diff --git 
a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 94f34d8654..ea591eb749 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index f174058b28..915d5aab34 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 4650881f72..b23113f436 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::Epoch; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index bf9c48cd7e..52ce57a2a9 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Hash256, SignedRoot}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index e87b6d61ae..6523b2a678 100644 
--- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -1,5 +1,5 @@ use crate::{ChainSpec, Epoch}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::{self, Display, Formatter}; diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index adf401eddb..7eb129e1b6 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; + +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs index 84d87b85fd..dcc387d3d6 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/historical_summary.rs @@ -4,7 +4,7 @@ use crate::{BeaconState, EthSpec, Hash256}; use cached_tree_hash::Error; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; use compare_fields_derive::CompareFields; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use test_random_derive::TestRandom; diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index c59cbef307..c2d48d7242 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, EthSpec, VariableList}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::hash::{Hash, Hasher}; diff --git 
a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index e99afd45a1..96d85546a6 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,6 +1,6 @@ use super::{BeaconBlockHeader, BeaconState, EthSpec, Hash256, SyncCommittee}; use crate::{light_client_update::*, test_utils::TestRandom}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::FixedVector; use std::sync::Arc; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index c8b2ca9750..7b99817f15 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -3,7 +3,7 @@ use super::{ Slot, SyncAggregate, }; use crate::{light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 7a39bd9ac1..fbb0558ece 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -2,7 +2,7 @@ use super::{BeaconBlockHeader, EthSpec, Slot, SyncAggregate}; use crate::{ light_client_update::Error, test_utils::TestRandom, BeaconState, ChainSpec, SignedBeaconBlock, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index f3d6433246..0a5b9f0f52 100644 --- a/consensus/types/src/light_client_update.rs +++ 
b/consensus/types/src/light_client_update.rs @@ -1,7 +1,7 @@ use super::{BeaconBlockHeader, EthSpec, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::{beacon_state, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::{ typenum::{U5, U6}, diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index 4f170a60be..e94e56f0cd 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -1,6 +1,6 @@ use crate::{consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom, Hash256}; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use test_random_derive::TestRandom; use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 88db0ec4d3..d25a6987c0 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{AttestationData, BitList, EthSpec}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index a1c1e7024c..63a372ea1c 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -1,5 +1,5 @@ use crate::{ChainSpec, Epoch, EthSpec, Unsigned}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; /// Value-level representation of an Ethereum consensus "preset". 
/// diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index 1ac2464a47..ee55d62c20 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::SignedBeaconBlockHeader; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/shuffling_id.rs index 120d744a5e..a5bdc86673 100644 --- a/consensus/types/src/shuffling_id.rs +++ b/consensus/types/src/shuffling_id.rs @@ -1,5 +1,5 @@ use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::hash::Hash; diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index 6d86c05634..10010073e5 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -3,7 +3,7 @@ use super::{ SelectionProof, Signature, SignedRoot, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 2234e38f08..11009457fd 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,7 +1,7 @@ use crate::*; use bls::Signature; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::fmt; use superstruct::superstruct; diff --git a/consensus/types/src/signed_beacon_block_header.rs 
b/consensus/types/src/signed_beacon_block_header.rs index c265eded1d..3d4269a2ce 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -2,7 +2,7 @@ use crate::{ test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, Signature, SignedRoot, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_blob.rs b/consensus/types/src/signed_blob.rs index b8fab8e122..3c560823ce 100644 --- a/consensus/types/src/signed_blob.rs +++ b/consensus/types/src/signed_blob.rs @@ -5,7 +5,7 @@ use crate::{ }; use bls::PublicKey; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use std::marker::PhantomData; diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index 2b17095ae7..2a4ecdf438 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::Signature; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/signed_contribution_and_proof.rs index 4cb3588433..6cb45ac8e6 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/signed_contribution_and_proof.rs @@ -3,7 +3,7 @@ use super::{ SignedRoot, SyncCommitteeContribution, SyncSelectionProof, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use 
serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 3392826a62..30eda11791 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, VoluntaryExit}; use bls::Signature; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/signing_data.rs index b80d4a40d5..f30d5fdfcb 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/signing_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::Hash256; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 991261d16a..ec659d1dbb 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -15,7 +15,7 @@ use crate::{ChainSpec, SignedRoot}; use rand::RngCore; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; use std::hash::Hash; diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 415d6a1404..2752e31092 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,7 +1,7 @@ //! Identifies each shard by an integer identifier. 
use crate::{AttestationData, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; use swap_or_not_shuffle::compute_shuffled_index; diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 300c86fc0f..bb00c4aa20 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -3,7 +3,7 @@ use crate::test_utils::TestRandom; use crate::{AggregateSignature, BitVector, EthSpec, SyncCommitteeContribution}; use derivative::Derivative; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index b101068123..2b60d01b8e 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{SignedRoot, Slot}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index 2ae645604d..dcc2d2952d 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -3,7 +3,7 @@ use crate::typenum::Unsigned; use crate::{EthSpec, SyncSubnetId}; use bls::PublicKeyBytes; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use test_random_derive::TestRandom; diff --git 
a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index 425f8f116d..b8ee5c2e36 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -2,7 +2,7 @@ use super::{AggregateSignature, EthSpec, SignedRoot}; use crate::slot_data::SlotData; use crate::{test_utils::TestRandom, BitVector, Hash256, Slot, SyncCommitteeMessage}; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index d0301cdf63..d7d309cd56 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::{ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, Slot}; use crate::slot_data::SlotData; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_duty.rs index e3ffe62bfd..1058b9d3b4 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_duty.rs @@ -1,7 +1,7 @@ use crate::{EthSpec, SyncCommittee, SyncSubnetId}; use bls::PublicKeyBytes; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashSet; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 5af756ae01..5605482929 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -2,7 +2,7 @@ use 
crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::EthSpec; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_types::typenum::Unsigned; use std::collections::HashSet; use std::fmt::{self, Display}; diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index eb890ac715..62a7409c07 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -3,7 +3,7 @@ use crate::{ PublicKeyBytes, }; use arbitrary::Arbitrary; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; use test_random_derive::TestRandom; diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 446029b560..a24f7376a1 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -3,7 +3,7 @@ use crate::{ SignedVoluntaryExit, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index eed7c7e277..3e61156554 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index c801f27bf0..226251043f 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -10,7 +10,6 @@ tree_hash = { workspace = true } milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.5.1", optional = true } rand = { workspace = true } serde = { workspace = true } 
-serde_derive = "1.0.116" ethereum_serde_utils = { workspace = true } hex = { workspace = true } ethereum_hashing = { workspace = true } diff --git a/crypto/bls/src/zeroize_hash.rs b/crypto/bls/src/zeroize_hash.rs index 41136f97a7..e346f456d1 100644 --- a/crypto/bls/src/zeroize_hash.rs +++ b/crypto/bls/src/zeroize_hash.rs @@ -1,5 +1,5 @@ use super::SECRET_KEY_BYTES_LEN; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use zeroize::Zeroize; /// Provides a wrapper around a `[u8; SECRET_KEY_BYTES_LEN]` that implements `Zeroize` on `Drop`. diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index b1e9337954..d652ecb4c1 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -7,18 +7,17 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.3" -tree_hash = "0.5.2" -derivative = "2.1.1" -serde = "1.0.116" -serde_derive = "1.0.116" -ethereum_serde_utils = "0.5.0" -hex = "0.4.2" -ethereum_hashing = "1.0.0-beta.2" +arbitrary = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +tree_hash = { workspace = true } +derivative = { workspace = true } +serde = { workspace = true } +ethereum_serde_utils = { workspace = true } +hex = { workspace = true } +ethereum_hashing = { workspace = true } c-kzg = { git = "https://github.com/ethereum/c-kzg-4844", rev = "f5f6f863d475847876a2bd5ee252058d37c3a15d" , features = ["mainnet-spec", "serde"]} c_kzg_min = { package = "c-kzg", git = "https://github.com/ethereum//c-kzg-4844", rev = "f5f6f863d475847876a2bd5ee252058d37c3a15d", features = ["minimal-spec", "serde"], optional = true } -arbitrary = { version = "1.0", features = ["derive"] } [features] # TODO(deneb): enabled by default for convenience, would need more cfg magic to disable diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 
d7870c15bb..410ae8a495 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -2,7 +2,7 @@ mod kzg_commitment; mod kzg_proof; mod trusted_setup; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::fmt::Debug; use std::ops::Deref; use std::str::FromStr; diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index d2a181a1b9..b57e1e9dee 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -19,7 +19,6 @@ futures = { workspace = true } slog-json = "2.3.0" exit-future = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index fc7ab8d52c..8e7c237a36 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -13,7 +13,7 @@ use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; use logging::SSELoggingComponents; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; use std::fs::create_dir_all; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 847a2ea2f6..3e03b18e61 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -866,7 +866,7 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl let id = "bn-1"; let version = "Lighthouse-v2.1.3"; CommandLineTest::new() - .flag("execution-endpoint", Some(execution_endpoint.clone())) + .flag("execution-endpoint", Some(execution_endpoint)) .flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str()) .flag(jwt_id_flag, Some(id)) .flag(jwt_version_flag, Some(version)) 
diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 9df77daa10..87e77494b1 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -23,7 +23,6 @@ parking_lot = { workspace = true } rand = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } -serde_derive = "1.0" slog = { workspace = true } sloggers = { workspace = true } tree_hash = { workspace = true } diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 4deb389124..f3b11cccd7 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -4,7 +4,7 @@ use crate::{ SlasherDB, }; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::borrow::Borrow; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::convert::TryFrom; diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 361621d176..894760d277 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -1,5 +1,5 @@ use crate::Error; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::path::PathBuf; use strum::{Display, EnumString, EnumVariantNames}; use types::{Epoch, EthSpec, IndexedAttestation}; diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index f41b5d381f..2e3b9ad249 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -20,7 +20,6 @@ hex = { workspace = true } kzg = { workspace = true } rayon = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } diff --git a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs index 53387ee4d7..c1085e0702 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use 
crate::impl_bls_load_case; use bls::{AggregateSignature, Signature}; -use serde_derive::Deserialize; +use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct BlsAggregateSigs { diff --git a/testing/ef_tests/src/cases/bls_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_aggregate_verify.rs index e9539dc15e..0e006d95c2 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use types::Hash256; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/bls_batch_verify.rs b/testing/ef_tests/src/cases/bls_batch_verify.rs index de8721d67d..703444c987 100644 --- a/testing/ef_tests/src/cases/bls_batch_verify.rs +++ b/testing/ef_tests/src/cases/bls_batch_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{verify_signature_sets, BlsWrappedSignature, PublicKeyBytes, Signature, SignatureSet}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::borrow::Cow; use std::str::FromStr; use types::Hash256; diff --git a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs index c41fbca393..8783aa141e 100644 --- a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs +++ b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregatePublicKey, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct BlsEthAggregatePubkeys { diff --git a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs index 
80e018459b..0fb3a026cf 100644 --- a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use types::Hash256; diff --git a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs index 608995db9d..dcdc1bd197 100644 --- a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use types::Hash256; diff --git a/testing/ef_tests/src/cases/bls_sign_msg.rs b/testing/ef_tests/src/cases/bls_sign_msg.rs index 53c13b569a..6479fabe42 100644 --- a/testing/ef_tests/src/cases/bls_sign_msg.rs +++ b/testing/ef_tests/src/cases/bls_sign_msg.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::SecretKey; -use serde_derive::Deserialize; +use serde::Deserialize; use types::Hash256; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/bls_verify_msg.rs b/testing/ef_tests/src/cases/bls_verify_msg.rs index ac81c2a9bd..dc918309b5 100644 --- a/testing/ef_tests/src/cases/bls_verify_msg.rs +++ b/testing/ef_tests/src/cases/bls_verify_msg.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{PublicKey, PublicKeyBytes, Signature, SignatureBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use types::Hash256; diff --git a/testing/ef_tests/src/cases/common.rs 
b/testing/ef_tests/src/cases/common.rs index 68fd990f1f..2a7c998758 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -1,4 +1,4 @@ -use serde_derive::Deserialize; +use serde::Deserialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index aa7cd2ee72..b22183c742 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -4,7 +4,7 @@ use crate::case_result::{check_state_diff, compare_beacon_state_results_without_ use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::per_epoch_processing::capella::process_historical_summaries_update; diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index d27fbcd671..bc340fa1cb 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, }; diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index dbf6c70b29..14fe7ef959 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -1,7 +1,7 @@ use super::*; use 
crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; use types::{BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ForkName, Hash256}; diff --git a/testing/ef_tests/src/cases/genesis_validity.rs b/testing/ef_tests/src/cases/genesis_validity.rs index abdc1ed4a7..ec89e0f64b 100644 --- a/testing/ef_tests/src/cases/genesis_validity.rs +++ b/testing/ef_tests/src/cases/genesis_validity.rs @@ -1,6 +1,6 @@ use super::*; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::is_valid_genesis_state; use std::path::Path; use types::{BeaconState, EthSpec, ForkName}; diff --git a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs index ec2f1b1694..72a6052fea 100644 --- a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs +++ b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::blob_to_kzg_commitment; use kzg::KzgCommitment; -use serde_derive::Deserialize; +use serde::Deserialize; use std::marker::PhantomData; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs index 59929678d4..2cec8f1fb3 100644 --- a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::compute_blob_kzg_proof; use kzg::KzgProof; -use serde_derive::Deserialize; +use serde::Deserialize; use std::marker::PhantomData; #[derive(Debug, Clone, 
Deserialize)] diff --git a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs index 49cd2d0b88..0085b8bd29 100644 --- a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::compute_kzg_proof; use kzg::KzgProof; -use serde_derive::Deserialize; +use serde::Deserialize; use std::marker::PhantomData; use std::str::FromStr; use types::Hash256; diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs index a813efdb7f..ff918cac13 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -3,7 +3,7 @@ use crate::case_result::compare_result; use beacon_chain::kzg_utils::validate_blob; use eth2_network_config::get_trusted_setup; use kzg::{Kzg, KzgCommitment, KzgPreset, KzgProof, TrustedSetup}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use std::marker::PhantomData; use types::Blob; @@ -91,7 +91,7 @@ impl Case for KZGVerifyBlobKZGProof { let kzg = get_kzg::()?; let result = parse_input(&self.input).and_then(|(blob, commitment, proof)| { - validate_blob::(&kzg, blob, commitment, proof) + validate_blob::(&kzg, &blob, commitment, proof) .map_err(|e| Error::InternalError(format!("Failed to validate blob: {:?}", e))) }); diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs index 5e402e8da9..43c2c8cf6c 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::validate_blobs; -use serde_derive::Deserialize; +use 
serde::Deserialize; use std::marker::PhantomData; #[derive(Debug, Clone, Deserialize)] @@ -54,7 +54,7 @@ impl Case for KZGVerifyBlobKZGProofBatch { let kzg = get_kzg::()?; let result = parse_input(&self.input).and_then(|(commitments, blobs, proofs)| { - validate_blobs::(&kzg, &commitments, &blobs, &proofs) + validate_blobs::(&kzg, &commitments, blobs.iter().collect(), &proofs) .map_err(|e| Error::InternalError(format!("Failed to validate blobs: {:?}", e))) }); diff --git a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs index aafd999097..cdf0aeb8e3 100644 --- a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::verify_kzg_proof; -use serde_derive::Deserialize; +use serde::Deserialize; use std::marker::PhantomData; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index c5d32be5c0..70c2a1d292 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -1,6 +1,6 @@ use super::*; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::path::Path; use tree_hash::Hash256; use types::{BeaconState, EthSpec, ForkName}; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 66e5241ff8..5b9e2d14d5 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -3,7 +3,7 @@ use crate::bls_setting::BlsSetting; use crate::case_result::{check_state_diff, compare_beacon_state_results_without_caches}; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; -use serde_derive::Deserialize; 
+use serde::Deserialize; use ssz::Decode; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use state_processing::epoch_cache::initialize_epoch_cache; diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index ee0fc265e1..bb41f6fe12 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result_detailed; use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use compare_fields_derive::CompareFields; -use serde_derive::Deserialize; +use serde::Deserialize; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use state_processing::{ diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index 5fe7ec4b5e..5395082a04 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -2,7 +2,7 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::{check_state_diff, compare_beacon_state_results_without_caches}; use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, diff --git a/testing/ef_tests/src/cases/sanity_slots.rs b/testing/ef_tests/src/cases/sanity_slots.rs index 30e3de64c3..7002ed970c 100644 --- a/testing/ef_tests/src/cases/sanity_slots.rs +++ b/testing/ef_tests/src/cases/sanity_slots.rs @@ -2,7 +2,7 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::{check_state_diff, compare_beacon_state_results_without_caches}; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use 
state_processing::per_slot_processing; use types::{BeaconState, EthSpec, ForkName}; diff --git a/testing/ef_tests/src/cases/shuffling.rs b/testing/ef_tests/src/cases/shuffling.rs index b5ce019f5c..e05763c2d8 100644 --- a/testing/ef_tests/src/cases/shuffling.rs +++ b/testing/ef_tests/src/cases/shuffling.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use crate::decode::yaml_decode_file; -use serde_derive::Deserialize; +use serde::Deserialize; use std::marker::PhantomData; use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list}; use types::ForkName; diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 6cda777ef8..4f45e248a8 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -4,8 +4,7 @@ use super::*; use crate::cases::common::{SszStaticType, TestU128, TestU256}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::{log_file_access, snappy_decode_file, yaml_decode_file}; -use serde::{de::Error as SerdeError, Deserializer}; -use serde_derive::Deserialize; +use serde::{de::Error as SerdeError, Deserialize, Deserializer}; use ssz_derive::{Decode, Encode}; use std::path::{Path, PathBuf}; use tree_hash_derive::TreeHash; diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index f5b0e4bad8..cbfaad82cf 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::cases::common::SszStaticType; use crate::decode::{snappy_decode_file, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use ssz::Decode; use tree_hash::TreeHash; use types::{BeaconBlock, BeaconState, ForkName, Hash256, SignedBeaconBlock}; diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 
5c6da900e6..c94ce3a23a 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, diff --git a/testing/execution_engine_integration/src/build_utils.rs b/testing/execution_engine_integration/src/build_utils.rs index 15e7fdc0f1..5d96520660 100644 --- a/testing/execution_engine_integration/src/build_utils.rs +++ b/testing/execution_engine_integration/src/build_utils.rs @@ -66,6 +66,7 @@ pub fn get_latest_release(repo_dir: &Path, branch_name: &str) -> Result Value { "muirGlacierBlock":0, "berlinBlock":0, "londonBlock":0, - "clique": { - "period": 5, - "epoch": 30000 - }, - "terminalTotalDifficulty":0 + "mergeNetsplitBlock": 0, + "shanghaiTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true }, "nonce":"0x42", "timestamp":"0x0", @@ -72,8 +71,10 @@ pub fn nethermind_genesis_json() -> Value { "accountStartNonce": "0x0", "maximumExtraDataSize": "0x20", "minGasLimit": "0x1388", - "networkID": "0x1469ca", - "MergeForkIdTransition": "0x3e8", + "networkID": "0x00146A2E", + "MergeForkIdTransition": "0x0", + "maxCodeSize": "0x6000", + "maxCodeSizeTransition": "0x0", "eip150Transition": "0x0", "eip158Transition": "0x0", "eip160Transition": "0x0", @@ -101,7 +102,15 @@ pub fn nethermind_genesis_json() -> Value { "eip1559Transition": "0x0", "eip3198Transition": "0x0", "eip3529Transition": "0x0", - "eip3541Transition": "0x0" + "eip3541Transition": "0x0", + "eip3540TransitionTimestamp": "0x0", + "eip3651TransitionTimestamp": "0x0", + "eip3670TransitionTimestamp": "0x0", + "eip3675TransitionTimestamp": "0x0", + 
"eip3855TransitionTimestamp": "0x0", + "eip3860TransitionTimestamp": "0x0", + "eip4895TransitionTimestamp": "0x0", + "terminalTotalDifficulty": "0x0" }, "genesis": { "seal": { @@ -112,10 +121,10 @@ pub fn nethermind_genesis_json() -> Value { }, "difficulty": "0x01", "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x0", + "timestamp": "0x63585F88", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "extraData": "", - "gasLimit": "0x1C9C380" + "gasLimit": "0x400000" }, "accounts": { "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { @@ -123,9 +132,9 @@ pub fn nethermind_genesis_json() -> Value { }, "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { "balance": "10000000000000000000000000" - }, + } }, "nodes": [] - } + } ) } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 5c83a97e21..0bd96a5c93 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -3,7 +3,7 @@ use crate::execution_engine::GenericExecutionEngine; use crate::genesis_json::geth_genesis_json; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; -use std::{env, fs::File}; +use std::{env, fs}; use tempfile::TempDir; use unused_port::unused_tcp4_port; @@ -36,6 +36,13 @@ pub fn build(execution_clients_dir: &Path) { }); } +pub fn clean(execution_clients_dir: &Path) { + let repo_dir = execution_clients_dir.join("go-ethereum"); + if let Err(e) = fs::remove_dir_all(repo_dir) { + eprintln!("Error while deleting folder: {}", e); + } +} + /* * Geth-specific Implementation for GenericExecutionEngine */ @@ -60,7 +67,7 @@ impl GenericExecutionEngine for GethEngine { let datadir = TempDir::new().unwrap(); let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(&genesis_json_path).unwrap(); + let mut file = fs::File::create(&genesis_json_path).unwrap(); let json = 
geth_genesis_json(); serde_json::to_writer(&mut file, &json).unwrap(); diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index e46bc13c8d..efb06833f6 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -1,3 +1,5 @@ +#![recursion_limit = "256"] // for inline json + /// This binary runs integration tests between Lighthouse and execution engines. /// /// It will first attempt to build any supported integration clients, then it will run tests. @@ -31,6 +33,7 @@ fn test_geth() { let test_dir = build_utils::prepare_dir(); geth::build(&test_dir); TestRig::new(GethEngine).perform_tests_blocking(); + geth::clean(&test_dir); } fn test_nethermind() { diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 8925f1cc84..aad37c32bd 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -2,7 +2,7 @@ use crate::build_utils; use crate::execution_engine::GenericExecutionEngine; use crate::genesis_json::nethermind_genesis_json; use std::env; -use std::fs::File; +use std::fs; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; use tempfile::TempDir; @@ -11,7 +11,7 @@ use unused_port::unused_tcp4_port; /// We've pinned the Nethermind version since our method of using the `master` branch to /// find the latest tag isn't working. It appears Nethermind don't always tag on `master`. /// We should fix this so we always pull the latest version of Nethermind. 
-const NETHERMIND_BRANCH: &str = "release/1.18.2"; +const NETHERMIND_BRANCH: &str = "release/1.21.0"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { @@ -47,6 +47,12 @@ pub fn build(execution_clients_dir: &Path) { build_utils::check_command_output(build_result(&repo_dir), || { format!("nethermind build failed using release {last_release}") }); + + // Cleanup some disk space by removing nethermind's tests + let tests_dir = execution_clients_dir.join("nethermind/src/tests"); + if let Err(e) = fs::remove_dir_all(tests_dir) { + eprintln!("Error while deleting folder: {}", e); + } } /* @@ -68,7 +74,8 @@ impl NethermindEngine { .join("bin") .join("Release") .join("net7.0") - .join("Nethermind.Runner") + .join("linux-x64") + .join("nethermind") } } @@ -76,7 +83,7 @@ impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { let datadir = TempDir::new().unwrap(); let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(genesis_json_path).unwrap(); + let mut file = fs::File::create(genesis_json_path).unwrap(); let json = nethermind_genesis_json(); serde_json::to_writer(&mut file, &json).unwrap(); datadir diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 2aaff30f57..48195f871d 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -18,7 +18,9 @@ use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, ForkName, FullPayload, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; -const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(30); +const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(60); + +const TEST_FORK: ForkName = ForkName::Capella; struct ExecutionPair { /// The Lighthouse `ExecutionLayer` struct, 
connected to the `execution_engine` via HTTP. @@ -110,7 +112,7 @@ impl TestRig { let (runtime_shutdown, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); - let mut spec = MainnetEthSpec::default_spec(); + let mut spec = TEST_FORK.make_genesis_spec(MainnetEthSpec::default_spec()); spec.terminal_total_difficulty = Uint256::zero(); let fee_recipient = None; @@ -269,12 +271,11 @@ impl TestRig { Slot::new(1), // Insert proposer for the next slot head_root, proposer_index, - // TODO: think about how to test different forks PayloadAttributes::new( timestamp, prev_randao, Address::repeat_byte(42), - None, + Some(vec![]), None, ), ) @@ -314,8 +315,13 @@ impl TestRig { .execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + Some(vec![]), + None, + ); let valid_payload = self .ee_a .execution_layer @@ -324,8 +330,7 @@ impl TestRig { &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: think about how to test other forks - ForkName::Merge, + TEST_FORK, &self.spec, ) .await @@ -456,8 +461,13 @@ impl TestRig { .execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + Some(vec![]), + None, + ); let second_payload = self .ee_a .execution_layer @@ -466,8 +476,7 @@ impl TestRig { &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: think about how to test other forks - ForkName::Merge, + TEST_FORK, &self.spec, ) .await @@ -498,11 +507,15 @@ impl 
TestRig { */ let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); - // TODO: think about how to handle different forks // To save sending proposer preparation data, just set the fee recipient // to the fee recipient configured for EE A. - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + Some(vec![]), + None, + ); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); let validator_index = 0; @@ -536,11 +549,7 @@ impl TestRig { .notify_new_payload(second_payload.clone().try_into().unwrap()) .await .unwrap(); - // TODO: we should remove the `Accepted` status here once Geth fixes it - assert!(matches!( - status, - PayloadStatus::Syncing | PayloadStatus::Accepted - )); + assert!(matches!(status, PayloadStatus::Syncing)); /* * Execution Engine B: @@ -641,11 +650,13 @@ async fn check_payload_reconstruction( .get_engine_capabilities(None) .await .unwrap(); + assert!( // if the engine doesn't have these capabilities, we need to update the client in our tests capabilities.get_payload_bodies_by_hash_v1 && capabilities.get_payload_bodies_by_range_v1, "Testing engine does not support payload bodies methods" ); + let mut bodies = ee .execution_layer .get_payload_bodies_by_hash(vec![payload.block_hash()]) diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 18b71afc36..0b648a8155 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -19,7 +19,6 @@ slot_clock = { workspace = true } types = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } -serde_derive = "1.0.116" bincode = { workspace = true } serde_json = { workspace = true } slog = { workspace = true } diff --git a/validator_client/slashing_protection/Cargo.toml 
b/validator_client/slashing_protection/Cargo.toml index cc90c979b9..3224e61fff 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -16,7 +16,6 @@ rusqlite = { workspace = true } r2d2 = { workspace = true } r2d2_sqlite = "0.21.0" serde = { workspace = true } -serde_derive = "1.0.116" serde_json = { workspace = true } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs index 99d37c38b9..ad5f21e511 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -1,5 +1,5 @@ use crate::InterchangeError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::cmp::max; use std::collections::{HashMap, HashSet}; use std::io; diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index dc828773b9..1bb1fc550b 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -3,7 +3,7 @@ use crate::{ test_utils::{pubkey, DEFAULT_GENESIS_VALIDATORS_ROOT}, SigningRoot, SlashingDatabase, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashSet; use tempfile::tempdir; use types::{Epoch, Hash256, PublicKeyBytes, Slot}; diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 7c662db937..aa5ce6c983 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -8,7 +8,7 @@ use directory::{ }; use eth2::types::Graffiti; use sensitive_url::SensitiveUrl; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; 
diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index a3b3cabccc..9b9105a621 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -21,11 +21,12 @@ use eth2::types::{ }; use futures::{stream, StreamExt}; use parking_lot::RwLock; -use safe_arith::ArithError; +use safe_arith::{ArithError, SafeArith}; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::cmp::min; use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use sync::poll_sync_committee_duties; @@ -33,14 +34,6 @@ use sync::SyncDutiesMap; use tokio::{sync::mpsc::Sender, time::sleep}; use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; -/// Since the BN does not like it when we subscribe to slots that are close to the current time, we -/// will only subscribe to slots which are further than `SUBSCRIPTION_BUFFER_SLOTS` away. -/// -/// This number is based upon `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the -/// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid -/// bringing in the entire crate. -const SUBSCRIPTION_BUFFER_SLOTS: u64 = 2; - /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. const HISTORICAL_DUTIES_EPOCHS: u64 = 2; @@ -62,6 +55,36 @@ const VALIDATOR_METRICS_MIN_COUNT: usize = 64; /// reduces the amount of data that needs to be transferred. const INITIAL_DUTIES_QUERY_SIZE: usize = 1; +/// Offsets from the attestation duty slot at which a subscription should be sent. +const ATTESTATION_SUBSCRIPTION_OFFSETS: [u64; 8] = [3, 4, 5, 6, 7, 8, 16, 32]; + +/// Check that `ATTESTATION_SUBSCRIPTION_OFFSETS` is sorted ascendingly. 
+const _: () = assert!({ + let mut i = 0; + loop { + let prev = if i > 0 { + ATTESTATION_SUBSCRIPTION_OFFSETS[i - 1] + } else { + 0 + }; + let curr = ATTESTATION_SUBSCRIPTION_OFFSETS[i]; + if curr < prev { + break false; + } + i += 1; + if i == ATTESTATION_SUBSCRIPTION_OFFSETS.len() { + break true; + } + } +}); +/// Since the BN does not like it when we subscribe to slots that are close to the current time, we +/// will only subscribe to slots which are further than 2 slots away. +/// +/// This number is based upon `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the +/// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid +/// bringing in the entire crate. +const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > 2); + #[derive(Debug)] pub enum Error { UnableToReadSlotClock, @@ -84,6 +107,16 @@ pub struct DutyAndProof { pub duty: AttesterData, /// This value is only set to `Some` if the proof indicates that the validator is an aggregator. pub selection_proof: Option, + /// Track which slots we should send subscriptions at for this duty. + /// + /// This value is updated after each subscription is successfully sent. + pub subscription_slots: Arc, +} + +/// Tracker containing the slots at which an attestation subscription should be sent. +pub struct SubscriptionSlots { + /// Pairs of `(slot, already_sent)` in slot-descending order. + slots: Vec<(Slot, AtomicBool)>, } impl DutyAndProof { @@ -111,17 +144,55 @@ impl DutyAndProof { } })?; + let subscription_slots = SubscriptionSlots::new(duty.slot); + Ok(Self { duty, selection_proof, + subscription_slots, }) } /// Create a new `DutyAndProof` with the selection proof waiting to be filled in. 
pub fn new_without_selection_proof(duty: AttesterData) -> Self { + let subscription_slots = SubscriptionSlots::new(duty.slot); Self { duty, selection_proof: None, + subscription_slots, + } + } +} + +impl SubscriptionSlots { + fn new(duty_slot: Slot) -> Arc { + let slots = ATTESTATION_SUBSCRIPTION_OFFSETS + .into_iter() + .filter_map(|offset| duty_slot.safe_sub(offset).ok()) + .map(|scheduled_slot| (scheduled_slot, AtomicBool::new(false))) + .collect(); + Arc::new(Self { slots }) + } + + /// Return `true` if we should send a subscription at `slot`. + fn should_send_subscription_at(&self, slot: Slot) -> bool { + // Iterate slots from smallest to largest looking for one that hasn't been completed yet. + self.slots + .iter() + .rev() + .any(|(scheduled_slot, already_sent)| { + slot >= *scheduled_slot && !already_sent.load(Ordering::Relaxed) + }) + } + + /// Update our record of subscribed slots to account for successful subscription at `slot`. + fn record_successful_subscription_at(&self, slot: Slot) { + for (scheduled_slot, already_sent) in self.slots.iter().rev() { + if slot >= *scheduled_slot { + already_sent.store(true, Ordering::Relaxed); + } else { + break; + } } } } @@ -574,8 +645,24 @@ async fn poll_beacon_attesters( let subscriptions_timer = metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::SUBSCRIPTIONS]); - // This vector is likely to be a little oversized, but it won't reallocate. - let mut subscriptions = Vec::with_capacity(local_pubkeys.len() * 2); + // This vector is intentionally oversized by 10% so that it won't reallocate. + // Each validator has 2 attestation duties occurring in the current and next epoch, for which + they must send `ATTESTATION_SUBSCRIPTION_OFFSETS.len()` subscriptions.
These subscription + // slots are approximately evenly distributed over the two epochs, usually with a slight lag + // that balances out (some subscriptions for the current epoch were sent in the previous, and + // some subscriptions for the next next epoch will be sent in the next epoch but aren't included + // in our calculation). We cancel the factor of 2 from the formula for simplicity. + let overallocation_numerator = 110; + let overallocation_denominator = 100; + let num_expected_subscriptions = overallocation_numerator + * std::cmp::max( + 1, + local_pubkeys.len() * ATTESTATION_SUBSCRIPTION_OFFSETS.len() + / E::slots_per_epoch() as usize, + ) + / overallocation_denominator; + let mut subscriptions = Vec::with_capacity(num_expected_subscriptions); + let mut subscription_slots_to_confirm = Vec::with_capacity(num_expected_subscriptions); // For this epoch and the next epoch, produce any beacon committee subscriptions. // @@ -588,10 +675,10 @@ async fn poll_beacon_attesters( .read() .iter() .filter_map(|(_, map)| map.get(epoch)) - // The BN logs a warning if we try and subscribe to current or near-by slots. Give it a - // buffer. .filter(|(_, duty_and_proof)| { - current_slot + SUBSCRIPTION_BUFFER_SLOTS < duty_and_proof.duty.slot + duty_and_proof + .subscription_slots + .should_send_subscription_at(current_slot) }) .for_each(|(_, duty_and_proof)| { let duty = &duty_and_proof.duty; @@ -603,7 +690,8 @@ async fn poll_beacon_attesters( committees_at_slot: duty.committees_at_slot, slot: duty.slot, is_aggregator, - }) + }); + subscription_slots_to_confirm.push(duty_and_proof.subscription_slots.clone()); }); } @@ -632,6 +720,16 @@ async fn poll_beacon_attesters( "Failed to subscribe validators"; "error" => %e ) + } else { + // Record that subscriptions were successfully sent. 
+ debug!( + log, + "Broadcast attestation subscriptions"; + "count" => subscriptions.len(), + ); + for subscription_slots in subscription_slots_to_confirm { + subscription_slots.record_successful_subscription_at(current_slot); + } } } @@ -1200,3 +1298,67 @@ async fn notify_block_production_service( }; } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn subscription_slots_exact() { + for duty_slot in [ + Slot::new(32), + Slot::new(47), + Slot::new(99), + Slot::new(1002003), + ] { + let subscription_slots = SubscriptionSlots::new(duty_slot); + + // Run twice to check idempotence (subscription slots shouldn't be marked as done until + // we mark them manually). + for _ in 0..2 { + for offset in ATTESTATION_SUBSCRIPTION_OFFSETS { + assert!(subscription_slots.should_send_subscription_at(duty_slot - offset)); + } + } + + // Mark each slot as complete and check that all prior slots are still marked + // incomplete. + for (i, offset) in ATTESTATION_SUBSCRIPTION_OFFSETS + .into_iter() + .rev() + .enumerate() + { + subscription_slots.record_successful_subscription_at(duty_slot - offset); + for lower_offset in ATTESTATION_SUBSCRIPTION_OFFSETS + .into_iter() + .rev() + .skip(i + 1) + { + assert!(lower_offset < offset); + assert!( + subscription_slots.should_send_subscription_at(duty_slot - lower_offset) + ); + } + } + } + } + #[test] + fn subscription_slots_mark_multiple() { + for (i, offset) in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter().enumerate() { + let duty_slot = Slot::new(64); + let subscription_slots = SubscriptionSlots::new(duty_slot); + + subscription_slots.record_successful_subscription_at(duty_slot - offset); + + // All past offsets (earlier slots) should be marked as complete. 
+ for (j, other_offset) in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter().enumerate() { + let past = j >= i; + assert_eq!(other_offset >= offset, past); + assert_eq!( + subscription_slots.should_send_subscription_at(duty_slot - other_offset), + !past + ); + } + } + } +} diff --git a/validator_client/src/graffiti_file.rs b/validator_client/src/graffiti_file.rs index 5c1f84e10b..29da3dca5a 100644 --- a/validator_client/src/graffiti_file.rs +++ b/validator_client/src/graffiti_file.rs @@ -1,4 +1,4 @@ -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::fs::File; use std::io::{prelude::*, BufReader};