diff --git a/Cargo.lock b/Cargo.lock index 2704c93f38..dbf4ca7e44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -90,6 +90,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + [[package]] name = "aes" version = "0.7.5" @@ -99,7 +109,7 @@ dependencies = [ "cfg-if", "cipher 0.3.0", "cpufeatures", - "ctr", + "ctr 0.8.0", "opaque-debug", ] @@ -116,38 +126,42 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.9.4" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" +checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" dependencies = [ - "aead", + "aead 0.4.3", "aes 0.7.5", "cipher 0.3.0", - "ctr", - "ghash", + "ctr 0.7.0", + "ghash 0.4.4", + "subtle", +] + +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead 0.5.2", + "aes 0.8.3", + "cipher 0.4.4", + "ctr 0.9.2", + "ghash 0.5.0", "subtle", ] [[package]] name = "ahash" -version = "0.7.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom", - "once_cell", - "version_check", -] - -[[package]] -name = "ahash" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ 
-196,15 +210,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "arbitrary" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2e1373abdaa212b704512ec2bd8b26bd0b7d5c3f70117411a5d9a451383c859" +checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" dependencies = [ "derive_arbitrary", ] @@ -282,43 +296,55 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" [[package]] -name = "async-io" -version = "1.13.0" +name = "async-channel" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-io" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" dependencies = [ "async-lock", - "autocfg 1.1.0", "cfg-if", "concurrent-queue", + "futures-io", "futures-lite", - "log", "parking", "polling", - "rustix 0.37.26", + "rustix 0.38.28", "slab", - "socket2 0.4.9", - "waker-fn", + "tracing", + "windows-sys 0.52.0", ] [[package]] name = "async-lock" -version = "2.8.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" dependencies = [ - 
"event-listener", + "event-listener 4.0.3", + "event-listener-strategy", + "pin-project-lite", ] [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -332,19 +358,6 @@ dependencies = [ "rustc_version", ] -[[package]] -name = "asynchronous-codec" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" -dependencies = [ - "bytes", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite", -] - [[package]] name = "asynchronous-codec" version = "0.7.0" @@ -364,7 +377,7 @@ version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" dependencies = [ - "http", + "http 0.2.11", "log", "url", "wildmatch", @@ -376,7 +389,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" dependencies = [ - "http", + "http 0.2.11", "log", "url", ] @@ -421,18 +434,19 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.20" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "d09dbe0e490df5da9d69b36dca48a76635288a82f92eca90024883a56202026d" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.1.0", + 
"hyper-util", "itoa", "matchit", "memchr", @@ -449,23 +463,28 @@ dependencies = [ "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "e87c8503f93e6d144ee5690907ba22db7ba79ab001a932ab99034f0fe836b3df" dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -509,9 +528,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "c79fed4cdb43e993fcdadc7e58a09fd0e3e649c4436fa11da71c9f1f3ee7feb9" [[package]] name = "base64ct" @@ -547,7 +566,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "logging", - "lru 0.7.8", + "lru", "maplit", "merkle_proof", "oneshot_broadcast", @@ -599,7 +618,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper", + "hyper 1.1.0", "lighthouse_network", "lighthouse_version", "monitoring_api", @@ -689,7 +708,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.48", "which", ] @@ -944,9 +963,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" +checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" dependencies = [ "serde", ] @@ -998,25 +1017,24 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 
[[package]] name = "chacha20" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher 0.4.4", "cpufeatures", - "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead", + "aead 0.5.2", "chacha20", - "cipher 0.3.0", + "cipher 0.4.4", "poly1305", "zeroize", ] @@ -1050,13 +1068,14 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -1172,18 +1191,18 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" dependencies = [ "crossbeam-utils", ] [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "constant_time_eq" @@ 
-1199,9 +1218,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -1209,9 +1228,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "core2" @@ -1224,9 +1243,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -1278,46 +1297,37 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = 
"0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg 1.1.0", - "cfg-if", "crossbeam-utils", - "memoffset 0.9.0", - "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -1339,9 +1349,9 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.5.3" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1356,6 +1366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core 0.6.4", "typenum", ] @@ -1371,9 +1382,9 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.11.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" +checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ "generic-array", "subtle", @@ -1400,6 +1411,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "ctr" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" +dependencies = [ + "cipher 0.3.0", +] + [[package]] name 
= "ctr" version = "0.8.0" @@ -1410,13 +1430,22 @@ dependencies = [ ] [[package]] -name = "ctrlc" -version = "3.4.1" +name = "ctr" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e95fbd621905b854affdc67943b043a0fbb6ed7385fd5a25650d19a8a6cfdf" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher 0.4.4", +] + +[[package]] +name = "ctrlc" +version = "3.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b467862cc8610ca6fc9a1532d7777cee0804e678ab45410897b9396495994a0b" dependencies = [ "nix 0.27.1", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1430,7 +1459,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms 3.1.2", + "platforms 3.3.0", "rustc_version", "subtle", "zeroize", @@ -1438,13 +1467,13 @@ dependencies = [ [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -1504,15 +1533,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "data-encoding-macro" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" +checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" dependencies = [ "data-encoding", 
"data-encoding-macro-internal", @@ -1520,9 +1549,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" +checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1561,7 +1590,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" dependencies = [ "futures", - "tokio-util 0.7.9", + "tokio-util 0.7.10", ] [[package]] @@ -1615,9 +1644,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -1635,13 +1664,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -1659,9 +1688,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.1.3" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2268a214a6f118fce1838edba3d1561cf0e78d8de785475957a580a7f8c69d33" +checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" dependencies = [ "bitflags 2.4.1", "byteorder", @@ -1680,7 +1709,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -1700,7 
+1729,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -1776,33 +1805,30 @@ dependencies = [ [[package]] name = "discv5" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c05fa26996c6141f78ac4fafbe297a7fa69690565ba4e0d1f2e60bde5ce501" +version = "0.4.0" +source = "git+https://github.com/sigp/discv5?rev=dbb4a718cd32eaed8127c3c8241bfd0fde9eb908#dbb4a718cd32eaed8127c3c8241bfd0fde9eb908" dependencies = [ "aes 0.7.5", - "aes-gcm", + "aes-gcm 0.9.2", "arrayvec", "delay_map", "enr", "fnv", "futures", - "hashlink 0.7.0", + "hashlink", "hex", "hkdf", "lazy_static", - "libp2p-core 0.40.1", - "libp2p-identity", - "lru 0.7.8", + "libp2p", + "lru", "more-asserts", "parking_lot 0.11.2", "rand 0.8.5", "rlp", "smallvec", - "socket2 0.4.9", + "socket2 0.4.10", "tokio", "tracing", - "tracing-subscriber", "uint", "zeroize", ] @@ -1815,7 +1841,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -1844,16 +1870,16 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.16.8" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ "der 0.7.8", "digest 0.10.7", - "elliptic-curve 0.13.6", + "elliptic-curve 0.13.8", "rfc6979 0.4.0", - "signature 2.1.0", - "spki 0.7.2", + "signature 2.2.0", + "spki 0.7.3", ] [[package]] @@ -1863,20 +1889,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8 0.10.2", - "signature 2.1.0", + 
"signature 2.2.0", ] [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", "sha2 0.10.8", + "subtle", "zeroize", ] @@ -1942,12 +1969,12 @@ dependencies = [ [[package]] name = "elliptic-curve" -version = "0.13.6" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97ca172ae9dc9f9b779a6e3a65d308f2af74e5b8c921299075bdb4a0370e914" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct 0.2.0", - "crypto-bigint 0.5.3", + "crypto-bigint 0.5.5", "digest 0.10.7", "ff 0.13.0", "generic-array", @@ -1971,15 +1998,15 @@ dependencies = [ [[package]] name = "enr" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" +checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" dependencies = [ - "base64 0.21.5", + "base64 0.21.6", "bytes", "ed25519-dalek", "hex", - "k256 0.13.1", + "k256 0.13.3", "log", "rand 0.8.5", "rlp", @@ -1997,7 +2024,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -2061,12 +2088,12 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2366,9 +2393,9 @@ dependencies = [ [[package]] name = "ethereum_serde_utils" 
-version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f8cb04ea380a33e9c269fa5f8df6f2d63dee19728235f3e639e7674e038686a" +checksum = "de4d5951468846963c24e8744c133d44f39dff2cd3a233f6be22b370d08a524f" dependencies = [ "ethereum-types 0.14.1", "hex", @@ -2504,7 +2531,7 @@ dependencies = [ "getrandom", "hashers", "hex", - "http", + "http 0.2.11", "once_cell", "parking_lot 0.11.2", "pin-project", @@ -2529,6 +2556,27 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" @@ -2578,7 +2626,7 @@ dependencies = [ "kzg", "lazy_static", "lighthouse_metrics", - "lru 0.7.8", + "lru", "parking_lot 0.12.1", "pretty_reqwest_error", "rand 0.8.5", @@ -2615,9 +2663,9 @@ dependencies = [ [[package]] name = "eyre" -version = "0.6.8" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" +checksum = "b6267a1fa6f59179ea4afc8e50fd8612a3cc60bc858f786ff877a4a8cb042799" dependencies = [ "indenter", "once_cell", @@ -2635,15 +2683,6 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" 
-[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - [[package]] name = "fastrand" version = "2.0.1" @@ -2678,9 +2717,9 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" [[package]] name = "field-offset" @@ -2688,7 +2727,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ - "memoffset 0.9.0", + "memoffset", "rustc_version", ] @@ -2780,9 +2819,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -2817,9 +2856,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -2833,8 +2872,7 @@ dependencies = [ [[package]] name = "futures-bounded" version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e1e2774cc104e198ef3d3e1ff4ab40f86fa3245d6cb6a3a46174f21463cee173" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "futures-timer", "futures-util", @@ -2842,9 +2880,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -2852,15 +2890,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -2870,34 +2908,29 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" -version = "1.13.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" dependencies = [ - "fastrand 
1.9.0", "futures-core", - "futures-io", - "memchr", - "parking", "pin-project-lite", - "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -2907,20 +2940,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls", + "rustls 0.21.10", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-ticker" @@ -2941,9 +2974,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -3000,9 +3033,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "js-sys", @@ -3018,35 +3051,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ "opaque-debug", - "polyval", + "polyval 0.5.3", +] + +[[package]] +name = "ghash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +dependencies = [ + "opaque-debug", + "polyval 0.6.1", ] [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "git-version" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6b0decc02f4636b9ccad390dcbe77b722a77efedfa393caf8379a51d5c61899" +checksum = "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19" dependencies = [ "git-version-macro", - "proc-macro-hack", ] [[package]] name = "git-version-macro" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe69f1cbdb6e28af2bac214e943b99ce8a0a06b447d15d3e61161b0423139f3f" +checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ - "proc-macro-hack", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.48", ] [[package]] @@ -3079,20 +3120,39 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.21" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" +checksum = 
"4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http", - "indexmap 1.9.3", + "http 0.2.11", + "indexmap 2.1.0", "slab", "tokio", - "tokio-util 0.7.9", + "tokio-util 0.7.10", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "991910e35c615d8cab86b5ab04be67e6ad24d2bf5f4f11fdbbed26da999bbeab" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 1.0.0", + "indexmap 2.1.0", + "slab", + "tokio", + "tokio-util 0.7.10", "tracing", ] @@ -3117,31 +3177,19 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash 0.7.6", -] - [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.6", -] [[package]] name = "hashbrown" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.3", + "ahash", "allocator-api2", ] @@ -3154,22 +3202,13 @@ dependencies = [ "fxhash", ] -[[package]] -name = "hashlink" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" -dependencies = [ - "hashbrown 0.11.2", -] - [[package]] name = "hashlink" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.1", + "hashbrown 0.14.3", ] [[package]] @@ -3178,10 +3217,10 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.21.5", + "base64 0.21.6", "bytes", "headers-core", - "http", + "http 0.2.11", "httpdate", "mime", "sha1", @@ -3193,7 +3232,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http", + "http 0.2.11", ] [[package]] @@ -3242,7 +3281,7 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.4.0", "ipnet", "once_cell", "rand 0.8.5", @@ -3277,9 +3316,9 @@ dependencies = [ [[package]] name = "hkdf" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac 0.12.1", ] @@ -3300,7 +3339,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ - "crypto-mac 0.11.1", + "crypto-mac 0.11.0", "digest 0.9.0", ] @@ -3326,11 +3365,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3346,9 +3385,20 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.11" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" dependencies = [ "bytes", "fnv", @@ -3357,12 +3407,35 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.11", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.0.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", "pin-project-lite", ] @@ -3389,7 +3462,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "logging", - "lru 0.7.8", + "lru", "network", "operation_pool", "parking_lot 0.12.1", @@ -3454,22 +3527,22 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = 
"bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.22", + "http 0.2.11", + "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -3477,17 +3550,36 @@ dependencies = [ ] [[package]] -name = "hyper-rustls" -version = "0.24.1" +name = "hyper" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.1", + "http 1.0.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", - "rustls", + "http 0.2.11", + "hyper 0.14.28", + "rustls 0.21.10", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", ] [[package]] @@ -3497,24 +3589,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper", + "hyper 0.14.28", "native-tls", "tokio", "tokio-native-tls", ] [[package]] -name = "iana-time-zone" -version = "0.1.58" +name = "hyper-util" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "hyper 
1.1.0", + "pin-project-lite", + "socket2 0.5.5", + "tokio", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -3542,6 +3652,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.6.7" @@ -3555,12 +3675,12 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.7.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" dependencies = [ "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -3575,15 +3695,15 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb892e5777fe09e16f3d44de7802f4daa7267ecbe8c466f19d94e25bb0c303e" +checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ "async-io", "core-foundation", "fnv", "futures", - "if-addrs 0.7.0", + "if-addrs 0.10.2", "ipnet", "log", "rtnetlink", @@ -3615,8 +3735,8 @@ dependencies = [ "attohttpc 0.24.1", "bytes", "futures", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "log", "rand 0.8.5", "tokio", @@ -3639,7 +3759,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.6.8", + "parity-scale-codec 3.6.9", ] [[package]] @@ -3698,12 +3818,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", - "hashbrown 0.14.1", + "hashbrown 0.14.3", ] [[package]] @@ -3770,9 +3890,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "itertools" @@ -3785,9 +3905,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "jemalloc-ctl" @@ -3831,9 +3951,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] @@ -3844,7 +3964,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.5", + "base64 0.21.6", "pem 1.1.1", "ring 0.16.20", "serde", @@ -3867,16 +3987,16 @@ dependencies = [ [[package]] name = 
"k256" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if", - "ecdsa 0.16.8", - "elliptic-curve 0.13.6", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "once_cell", "sha2 0.10.8", - "signature 2.1.0", + "signature 2.2.0", ] [[package]] @@ -3994,9 +4114,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.149" +version = "0.2.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" [[package]] name = "libflate" @@ -4020,12 +4140,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" dependencies = [ "cfg-if", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -4051,9 +4171,8 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1252a34c693386829c34d44ccfbce86679d2a9a2c61f582863649bbf57f26260" +version = "0.54.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "bytes", "either", @@ -4063,7 +4182,7 @@ dependencies = [ "instant", "libp2p-allow-block-list", "libp2p-connection-limits", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -4086,10 +4205,9 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" version = "0.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "libp2p-swarm", "void", @@ -4097,11 +4215,10 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2af4b1e1a1d6c5005a59b42287c0a526bcce94d8d688e2e9233b18eb843ceb4" +version = "0.3.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "libp2p-swarm", "void", @@ -4109,37 +4226,8 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd44289ab25e4c9230d9246c475a22241e301b23e8f4061d3bdef304a1a99713" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "libp2p-identity", - "log", - "multiaddr", - "multihash", - "multistream-select", - "once_cell", - "parking_lot 0.12.1", - "pin-project", - "quick-protobuf", - "rand 0.8.5", - "rw-stream-sink", - "smallvec", - "thiserror", - "unsigned-varint 0.7.2", - "void", -] - -[[package]] -name = "libp2p-core" -version = "0.41.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59c61b924474cf2c7edccca306693e798d797b85d004f4fef5689a7a3e6e8fe5" +version = "0.41.2" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "either", "fnv", @@ -4159,20 +4247,19 @@ dependencies = [ "smallvec", "thiserror", "tracing", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", 
"void", ] [[package]] name = "libp2p-dns" version = "0.41.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d17cbcf7160ff35c3e8e560de4a068fe9d6cb777ea72840e48eb76ff9576c4b6" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "parking_lot 0.12.1", "smallvec", @@ -4181,22 +4268,23 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201f0626acd8985fae7fdd318e86c954574b9eef2e5dec433936a19a0338393d" +version = "0.46.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ - "asynchronous-codec 0.6.2", - "base64 0.21.5", + "async-channel", + "asynchronous-codec", + "base64 0.21.6", "byteorder", "bytes", "either", "fnv", "futures", "futures-ticker", + "futures-timer", "getrandom", "hex_fmt", "instant", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "libp2p-swarm", "prometheus-client", @@ -4207,25 +4295,23 @@ dependencies = [ "sha2 0.10.8", "smallvec", "tracing", - "unsigned-varint 0.7.2", "void", ] [[package]] name = "libp2p-identify" -version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0544703553921214556f7567278b4f00cdd5052d29b0555ab88290cbfe54d81c" +version = "0.44.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ - "asynchronous-codec 0.6.2", + "asynchronous-codec", "either", "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru 0.12.0", + "lru", "quick-protobuf", "quick-protobuf-codec", 
"smallvec", @@ -4236,16 +4322,15 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd6317441f361babc74c2989c6484eb0726045399b6648de039e1805ea96972" +checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" dependencies = [ "asn1_der", "bs58 0.5.0", "ed25519-dalek", "hkdf", "libsecp256k1", - "log", "multihash", "p256", "quick-protobuf", @@ -4253,21 +4338,21 @@ dependencies = [ "sec1 0.7.3", "sha2 0.10.8", "thiserror", + "tracing", "void", "zeroize", ] [[package]] name = "libp2p-mdns" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f273a551ee9d0a79695f75afaeafb1371459dec69c29555e8a73a35608e96a" +version = "0.45.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "data-encoding", "futures", "hickory-proto", "if-watch", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "libp2p-swarm", "rand 0.8.5", @@ -4281,12 +4366,11 @@ dependencies = [ [[package]] name = "libp2p-metrics" version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdac91ae4f291046a3b2660c039a2830c931f84df2ee227989af92f7692d3357" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "futures", "instant", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-gossipsub", "libp2p-identify", "libp2p-identity", @@ -4298,33 +4382,31 @@ dependencies = [ [[package]] name = "libp2p-mplex" version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e895765e27e30217b25f7cb7ac4686dad1ff80bf2fdeffd1d898566900a924" +source = 
"git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ - "asynchronous-codec 0.6.2", + "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "nohash-hasher", "parking_lot 0.12.1", "rand 0.8.5", "smallvec", "tracing", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", ] [[package]] name = "libp2p-noise" version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecd0545ce077f6ea5434bcb76e8d0fe942693b4380aaad0d34a358c2bd05793" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "curve25519-dalek", "futures", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "multiaddr", "multihash", @@ -4343,13 +4425,12 @@ dependencies = [ [[package]] name = "libp2p-plaintext" version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67330af40b67217e746d42551913cfb7ad04c74fa300fb329660a56318590b3f" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ - "asynchronous-codec 0.6.2", + "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "quick-protobuf", "quick-protobuf-codec", @@ -4358,22 +4439,21 @@ dependencies = [ [[package]] name = "libp2p-quic" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02570b9effbc7c33331803104a8e9e53af7f2bdb4a2b61be420d6667545a0f5" +version = "0.10.2" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "bytes", "futures", "futures-timer", "if-watch", - "libp2p-core 
0.41.1", + "libp2p-core", "libp2p-identity", "libp2p-tls", "parking_lot 0.12.1", "quinn", "rand 0.8.5", "ring 0.16.20", - "rustls", + "rustls 0.21.10", "socket2 0.5.5", "thiserror", "tokio", @@ -4382,16 +4462,15 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643ce11d87db56387631c9757b61b83435b434f94dc52ec267c1666e560e78b0" +version = "0.45.0" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", "multistream-select", @@ -4405,27 +4484,25 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b27d257436d01433a21da8da7688c83dba35826726161a328ff0989cd7af2dd" +version = "0.34.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] name = "libp2p-tcp" version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b2460fc2748919adff99ecbc1aab296e4579e41f374fb164149bd2c9e529d4c" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "socket2 0.5.5", "tokio", @@ -4435,17 +4512,16 @@ dependencies = [ [[package]] name = "libp2p-tls" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ce7e3c2e7569d685d08ec795157981722ff96e9e9f9eae75df3c29d02b07a5" +source = 
"git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls", - "rustls-webpki", + "rustls 0.21.10", + "rustls-webpki 0.101.7", "thiserror", "x509-parser", "yasna", @@ -4454,13 +4530,12 @@ dependencies = [ [[package]] name = "libp2p-upnp" version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "963eb8a174f828f6a51927999a9ab5e45dfa9aa2aa5fed99aa65f79de6229464" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core 0.41.1", + "libp2p-core", "libp2p-swarm", "tokio", "tracing", @@ -4469,15 +4544,27 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "751f4778f71bc3db1ccf2451e7f4484463fec7f00c1ac2680e39c8368c23aae8" +version = "0.45.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ + "either", "futures", - "libp2p-core 0.41.1", + "libp2p-core", "thiserror", "tracing", - "yamux", + "yamux 0.12.1", + "yamux 0.13.1", +] + +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.4.1", + "libc", + "redox_syscall 0.4.1", ] [[package]] @@ -4541,9 +4628,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = 
"295c17e837573c8c821dbaeb3cceb3d745ad082f7572191409e69cbc1b3fd050" dependencies = [ "cc", "pkg-config", @@ -4622,7 +4709,7 @@ dependencies = [ "libp2p-mplex", "lighthouse_metrics", "lighthouse_version", - "lru 0.7.8", + "lru", "lru_cache", "parking_lot 0.12.1", "prometheus-client", @@ -4677,15 +4764,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.3.8" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "lmdb-rkv" @@ -4752,20 +4833,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.8" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" +checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" dependencies = [ - "hashbrown 0.12.3", -] - -[[package]] -name = "lru" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efa59af2ddfad1854ae27d75009d538d0998b4b2fd47083e743ac1a10e46c60" -dependencies = [ - "hashbrown 0.14.1", + "hashbrown 0.14.3", ] [[package]] @@ -4786,10 +4858,10 @@ dependencies = [ ] [[package]] -name = "mach" -version = "0.3.2" +name = "mach2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" dependencies = [ "libc", ] @@ -4818,15 +4890,6 @@ version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" -[[package]] -name = "matchers" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" -dependencies = [ - "regex-automata 0.1.10", -] - [[package]] name = "matches" version = "0.1.10" @@ -4862,24 +4925,15 @@ dependencies = [ [[package]] name = "mediatype" -version = "0.19.15" +version = "0.19.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" +checksum = "bf0bc9784973713e4a90d515a4302991ca125a7c4516951cb607f2298cb757e5" [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" - -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg 1.1.0", -] +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memoffset" @@ -5013,9 +5067,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "wasi", @@ -5049,15 +5103,15 @@ dependencies = [ [[package]] name = "more-asserts" -version = "0.2.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" +checksum = 
"1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" [[package]] name = "multiaddr" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92a651988b3ed3ad1bc8c87d016bb92f6f395b84ed1db9b926b32b1fc5a2c8b5" +checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" dependencies = [ "arrayref", "byteorder", @@ -5096,15 +5150,14 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "bytes", "futures", - "log", "pin-project", "smallvec", - "unsigned-varint 0.7.2", + "tracing", + "unsigned-varint 0.8.0", ] [[package]] @@ -5217,7 +5270,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_network", "logging", - "lru 0.7.8", + "lru", "lru_cache", "matches", "num_cpus", @@ -5241,19 +5294,6 @@ dependencies = [ "types", ] -[[package]] -name = "nix" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" -dependencies = [ - "bitflags 1.3.2", - "cc", - "cfg-if", - "libc", - "memoffset 0.6.5", -] - [[package]] name = "nix" version = "0.24.3" @@ -5317,16 +5357,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - [[package]] name = "num-bigint" version = "0.4.4" @@ -5407,9 +5437,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -5425,9 +5455,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oneshot_broadcast" @@ -5475,9 +5505,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" dependencies = [ "bitflags 2.4.1", "cfg-if", @@ -5496,7 +5526,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -5507,18 +5537,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.1.5+3.1.3" +version = "300.2.1+3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "559068e4c12950d7dcaa1857a61725c0d38d4fc03ff8e070ab31a75d6e316491" +checksum = "3fe476c29791a5ca0d1273c697e96085bbabbbea2ef7afd5617e78a4b40332d3" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.93" +version = "0.9.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" +checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" dependencies = [ "cc", "libc", @@ -5550,20 +5580,14 @@ dependencies = [ "types", ] -[[package]] -name = "overload" -version = "0.1.1" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "p256" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa 0.16.8", - "elliptic-curve 0.13.6", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", "sha2 0.10.8", ] @@ -5584,15 +5608,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.8" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f88eaac72ead1b9bd4ce747d577dbd2ad31fb0a56a9a20c611bf27bd1b97fbed" +checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.6.8", + "parity-scale-codec-derive 3.6.9", "serde", ] @@ -5602,7 +5626,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -5610,11 +5634,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.8" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33bdcd446e9400b6ad9fc85b4aea68846c258b07c3efb994679ae82707b133f0" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 1.0.109", @@ -5697,7 +5721,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" dependencies = [ - "crypto-mac 0.11.1", + "crypto-mac 0.11.0", ] [[package]] @@ -5729,11 
+5753,11 @@ dependencies = [ [[package]] name = "pem" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" +checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" dependencies = [ - "base64 0.21.5", + "base64 0.21.6", "serde", ] @@ -5748,9 +5772,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pharos" @@ -5797,7 +5821,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -5829,14 +5853,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der 0.7.8", - "spki 0.7.2", + "spki 0.7.3", ] [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a" [[package]] name = "platforms" @@ -5846,9 +5870,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "platforms" -version = "3.1.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" +checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" [[package]] name = "plotters" @@ -5880,29 +5904,27 @@ dependencies = [ [[package]] name = "polling" -version = 
"2.8.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" dependencies = [ - "autocfg 1.1.0", - "bitflags 1.3.2", "cfg-if", "concurrent-queue", - "libc", - "log", "pin-project-lite", - "windows-sys 0.48.0", + "rustix 0.38.28", + "tracing", + "windows-sys 0.52.0", ] [[package]] name = "poly1305" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.5.1", ] [[package]] @@ -5914,7 +5936,19 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.0", +] + +[[package]] +name = "polyval" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash 0.5.1", ] [[package]] @@ -5923,7 +5957,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" dependencies = [ - "base64 0.21.5", + "base64 0.21.6", "byteorder", "bytes", "fallible-iterator", @@ -5977,21 +6011,21 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] name = 
"primeorder" -version = "0.13.2" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2fcef82c0ec6eefcc179b978446c399b3cdf73c392c35604e399eee6df1ee3" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ - "elliptic-curve 0.13.6", + "elliptic-curve 0.13.8", ] [[package]] @@ -6028,7 +6062,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit", + "toml_edit 0.19.15", +] + +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", ] [[package]] @@ -6055,17 +6098,11 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" - [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" dependencies = [ "unicode-ident", ] @@ -6082,7 +6119,7 @@ dependencies = [ "flate2", "hex", "lazy_static", - "rustix 0.36.16", + "rustix 0.36.17", ] [[package]] @@ -6120,7 +6157,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -6144,16 +6181,16 @@ checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "psutil" -version = "3.2.2" +version = "3.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f866af2b0f8e4b0d2d00aad8a9c5fc48fad33466cd99a64cbb3a4c1505f1a62d" +checksum = "5e617cc9058daa5e1fe5a0d23ed745773a5ee354111dad1ec0235b0cc16b6730" dependencies = [ "cfg-if", "darwin-libproc", "derive_more", "glob", - "mach", - "nix 0.23.2", + "mach2", + "nix 0.24.3", "num_cpus", "once_cell", "platforms 2.0.0", @@ -6178,15 +6215,14 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" +version = "0.3.1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ - "asynchronous-codec 0.6.2", + "asynchronous-codec", "bytes", "quick-protobuf", "thiserror", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", ] [[package]] @@ -6223,7 +6259,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls", + "rustls 0.21.10", "thiserror", "tokio", "tracing", @@ -6231,15 +6267,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" dependencies = [ "bytes", "rand 0.8.5", "ring 0.16.20", "rustc-hash", - "rustls", + "rustls 0.21.10", "slab", "thiserror", "tinyvec", @@ -6261,9 +6297,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -6472,7 +6508,7 @@ version = "0.11.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "pem 3.0.2", + "pem 3.0.3", "ring 0.16.20", "time", "yasna", @@ -6496,15 +6532,6 @@ dependencies = [ "bitflags 1.3.2", ] -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.4.1" @@ -6516,12 +6543,12 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ "getrandom", - "redox_syscall 0.2.16", + "libredox", "thiserror", ] @@ -6533,17 +6560,8 @@ checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", - "regex-syntax 0.8.2", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", + "regex-automata", + "regex-syntax", ] [[package]] @@ -6554,15 +6572,9 @@ checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax", ] -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - [[package]] name = "regex-syntax" version = "0.8.2" @@ -6571,19 +6583,19 @@ checksum = 
"c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ - "base64 0.21.5", + "base64 0.21.6", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.22", + "http 0.2.11", + "http-body 0.4.6", + "hyper 0.14.28", "hyper-rustls", "hyper-tls", "ipnet", @@ -6594,16 +6606,16 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls", - "rustls-pemfile", + "rustls 0.21.10", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", "system-configuration", "tokio", "tokio-native-tls", - "tokio-rustls", - "tokio-util 0.7.9", + "tokio-rustls 0.24.1", + "tokio-util 0.7.10", "tower-service", "url", "wasm-bindgen", @@ -6662,9 +6674,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.5" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", "getrandom", @@ -6744,7 +6756,7 @@ dependencies = [ "bitflags 1.3.2", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.4", + "hashlink", "libsqlite3-sys", "smallvec", ] @@ -6787,9 +6799,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.16" +version = "0.36.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6da3636faa25820d8648e0e31c5d519bbb01f72fdf57131f0f5f7da5fed36eab" +checksum = "305efbd14fde4139eb501df5f136994bb520b033fa9fbdce287507dc23b8c7ed" dependencies = [ "bitflags 1.3.2", "errno", @@ -6801,59 +6813,86 @@ dependencies = [ [[package]] name = 
"rustix" -version = "0.37.26" +version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f3f8f960ed3b5a59055428714943298bf3fa2d4a1d53135084e0544829d995" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.10", - "windows-sys 0.48.0", + "linux-raw-sys 0.4.12", + "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.9" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring 0.17.5", - "rustls-webpki", + "ring 0.17.7", + "rustls-webpki 0.101.7", "sct", ] [[package]] -name = "rustls-pemfile" -version = "1.0.3" +name = "rustls" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ - "base64 0.21.5", + "log", + "ring 0.17.7", + "rustls-pki-types", + "rustls-webpki 0.102.1", + "subtle", + "zeroize", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.6", +] + +[[package]] +name = "rustls-pemfile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +dependencies = [ + "base64 0.21.6", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a" + [[package]] name = "rustls-webpki" version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.5", + "ring 0.17.7", + "untrusted 0.9.0", +] + +[[package]] +name = "rustls-webpki" +version = "0.102.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" +dependencies = [ + "ring 0.17.7", + "rustls-pki-types", "untrusted 0.9.0", ] @@ -6866,8 +6905,7 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rw-stream-sink" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" +source = "git+https://github.com/sigp/rust-libp2p/?rev=b96b90894faab0a1eed78e1c82c6452138a3538a#b96b90894faab0a1eed78e1c82c6452138a3538a" dependencies = [ "futures", "pin-project", @@ -6876,9 +6914,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "safe_arith" @@ -6904,23 +6942,23 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0a159d0c45c12b20c5a844feb1fe4bea86e28f17b92a5f0c42193634d3782" +checksum 
= "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.6.8", + "parity-scale-codec 3.6.9", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" +checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -6928,11 +6966,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6970,12 +7008,12 @@ dependencies = [ [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "ring 0.17.7", + "untrusted 0.9.0", ] [[package]] @@ -7031,9 +7069,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" dependencies = [ "serde", ] @@ -7054,9 +7092,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.189" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" dependencies = [ "serde_derive", ] @@ -7083,20 +7121,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" dependencies = [ "itoa", "ryu", @@ -7105,9 +7143,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" dependencies = [ "itoa", "serde", @@ -7115,20 +7153,20 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.16" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" +checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] name = "serde_spanned" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = 
"eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -7169,14 +7207,15 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.26" +version = "0.9.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" dependencies = [ - "indexmap 1.9.3", + "indexmap 2.1.0", + "itoa", "ryu", "serde", - "yaml-rust", + "unsafe-libyaml", ] [[package]] @@ -7236,15 +7275,6 @@ dependencies = [ "keccak", ] -[[package]] -name = "sharded-slab" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" -dependencies = [ - "lazy_static", -] - [[package]] name = "shlex" version = "1.2.0" @@ -7272,9 +7302,9 @@ dependencies = [ [[package]] name = "signature" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", "rand_core 0.6.4", @@ -7341,7 +7371,7 @@ dependencies = [ "lmdb-rkv", "lmdb-rkv-sys", "logging", - "lru 0.7.8", + "lru", "maplit", "parking_lot 0.12.1", "rand 0.8.5", @@ -7506,28 +7536,28 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "snap" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" +checksum = 
"1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "snow" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" dependencies = [ - "aes-gcm", + "aes-gcm 0.10.3", "blake2", "chacha20poly1305", "curve25519-dalek", "rand_core 0.6.4", - "ring 0.16.20", + "ring 0.17.7", "rustc_version", "sha2 0.10.8", "subtle", @@ -7535,9 +7565,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -7577,9 +7607,9 @@ dependencies = [ [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der 0.7.8", @@ -7670,7 +7700,7 @@ dependencies = [ "leveldb", "lighthouse_metrics", "logging", - "lru 0.7.8", + "lru", "parking_lot 0.12.1", "safe_arith", "serde", @@ -7732,9 +7762,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "superstruct" @@ -7772,9 +7802,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ -7888,15 +7918,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", - "fastrand 2.0.1", - "redox_syscall 0.3.5", - "rustix 0.38.20", - "windows-sys 0.48.0", + "fastrand", + "redox_syscall 0.4.1", + "rustix 0.38.28", + "windows-sys 0.52.0", ] [[package]] @@ -7912,9 +7942,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" +checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" dependencies = [ "winapi-util", ] @@ -7937,8 +7967,9 @@ dependencies = [ [[package]] name = "testcontainers" -version = "0.14.0" -source = "git+https://github.com/testcontainers/testcontainers-rs/?rev=0f2c9851#0f2c985160e51a200cfc847097c15b8d85ed7df1" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d2931d7f521af5bae989f716c3fa43a6af9af7ec7a5e21b59ae40878cec00" dependencies = [ "bollard-stubs", "futures", @@ -7962,22 +7993,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.56" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -8001,9 +8032,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ "deranged", "itoa", @@ -8023,9 +8054,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" dependencies = [ "time-core", ] @@ -8096,9 +8127,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" dependencies = [ "backtrace", "bytes", @@ -8124,13 +8155,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -8165,7 +8196,7 @@ dependencies = [ "rand 0.8.5", 
"socket2 0.5.5", "tokio", - "tokio-util 0.7.9", + "tokio-util 0.7.10", "whoami", ] @@ -8175,7 +8206,18 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.10", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.2", + "rustls-pki-types", "tokio", ] @@ -8188,7 +8230,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util 0.7.9", + "tokio-util 0.7.10", ] [[package]] @@ -8209,9 +8251,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -8240,14 +8282,14 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.19.15", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -8258,13 +8300,24 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "serde", "serde_spanned", "toml_datetime", "winnow", ] +[[package]] +name = "toml_edit" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + [[package]] name = "tower" version = "0.4.13" @@ -8313,7 +8366,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] @@ -8323,7 +8376,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", - "valuable", ] [[package]] @@ -8336,35 +8388,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tracing-log" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" -dependencies = [ - "matchers", - "nu-ansi-term", - "once_cell", - "regex", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", -] - [[package]] name = "trackable" version = "1.3.0" @@ -8428,9 +8451,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" @@ -8522,9 +8545,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = 
"6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" [[package]] name = "unicode-ident" @@ -8555,14 +8578,30 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" +checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ "generic-array", "subtle", ] +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + +[[package]] +name = "unsafe-libyaml" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" + [[package]] name = "unsigned-varint" version = "0.6.0" @@ -8578,8 +8617,14 @@ name = "unsigned-varint" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" dependencies = [ - "asynchronous-codec 0.6.2", + "asynchronous-codec", "bytes", ] @@ -8606,12 +8651,12 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna", + "idna 0.5.0", "percent-encoding", ] @@ -8645,7 +8690,7 @@ 
dependencies = [ "filesystem", "futures", "hex", - "hyper", + "hyper 1.1.0", "itertools", "lazy_static", "libsecp256k1", @@ -8724,12 +8769,6 @@ dependencies = [ "validator_client", ] -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - [[package]] name = "vcpkg" version = "0.2.15" @@ -8754,12 +8793,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "waker-fn" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" - [[package]] name = "walkdir" version = "2.4.0" @@ -8782,28 +8815,27 @@ dependencies = [ [[package]] name = "warp" version = "0.3.6" -source = "git+https://github.com/seanmonstar/warp.git#efe8548a19172e69918396d0fdbc369df9d0eb17" +source = "git+https://github.com/seanmonstar/warp.git#2c3581e8387e29bab2ac1aa5f9ae9602fe62339f" dependencies = [ "bytes", "futures-channel", "futures-util", "headers", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "log", "mime", "mime_guess", "percent-encoding", "pin-project", - "rustls-pemfile", + "rustls-pemfile 2.0.0", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", - "tokio-stream", - "tokio-util 0.7.9", + "tokio-rustls 0.25.0", + "tokio-util 0.7.10", "tower-service", "tracing", ] @@ -8834,9 +8866,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if", 
"wasm-bindgen-macro", @@ -8844,24 +8876,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" dependencies = [ "cfg-if", "js-sys", @@ -8871,9 +8903,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8881,22 +8913,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = 
"7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "wasm-streams" @@ -8942,7 +8974,7 @@ dependencies = [ "eth2", "hex", "http_api", - "hyper", + "hyper 1.1.0", "log", "logging", "network", @@ -8963,9 +8995,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" dependencies = [ "js-sys", "wasm-bindgen", @@ -8999,9 +9031,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10" [[package]] name = "which" @@ -9012,7 +9044,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.20", + "rustix 0.38.28", ] [[package]] @@ -9080,7 +9112,7 @@ version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ - "windows-core", + "windows-core 0.51.1", "windows-targets 0.48.5", ] @@ -9105,6 +9137,15 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -9123,6 +9164,15 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" 
+dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -9153,6 +9203,21 @@ dependencies = [ "windows_x86_64_msvc 0.48.5", ] +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -9165,6 +9230,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -9177,6 +9248,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -9189,6 +9266,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" 
+ [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -9201,6 +9284,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -9213,6 +9302,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -9225,6 +9320,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -9238,10 +9339,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] -name = "winnow" -version = "0.5.17" +name = "windows_x86_64_msvc" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "winnow" +version = "0.5.33" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7520bbdec7211caa7c4e682eb1fbe07abe20cee6756b6e00f537c82c11816aa" dependencies = [ "memchr", ] @@ -9359,9 +9466,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0329ef377816896f014435162bb3711ea7a07729c23d0960e6f8048b21b8fe91" +checksum = "9ed0164ae619f2dc144909a9f082187ebb5893693d8c0196e8085283ccd4b776" dependencies = [ "futures", "log", @@ -9372,6 +9479,22 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yamux" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad1d0148b89300047e72994bee99ecdabd15a9166a7b70c8b8c37c314dcc9002" +dependencies = [ + "futures", + "instant", + "log", + "nohash-hasher", + "parking_lot 0.12.1", + "pin-project", + "rand 0.8.5", + "static_assertions", +] + [[package]] name = "yasna" version = "0.5.2" @@ -9382,10 +9505,30 @@ dependencies = [ ] [[package]] -name = "zeroize" -version = "1.6.0" +name = "zerocopy" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -9398,7 +9541,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.48", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 49411340bd..e962ee5e86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,7 +106,7 @@ crossbeam-channel = "0.5.8" delay_map = "0.3" derivative = "2" dirs = "3" -discv5 = { version = "0.3", features = ["libp2p"] } +discv5 = { git="https://github.com/sigp/discv5", rev="dbb4a718cd32eaed8127c3c8241bfd0fde9eb908", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" ethereum-types = "0.14" @@ -121,12 +121,12 @@ fnv = "1" fs2 = "0.4" futures = "0.3" hex = "0.4" -hyper = "0.14" +hyper = "1" itertools = "0.10" lazy_static = "1" libsecp256k1 = "0.7" log = "0.4" -lru = "0.7" +lru = "0.12" maplit = "1" milhouse = { git = "https://github.com/sigp/milhouse", branch = "main" } num_cpus = "1" @@ -145,13 +145,13 @@ rusqlite = { version = "0.28", features = ["bundled"] } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_repr = "0.1" -serde_yaml = "0.8" +serde_yaml = "0.9" sha2 = "0.9" slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] } slog-async = "2" slog-term = "2" sloggers = { version = "2", features = ["json"] } -smallvec = "1" +smallvec = "1.11.2" snap = "1" ssz_types = "0.5" strum = { version = "0.24", features = ["derive"] } @@ -224,7 +224,7 @@ swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } task_executor = { path = "common/task_executor" } types = { path = "consensus/types" } unused_port = { path = "common/unused_port" } -validator_client = { path = "validator_client/" } +validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } warp_utils = { path = "common/warp_utils" } diff --git a/Makefile b/Makefile index c1190ac98c..564e32843f 100644 --- a/Makefile +++ b/Makefile @@ -200,12 +200,17 @@ test-exec-engine: # test vectors. test: test-release -# Updates the CLI help text pages in the Lighthouse book. 
+# Updates the CLI help text pages in the Lighthouse book, building with Docker. cli: docker run --rm --user=root \ -v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \ bash -c 'cd lighthouse && make && ./scripts/cli.sh' - + +# Updates the CLI help text pages in the Lighthouse book, building using local +# `cargo`. +cli-local: + make && ./scripts/cli.sh + # Runs the entire test suite, downloading test vectors if required. test-full: cargo-fmt test-release test-debug test-ef test-exec-engine diff --git a/beacon_node/beacon_chain/src/attestation_simulator.rs b/beacon_node/beacon_chain/src/attestation_simulator.rs new file mode 100644 index 0000000000..6453158458 --- /dev/null +++ b/beacon_node/beacon_chain/src/attestation_simulator.rs @@ -0,0 +1,107 @@ +use crate::{BeaconChain, BeaconChainTypes}; +use slog::{debug, error}; +use slot_clock::SlotClock; +use std::sync::Arc; +use task_executor::TaskExecutor; +use tokio::time::sleep; +use types::{EthSpec, Slot}; + +/// Don't run the attestation simulator if the head slot is this many epochs +/// behind the wall-clock slot. +const SYNCING_TOLERANCE_EPOCHS: u64 = 2; + +/// Spawns a routine which produces an unaggregated attestation at every slot. +/// +/// This routine will run once per slot +pub fn start_attestation_simulator_service( + executor: TaskExecutor, + chain: Arc>, +) { + executor.clone().spawn( + async move { attestation_simulator_service(executor, chain).await }, + "attestation_simulator_service", + ); +} + +/// Loop indefinitely, calling `BeaconChain::produce_unaggregated_attestation` every 4s into each slot. +async fn attestation_simulator_service( + executor: TaskExecutor, + chain: Arc>, +) { + let slot_duration = chain.slot_clock.slot_duration(); + let additional_delay = slot_duration / 3; + + loop { + match chain.slot_clock.duration_to_next_slot() { + Some(duration) => { + sleep(duration + additional_delay).await; + + debug!( + chain.log, + "Simulating unagg. 
attestation production"; + ); + + // Run the task in the executor + let inner_chain = chain.clone(); + executor.spawn( + async move { + if let Ok(current_slot) = inner_chain.slot() { + produce_unaggregated_attestation(inner_chain, current_slot); + } + }, + "attestation_simulator_service", + ); + } + None => { + error!(chain.log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + } + }; + } +} + +pub fn produce_unaggregated_attestation( + chain: Arc>, + current_slot: Slot, +) { + // Don't run the attestation simulator when the head slot is far behind the + // wall-clock slot. + // + // This helps prevent the simulator from becoming a burden by computing + // committees from old states. + let syncing_tolerance_slots = SYNCING_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch(); + if chain.best_slot() + syncing_tolerance_slots < current_slot { + return; + } + + // Since attestations for different committees are practically identical (apart from the committee index field) + // Committee 0 is guaranteed to exist. That means there's no need to load the committee. + let beacon_committee_index = 0; + + // Store the unaggregated attestation in the validator monitor for later processing + match chain.produce_unaggregated_attestation(current_slot, beacon_committee_index) { + Ok(unaggregated_attestation) => { + let data = &unaggregated_attestation.data; + + debug!( + chain.log, + "Produce unagg. 
attestation"; + "attestation_source" => data.source.root.to_string(), + "attestation_target" => data.target.root.to_string(), + ); + + chain + .validator_monitor + .write() + .set_unaggregated_attestation(unaggregated_attestation); + } + Err(e) => { + debug!( + chain.log, + "Failed to simulate attestation"; + "error" => ?e + ); + } + } +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index cdae21ae0a..7190626c18 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -12,8 +12,8 @@ use crate::block_times_cache::BlockTimesCache; use crate::block_verification::POS_PANDA_BANNER; use crate::block_verification::{ check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, - signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, - IntoExecutionPendingBlock, + signature_verify_chain_segment, verify_header_signature, BlockError, ExecutionPendingBlock, + GossipVerifiedBlock, IntoExecutionPendingBlock, }; use crate::block_verification_types::{ AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RpcBlock, @@ -52,6 +52,7 @@ use crate::observed_attesters::{ use crate::observed_blob_sidecars::ObservedBlobSidecars; use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; +use crate::observed_slashable::ObservedSlashable; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; @@ -402,7 +403,9 @@ pub struct BeaconChain { /// Maintains a record of which validators have proposed blocks for each slot. pub observed_block_producers: RwLock>, /// Maintains a record of blob sidecars seen over the gossip network. 
- pub(crate) observed_blob_sidecars: RwLock>, + pub observed_blob_sidecars: RwLock>, + /// Maintains a record of slashable message seen over the gossip network or RPC. + pub observed_slashable: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. pub(crate) observed_voluntary_exits: Mutex>, /// Maintains a record of which validators we've seen proposer slashings for. @@ -490,20 +493,24 @@ impl BeaconBlockResponseWrapper { }) } - pub fn execution_payload_value(&self) -> Option { + pub fn execution_payload_value(&self) -> Uint256 { match self { BeaconBlockResponseWrapper::Full(resp) => resp.execution_payload_value, BeaconBlockResponseWrapper::Blinded(resp) => resp.execution_payload_value, } } - pub fn consensus_block_value(&self) -> Option { + pub fn consensus_block_value_gwei(&self) -> u64 { match self { BeaconBlockResponseWrapper::Full(resp) => resp.consensus_block_value, BeaconBlockResponseWrapper::Blinded(resp) => resp.consensus_block_value, } } + pub fn consensus_block_value_wei(&self) -> Uint256 { + Uint256::from(self.consensus_block_value_gwei()) * 1_000_000_000 + } + pub fn is_blinded(&self) -> bool { matches!(self, BeaconBlockResponseWrapper::Blinded(_)) } @@ -518,9 +525,9 @@ pub struct BeaconBlockResponse> { /// The Blobs / Proofs associated with the new block pub blob_items: Option<(KzgProofs, BlobsList)>, /// The execution layer reward for the block - pub execution_payload_value: Option, + pub execution_payload_value: Uint256, /// The consensus layer reward to the proposer - pub consensus_block_value: Option, + pub consensus_block_value: u64, } impl FinalizationAndCanonicity { @@ -3122,9 +3129,27 @@ impl BeaconChain { block_root: Hash256, blobs: FixedBlobSidecarList, ) -> Result> { - if let Some(slasher) = self.slasher.as_ref() { - for blob_sidecar in blobs.iter().filter_map(|blob| blob.clone()) { - slasher.accept_block_header(blob_sidecar.signed_block_header.clone()); + // Need to scope this to ensure the lock is dropped 
before calling `process_availability` + // Even an explicit drop is not enough to convince the borrow checker. + { + let mut slashable_cache = self.observed_slashable.write(); + for header in blobs + .into_iter() + .filter_map(|b| b.as_ref().map(|b| b.signed_block_header.clone())) + .unique() + { + if verify_header_signature::>(self, &header).is_ok() { + slashable_cache + .observe_slashable( + header.message.slot, + header.message.proposer_index, + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(header); + } + } } } let availability = self @@ -3569,9 +3594,11 @@ impl BeaconChain { } // Allow the validator monitor to learn about a new valid state. - self.validator_monitor - .write() - .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), state); + self.validator_monitor.write().process_valid_state( + current_slot.epoch(T::EthSpec::slots_per_epoch()), + state, + &self.spec, + ); let validator_monitor = self.validator_monitor.read(); @@ -3967,6 +3994,7 @@ impl BeaconChain { slot: Slot, validator_graffiti: Option, verification: ProduceBlockVerification, + builder_boost_factor: Option, block_production_version: BlockProductionVersion, ) -> Result, BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); @@ -3995,6 +4023,7 @@ impl BeaconChain { randao_reveal, validator_graffiti, verification, + builder_boost_factor, block_production_version, ) .await @@ -4539,6 +4568,7 @@ impl BeaconChain { randao_reveal: Signature, validator_graffiti: Option, verification: ProduceBlockVerification, + builder_boost_factor: Option, block_production_version: BlockProductionVersion, ) -> Result, BlockProductionError> { // Part 1/3 (blocking) @@ -4555,6 +4585,7 @@ impl BeaconChain { produce_at_slot, randao_reveal, validator_graffiti, + builder_boost_factor, block_production_version, ) }, @@ -4644,6 +4675,7 @@ impl BeaconChain { } } + 
#[allow(clippy::too_many_arguments)] fn produce_partial_beacon_block( self: &Arc, mut state: BeaconState, @@ -4651,6 +4683,7 @@ impl BeaconChain { produce_at_slot: Slot, randao_reveal: Signature, validator_graffiti: Option, + builder_boost_factor: Option, block_production_version: BlockProductionVersion, ) -> Result, BlockProductionError> { let eth1_chain = self @@ -4713,6 +4746,7 @@ impl BeaconChain { parent_root, proposer_index, builder_params, + builder_boost_factor, block_production_version, )?; Some(prepare_payload_handle) @@ -5056,8 +5090,11 @@ impl BeaconChain { .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, bls_to_execution_changes: bls_to_execution_changes.into(), - blob_kzg_commitments: kzg_commitments - .ok_or(BlockProductionError::InvalidPayloadFork)?, + blob_kzg_commitments: kzg_commitments.ok_or( + BlockProductionError::MissingKzgCommitment( + "Kzg commitments missing from block contents".to_string(), + ), + )?, }, }), maybe_blobs_and_proofs, @@ -5172,8 +5209,8 @@ impl BeaconChain { block, state, blob_items, - execution_payload_value: Some(execution_payload_value), - consensus_block_value: Some(consensus_block_value), + execution_payload_value, + consensus_block_value, }) } @@ -5449,6 +5486,7 @@ impl BeaconChain { parent_block_hash: forkchoice_update_params.head_hash.unwrap_or_default(), payload_attributes: payload_attributes.into(), }, + metadata: Default::default(), version: Some(self.spec.fork_name_at_slot::(prepare_slot)), })); } diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 330593f428..31a617ff72 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -14,12 +14,14 @@ use lru::LruCache; use smallvec::SmallVec; use state_processing::state_advance::partial_state_advance; use std::cmp::Ordering; +use std::num::NonZeroUsize; +use types::non_zero_usize::new_non_zero_usize; 
use types::{ BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned, }; /// The number of sets of proposer indices that should be cached. -const CACHE_SIZE: usize = 16; +const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); /// This value is fairly unimportant, it's used to avoid heap allocations. The result of it being /// incorrect is non-substantial from a consensus perspective (and probably also from a diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 7195babb52..b7cf69d2f7 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -417,7 +417,7 @@ pub fn validate_blob_sidecar_for_gossip( if chain .observed_blob_sidecars .read() - .is_known(&blob_sidecar) + .proposer_is_known(&blob_sidecar) .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? { return Err(GossipBlobError::RepeatBlob { @@ -540,6 +540,16 @@ pub fn validate_blob_sidecar_for_gossip( }); } + chain + .observed_slashable + .write() + .observe_slashable( + blob_sidecar.slot(), + blob_sidecar.block_proposer_index(), + block_root, + ) + .map_err(|e| GossipBlobError::BeaconChainError(e.into()))?; + // Now the signature is valid, store the proposal so we don't accept another blob sidecar // with the same `BlobIdentifier`. 
// It's important to double-check that the proposer still hasn't been observed so we don't diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 9290feb519..a63d6d1f21 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -942,6 +942,11 @@ impl GossipVerifiedBlock { return Err(BlockError::ProposalSignatureInvalid); } + chain + .observed_slashable + .write() + .observe_slashable(block.slot(), block.message().proposer_index(), block_root) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; // Now the signature is valid, store the proposal so we don't accept another from this // validator and slot. // @@ -1239,6 +1244,12 @@ impl ExecutionPendingBlock { chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, ) -> Result> { + chain + .observed_slashable + .write() + .observe_slashable(block.slot(), block.message().proposer_index(), block_root) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + chain .observed_block_producers .write() @@ -2026,7 +2037,7 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>( /// Verify that `header` was signed with a valid signature from its proposer. /// /// Return `Ok(())` if the signature is valid, and an `Err` otherwise. -fn verify_header_signature( +pub fn verify_header_signature( chain: &BeaconChain, header: &SignedBeaconBlockHeader, ) -> Result<(), Err> { diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index dab096ad43..322bfbd208 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -813,6 +813,7 @@ where validator_monitor.process_valid_state( slot.epoch(TEthSpec::slots_per_epoch()), &head_snapshot.beacon_state, + &self.spec, ); } @@ -905,6 +906,7 @@ where // TODO: allow for persisting and loading the pool from disk. 
observed_block_producers: <_>::default(), observed_blob_sidecars: <_>::default(), + observed_slashable: <_>::default(), observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index e76ae57623..e986b0bfea 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -951,6 +951,13 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); + self.observed_slashable.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + self.attester_cache .prune_below(new_view.finalized_checkpoint.epoch); diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index eff8d1d9d0..21cac9a264 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -17,11 +17,11 @@ use slog::{debug, error, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; +use std::num::NonZeroUsize; use std::sync::Arc; use task_executor::TaskExecutor; use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; -use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; mod availability_view; @@ -32,15 +32,17 @@ mod processing_cache; mod state_lru_cache; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; +use types::non_zero_usize::new_non_zero_usize; /// The LRU Cache stores `PendingComponents` which can store up to /// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. 
So /// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this /// to 1024 means the maximum size of the cache is ~ 0.8 GB. But the cache /// will target a size of less than 75% of capacity. -pub const OVERFLOW_LRU_CAPACITY: usize = 1024; +pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(1024); /// Until tree-states is implemented, we can't store very many states in memory :( -pub const STATE_LRU_CAPACITY: usize = 2; +pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(2); +pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); /// This includes a cache for any blocks or blobs that have been received over gossip or RPC /// and are awaiting more components before they can be imported. Additionally the @@ -421,7 +423,8 @@ impl DataAvailabilityChecker { .map(|current_epoch| { std::cmp::max( fork_epoch, - current_epoch.saturating_sub(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS), + current_epoch + .saturating_sub(self.spec.min_epochs_for_blob_sidecars_requests), ) }) }) @@ -514,7 +517,8 @@ async fn availability_cache_maintenance_service( let cutoff_epoch = std::cmp::max( finalized_epoch + 1, std::cmp::max( - current_epoch.saturating_sub(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS), + current_epoch + .saturating_sub(chain.spec.min_epochs_for_blob_sidecars_requests), deneb_fork_epoch, ), ); diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index dcc999d4cd..293c928ff8 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -42,6 +42,7 @@ use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{FixedVector, VariableList}; +use std::num::NonZeroUsize; use std::{collections::HashSet, sync::Arc}; use 
types::blob_sidecar::BlobIdentifier; use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256}; @@ -288,7 +289,7 @@ struct Critical { } impl Critical { - pub fn new(capacity: usize) -> Self { + pub fn new(capacity: NonZeroUsize) -> Self { Self { in_memory: LruCache::new(capacity), store_keys: HashSet::new(), @@ -329,7 +330,7 @@ impl Critical { pending_components: PendingComponents, overflow_store: &OverflowStore, ) -> Result<(), AvailabilityCheckError> { - if self.in_memory.len() == self.in_memory.cap() { + if self.in_memory.len() == self.in_memory.cap().get() { // cache will overflow, must write lru entry to disk if let Some((lru_key, lru_value)) = self.in_memory.pop_lru() { overflow_store.persist_pending_components(lru_key, lru_value)?; @@ -377,12 +378,12 @@ pub struct OverflowLRUCache { /// Mutex to guard maintenance methods which move data between disk and memory maintenance_lock: Mutex<()>, /// The capacity of the LRU cache - capacity: usize, + capacity: NonZeroUsize, } impl OverflowLRUCache { pub fn new( - capacity: usize, + capacity: NonZeroUsize, beacon_store: BeaconStore, spec: ChainSpec, ) -> Result { @@ -514,7 +515,7 @@ impl OverflowLRUCache { /// maintain the cache pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { // ensure memory usage is below threshold - let threshold = self.capacity * 3 / 4; + let threshold = self.capacity.get() * 3 / 4; self.maintain_threshold(threshold, cutoff_epoch)?; // clean up any keys on the disk that shouldn't be there self.prune_disk(cutoff_epoch)?; @@ -753,6 +754,7 @@ mod test { use std::ops::AddAssign; use store::{HotColdDB, ItemStore, LevelDB, StoreConfig}; use tempfile::{tempdir, TempDir}; + use types::non_zero_usize::new_non_zero_usize; use types::{ChainSpec, ExecPayload, MinimalEthSpec}; const LOW_VALIDATOR_COUNT: usize = 32; @@ -974,8 +976,9 @@ mod test { let harness = get_deneb_chain(log.clone(), &chain_db_path).await; let spec = harness.spec.clone(); let test_store = 
harness.chain.store.clone(); + let capacity_non_zero = new_non_zero_usize(capacity); let cache = Arc::new( - OverflowLRUCache::::new(capacity, test_store, spec.clone()) + OverflowLRUCache::::new(capacity_non_zero, test_store, spec.clone()) .expect("should create cache"), ); (harness, cache, chain_db_path) @@ -1477,7 +1480,7 @@ mod test { // create a new cache with the same store let recovered_cache = OverflowLRUCache::::new( - capacity, + new_non_zero_usize(capacity), harness.chain.store.clone(), harness.chain.spec.clone(), ) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index d3348b67fb..bd125a7f42 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -1,7 +1,7 @@ use crate::block_verification_types::AsBlock; use crate::{ block_verification_types::BlockImportData, - data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY}, + data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY_NON_ZERO}, eth1_finalization_cache::Eth1FinalizationData, AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, }; @@ -61,7 +61,7 @@ pub struct StateLRUCache { impl StateLRUCache { pub fn new(store: BeaconStore, spec: ChainSpec) -> Self { Self { - states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY)), + states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY_NON_ZERO)), store, spec, } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 093255b201..e25976c2a5 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -405,6 +405,7 @@ pub fn get_execution_payload( parent_block_root: Hash256, proposer_index: u64, builder_params: BuilderParams, + builder_boost_factor: 
Option, block_production_version: BlockProductionVersion, ) -> Result, BlockProductionError> { // Compute all required values from the `state` now to avoid needing to pass it into a spawned @@ -449,6 +450,7 @@ pub fn get_execution_payload( builder_params, withdrawals, parent_beacon_block_root, + builder_boost_factor, block_production_version, ) .await @@ -485,6 +487,7 @@ pub async fn prepare_execution_payload( builder_params: BuilderParams, withdrawals: Option>, parent_beacon_block_root: Option, + builder_boost_factor: Option, block_production_version: BlockProductionVersion, ) -> Result, BlockProductionError> where @@ -575,6 +578,7 @@ where builder_params, fork, &chain.spec, + builder_boost_factor, block_production_version, ) .await diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index a397ef8ea2..59d006180c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,4 +1,5 @@ pub mod attestation_rewards; +pub mod attestation_simulator; pub mod attestation_verification; mod attester_cache; pub mod beacon_block_reward; @@ -39,6 +40,7 @@ mod observed_attesters; mod observed_blob_sidecars; pub mod observed_block_producers; pub mod observed_operations; +mod observed_slashable; pub mod otb_verification_service; mod persisted_beacon_chain; mod persisted_fork_choice; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 1b3d1faefd..9af774200b 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -6,6 +6,20 @@ pub use lighthouse_metrics::*; use slot_clock::SlotClock; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; +// Attestation simulator metrics +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL: &str = + "validator_monitor_attestation_simulator_head_attester_hit_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL: &str = + 
"validator_monitor_attestation_simulator_head_attester_miss_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL: &str = + "validator_monitor_attestation_simulator_target_attester_hit_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL: &str = + "validator_monitor_attestation_simulator_target_attester_miss_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL: &str = + "validator_monitor_attestation_simulator_source_attester_hit_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL: &str = + "validator_monitor_attestation_simulator_source_attester_miss_total"; + lazy_static! { /* * Block Processing @@ -1061,6 +1075,48 @@ lazy_static! { "beacon_aggregated_attestation_subsets_total", "Count of new aggregated attestations that are subsets of already known aggregates" ); + /* + * Attestation simulator metrics + */ + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot head attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, + "Incremented if a validator is not flagged as a previous slot head attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot target attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS: Result = + try_create_int_counter( + 
VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, + "Incremented if a validator is not flagged as a previous slot target attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot source attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, + "Incremented if a validator is not flagged as a previous slot source attester \ + during per slot processing", + ); + /* + * Missed block metrics + */ pub static ref VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL: Result = try_create_int_counter_vec( "validator_monitor_missed_blocks_total", "Number of non-finalized blocks missed", diff --git a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs index 4f84961449..148d85befb 100644 --- a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs @@ -3,6 +3,7 @@ //! Only `BlobSidecar`s that have completed proposer signature verification can be added //! to this cache to reduce DoS risks. +use crate::observed_block_producers::ProposalKey; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use types::{BlobSidecar, EthSpec, Slot}; @@ -29,7 +30,7 @@ pub enum Error { pub struct ObservedBlobSidecars { finalized_slot: Slot, /// Stores all received blob indices for a given `(ValidatorIndex, Slot)` tuple. 
- items: HashMap<(u64, Slot), HashSet>, + items: HashMap>, _phantom: PhantomData, } @@ -52,22 +53,30 @@ impl ObservedBlobSidecars { pub fn observe_sidecar(&mut self, blob_sidecar: &BlobSidecar) -> Result { self.sanitize_blob_sidecar(blob_sidecar)?; - let did_not_exist = self + let blob_indices = self .items - .entry((blob_sidecar.block_proposer_index(), blob_sidecar.slot())) - .or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block())) - .insert(blob_sidecar.index); + .entry(ProposalKey { + slot: blob_sidecar.slot(), + proposer: blob_sidecar.block_proposer_index(), + }) + .or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block())); + let did_not_exist = blob_indices.insert(blob_sidecar.index); Ok(!did_not_exist) } /// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window. - pub fn is_known(&self, blob_sidecar: &BlobSidecar) -> Result { + pub fn proposer_is_known(&self, blob_sidecar: &BlobSidecar) -> Result { self.sanitize_blob_sidecar(blob_sidecar)?; let is_known = self .items - .get(&(blob_sidecar.block_proposer_index(), blob_sidecar.slot())) - .map_or(false, |set| set.contains(&blob_sidecar.index)); + .get(&ProposalKey { + slot: blob_sidecar.slot(), + proposer: blob_sidecar.block_proposer_index(), + }) + .map_or(false, |blob_indices| { + blob_indices.contains(&blob_sidecar.index) + }); Ok(is_known) } @@ -93,13 +102,14 @@ impl ObservedBlobSidecars { } self.finalized_slot = finalized_slot; - self.items.retain(|k, _| k.1 > finalized_slot); + self.items.retain(|k, _| k.slot > finalized_slot); } } #[cfg(test)] mod tests { use super::*; + use bls::Hash256; use std::sync::Arc; use types::{BlobSidecar, MainnetEthSpec}; @@ -140,14 +150,15 @@ mod tests { 1, "only one (validator_index, slot) tuple should be present" ); + + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - 
.get(&(proposer_index_a, Slot::new(0))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 1, - "only one item should be present" + "only one proposer should be present" ); /* @@ -158,14 +169,14 @@ mod tests { assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_a, Slot::new(0))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 1, - "only one item should be present" + "only one proposer should be present" ); /* @@ -215,12 +226,12 @@ mod tests { ); assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs))) + .expect("the three epochs slot should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_b, Slot::new(three_epochs))) - .expect("the three epochs slot should be present") - .len(), + cached_blob_indices.len(), 1, "only one proposer should be present" ); @@ -239,12 +250,12 @@ mod tests { ); assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs))) + .expect("the three epochs slot should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_b, Slot::new(three_epochs))) - .expect("the three epochs slot should be present") - .len(), + cached_blob_indices.len(), 1, "only one proposer should be present" ); @@ -259,7 +270,7 @@ mod tests { let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0); assert_eq!( - cache.is_known(&sidecar_a), + cache.proposer_is_known(&sidecar_a), Ok(false), "no observation in empty cache" ); @@ -271,7 +282,7 @@ mod tests { ); assert_eq!( - 
cache.is_known(&sidecar_a), + cache.proposer_is_known(&sidecar_a), Ok(true), "observed block is indicated as true" ); @@ -284,12 +295,12 @@ mod tests { assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_a, Slot::new(0))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 1, "only one proposer should be present" ); @@ -300,7 +311,7 @@ mod tests { let sidecar_b = get_blob_sidecar(1, proposer_index_b, 0); assert_eq!( - cache.is_known(&sidecar_b), + cache.proposer_is_known(&sidecar_b), Ok(false), "no observation for new slot" ); @@ -310,7 +321,7 @@ mod tests { "can observe proposer for new slot, indicates proposer unobserved" ); assert_eq!( - cache.is_known(&sidecar_b), + cache.proposer_is_known(&sidecar_b), Ok(true), "observed block in slot 1 is indicated as true" ); @@ -322,21 +333,21 @@ mod tests { assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); assert_eq!(cache.items.len(), 2, "two slots should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_a, Slot::new(0))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 1, "only one proposer should be present in slot 0" ); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_b, Slot::new(1))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_b, Slot::new(1))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 1, "only one proposer should be present in slot 1" ); @@ -345,7 +356,7 @@ mod tests { let sidecar_c = 
get_blob_sidecar(0, proposer_index_a, 1); assert_eq!( - cache.is_known(&sidecar_c), + cache.proposer_is_known(&sidecar_c), Ok(false), "no observation for new index" ); @@ -355,7 +366,7 @@ mod tests { "can observe new index, indicates sidecar unobserved for new index" ); assert_eq!( - cache.is_known(&sidecar_c), + cache.proposer_is_known(&sidecar_c), Ok(true), "observed new sidecar is indicated as true" ); @@ -367,12 +378,42 @@ mod tests { assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); assert_eq!(cache.items.len(), 2, "two slots should be present"); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_a, Slot::new(0))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), + 2, + "two blob indices should be present in slot 0" + ); + + // Create a sidecar sharing slot and proposer but with a different block root. 
+ let mut sidecar_d: BlobSidecar = BlobSidecar { + index: sidecar_c.index, + blob: sidecar_c.blob.clone(), + kzg_commitment: sidecar_c.kzg_commitment, + kzg_proof: sidecar_c.kzg_proof, + signed_block_header: sidecar_c.signed_block_header.clone(), + kzg_commitment_inclusion_proof: sidecar_c.kzg_commitment_inclusion_proof.clone(), + }; + sidecar_d.signed_block_header.message.body_root = Hash256::repeat_byte(7); + assert_eq!( + cache.proposer_is_known(&sidecar_d), + Ok(true), + "there has been an observation for this proposer index" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_d), + Ok(true), + "indicates sidecar proposer was observed" + ); + let cached_blob_indices = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); + assert_eq!( + cached_blob_indices.len(), 2, "two blob indices should be present in slot 0" ); diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index f76fc53796..096c8bff77 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -16,9 +16,15 @@ pub enum Error { } #[derive(Eq, Hash, PartialEq, Debug, Default)] -struct ProposalKey { - slot: Slot, - proposer: u64, +pub struct ProposalKey { + pub slot: Slot, + pub proposer: u64, +} + +impl ProposalKey { + pub fn new(proposer: u64, slot: Slot) -> Self { + Self { slot, proposer } + } } /// Maintains a cache of observed `(block.slot, block.proposer)`. diff --git a/beacon_node/beacon_chain/src/observed_slashable.rs b/beacon_node/beacon_chain/src/observed_slashable.rs new file mode 100644 index 0000000000..001a0d4a86 --- /dev/null +++ b/beacon_node/beacon_chain/src/observed_slashable.rs @@ -0,0 +1,486 @@ +//! Provides the `ObservedSlashable` struct which tracks slashable messages seen in +//! gossip or via RPC. Useful in supporting `broadcast_validation` in the Beacon API. 
+ +use crate::observed_block_producers::Error; +use std::collections::hash_map::Entry; +use std::collections::{HashMap, HashSet}; +use std::marker::PhantomData; +use types::{EthSpec, Hash256, Slot, Unsigned}; + +#[derive(Eq, Hash, PartialEq, Debug, Default)] +pub struct ProposalKey { + pub slot: Slot, + pub proposer: u64, +} + +/// Maintains a cache of observed `(block.slot, block.proposer)`. +/// +/// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you +/// must call `Self::prune` manually. +/// +/// The maximum size of the cache is determined by `slots_since_finality * +/// VALIDATOR_REGISTRY_LIMIT`. This is quite a large size, so it's important that upstream +/// functions only use this cache for blocks with a valid signature. Only allowing valid signed +/// blocks reduces the theoretical maximum size of this cache to `slots_since_finality * +/// active_validator_count`, however in reality that is more like `slots_since_finality * +/// known_distinct_shufflings` which is much smaller. +pub struct ObservedSlashable { + finalized_slot: Slot, + items: HashMap>, + _phantom: PhantomData, +} + +impl Default for ObservedSlashable { + /// Instantiates `Self` with `finalized_slot == 0`. + fn default() -> Self { + Self { + finalized_slot: Slot::new(0), + items: HashMap::new(), + _phantom: PhantomData, + } + } +} + +impl ObservedSlashable { + /// Observe that the `header` was produced by `header.proposer_index` at `header.slot`. This will + /// update `self` so future calls to it indicate that this block is known. + /// + /// The supplied `block` **MUST** be signature verified (see struct-level documentation). + /// + /// ## Errors + /// + /// - `header.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. + /// - `header.slot` is equal to or less than the latest pruned `finalized_slot`. 
+ pub fn observe_slashable( + &mut self, + slot: Slot, + proposer_index: u64, + block_root: Hash256, + ) -> Result<(), Error> { + self.sanitize_header(slot, proposer_index)?; + + let key = ProposalKey { + slot, + proposer: proposer_index, + }; + + let entry = self.items.entry(key); + + match entry { + Entry::Occupied(mut occupied_entry) => { + let block_roots = occupied_entry.get_mut(); + block_roots.insert(block_root); + } + Entry::Vacant(vacant_entry) => { + let block_roots = HashSet::from([block_root]); + vacant_entry.insert(block_roots); + } + } + + Ok(()) + } + + /// Returns `Ok(true)` if the `block_root` is slashable, `Ok(false)` if not. Does not + /// update the cache, so calling this function multiple times will continue to return + /// `Ok(false)`, until `Self::observe_proposer` is called. + /// + /// ## Errors + /// + /// - `proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. + /// - `slot` is equal to or less than the latest pruned `finalized_slot`. + pub fn is_slashable( + &self, + slot: Slot, + proposer_index: u64, + block_root: Hash256, + ) -> Result { + self.sanitize_header(slot, proposer_index)?; + + let key = ProposalKey { + slot, + proposer: proposer_index, + }; + + if let Some(block_roots) = self.items.get(&key) { + let no_prev_known_blocks = + block_roots.difference(&HashSet::from([block_root])).count() == 0; + + Ok(!no_prev_known_blocks) + } else { + Ok(false) + } + } + + /// Returns `Ok(())` if the given `header` is sane. + fn sanitize_header(&self, slot: Slot, proposer_index: u64) -> Result<(), Error> { + if proposer_index >= E::ValidatorRegistryLimit::to_u64() { + return Err(Error::ValidatorIndexTooHigh(proposer_index)); + } + + let finalized_slot = self.finalized_slot; + if finalized_slot > 0 && slot <= finalized_slot { + return Err(Error::FinalizedBlock { + slot, + finalized_slot, + }); + } + + Ok(()) + } + + /// Removes all observations of blocks equal to or earlier than `finalized_slot`. 
+ /// + /// Stores `finalized_slot` in `self`, so that `self` will reject any block that has a slot + /// equal to or less than `finalized_slot`. + /// + /// No-op if `finalized_slot == 0`. + pub fn prune(&mut self, finalized_slot: Slot) { + if finalized_slot == 0 { + return; + } + + self.finalized_slot = finalized_slot; + self.items.retain(|key, _| key.slot > finalized_slot); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::{BeaconBlock, Graffiti, MainnetEthSpec}; + + type E = MainnetEthSpec; + + fn get_block(slot: u64, proposer: u64) -> BeaconBlock { + let mut block = BeaconBlock::empty(&E::default_spec()); + *block.slot_mut() = slot.into(); + *block.proposer_index_mut() = proposer; + block + } + + #[test] + fn pruning() { + let mut cache = ObservedSlashable::::default(); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 0, "no slots should be present"); + + // Slot 0, proposer 0 + let block_a = get_block(0, 0); + let block_root = block_a.canonical_root(); + + assert_eq!( + cache.observe_slashable(block_a.slot(), block_a.proposer_index(), block_root), + Ok(()), + "can observe proposer" + ); + + /* + * Preconditions. + */ + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) + .expect("slot zero should be present") + .len(), + 1, + "only one proposer should be present" + ); + + /* + * Check that a prune at the genesis slot does nothing. 
+ */ + cache.prune(Slot::new(0)); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) + .expect("slot zero should be present") + .len(), + 1, + "only one block root should be present" + ); + + /* + * Check that a prune empties the cache + */ + cache.prune(E::slots_per_epoch().into()); + assert_eq!( + cache.finalized_slot, + Slot::from(E::slots_per_epoch()), + "finalized slot is updated" + ); + assert_eq!(cache.items.len(), 0, "no items left"); + + /* + * Check that we can't insert a finalized block + */ + // First slot of finalized epoch, proposer 0 + let block_b = get_block(E::slots_per_epoch(), 0); + let block_root_b = block_b.canonical_root(); + + assert_eq!( + cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b), + Err(Error::FinalizedBlock { + slot: E::slots_per_epoch().into(), + finalized_slot: E::slots_per_epoch().into(), + }), + "cant insert finalized block" + ); + + assert_eq!(cache.items.len(), 0, "block was not added"); + + /* + * Check that we _can_ insert a non-finalized block + */ + let three_epochs = E::slots_per_epoch() * 3; + + // First slot of finalized epoch, proposer 0 + let block_b = get_block(three_epochs, 0); + + assert_eq!( + cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b), + Ok(()), + "can insert non-finalized block" + ); + + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) + .expect("the three epochs slot should be present") + .len(), + 1, + "only one proposer should be present" + ); + + /* + * Check that a prune doesnt wipe later blocks + */ + let two_epochs = E::slots_per_epoch() * 2; + cache.prune(two_epochs.into()); + + assert_eq!( + cache.finalized_slot, + 
Slot::from(two_epochs), + "finalized slot is updated" + ); + + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) + .expect("the three epochs slot should be present") + .len(), + 1, + "only one block root should be present" + ); + } + + #[test] + fn simple_observations() { + let mut cache = ObservedSlashable::::default(); + + // Slot 0, proposer 0 + let block_a = get_block(0, 0); + let block_root_a = block_a.canonical_root(); + + assert_eq!( + cache.is_slashable( + block_a.slot(), + block_a.proposer_index(), + block_a.canonical_root() + ), + Ok(false), + "no observation in empty cache" + ); + assert_eq!( + cache.observe_slashable(block_a.slot(), block_a.proposer_index(), block_root_a), + Ok(()), + "can observe proposer" + ); + assert_eq!( + cache.is_slashable( + block_a.slot(), + block_a.proposer_index(), + block_a.canonical_root() + ), + Ok(false), + "observed but unslashed block" + ); + assert_eq!( + cache.observe_slashable(block_a.slot(), block_a.proposer_index(), block_root_a), + Ok(()), + "observing again" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) + .expect("slot zero should be present") + .len(), + 1, + "only one block root should be present" + ); + + // Slot 1, proposer 0 + let block_b = get_block(1, 0); + let block_root_b = block_b.canonical_root(); + + assert_eq!( + cache.is_slashable( + block_b.slot(), + block_b.proposer_index(), + block_b.canonical_root() + ), + Ok(false), + "not slashable for new slot" + ); + assert_eq!( + cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b), + Ok(()), + "can observe proposer for new slot" + ); + assert_eq!( + cache.is_slashable( + block_b.slot(), + block_b.proposer_index(), + 
block_b.canonical_root() + ), + Ok(false), + "observed but not slashable block in slot 1" + ); + assert_eq!( + cache.observe_slashable(block_b.slot(), block_b.proposer_index(), block_root_b), + Ok(()), + "observing slot 1 again" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 2, "two slots should be present"); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) + .expect("slot zero should be present") + .len(), + 1, + "only one block root should be present in slot 0" + ); + assert_eq!( + cache + .items + .get(&ProposalKey { + slot: Slot::new(1), + proposer: 0 + }) + .expect("slot zero should be present") + .len(), + 1, + "only one block root should be present in slot 1" + ); + + // Slot 0, proposer 1 + let block_c = get_block(0, 1); + let block_root_c = block_c.canonical_root(); + + assert_eq!( + cache.is_slashable( + block_c.slot(), + block_c.proposer_index(), + block_c.canonical_root() + ), + Ok(false), + "not slashable due to new proposer" + ); + assert_eq!( + cache.observe_slashable(block_c.slot(), block_c.proposer_index(), block_root_c), + Ok(()), + "can observe new proposer, indicates proposer unobserved" + ); + assert_eq!( + cache.is_slashable( + block_c.slot(), + block_c.proposer_index(), + block_c.canonical_root() + ), + Ok(false), + "not slashable due to new proposer" + ); + assert_eq!( + cache.observe_slashable(block_c.slot(), block_c.proposer_index(), block_root_c), + Ok(()), + "observing new proposer again" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 3, "three slots should be present"); + assert_eq!( + cache + .items + .iter() + .filter(|(k, _)| k.slot == cache.finalized_slot) + .count(), + 2, + "two proposers should be present in slot 0" + ); + assert_eq!( + cache + .items + .iter() + .filter(|(k, _)| k.slot == Slot::new(1)) + .count(), + 1, + "only one proposer should be present in slot 1" + ); 
+ + // Slot 0, proposer 1 (again) + let mut block_d = get_block(0, 1); + *block_d.body_mut().graffiti_mut() = Graffiti::from(*b"this is slashable "); + let block_root_d = block_d.canonical_root(); + + assert_eq!( + cache.is_slashable( + block_d.slot(), + block_d.proposer_index(), + block_d.canonical_root() + ), + Ok(true), + "slashable due to new proposer" + ); + assert_eq!( + cache.observe_slashable(block_d.slot(), block_d.proposer_index(), block_root_d), + Ok(()), + "can observe new proposer, indicates proposer unobserved" + ); + } +} diff --git a/beacon_node/beacon_chain/src/pre_finalization_cache.rs b/beacon_node/beacon_chain/src/pre_finalization_cache.rs index ca957af213..3b337d4228 100644 --- a/beacon_node/beacon_chain/src/pre_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/pre_finalization_cache.rs @@ -3,11 +3,13 @@ use itertools::process_results; use lru::LruCache; use parking_lot::Mutex; use slog::debug; +use std::num::NonZeroUsize; use std::time::Duration; +use types::non_zero_usize::new_non_zero_usize; use types::Hash256; -const BLOCK_ROOT_CACHE_LIMIT: usize = 512; -const LOOKUP_LIMIT: usize = 8; +const BLOCK_ROOT_CACHE_LIMIT: NonZeroUsize = new_non_zero_usize(512); +const LOOKUP_LIMIT: NonZeroUsize = new_non_zero_usize(8); const METRICS_TIMEOUT: Duration = Duration::from_millis(100); /// Cache for rejecting attestations to blocks from before finalization. @@ -78,7 +80,7 @@ impl BeaconChain { // 3. Check the network with a single block lookup. cache.in_progress_lookups.put(block_root, ()); - if cache.in_progress_lookups.len() == LOOKUP_LIMIT { + if cache.in_progress_lookups.len() == LOOKUP_LIMIT.get() { // NOTE: we expect this to occur sometimes if a lot of blocks that we look up fail to be // imported for reasons other than being pre-finalization. 
The cache will eventually // self-repair in this case by replacing old entries with new ones until all the failed diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 3ef1b996ba..15411eab75 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -461,14 +461,13 @@ where } pub fn mock_execution_layer(self) -> Self { - self.mock_execution_layer_with_config(None) + self.mock_execution_layer_with_config() } - pub fn mock_execution_layer_with_config(mut self, builder_threshold: Option) -> Self { + pub fn mock_execution_layer_with_config(mut self) -> Self { let mock = mock_execution_layer_from_parts::( self.spec.as_ref().expect("cannot build without spec"), self.runtime.task_executor.clone(), - builder_threshold, ); self.execution_layer = Some(mock.el.clone()); self.mock_execution_layer = Some(mock); @@ -571,7 +570,6 @@ where pub fn mock_execution_layer_from_parts( spec: &ChainSpec, task_executor: TaskExecutor, - builder_threshold: Option, ) -> MockExecutionLayer { let shanghai_time = spec.capella_fork_epoch.map(|epoch| { HARNESS_GENESIS_TIME + spec.seconds_per_slot * T::slots_per_epoch() * epoch.as_u64() @@ -590,7 +588,6 @@ pub fn mock_execution_layer_from_parts( DEFAULT_TERMINAL_BLOCK, shanghai_time, cancun_time, - builder_threshold, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec.clone(), Some(kzg), @@ -855,6 +852,7 @@ where randao_reveal, Some(graffiti), ProduceBlockVerification::VerifyRandao, + None, BlockProductionVersion::FullV2, ) .await @@ -916,6 +914,7 @@ where randao_reveal, Some(graffiti), ProduceBlockVerification::VerifyRandao, + None, BlockProductionVersion::FullV2, ) .await @@ -1751,6 +1750,32 @@ where ((signed_block, blobs), state) } + pub async fn make_blob_with_modifier( + &self, + state: BeaconState, + slot: Slot, + blob_modifier: impl FnOnce(&mut BlobsList), + ) -> (SignedBlockContentsTuple, BeaconState) { + assert_ne!(slot, 0, "can't 
produce a block at slot 0"); + assert!(slot >= state.slot()); + + let ((block, mut blobs), state) = self.make_block_return_pre_state(state, slot).await; + + let (block, _) = block.deconstruct(); + + blob_modifier(&mut blobs.as_mut().unwrap().1); + + let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); + + let signed_block = block.sign( + &self.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + ((signed_block, blobs), state) + } + pub fn make_deposits<'a>( &self, state: &'a mut BeaconState, diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 2391263c65..1ef3f06e80 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -10,6 +10,7 @@ use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use smallvec::SmallVec; +use state_processing::common::get_attestation_participation_flag_indices; use state_processing::per_epoch_processing::{ errors::EpochProcessingError, EpochProcessingSummary, }; @@ -21,10 +22,13 @@ use std::str::Utf8Error; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::AbstractExecPayload; +use types::consts::altair::{ + TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, +}; use types::{ - AttesterSlashing, BeaconBlockRef, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, - IndexedAttestation, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, - SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, + Attestation, AttestationData, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, + ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, PublicKeyBytes, + SignedAggregateAndProof, SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, }; /// Used for 
Prometheus labels. @@ -69,6 +73,15 @@ impl Default for ValidatorMonitorConfig { } } +/// The goal is to check the behaviour of the BN if it pretends to attest at each slot +/// Check the head/target/source once the state.slot is some slots beyond attestation.data.slot +/// to defend against re-orgs. 16 slots is the minimum to defend against re-orgs of up to 16 slots. +pub const UNAGGREGATED_ATTESTATION_LAG_SLOTS: usize = 16; + +/// Bound the storage size of simulated attestations. The head state can only verify attestations +/// from the current and previous epoch. +pub const MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH: usize = 64; + #[derive(Debug)] pub enum Error { InvalidPubkey(String), @@ -370,7 +383,7 @@ struct MissedBlock { /// /// The intention of this struct is to provide users with more logging and Prometheus metrics around /// validators that they are interested in. -pub struct ValidatorMonitor { +pub struct ValidatorMonitor { /// The validators that require additional monitoring. validators: HashMap, /// A map of validator index (state.validators) to a validator public key. @@ -386,6 +399,8 @@ pub struct ValidatorMonitor { missed_blocks: HashSet, // A beacon proposer cache beacon_proposer_cache: Arc>, + // Unaggregated attestations generated by the committee index at each slot. + unaggregated_attestations: HashMap>, log: Logger, _phantom: PhantomData, } @@ -409,6 +424,7 @@ impl ValidatorMonitor { individual_tracking_threshold, missed_blocks: <_>::default(), beacon_proposer_cache, + unaggregated_attestations: <_>::default(), log, _phantom: PhantomData, }; @@ -426,7 +442,7 @@ impl ValidatorMonitor { } /// Add some validators to `self` for additional monitoring. 
- fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) { + pub fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) { let index_opt = self .indices .iter() @@ -444,9 +460,32 @@ impl ValidatorMonitor { }); } + /// Add an unaggregated attestation + pub fn set_unaggregated_attestation(&mut self, attestation: Attestation) { + let unaggregated_attestations = &mut self.unaggregated_attestations; + + // Pruning, this removes the oldest key/pair of the hashmap if it's greater than MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH + if unaggregated_attestations.len() >= MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH { + if let Some(oldest_slot) = unaggregated_attestations.keys().min().copied() { + unaggregated_attestations.remove(&oldest_slot); + } + } + let slot = attestation.data.slot; + self.unaggregated_attestations.insert(slot, attestation); + } + + pub fn get_unaggregated_attestation(&self, slot: Slot) -> Option<&Attestation> { + self.unaggregated_attestations.get(&slot) + } + /// Reads information from the given `state`. The `state` *must* be valid (i.e, able to be /// imported). - pub fn process_valid_state(&mut self, current_epoch: Epoch, state: &BeaconState) { + pub fn process_valid_state( + &mut self, + current_epoch: Epoch, + state: &BeaconState, + spec: &ChainSpec, + ) { // Add any new validator indices. state .validators() @@ -463,6 +502,7 @@ impl ValidatorMonitor { // Add missed non-finalized blocks for the monitored validators self.add_validators_missed_blocks(state); + self.process_unaggregated_attestations(state, spec); // Update metrics for individual validators. 
for monitored_validator in self.validators.values() { @@ -562,8 +602,10 @@ impl ValidatorMonitor { let end_slot = current_slot.saturating_sub(MISSED_BLOCK_LAG_SLOTS).as_u64(); - // List of proposers per epoch from the beacon_proposer_cache - let mut proposers_per_epoch: Option> = None; + // List of proposers per epoch from the beacon_proposer_cache, and the epoch at which the + // cache is valid. + let mut proposers_per_epoch: Option<(SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>, Epoch)> = + None; for (prev_slot, slot) in (start_slot.as_u64()..=end_slot) .map(Slot::new) @@ -577,25 +619,30 @@ impl ValidatorMonitor { // Found missed block if block_root == prev_block_root { let slot_epoch = slot.epoch(T::slots_per_epoch()); - let prev_slot_epoch = prev_slot.epoch(T::slots_per_epoch()); if let Ok(shuffling_decision_block) = state.proposer_shuffling_decision_root_at_epoch(slot_epoch, *block_root) { - // Only update the cache if it needs to be initialised or because - // slot is at epoch + 1 - if proposers_per_epoch.is_none() || slot_epoch != prev_slot_epoch { - proposers_per_epoch = self.get_proposers_by_epoch_from_cache( - slot_epoch, - shuffling_decision_block, - ); + // Update the cache if it has not yet been initialised, or if it is + // initialised for a prior epoch. This is an optimisation to avoid bouncing + // the proposer shuffling cache lock when there are lots of missed blocks. 
+ if proposers_per_epoch + .as_ref() + .map_or(true, |(_, cached_epoch)| *cached_epoch != slot_epoch) + { + proposers_per_epoch = self + .get_proposers_by_epoch_from_cache( + slot_epoch, + shuffling_decision_block, + ) + .map(|cache| (cache, slot_epoch)); } // Only add missed blocks for the proposer if it's in the list of monitored validators let slot_in_epoch = slot % T::slots_per_epoch(); if let Some(proposer_index) = proposers_per_epoch - .as_deref() - .and_then(|proposers| proposers.get(slot_in_epoch.as_usize())) + .as_ref() + .and_then(|(proposers, _)| proposers.get(slot_in_epoch.as_usize())) { let i = *proposer_index as u64; if let Some(pub_key) = self.indices.get(&i) { @@ -634,7 +681,8 @@ impl ValidatorMonitor { debug!( self.log, "Could not get proposers from cache"; - "epoch" => ?slot_epoch + "epoch" => ?slot_epoch, + "decision_root" => ?shuffling_decision_block, ); } } @@ -654,6 +702,74 @@ impl ValidatorMonitor { .cloned() } + /// Process the unaggregated attestations generated by the service `attestation_simulator_service` + /// and check if the attestation qualifies for a reward matching the flags source/target/head + fn process_unaggregated_attestations(&mut self, state: &BeaconState, spec: &ChainSpec) { + let current_slot = state.slot(); + + // Ensures that we process attestation when there have been skipped slots between blocks + let attested_slots: Vec<_> = self + .unaggregated_attestations + .keys() + .filter(|&&attestation_slot| { + attestation_slot + < current_slot - Slot::new(UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64) + }) + .cloned() + .collect(); + + let unaggregated_attestations = &mut self.unaggregated_attestations; + for slot in attested_slots { + if let Some(unaggregated_attestation) = unaggregated_attestations.remove(&slot) { + // Don't process this attestation, it's too old to be processed by this state. 
+ if slot.epoch(T::slots_per_epoch()) < state.previous_epoch() { + continue; + } + + // We are simulating that unaggregated attestation in a service that produces unaggregated attestations + // every slot, the inclusion_delay shouldn't matter here as long as the minimum value + // that qualifies the committee index for reward is included + let inclusion_delay = spec.min_attestation_inclusion_delay; + + let data = &unaggregated_attestation.data; + + // Get the reward indices for the unaggregated attestation or log an error + match get_attestation_participation_flag_indices( + state, + &unaggregated_attestation.data, + inclusion_delay, + spec, + ) { + Ok(flag_indices) => { + let head_hit = flag_indices.contains(&TIMELY_HEAD_FLAG_INDEX); + let target_hit = flag_indices.contains(&TIMELY_TARGET_FLAG_INDEX); + let source_hit = flag_indices.contains(&TIMELY_SOURCE_FLAG_INDEX); + register_simulated_attestation( + data, head_hit, target_hit, source_hit, &self.log, + ) + } + Err(BeaconStateError::IncorrectAttestationSource) => { + register_simulated_attestation(data, false, false, false, &self.log) + } + Err(err) => { + error!( + self.log, + "Failed to get attestation participation flag indices"; + "error" => ?err, + "unaggregated_attestation" => ?unaggregated_attestation, + ); + } + } + } else { + error!( + self.log, + "Failed to remove unaggregated attestation from the hashmap"; + "slot" => ?slot, + ); + } + } + } + /// Run `func` with the `TOTAL_LABEL` and optionally the /// `individual_id`. 
/// @@ -1905,6 +2021,46 @@ impl ValidatorMonitor { } } +fn register_simulated_attestation( + data: &AttestationData, + head_hit: bool, + target_hit: bool, + source_hit: bool, + log: &Logger, +) { + if head_hit { + metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT); + } else { + metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS); + } + if target_hit { + metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT); + } else { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS, + ); + } + if source_hit { + metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT); + } else { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS, + ); + } + + debug!( + log, + "Simulated attestation evaluated"; + "attestation_source" => ?data.source.root, + "attestation_target" => ?data.target.root, + "attestation_head" => ?data.beacon_block_root, + "attestation_slot" => ?data.slot, + "source_hit" => source_hit, + "target_hit" => target_hit, + "head_hit" => head_hit, + ); +} + /// Returns the duration since the unix epoch. 
pub fn timestamp_now() -> Duration { SystemTime::now() diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index fdc37b5529..ff83b25320 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -1,8 +1,10 @@ #![cfg(not(debug_assertions))] +use beacon_chain::attestation_simulator::produce_unaggregated_attestation; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; -use beacon_chain::{StateSkipConfig, WhenSlotSkipped}; +use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; +use beacon_chain::{metrics, StateSkipConfig, WhenSlotSkipped}; use lazy_static::lazy_static; use std::sync::Arc; use tree_hash::TreeHash; @@ -15,6 +17,91 @@ lazy_static! { static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); } +/// This test builds a chain that is testing the performance of the unaggregated attestations +/// produced by the attestation simulator service. 
+#[tokio::test] +async fn produces_attestations_from_attestation_simulator_service() { + // Produce 2 epochs, or 64 blocks + let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 2; + + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .keypairs(KEYPAIRS[..].to_vec()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + let chain = &harness.chain; + + // Test all valid committee indices and their rewards for all slots in the chain + // using validator monitor + for slot in 0..=num_blocks_produced { + // We do not produce at slot=0, and there's no committe cache available anyway + if slot > 0 && slot <= num_blocks_produced { + harness.advance_slot(); + + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + } + // Set the state to the current slot + let slot = Slot::from(slot); + let mut state = chain + .state_at_slot(slot, StateSkipConfig::WithStateRoots) + .expect("should get state"); + + // Prebuild the committee cache for the current epoch + state + .build_committee_cache(RelativeEpoch::Current, &harness.chain.spec) + .unwrap(); + + // Produce an unaggragetated attestation + produce_unaggregated_attestation(chain.clone(), chain.slot().unwrap()); + + // Verify that the ua is stored in validator monitor + let validator_monitor = chain.validator_monitor.read(); + validator_monitor + .get_unaggregated_attestation(slot) + .expect("should get unaggregated attestation"); + } + + // Compare the prometheus metrics that evaluates the performance of the unaggregated attestations + let hit_prometheus_metrics = vec![ + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, + ]; + let miss_prometheus_metrics = vec![ + 
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, + ]; + + // Expected metrics count should only apply to hit metrics as miss metrics are never set, nor can be found + // when gathering prometheus metrics. If they are found, which should not, it will diff from 0 and fail the test + let expected_miss_metrics_count = 0; + let expected_hit_metrics_count = + num_blocks_produced - UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64; + lighthouse_metrics::gather().iter().for_each(|mf| { + if hit_prometheus_metrics.contains(&mf.get_name()) { + assert_eq!( + mf.get_metric()[0].get_counter().get_value() as u64, + expected_hit_metrics_count + ); + } + if miss_prometheus_metrics.contains(&mf.get_name()) { + assert_eq!( + mf.get_metric()[0].get_counter().get_value() as u64, + expected_miss_metrics_count + ); + } + }); +} + /// This test builds a chain that is just long enough to finalize an epoch then it produces an /// attestation at each slot from genesis through to three epochs past the head. /// diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 858642278c..1bd1506605 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2179,7 +2179,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .unwrap(); let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone(), None); + mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); // Initialise a new beacon chain from the finalized checkpoint. // The slot clock must be set to a time ahead of the checkpoint state. 
diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 5bc6b758c2..d9ff57b1b0 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -1,9 +1,9 @@ -use lazy_static::lazy_static; - use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; +use lazy_static::lazy_static; +use logging::test_logger; use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; // Should ideally be divisible by 3. @@ -23,6 +23,7 @@ fn get_harness( let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .logger(test_logger()) .fresh_ephemeral_store() .mock_execution_layer() .validator_monitor_config(ValidatorMonitorConfig { @@ -39,6 +40,83 @@ fn get_harness( harness } +// Regression test for off-by-one caching issue in missed block detection. +#[tokio::test] +async fn missed_blocks_across_epochs() { + let slots_per_epoch = E::slots_per_epoch(); + let all_validators = (0..VALIDATOR_COUNT).collect::>(); + + let harness = get_harness(VALIDATOR_COUNT, vec![]); + let validator_monitor = &harness.chain.validator_monitor; + let mut genesis_state = harness.get_current_state(); + let genesis_state_root = genesis_state.update_tree_hash_cache().unwrap(); + let genesis_block_root = harness.head_block_root(); + + // Skip a slot in the first epoch (to prime the cache inside the missed block function) and then + // at a different offset in the 2nd epoch. The missed block in the 2nd epoch MUST NOT reuse + // the cache from the first epoch. 
+ let first_skip_offset = 3; + let second_skip_offset = slots_per_epoch / 2; + assert_ne!(first_skip_offset, second_skip_offset); + let first_skip_slot = Slot::new(first_skip_offset); + let second_skip_slot = Slot::new(slots_per_epoch + second_skip_offset); + let slots = (1..2 * slots_per_epoch) + .map(Slot::new) + .filter(|slot| *slot != first_skip_slot && *slot != second_skip_slot) + .collect::>(); + + let (block_roots_by_slot, state_roots_by_slot, _, head_state) = harness + .add_attested_blocks_at_slots(genesis_state, genesis_state_root, &slots, &all_validators) + .await; + + // Prime the proposer shuffling cache. + let mut proposer_shuffling_cache = harness.chain.beacon_proposer_cache.lock(); + for epoch in [0, 1].into_iter().map(Epoch::new) { + let start_slot = epoch.start_slot(slots_per_epoch) + 1; + let state = harness + .get_hot_state(state_roots_by_slot[&start_slot]) + .unwrap(); + let decision_root = state + .proposer_shuffling_decision_root(genesis_block_root) + .unwrap(); + proposer_shuffling_cache + .insert( + epoch, + decision_root, + state + .get_beacon_proposer_indices(&harness.chain.spec) + .unwrap(), + state.fork(), + ) + .unwrap(); + } + drop(proposer_shuffling_cache); + + // Monitor the validator that proposed the block at the same offset in the 0th epoch as the skip + // in the 1st epoch. + let innocent_proposer_slot = Slot::new(second_skip_offset); + let innocent_proposer = harness + .get_block(block_roots_by_slot[&innocent_proposer_slot]) + .unwrap() + .message() + .proposer_index(); + + let mut vm_write = validator_monitor.write(); + + // Call `process_` once to update validator indices. + vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec); + // Start monitoring the innocent validator. + vm_write.add_validator_pubkey(KEYPAIRS[innocent_proposer as usize].pk.compress()); + // Check for missed blocks. 
+ vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec); + + // My client is innocent, your honour! + assert_eq!( + vm_write.get_monitored_validator_missed_block_count(innocent_proposer), + 0 + ); +} + #[tokio::test] async fn produces_missed_blocks() { let validator_count = 16; @@ -110,7 +188,7 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor = harness1.chain.validator_monitor.write(); - validator_monitor.process_valid_state(nb_epoch_to_simulate, _state); + validator_monitor.process_valid_state(nb_epoch_to_simulate, _state, &harness1.chain.spec); // We should have one entry in the missed blocks map assert_eq!( @@ -193,7 +271,7 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor2 = harness2.chain.validator_monitor.write(); - validator_monitor2.process_valid_state(epoch, _state2); + validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We should have one entry in the missed blocks map assert_eq!( validator_monitor2.get_monitored_validator_missed_block_count(validator_index as u64), @@ -219,7 +297,7 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor - validator_monitor2.process_valid_state(epoch, _state2); + validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We shouldn't have any entry in the missed blocks map assert_ne!(validator_index, not_monitored_validator_index); @@ -288,7 +366,7 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor3 = 
harness3.chain.validator_monitor.write(); - validator_monitor3.process_valid_state(epoch, _state3); + validator_monitor3.process_valid_state(epoch, _state3, &harness3.chain.spec); // We shouldn't have one entry in the missed blocks map assert_eq!( diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 1c675d280f..045b06a1e7 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -60,7 +60,6 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; -use types::consts::deneb::MAX_BLOBS_PER_BLOCK; use types::{Attestation, Hash256, SignedAggregateAndProof, SubnetId}; use types::{EthSpec, Slot}; use work_reprocessing_queue::IgnoredRpcBlock; @@ -106,7 +105,7 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; -/// The maximum number of queued `SignedBlobSidecar` objects received on gossip that +/// The maximum number of queued `BlobSidecar` objects received on gossip that /// will be stored before we start dropping them. const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; @@ -168,8 +167,7 @@ const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; /// The maximum number of queued `BlobsByRangeRequest` objects received from the network RPC that /// will be stored before we start dropping them. -const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = - MAX_BLOCKS_BY_RANGE_QUEUE_LEN * MAX_BLOBS_PER_BLOCK as usize; +const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1024; /// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that /// will be stored before we start dropping them. 
@@ -1304,7 +1302,7 @@ impl BeaconProcessor { ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL, - gossip_block_queue.len() as i64, + gossip_blob_queue.len() as i64, ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 9082a7d474..fa7d7d7b9a 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -49,7 +49,7 @@ lazy_static::lazy_static! { // Gossip blobs. pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_gossip_blob_queue_total", - "Count of blocks from gossip waiting to be verified." + "Count of blobs from gossip waiting to be verified." ); // Gossip Exits. pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result = try_create_int_gauge( diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index bfd55c3beb..9c88eccc70 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -2,6 +2,7 @@ use crate::address_change_broadcast::broadcast_address_changes_at_capella; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; +use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; @@ -34,6 +35,7 @@ use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; +use std::time::{SystemTime, UNIX_EPOCH}; use timer::spawn_timer; use tokio::sync::oneshot; use types::{ @@ -44,6 +46,11 @@ use types::{ /// Interval between polling the eth1 node for genesis information. 
pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000; +/// Reduces the blob availability period by some epochs. Helps prevent the user +/// from starting a genesis sync so near to the blob pruning window that blobs +/// have been pruned before they can manage to sync the chain. +const BLOB_AVAILABILITY_REDUCTION_EPOCHS: u64 = 2; + /// Builds a `Client` instance. /// /// ## Notes @@ -251,6 +258,45 @@ where let genesis_state = genesis_state(&runtime_context, &config, log).await?; + // If the user has not explicitly allowed genesis sync, prevent + // them from trying to sync from genesis if we're outside of the + // blob P2P availability window. + // + // It doesn't make sense to try and sync the chain if we can't + // verify blob availability by downloading blobs from the P2P + // network. The user should do a checkpoint sync instead. + if !config.allow_insecure_genesis_sync { + if let Some(deneb_fork_epoch) = spec.deneb_fork_epoch { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {e:}"))? + .as_secs(); + let genesis_time = genesis_state.genesis_time(); + let deneb_time = + genesis_time + (deneb_fork_epoch.as_u64() * spec.seconds_per_slot); + + // Shrink the blob availability window so users don't start + // a sync right before blobs start to disappear from the P2P + // network. + let reduced_p2p_availability_epochs = spec + .min_epochs_for_blob_sidecars_requests + .saturating_sub(BLOB_AVAILABILITY_REDUCTION_EPOCHS); + let blob_availability_window = reduced_p2p_availability_epochs + * TEthSpec::slots_per_epoch() + * spec.seconds_per_slot; + + if now > deneb_time + blob_availability_window { + return Err( + "Syncing from genesis is insecure and incompatible with data availability checks. \ + You should instead perform a checkpoint sync from a trusted node using the --checkpoint-sync-url option. 
\ + For a list of public endpoints, see: https://eth-clients.github.io/checkpoint-sync-endpoints/ \ + Alternatively, use --allow-insecure-genesis-sync if the risks are understood." + .to_string(), + ); + } + } + } + builder.genesis_state(genesis_state).map(|v| (v, None))? } ClientGenesis::WeakSubjSszBytes { @@ -839,6 +885,10 @@ where runtime_context.executor.clone(), beacon_chain.clone(), ); + start_attestation_simulator_service( + beacon_chain.task_executor.clone(), + beacon_chain.clone(), + ); } Ok(Client { diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 20afdb948b..275f999864 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -78,6 +78,7 @@ pub struct Config { pub beacon_processor: BeaconProcessorConfig, pub genesis_state_url: Option, pub genesis_state_url_timeout: Duration, + pub allow_insecure_genesis_sync: bool, } impl Default for Config { @@ -108,6 +109,7 @@ impl Default for Config { genesis_state_url: <_>::default(), // This default value should always be overwritten by the CLI default value. genesis_state_url_timeout: Duration::from_secs(60), + allow_insecure_genesis_sync: false, } } } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 362f5b0b2b..bc8e4e3140 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -8,17 +8,19 @@ use crate::HttpJsonRpc; use lru::LruCache; use slog::{debug, error, info, warn, Logger}; use std::future::Future; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; +use types::non_zero_usize::new_non_zero_usize; use types::ExecutionBlockHash; /// The number of payload IDs that will be stored for each `Engine`. /// /// Since the size of each value is small (~800 bytes) a large number is used for safety. 
-const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; +const PAYLOAD_ID_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(512); const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes /// Stores the remembered state of a engine. diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 6b0277ff31..868d819446 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -29,6 +29,7 @@ use std::collections::HashMap; use std::fmt; use std::future::Future; use std::io::Write; +use std::num::NonZeroUsize; use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; @@ -42,6 +43,7 @@ use tokio_stream::wrappers::WatchStream; use tree_hash::TreeHash; use types::beacon_block_body::KzgCommitments; use types::builder_bid::BuilderBid; +use types::non_zero_usize::new_non_zero_usize; use types::payload::BlockProductionVersion; use types::{ AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock, @@ -68,7 +70,7 @@ pub const DEFAULT_JWT_FILE: &str = "jwt.hex"; /// Each time the `ExecutionLayer` retrieves a block from an execution node, it stores that block /// in an LRU cache to avoid redundant lookups. This is the size of that cache. -const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128; +const EXECUTION_BLOCKS_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); /// A fee recipient address for use during block production. Only used as a very last resort if /// there is no address provided by the user. 
@@ -176,15 +178,26 @@ impl From>> for BlockProposalContents> { fn from(item: BlockProposalContents>) -> Self { - let block_value = item.block_value().to_owned(); - - let blinded_payload: BlockProposalContents> = + match item { BlockProposalContents::Payload { - payload: item.to_payload().execution_payload().into(), + payload, block_value, - }; - - blinded_payload + } => BlockProposalContents::Payload { + payload: payload.execution_payload().into(), + block_value, + }, + BlockProposalContents::PayloadAndBlobs { + payload, + block_value, + kzg_commitments, + blobs_and_proofs: _, + } => BlockProposalContents::PayloadAndBlobs { + payload: payload.execution_payload().into(), + block_value, + kzg_commitments, + blobs_and_proofs: None, + }, + } } } @@ -322,10 +335,7 @@ struct Inner { proposers: RwLock>, executor: TaskExecutor, payload_cache: PayloadCache, - builder_profit_threshold: Uint256, log: Logger, - always_prefer_builder_payload: bool, - ignore_builder_override_suggestion_threshold: f32, /// Track whether the last `newPayload` call errored. /// /// This is used *only* in the informational sync status endpoint, so that a VC using this @@ -352,11 +362,7 @@ pub struct Config { pub jwt_version: Option, /// Default directory for the jwt secret if not provided through cli. pub default_datadir: PathBuf, - /// The minimum value of an external payload for it to be considered in a proposal. - pub builder_profit_threshold: u128, pub execution_timeout_multiplier: Option, - pub always_prefer_builder_payload: bool, - pub ignore_builder_override_suggestion_threshold: f32, } /// Provides access to one execution engine and provides a neat interface for consumption by the @@ -366,40 +372,6 @@ pub struct ExecutionLayer { inner: Arc>, } -/// This function will return the percentage difference between 2 U256 values, using `base_value` -/// as the denominator. It is accurate to 7 decimal places which is about the precision of -/// an f32. 
-/// -/// If some error is encountered in the calculation, None will be returned. -fn percentage_difference_u256(base_value: Uint256, comparison_value: Uint256) -> Option { - if base_value == Uint256::zero() { - return None; - } - // this is the total supply of ETH in WEI - let max_value = Uint256::from(12u8) * Uint256::exp10(25); - if base_value > max_value || comparison_value > max_value { - return None; - } - - // Now we should be able to calculate the difference without division by zero or overflow - const PRECISION: usize = 7; - let precision_factor = Uint256::exp10(PRECISION); - let scaled_difference = if base_value <= comparison_value { - (comparison_value - base_value) * precision_factor - } else { - (base_value - comparison_value) * precision_factor - }; - let scaled_proportion = scaled_difference / base_value; - // max value of scaled difference is 1.2 * 10^33, well below the max value of a u128 / f64 / f32 - let percentage = - 100.0f64 * scaled_proportion.low_u128() as f64 / precision_factor.low_u128() as f64; - if base_value <= comparison_value { - Some(percentage as f32) - } else { - Some(-percentage as f32) - } -} - impl ExecutionLayer { /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. 
pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { @@ -412,10 +384,7 @@ impl ExecutionLayer { jwt_id, jwt_version, default_datadir, - builder_profit_threshold, execution_timeout_multiplier, - always_prefer_builder_payload, - ignore_builder_override_suggestion_threshold, } = config; if urls.len() > 1 { @@ -476,10 +445,7 @@ impl ExecutionLayer { execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, payload_cache: PayloadCache::default(), - builder_profit_threshold: Uint256::from(builder_profit_threshold), log, - always_prefer_builder_payload, - ignore_builder_override_suggestion_threshold, last_new_payload_errored: RwLock::new(false), }; @@ -517,7 +483,6 @@ impl ExecutionLayer { self.log(), "Using external block builder"; "builder_url" => ?builder_url, - "builder_profit_threshold" => self.inner.builder_profit_threshold.as_u128(), "local_user_agent" => builder_client.get_user_agent(), ); self.inner.builder.swap(Some(Arc::new(builder_client))); @@ -823,6 +788,7 @@ impl ExecutionLayer { builder_params: BuilderParams, current_fork: ForkName, spec: &ChainSpec, + builder_boost_factor: Option, block_production_version: BlockProductionVersion, ) -> Result, Error> { let payload_result_type = match block_production_version { @@ -833,6 +799,7 @@ impl ExecutionLayer { forkchoice_update_params, builder_params, current_fork, + builder_boost_factor, spec, ) .await @@ -857,6 +824,7 @@ impl ExecutionLayer { forkchoice_update_params, builder_params, current_fork, + None, spec, ) .await? 
@@ -977,6 +945,7 @@ impl ExecutionLayer { (relay_result, local_result) } + #[allow(clippy::too_many_arguments)] async fn determine_and_fetch_payload( &self, parent_hash: ExecutionBlockHash, @@ -984,6 +953,7 @@ impl ExecutionLayer { forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, current_fork: ForkName, + builder_boost_factor: Option, spec: &ChainSpec, ) -> Result>, Error> { let Some(builder) = self.builder() else { @@ -1135,18 +1105,36 @@ impl ExecutionLayer { ))); } - if self.inner.always_prefer_builder_payload { - return ProvenancedPayload::try_from(relay.data.message); - } - let relay_value = *relay.data.message.value(); + + let boosted_relay_value = match builder_boost_factor { + Some(builder_boost_factor) => { + (relay_value / 100).saturating_mul(builder_boost_factor.into()) + } + None => relay_value, + }; + let local_value = *local.block_value(); - if local_value >= relay_value { + if local_value >= boosted_relay_value { info!( self.log(), "Local block is more profitable than relay block"; "local_block_value" => %local_value, + "relay_value" => %relay_value, + "boosted_relay_value" => %boosted_relay_value, + "builder_boost_factor" => ?builder_boost_factor, + ); + return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( + local.try_into()?, + ))); + } + + if local.should_override_builder().unwrap_or(false) { + info!( + self.log(), + "Using local payload because execution engine suggested we ignore builder payload"; + "local_block_value" => %local_value, "relay_value" => %relay_value ); return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( @@ -1154,43 +1142,13 @@ impl ExecutionLayer { ))); } - if relay_value < self.inner.builder_profit_threshold { - info!( - self.log(), - "Builder payload ignored"; - "info" => "using local payload", - "reason" => format!("payload value of {} does not meet user-configured profit-threshold of {}", relay_value, self.inner.builder_profit_threshold), - 
"relay_block_hash" => ?header.block_hash(), - "parent_hash" => ?parent_hash, - ); - return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( - local.try_into()?, - ))); - } - - if local.should_override_builder().unwrap_or(false) { - let percentage_difference = - percentage_difference_u256(local_value, relay_value); - if percentage_difference.map_or(false, |percentage| { - percentage < self.inner.ignore_builder_override_suggestion_threshold - }) { - info!( - self.log(), - "Using local payload because execution engine suggested we ignore builder payload"; - "local_block_value" => %local_value, - "relay_value" => %relay_value - ); - return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( - local.try_into()?, - ))); - } - } - info!( self.log(), "Relay block is more profitable than local block"; "local_block_value" => %local_value, - "relay_value" => %relay_value + "relay_value" => %relay_value, + "boosted_relay_value" => %boosted_relay_value, + "builder_boost_factor" => ?builder_boost_factor ); Ok(ProvenancedPayload::try_from(relay.data.message)?) 
@@ -2361,42 +2319,4 @@ mod test { }) .await; } - - #[tokio::test] - async fn percentage_difference_u256_tests() { - // ensure function returns `None` when base value is zero - assert_eq!(percentage_difference_u256(0.into(), 1.into()), None); - // ensure function returns `None` when either value is greater than 120 Million ETH - let max_value = Uint256::from(12u8) * Uint256::exp10(25); - assert_eq!( - percentage_difference_u256(1u8.into(), max_value + Uint256::from(1u8)), - None - ); - assert_eq!( - percentage_difference_u256(max_value + Uint256::from(1u8), 1u8.into()), - None - ); - // it should work up to max value - assert_eq!( - percentage_difference_u256(max_value, max_value / Uint256::from(2u8)), - Some(-50f32) - ); - // should work when base value is greater than comparison value - assert_eq!( - percentage_difference_u256(4u8.into(), 3u8.into()), - Some(-25f32) - ); - // should work when comparison value is greater than base value - assert_eq!( - percentage_difference_u256(4u8.into(), 5u8.into()), - Some(25f32) - ); - // should be accurate to 7 decimal places - let result = - percentage_difference_u256(Uint256::from(31415926u64), Uint256::from(13371337u64)) - .expect("should get percentage"); - // result = -57.4377116 - assert!(result > -57.43772); - assert!(result <= -57.43771); - } } diff --git a/beacon_node/execution_layer/src/payload_cache.rs b/beacon_node/execution_layer/src/payload_cache.rs index 1155b1ca3a..1a2864c194 100644 --- a/beacon_node/execution_layer/src/payload_cache.rs +++ b/beacon_node/execution_layer/src/payload_cache.rs @@ -1,10 +1,12 @@ use eth2::types::FullPayloadContents; use lru::LruCache; use parking_lot::Mutex; +use std::num::NonZeroUsize; use tree_hash::TreeHash; +use types::non_zero_usize::new_non_zero_usize; use types::{EthSpec, Hash256}; -pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10; +pub const DEFAULT_PAYLOAD_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(10); /// A cache mapping execution payloads by tree hash roots. 
pub struct PayloadCache { diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 7da2022d58..3d4ea51f4b 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -335,8 +335,9 @@ pub fn serve( .el .get_payload_by_root(&root) .ok_or_else(|| reject("missing payload for tx root"))?; - let resp = ForkVersionedResponse { + let resp: ForkVersionedResponse<_> = ForkVersionedResponse { version: Some(fork_name), + metadata: Default::default(), data: payload, }; @@ -616,8 +617,9 @@ pub fn serve( .spec .fork_name_at_epoch(slot.epoch(E::slots_per_epoch())); let signed_bid = SignedBuilderBid { message, signature }; - let resp = ForkVersionedResponse { + let resp: ForkVersionedResponse<_> = ForkVersionedResponse { version: Some(fork_name), + metadata: Default::default(), data: signed_bid, }; let json_bid = serde_json::to_string(&resp) diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 72f0388e24..7afeafc321 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,7 +1,6 @@ use crate::{ test_utils::{ - MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, - DEFAULT_TERMINAL_DIFFICULTY, + MockServer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, }, Config, *, }; @@ -30,7 +29,6 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, None, None, - None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec, None, @@ -43,7 +41,6 @@ impl MockExecutionLayer { terminal_block: u64, shanghai_time: Option, cancun_time: Option, - builder_threshold: Option, jwt_key: Option, spec: ChainSpec, kzg: Option, @@ -72,7 +69,6 @@ impl MockExecutionLayer { 
execution_endpoints: vec![url], secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), - builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), ..Default::default() }; let el = @@ -143,6 +139,7 @@ impl MockExecutionLayer { builder_params, ForkName::Merge, &self.spec, + None, BlockProductionVersion::FullV2, ) .await @@ -182,6 +179,7 @@ impl MockExecutionLayer { builder_params, ForkName::Merge, &self.spec, + None, BlockProductionVersion::BlindedV2, ) .await diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 92be94e603..f0be511147 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -35,7 +35,6 @@ pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; -pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; pub const DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI: u128 = 10_000_000_000_000_000; pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000; pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 299bc019c4..ad71e9e9d0 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -3,13 +3,13 @@ use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; use lru::LruCache; use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; +use std::num::NonZeroUsize; use std::sync::Arc; use types::beacon_block::BlindedBeaconBlock; -use warp_utils::reject::{ - beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, -}; +use types::non_zero_usize::new_non_zero_usize; +use 
warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; -const STATE_CACHE_SIZE: usize = 2; +const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2); /// Fetch block rewards for blocks from the canonical chain. pub fn get_block_rewards( @@ -164,11 +164,7 @@ pub fn compute_block_rewards( .build_all_committee_caches(&chain.spec) .map_err(beacon_state_error)?; - state_cache - .get_or_insert((parent_root, block.slot()), || state) - .ok_or_else(|| { - custom_server_error("LRU cache insert should always succeed".into()) - })? + state_cache.get_or_insert((parent_root, block.slot()), || state) }; // Compute block reward. diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 94926fcc69..a836f6216c 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -76,12 +76,12 @@ use tokio_stream::{ StreamExt, }; use types::{ - Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, - CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256, - ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, - SyncContributionData, + fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, + AttesterSlashing, BeaconStateError, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, + ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, RelativeEpoch, + SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -2397,6 +2397,7 @@ pub fn serve( }), _ => 
Ok(warp::reply::json(&ForkVersionedResponse { version: Some(fork_name), + metadata: EmptyMetadata {}, data: bootstrap, }) .into_response()), @@ -2444,6 +2445,7 @@ pub fn serve( }), _ => Ok(warp::reply::json(&ForkVersionedResponse { version: Some(fork_name), + metadata: EmptyMetadata {}, data: update, }) .into_response()), @@ -2491,6 +2493,7 @@ pub fn serve( }), _ => Ok(warp::reply::json(&ForkVersionedResponse { version: Some(fork_name), + metadata: EmptyMetadata {}, data: update, }) .into_response()), @@ -3191,7 +3194,7 @@ pub fn serve( ); if endpoint_version == V3 { - produce_block_v3(endpoint_version, accept_header, chain, slot, query).await + produce_block_v3(accept_header, chain, slot, query).await } else { produce_block_v2(endpoint_version, accept_header, chain, slot, query).await } diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 09b95136b5..6b8a1bb1c3 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -1,17 +1,3 @@ -use bytes::Bytes; -use std::sync::Arc; -use types::{payload::BlockProductionVersion, *}; - -use beacon_chain::{ - BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, -}; -use eth2::types::{self as api_types, EndpointVersion, SkipRandaoVerification}; -use ssz::Encode; -use warp::{ - hyper::{Body, Response}, - Reply, -}; - use crate::{ build_block_contents, version::{ @@ -20,6 +6,20 @@ use crate::{ fork_versioned_response, inconsistent_fork_rejection, }, }; +use beacon_chain::{ + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, +}; +use bytes::Bytes; +use eth2::types::{ + self as api_types, EndpointVersion, ProduceBlockV3Metadata, SkipRandaoVerification, +}; +use ssz::Encode; +use std::sync::Arc; +use types::{payload::BlockProductionVersion, *}; +use warp::{ + hyper::{Body, Response}, + Reply, +}; pub fn get_randao_verification( query: &api_types::ValidatorBlocksQuery, @@ 
-40,7 +40,6 @@ pub fn get_randao_verification( } pub async fn produce_block_v3( - endpoint_version: EndpointVersion, accept_header: Option, chain: Arc>, slot: Slot, @@ -59,8 +58,9 @@ pub async fn produce_block_v3( .produce_block_with_verification( randao_reveal, slot, - query.graffiti.map(Into::into), + query.graffiti, randao_verification, + query.builder_boost_factor, BlockProductionVersion::V3, ) .await @@ -68,22 +68,28 @@ pub async fn produce_block_v3( warp_utils::reject::custom_bad_request(format!("failed to fetch a block: {:?}", e)) })?; - build_response_v3(chain, block_response_type, endpoint_version, accept_header) + build_response_v3(chain, block_response_type, accept_header) } pub fn build_response_v3( chain: Arc>, block_response: BeaconBlockResponseWrapper, - endpoint_version: EndpointVersion, accept_header: Option, ) -> Result, warp::Rejection> { let fork_name = block_response .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; let execution_payload_value = block_response.execution_payload_value(); - let consensus_block_value = block_response.consensus_block_value(); + let consensus_block_value = block_response.consensus_block_value_wei(); let execution_payload_blinded = block_response.is_blinded(); + let metadata = ProduceBlockV3Metadata { + consensus_version: fork_name, + execution_payload_blinded, + execution_payload_value, + consensus_block_value, + }; + let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?; match accept_header { @@ -100,12 +106,17 @@ pub fn build_response_v3( .map_err(|e| -> warp::Rejection { warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) }), - _ => fork_versioned_response(endpoint_version, fork_name, block_contents) - .map(|response| warp::reply::json(&response).into_response()) - .map(|res| add_consensus_version_header(res, fork_name)) - .map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded)) - .map(|res| 
add_execution_payload_value_header(res, execution_payload_value)) - .map(|res| add_consensus_block_value_header(res, consensus_block_value)), + _ => Ok(warp::reply::json(&ForkVersionedResponse { + version: Some(fork_name), + metadata, + data: block_contents, + }) + .into_response()) + .map(|res| res.into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) + .map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded)) + .map(|res| add_execution_payload_value_header(res, execution_payload_value)) + .map(|res| add_consensus_block_value_header(res, consensus_block_value)), } } @@ -130,6 +141,7 @@ pub async fn produce_blinded_block_v2( slot, query.graffiti.map(Into::into), randao_verification, + None, BlockProductionVersion::BlindedV2, ) .await @@ -160,6 +172,7 @@ pub async fn produce_block_v2( slot, query.graffiti.map(Into::into), randao_verification, + None, BlockProductionVersion::FullV2, ) .await diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 432d91b723..8b03771540 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -60,7 +60,7 @@ pub async fn publish_block (block_contents, true), ProvenancedBlock::Builder(block_contents, _) => (block_contents, false), }; - let block = block_contents.inner_block(); + let block = block_contents.inner_block().clone(); let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); @@ -113,7 +113,10 @@ pub async fn publish_block b, - Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown)) => { + Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown)) + | Err(BlockContentsError::BlobError( + beacon_chain::blob_verification::GossipBlobError::RepeatBlob { .. }, + )) => { // Allow the status code for duplicate blocks to be overridden based on config. 
return Ok(warp::reply::with_status( warp::reply::json(&ErrorMessage { @@ -172,28 +175,20 @@ pub async fn publish_block { - if chain_clone - .observed_block_producers - .read() - .proposer_has_been_observed(block_clone.message(), block_root) - .map_err(|e| BlockError::BeaconChainError(e.into()))? - .is_slashable() - { - warn!( - log_clone, - "Not publishing equivocating block"; - "slot" => block_clone.slot() - ); - Err(BlockError::Slashable) - } else { - publish_block( - block_clone, - blobs_opt, - sender_clone, - log_clone, - seen_timestamp, - ) - } + check_slashable( + &chain_clone, + &blobs_opt, + block_root, + &block_clone, + &log_clone, + )?; + publish_block( + block_clone, + blobs_opt, + sender_clone, + log_clone, + seen_timestamp, + ) } }; @@ -450,3 +445,46 @@ fn late_block_logging>( ) } } + +/// Check if any of the blobs or the block are slashable. Returns `BlockError::Slashable` if so. +fn check_slashable( + chain_clone: &BeaconChain, + blobs_opt: &Option>, + block_root: Hash256, + block_clone: &SignedBeaconBlock>, + log_clone: &Logger, +) -> Result<(), BlockError> { + let slashable_cache = chain_clone.observed_slashable.read(); + if let Some(blobs) = blobs_opt.as_ref() { + blobs.iter().try_for_each(|blob| { + if slashable_cache + .is_slashable(blob.slot(), blob.block_proposer_index(), blob.block_root()) + .map_err(|e| BlockError::BeaconChainError(e.into()))? + { + warn!( + log_clone, + "Not publishing equivocating blob"; + "slot" => block_clone.slot() + ); + return Err(BlockError::Slashable); + } + Ok(()) + })?; + }; + if slashable_cache + .is_slashable( + block_clone.slot(), + block_clone.message().proposer_index(), + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))? 
+ { + warn!( + log_clone, + "Not publishing equivocating block"; + "slot" => block_clone.slot() + ); + return Err(BlockError::Slashable); + } + Ok(()) +} diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index bafb573819..b87fdf6088 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -7,7 +7,7 @@ use beacon_processor::{BeaconProcessor, BeaconProcessorChannels, BeaconProcessor use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use lighthouse_network::{ - discv5::enr::{CombinedKey, EnrBuilder}, + discv5::enr::CombinedKey, libp2p::swarm::{ behaviour::{ConnectionEstablished, FromSwarm}, ConnectionId, NetworkBehaviour, @@ -138,7 +138,7 @@ pub async fn create_api_server( syncnets: EnrSyncCommitteeBitfield::::default(), }); let enr_key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); + let enr = Enr::builder().build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( enr.clone(), meta_data, diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 7b06901243..7cd5e6700a 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,11 +1,15 @@ -use crate::api_types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; use crate::api_types::EndpointVersion; use eth2::{ CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, }; use serde::Serialize; -use types::{ForkName, ForkVersionedResponse, InconsistentFork, Uint256}; +use types::{ + fork_versioned_response::{ + ExecutionOptimisticFinalizedForkVersionedResponse, ExecutionOptimisticFinalizedMetadata, + }, + ForkName, ForkVersionedResponse, InconsistentFork, Uint256, +}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); @@ -26,6 +30,7 @@ pub 
fn fork_versioned_response( }; Ok(ForkVersionedResponse { version: fork_name, + metadata: Default::default(), data, }) } @@ -46,8 +51,10 @@ pub fn execution_optimistic_finalized_fork_versioned_response( }; Ok(ExecutionOptimisticFinalizedForkVersionedResponse { version: fork_name, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), + metadata: ExecutionOptimisticFinalizedMetadata { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, data, }) } @@ -73,12 +80,12 @@ pub fn add_execution_payload_blinded_header( /// Add the `Eth-Execution-Payload-Value` header to a response. pub fn add_execution_payload_value_header( reply: T, - execution_payload_value: Option, + execution_payload_value: Uint256, ) -> Response { reply::with_header( reply, EXECUTION_PAYLOAD_VALUE_HEADER, - execution_payload_value.unwrap_or_default().to_string(), + execution_payload_value.to_string(), ) .into_response() } @@ -86,12 +93,12 @@ pub fn add_execution_payload_value_header( /// Add the `Eth-Consensus-Block-Value` header to a response. pub fn add_consensus_block_value_header( reply: T, - consensus_payload_value: Option, + consensus_payload_value: Uint256, ) -> Response { reply::with_header( reply, CONSENSUS_BLOCK_VALUE_HEADER, - consensus_payload_value.unwrap_or_default().to_string(), + consensus_payload_value.to_string(), ) .into_response() } diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 7961b32c57..eb39bdd115 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1226,9 +1226,13 @@ pub async fn blinded_equivocation_gossip() { ); } -/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. 
+/// This test checks that a block that is valid from both a gossip and +/// consensus perspective but that equivocates **late** is rejected when using +/// `broadcast_validation=consensus_and_equivocation`. /// -/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. +/// This test is unique in that we can't actually test the HTTP API directly, +/// but instead have to hook into the `publish_blocks` code manually. This is +/// in order to handle the late equivocation case. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn blinded_equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 48a2f450e2..210c4d2550 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -4,6 +4,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, ChainConfig, }; +use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; use http_api::test_utils::InteractiveTester; @@ -21,8 +22,6 @@ use types::{ MinimalEthSpec, ProposerPreparationData, Slot, }; -use eth2::types::ForkVersionedBeaconBlockType::{Blinded, Full}; - type E = MainnetEthSpec; // Test that the deposit_contract endpoint returns the correct chain_id and address. 
@@ -113,8 +112,8 @@ async fn state_by_root_pruned_from_fork_choice() { .unwrap() .unwrap(); - assert!(response.finalized.unwrap()); - assert!(!response.execution_optimistic.unwrap()); + assert!(response.metadata.finalized.unwrap()); + assert!(!response.metadata.execution_optimistic.unwrap()); let mut state = response.data; assert_eq!(state.update_tree_hash_cache().unwrap(), state_root); @@ -619,15 +618,17 @@ pub async fn proposer_boost_re_org_test( let randao_reveal = harness .sign_randao_reveal(&state_b, proposer_index, slot_c) .into(); - let unsigned_block_type = tester + let (unsigned_block_type, _) = tester .client - .get_validator_blocks_v3::(slot_c, &randao_reveal, None) + .get_validator_blocks_v3::(slot_c, &randao_reveal, None, None) .await .unwrap(); - let (unsigned_block_c, block_c_blobs) = match unsigned_block_type { - Full(unsigned_block_contents_c) => unsigned_block_contents_c.data.deconstruct(), - Blinded(_) => { + let (unsigned_block_c, block_c_blobs) = match unsigned_block_type.data { + ProduceBlockV3Response::Full(unsigned_block_contents_c) => { + unsigned_block_contents_c.deconstruct() + } + ProduceBlockV3Response::Blinded(_) => { panic!("Should not be a blinded block"); } }; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 5edd8023e4..b1ad6ae360 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -7,12 +7,13 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, + types::{ + BlockId as CoreBlockId, ForkChoiceNode, ProduceBlockV3Response, StateId as CoreStateId, *, + }, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; use execution_layer::test_utils::{ - MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_BUILDER_THRESHOLD_WEI, - DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + MockBuilder, Operation, 
DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; @@ -38,8 +39,6 @@ use types::{ MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, }; -use eth2::types::ForkVersionedBeaconBlockType::{Blinded, Full}; - type E = MainnetEthSpec; const SECONDS_PER_SLOT: u64 = 12; @@ -80,7 +79,6 @@ struct ApiTester { struct ApiTesterConfig { spec: ChainSpec, retain_historic_states: bool, - builder_threshold: Option, } impl Default for ApiTesterConfig { @@ -90,7 +88,6 @@ impl Default for ApiTesterConfig { Self { spec, retain_historic_states: false, - builder_threshold: None, } } } @@ -132,7 +129,7 @@ impl ApiTester { .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer_with_config(config.builder_threshold) + .mock_execution_layer_with_config() .build(); harness @@ -391,19 +388,12 @@ impl ApiTester { .test_post_validator_register_validator() .await; // Make sure bids always meet the minimum threshold. 
- tester - .mock_builder - .as_ref() - .unwrap() - .add_operation(Operation::Value(Uint256::from( - DEFAULT_BUILDER_THRESHOLD_WEI, - ))); + tester.mock_builder.as_ref().unwrap(); tester } - pub async fn new_mev_tester_no_builder_threshold() -> Self { + pub async fn new_mev_tester_default_payload_value() -> Self { let mut config = ApiTesterConfig { - builder_threshold: Some(0), retain_historic_states: false, spec: E::default_spec(), }; @@ -655,6 +645,7 @@ impl ApiTester { .await .unwrap() .unwrap() + .metadata .finalized .unwrap(); @@ -691,6 +682,7 @@ impl ApiTester { .await .unwrap() .unwrap() + .metadata .finalized .unwrap(); @@ -728,6 +720,7 @@ impl ApiTester { .await .unwrap() .unwrap() + .metadata .finalized .unwrap(); @@ -2725,52 +2718,57 @@ impl ApiTester { sk.sign(message).into() }; - let (fork_version_response_bytes, is_blinded_payload) = self + let (response, metadata) = self .client - .get_validator_blocks_v3_ssz::(slot, &randao_reveal, None) + .get_validator_blocks_v3_ssz::(slot, &randao_reveal, None, None) .await .unwrap(); - if is_blinded_payload { - let blinded_block = >::from_ssz_bytes( - &fork_version_response_bytes.unwrap(), - &self.chain.spec, - ) - .expect("block contents bytes can be decoded"); + match response { + ProduceBlockV3Response::Blinded(blinded_block) => { + assert!(metadata.execution_payload_blinded); + assert_eq!( + metadata.consensus_version, + blinded_block.to_ref().fork_name(&self.chain.spec).unwrap() + ); + let signed_blinded_block = + blinded_block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); - let signed_blinded_block = - blinded_block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + self.client + .post_beacon_blinded_blocks_ssz(&signed_blinded_block) + .await + .unwrap(); - self.client - .post_beacon_blinded_blocks_ssz(&signed_blinded_block) - .await - .unwrap(); + let head_block = self.chain.head_beacon_block().clone_as_blinded(); + assert_eq!(head_block, signed_blinded_block); - let head_block = 
self.chain.head_beacon_block().clone_as_blinded(); - assert_eq!(head_block, signed_blinded_block); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + ProduceBlockV3Response::Full(block_contents) => { + assert!(!metadata.execution_payload_blinded); + assert_eq!( + metadata.consensus_version, + block_contents + .block() + .to_ref() + .fork_name(&self.chain.spec) + .unwrap() + ); + let signed_block_contents = + block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); - self.chain.slot_clock.set_slot(slot.as_u64() + 1); - } else { - let block_contents = >::from_ssz_bytes( - &fork_version_response_bytes.unwrap(), - &self.chain.spec, - ) - .expect("block contents bytes can be decoded"); + self.client + .post_beacon_blocks_ssz(&signed_block_contents) + .await + .unwrap(); - let signed_block_contents = - block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + assert_eq!( + self.chain.head_beacon_block().as_ref(), + signed_block_contents.signed_block() + ); - self.client - .post_beacon_blocks_ssz(&signed_block_contents) - .await - .unwrap(); - - assert_eq!( - self.chain.head_beacon_block().as_ref(), - signed_block_contents.signed_block() - ); - - self.chain.slot_clock.set_slot(slot.as_u64() + 1); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } } } @@ -3543,15 +3541,69 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), - Full(_) => panic!("Expecting a blinded payload"), + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + 
ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 11_111_111); + + self + } + + pub async fn test_payload_v3_zero_builder_boost_factor(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(0)) + .await + .unwrap(); + + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 16_384); + + self + } + + pub async fn test_payload_v3_max_builder_boost_factor(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let (payload_type, _) = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(u64::MAX)) + .await + .unwrap(); + + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); @@ -3645,15 +3697,17 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - 
.get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), - Full(_) => panic!("Expecting a blinded payload"), + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); @@ -3719,15 +3773,17 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), - Full(_) => panic!("Expecting a blinded payload"), + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; assert_eq!(payload.fee_recipient(), test_fee_recipient); @@ -3807,21 +3863,17 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a blinded payload"), + let payload: FullPayload = match 
payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a blinded payload"), }; assert_eq!(payload.parent_hash(), expected_parent_hash); @@ -3897,21 +3949,17 @@ impl ApiTester { .unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a full payload"), + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; assert_eq!(payload.prev_randao(), expected_prev_randao); @@ -3987,21 +4035,17 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a full payload"), + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; assert_eq!(payload.block_number(), expected_block_number); @@ -4075,21 +4119,17 @@ impl ApiTester { let (_, randao_reveal) = 
self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a blinded payload"), + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a blinded payload"), }; assert!(payload.timestamp() > min_expected_timestamp); @@ -4135,15 +4175,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4201,15 +4241,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4309,15 +4349,15 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload_type = 
self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(next_slot, &randao_reveal, None) + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Blinded(_) => (), - Full(_) => panic!("Expecting a blinded payload"), + match payload_type.data { + ProduceBlockV3Response::Blinded(_) => (), + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; // Without proposing, advance into the next slot, this should make us cross the threshold @@ -4329,15 +4369,15 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(next_slot, &randao_reveal, None) + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4457,15 +4497,15 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(next_slot, &randao_reveal, None) + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; // Fill another epoch with blocks, should be enough to finalize. 
(Sneaky plus 1 because this @@ -4487,15 +4527,15 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(next_slot, &randao_reveal, None) + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Blinded(_) => (), - Full(_) => panic!("Expecting a blinded payload"), + match payload_type.data { + ProduceBlockV3Response::Blinded(_) => (), + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; self @@ -4567,21 +4607,17 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a full payload"), + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); @@ -4590,70 +4626,6 @@ impl ApiTester { self } - pub async fn test_payload_rejects_inadequate_builder_threshold(self) -> Self { - // Mutate value. 
- self.mock_builder - .as_ref() - .unwrap() - .add_operation(Operation::Value(Uint256::from( - DEFAULT_BUILDER_THRESHOLD_WEI - 1, - ))); - - let slot = self.chain.slot().unwrap(); - let epoch = self.chain.epoch().unwrap(); - - let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - - let payload: BlindedPayload = self - .client - .get_validator_blinded_blocks::(slot, &randao_reveal, None) - .await - .unwrap() - .data - .body() - .execution_payload() - .unwrap() - .into(); - - // If this cache is populated, it indicates fallback to the local EE was correctly used. - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_some()); - self - } - - pub async fn test_payload_v3_rejects_inadequate_builder_threshold(self) -> Self { - // Mutate value. - self.mock_builder - .as_ref() - .unwrap() - .add_operation(Operation::Value(Uint256::from( - DEFAULT_BUILDER_THRESHOLD_WEI - 1, - ))); - - let slot = self.chain.slot().unwrap(); - let epoch = self.chain.epoch().unwrap(); - - let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - - let payload_type = self - .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) - .await - .unwrap(); - - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), - }; - - self - } - pub async fn test_builder_payload_chosen_when_more_profitable(self) -> Self { // Mutate value. 
self.mock_builder @@ -4704,15 +4676,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Blinded(_) => (), - Full(_) => panic!("Expecting a blinded payload"), + match payload_type.data { + ProduceBlockV3Response::Blinded(_) => (), + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; self @@ -4768,15 +4740,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4832,15 +4804,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4894,15 +4866,15 @@ impl ApiTester { let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, 
&randao_reveal, None, None) .await .unwrap(); - let _block_contents = match payload_type { - Blinded(payload) => payload.data, - Full(_) => panic!("Expecting a blinded payload"), + let _block_contents = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => payload, + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; self @@ -4966,15 +4938,15 @@ impl ApiTester { let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -6054,6 +6026,22 @@ async fn post_validator_register_valid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_zero_builder_boost_factor() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_zero_builder_boost_factor() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_max_builder_boost_factor() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_max_builder_boost_factor() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_valid_v3() { ApiTester::new_mev_tester() @@ -6238,25 +6226,9 @@ async fn builder_chain_health_optimistic_head_v3() { .await; } -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn builder_inadequate_builder_threshold() { - ApiTester::new_mev_tester() - .await - .test_payload_rejects_inadequate_builder_threshold() - .await; -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn 
builder_inadequate_builder_threshold_v3() { - ApiTester::new_mev_tester() - .await - .test_payload_v3_rejects_inadequate_builder_threshold() - .await; -} - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_payload_chosen_by_profit() { - ApiTester::new_mev_tester_no_builder_threshold() + ApiTester::new_mev_tester_default_payload_value() .await .test_builder_payload_chosen_when_more_profitable() .await @@ -6268,7 +6240,7 @@ async fn builder_payload_chosen_by_profit() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_payload_chosen_by_profit_v3() { - ApiTester::new_mev_tester_no_builder_threshold() + ApiTester::new_mev_tester_default_payload_value() .await .test_builder_payload_v3_chosen_when_more_profitable() .await @@ -6281,7 +6253,6 @@ async fn builder_payload_chosen_by_profit_v3() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_works_post_capella() { let mut config = ApiTesterConfig { - builder_threshold: Some(0), retain_historic_states: false, spec: E::default_spec(), }; @@ -6302,7 +6273,6 @@ async fn builder_works_post_capella() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_works_post_deneb() { let mut config = ApiTesterConfig { - builder_threshold: Some(0), retain_historic_states: false, spec: E::default_spec(), }; diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 356a6a203b..e1ae62be65 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -43,10 +43,11 @@ prometheus-client = "0.22.0" unused_port = { workspace = true } delay_map = { workspace = true } void = "1" -libp2p-mplex = "0.41.0" +libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p/", rev = "b96b90894faab0a1eed78e1c82c6452138a3538a" } [dependencies.libp2p] -version = "0.53" +git = "https://github.com/sigp/rust-libp2p/" +rev = "b96b90894faab0a1eed78e1c82c6452138a3538a" 
default-features = false features = ["identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic"] diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 0eb3f7bc80..169a061d20 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -5,7 +5,6 @@ use crate::{Enr, PeerIdSerialized}; use directory::{ DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, }; -use discv5::{Discv5Config, Discv5ConfigBuilder}; use libp2p::gossipsub; use libp2p::Multiaddr; use serde::{Deserialize, Serialize}; @@ -91,7 +90,7 @@ pub struct Config { /// Discv5 configuration parameters. #[serde(skip)] - pub discv5_config: Discv5Config, + pub discv5_config: discv5::Config, /// List of nodes to initially connect to. pub boot_nodes_enr: Vec, @@ -158,6 +157,10 @@ pub struct Config { /// Configuration for the inbound rate limiter (requests received by this node). pub inbound_rate_limiter_config: Option, + + /// Whether to disable logging duplicate gossip messages as WARN. If set to true, duplicate + /// errors will be logged at DEBUG level. 
+ pub disable_duplicate_warn_logs: bool, } impl Config { @@ -320,7 +323,7 @@ impl Default for Config { discv5::ListenConfig::from_ip(Ipv4Addr::UNSPECIFIED.into(), 9000); // discv5 configuration - let discv5_config = Discv5ConfigBuilder::new(discv5_listen_config) + let discv5_config = discv5::ConfigBuilder::new(discv5_listen_config) .enable_packet_filter() .session_cache_capacity(5000) .request_timeout(Duration::from_secs(1)) @@ -375,6 +378,7 @@ impl Default for Config { outbound_rate_limiter_config: None, invalid_block_storage: None, inbound_rate_limiter_config: None, + disable_duplicate_warn_logs: false, } } } diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 0ec7e2ab7a..b0e0a01eec 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -1,12 +1,11 @@ //! Helper functions and an extension trait for Ethereum 2 ENRs. -pub use discv5::enr::{CombinedKey, EnrBuilder}; +pub use discv5::enr::CombinedKey; use super::enr_ext::CombinedKeyExt; use super::ENR_FILENAME; use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; -use discv5::enr::EnrKey; use libp2p::identity::Keypair; use slog::{debug, warn}; use ssz::{Decode, Encode}; @@ -142,11 +141,13 @@ pub fn build_or_load_enr( Ok(local_enr) } -pub fn create_enr_builder_from_config( +/// Builds a lighthouse ENR given a `NetworkConfig`. +pub fn build_enr( + enr_key: &CombinedKey, config: &NetworkConfig, - enable_libp2p: bool, -) -> EnrBuilder { - let mut builder = EnrBuilder::new("v4"); + enr_fork_id: &EnrForkId, +) -> Result { + let mut builder = discv5::enr::Enr::builder(); let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address; if let Some(ip) = maybe_ipv4_address { @@ -165,63 +166,51 @@ pub fn create_enr_builder_from_config( builder.udp6(udp6_port.get()); } - if enable_libp2p { - // Add QUIC fields to the ENR. 
- // Since QUIC is used as an alternative transport for the libp2p protocols, - // the related fields should only be added when both QUIC and libp2p are enabled - if !config.disable_quic_support { - // If we are listening on ipv4, add the quic ipv4 port. - if let Some(quic4_port) = config.enr_quic4_port.or_else(|| { - config - .listen_addrs() - .v4() - .and_then(|v4_addr| v4_addr.quic_port.try_into().ok()) - }) { - builder.add_value(QUIC_ENR_KEY, &quic4_port.get()); - } - - // If we are listening on ipv6, add the quic ipv6 port. - if let Some(quic6_port) = config.enr_quic6_port.or_else(|| { - config - .listen_addrs() - .v6() - .and_then(|v6_addr| v6_addr.quic_port.try_into().ok()) - }) { - builder.add_value(QUIC6_ENR_KEY, &quic6_port.get()); - } - } - - // If the ENR port is not set, and we are listening over that ip version, use the listening port instead. - let tcp4_port = config.enr_tcp4_port.or_else(|| { + // Add QUIC fields to the ENR. + // Since QUIC is used as an alternative transport for the libp2p protocols, + // the related fields should only be added when both QUIC and libp2p are enabled + if !config.disable_quic_support { + // If we are listening on ipv4, add the quic ipv4 port. + if let Some(quic4_port) = config.enr_quic4_port.or_else(|| { config .listen_addrs() .v4() - .and_then(|v4_addr| v4_addr.tcp_port.try_into().ok()) - }); - if let Some(tcp4_port) = tcp4_port { - builder.tcp4(tcp4_port.get()); + .and_then(|v4_addr| v4_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC_ENR_KEY, &quic4_port.get()); } - let tcp6_port = config.enr_tcp6_port.or_else(|| { + // If we are listening on ipv6, add the quic ipv6 port. 
+ if let Some(quic6_port) = config.enr_quic6_port.or_else(|| { config .listen_addrs() .v6() - .and_then(|v6_addr| v6_addr.tcp_port.try_into().ok()) - }); - if let Some(tcp6_port) = tcp6_port { - builder.tcp6(tcp6_port.get()); + .and_then(|v6_addr| v6_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC6_ENR_KEY, &quic6_port.get()); } } - builder -} -/// Builds a lighthouse ENR given a `NetworkConfig`. -pub fn build_enr( - enr_key: &CombinedKey, - config: &NetworkConfig, - enr_fork_id: &EnrForkId, -) -> Result { - let mut builder = create_enr_builder_from_config(config, true); + // If the ENR port is not set, and we are listening over that ip version, use the listening port instead. + let tcp4_port = config.enr_tcp4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.tcp_port.try_into().ok()) + }); + if let Some(tcp4_port) = tcp4_port { + builder.tcp4(tcp4_port.get()); + } + + let tcp6_port = config.enr_tcp6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.tcp_port.try_into().ok()) + }); + if let Some(tcp6_port) = tcp6_port { + builder.tcp6(tcp6_port.get()); + } // set the `eth2` field on our ENR builder.add_value(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes()); diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 2efaa76ac3..bae7235604 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -366,9 +366,7 @@ mod tests { let libp2p_kp: Keypair = secp256k1_kp.into(); let peer_id = libp2p_kp.public().to_peer_id(); - let enr = discv5::enr::EnrBuilder::new("v4") - .build(&secret_key) - .unwrap(); + let enr = discv5::enr::Enr::builder().build(&secret_key).unwrap(); let node_id = peer_id_to_node_id(&peer_id).unwrap(); assert_eq!(enr.node_id(), node_id); @@ -387,9 +385,7 @@ mod tests { let libp2p_kp: Keypair = secp256k1_kp.into(); let peer_id = 
libp2p_kp.public().to_peer_id(); - let enr = discv5::enr::EnrBuilder::new("v4") - .build(&secret_key) - .unwrap(); + let enr = discv5::enr::Enr::builder().build(&secret_key).unwrap(); let node_id = peer_id_to_node_id(&peer_id).unwrap(); assert_eq!(enr.node_id(), node_id); diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 0894dc65bd..829124e123 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -10,11 +10,8 @@ pub mod enr_ext; use crate::service::TARGET_SUBNET_PEERS; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use crate::{metrics, ClearDialError}; -use discv5::{enr::NodeId, Discv5, Discv5Event}; -pub use enr::{ - build_enr, create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr, CombinedKey, - Eth2Enr, -}; +use discv5::{enr::NodeId, Discv5}; +pub use enr::{build_enr, load_enr_from_disk, use_or_load_enr, CombinedKey, Eth2Enr}; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; pub use libp2p::identity::{Keypair, PublicKey}; @@ -35,6 +32,7 @@ pub use libp2p::{ use lru::LruCache; use slog::{crit, debug, error, info, trace, warn}; use ssz::Encode; +use std::num::NonZeroUsize; use std::{ collections::{HashMap, VecDeque}, net::{IpAddr, SocketAddr}, @@ -49,6 +47,7 @@ use types::{EnrForkId, EthSpec}; mod subnet_predicate; pub use subnet_predicate::subnet_predicate; +use types::non_zero_usize::new_non_zero_usize; /// Local ENR storage filename. pub const ENR_FILENAME: &str = "enr.dat"; @@ -70,6 +69,8 @@ const MAX_SUBNETS_IN_QUERY: usize = 3; pub const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16; /// The threshold for updating `min_ttl` on a connected peer. const DURATION_DIFFERENCE: Duration = Duration::from_millis(1); +/// The capacity of the Discovery ENR cache. +const ENR_CACHE_CAPACITY: NonZeroUsize = new_non_zero_usize(50); /// A query has completed. 
This result contains a mapping of discovered peer IDs to the `min_ttl` /// of the peer if it is specified. @@ -143,15 +144,10 @@ enum EventStream { /// Awaiting an event stream to be generated. This is required due to the poll nature of /// `Discovery` Awaiting( - Pin< - Box< - dyn Future, discv5::Discv5Error>> - + Send, - >, - >, + Pin, discv5::Error>> + Send>>, ), /// The future has completed. - Present(mpsc::Receiver), + Present(mpsc::Receiver), // The future has failed or discv5 has been disabled. There are no events from discv5. InActive, } @@ -318,7 +314,7 @@ impl Discovery { }; Ok(Self { - cached_enrs: LruCache::new(50), + cached_enrs: LruCache::new(ENR_CACHE_CAPACITY), network_globals, find_peer_active: false, queued_queries: VecDeque::with_capacity(10), @@ -992,7 +988,7 @@ impl NetworkBehaviour for Discovery { match event { // We filter out unwanted discv5 events here and only propagate useful results to // the peer manager. - Discv5Event::Discovered(_enr) => { + discv5::Event::Discovered(_enr) => { // Peers that get discovered during a query but are not contactable or // don't match a predicate can end up here. For debugging purposes we // log these to see if we are unnecessarily dropping discovered peers @@ -1005,7 +1001,7 @@ impl NetworkBehaviour for Discovery { } */ } - Discv5Event::SocketUpdated(socket_addr) => { + discv5::Event::SocketUpdated(socket_addr) => { info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port()); metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); metrics::check_nat(); @@ -1026,10 +1022,10 @@ impl NetworkBehaviour for Discovery { // NOTE: We assume libp2p itself can keep track of IP changes and we do // not inform it about IP changes found via discovery. } - Discv5Event::EnrAdded { .. } - | Discv5Event::TalkRequest(_) - | Discv5Event::NodeInserted { .. } - | Discv5Event::SessionEstablished { .. } => {} // Ignore all other discv5 server events + discv5::Event::EnrAdded { .. 
} + | discv5::Event::TalkRequest(_) + | discv5::Event::NodeInserted { .. } + | discv5::Event::SessionEstablished { .. } => {} // Ignore all other discv5 server events } } } @@ -1140,7 +1136,6 @@ impl Discovery { mod tests { use super::*; use crate::rpc::methods::{MetaData, MetaDataV2}; - use enr::EnrBuilder; use libp2p::identity::secp256k1; use slog::{o, Drain}; use types::{BitVector, MinimalEthSpec, SubnetId}; @@ -1223,7 +1218,7 @@ mod tests { } fn make_enr(subnet_ids: Vec) -> Enr { - let mut builder = EnrBuilder::new("v4"); + let mut builder = Enr::builder(); let keypair = secp256k1::Keypair::generate(); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 6d622fcc8a..4085ac17b7 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -316,7 +316,7 @@ mod tests { )); // Request limits - let limit = protocol_id.rpc_request_limits(); + let limit = protocol_id.rpc_request_limits(&fork_context.spec); let mut max = encode_len(limit.max + 1); let mut codec = SSZSnappyOutboundCodec::::new( protocol_id.clone(), diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 787c3dcb7a..7a7f2969f1 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -15,10 +15,11 @@ use std::io::{Read, Write}; use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; +use types::ChainSpec; use types::{ - BlobSidecar, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockMerge, + BlobSidecar, EthSpec, ForkContext, ForkName, Hash256, 
LightClientBootstrap, + RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -140,7 +141,7 @@ impl Decoder for SSZSnappyInboundCodec { // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of // packet size for ssz container corresponding to `self.protocol`. - let ssz_limits = self.protocol.rpc_request_limits(); + let ssz_limits = self.protocol.rpc_request_limits(&self.fork_context.spec); if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { return Err(RPCError::InvalidData(format!( "RPC request length for protocol {:?} is out of bounds, length {}", @@ -161,7 +162,11 @@ impl Decoder for SSZSnappyInboundCodec { let n = reader.get_ref().get_ref().position(); self.len = None; let _read_bytes = src.split_to(n as usize); - handle_rpc_request(self.protocol.versioned_protocol, &decoded_buffer) + handle_rpc_request( + self.protocol.versioned_protocol, + &decoded_buffer, + &self.fork_context.spec, + ) } Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), } @@ -451,6 +456,7 @@ fn handle_length( fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], + spec: &ChainSpec, ) -> Result>, RPCError> { match versioned_protocol { SupportedProtocol::StatusV1 => Ok(Some(InboundRequest::Status( @@ -467,12 +473,18 @@ fn handle_rpc_request( ))), SupportedProtocol::BlocksByRootV2 => Ok(Some(InboundRequest::BlocksByRoot( BlocksByRootRequest::V2(BlocksByRootRequestV2 { - block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, + block_roots: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_blocks as usize, + )?, }), ))), SupportedProtocol::BlocksByRootV1 => Ok(Some(InboundRequest::BlocksByRoot( BlocksByRootRequest::V1(BlocksByRootRequestV1 { - block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, + 
block_roots: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_blocks as usize, + )?, }), ))), SupportedProtocol::BlobsByRangeV1 => Ok(Some(InboundRequest::BlobsByRange( @@ -480,7 +492,10 @@ fn handle_rpc_request( ))), SupportedProtocol::BlobsByRootV1 => { Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest { - blob_ids: VariableList::from_ssz_bytes(decoded_buffer)?, + blob_ids: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_blob_sidecars as usize, + )?, }))) } SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { @@ -773,21 +788,22 @@ mod tests { } } - fn bbroot_request_v1() -> BlocksByRootRequest { - BlocksByRootRequest::new_v1(vec![Hash256::zero()].into()) + fn bbroot_request_v1(spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new_v1(vec![Hash256::zero()], spec) } - fn bbroot_request_v2() -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![Hash256::zero()].into()) + fn bbroot_request_v2(spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![Hash256::zero()], spec) } - fn blbroot_request() -> BlobsByRootRequest { - BlobsByRootRequest { - blob_ids: VariableList::from(vec![BlobIdentifier { + fn blbroot_request(spec: &ChainSpec) -> BlobsByRootRequest { + BlobsByRootRequest::new( + vec![BlobIdentifier { block_root: Hash256::zero(), index: 0, - }]), - } + }], + spec, + ) } fn ping_message() -> Ping { @@ -1391,22 +1407,22 @@ mod tests { #[test] fn test_encode_then_decode_request() { + let chain_spec = Spec::default_spec(); + let requests: &[OutboundRequest] = &[ OutboundRequest::Ping(ping_message()), OutboundRequest::Status(status_message()), OutboundRequest::Goodbye(GoodbyeReason::Fault), OutboundRequest::BlocksByRange(bbrange_request_v1()), OutboundRequest::BlocksByRange(bbrange_request_v2()), - OutboundRequest::BlocksByRoot(bbroot_request_v1()), - OutboundRequest::BlocksByRoot(bbroot_request_v2()), + 
OutboundRequest::BlocksByRoot(bbroot_request_v1(&chain_spec)), + OutboundRequest::BlocksByRoot(bbroot_request_v2(&chain_spec)), OutboundRequest::MetaData(MetadataRequest::new_v1()), OutboundRequest::BlobsByRange(blbrange_request()), - OutboundRequest::BlobsByRoot(blbroot_request()), + OutboundRequest::BlobsByRoot(blbroot_request(&chain_spec)), OutboundRequest::MetaData(MetadataRequest::new_v2()), ]; - let chain_spec = Spec::default_spec(); - for req in requests.iter() { for fork_name in ForkName::list_all() { encode_then_decode_request(req.clone(), fork_name, &chain_spec); diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index ad96731141..9895149198 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -4,7 +4,7 @@ use std::{ time::Duration, }; -use super::{methods, rate_limiter::Quota, Protocol}; +use super::{rate_limiter::Quota, Protocol}; use serde::{Deserialize, Serialize}; @@ -99,11 +99,9 @@ impl RateLimiterConfig { pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); - pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = - Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(1024, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); - pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = - Quota::n_every(methods::MAX_REQUEST_BLOB_SIDECARS, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(768, 10); pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9a6ad19ac5..04ec6bac49 100644 --- 
a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -5,36 +5,22 @@ use regex::bytes::Regex; use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::{ - typenum::{U1024, U128, U256, U768}, - VariableList, -}; +use ssz_types::{typenum::U256, VariableList}; use std::marker::PhantomData; use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; -use types::consts::deneb::MAX_BLOBS_PER_BLOCK; use types::{ - blob_sidecar::BlobSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, SignedBeaconBlock, - Slot, + blob_sidecar::BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, LightClientBootstrap, + RuntimeVariableList, SignedBeaconBlock, Slot, }; -/// Maximum number of blocks in a single request. -pub type MaxRequestBlocks = U1024; -pub const MAX_REQUEST_BLOCKS: u64 = 1024; - /// Maximum length of error message. pub type MaxErrorLen = U256; pub const MAX_ERROR_LEN: u64 = 256; -pub type MaxRequestBlocksDeneb = U128; -pub const MAX_REQUEST_BLOCKS_DENEB: u64 = 128; - -pub type MaxRequestBlobSidecars = U768; -pub const MAX_REQUEST_BLOB_SIDECARS: u64 = MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK; - /// Wrapper over SSZ List to represent error message in rpc responses. #[derive(Debug, Clone)] pub struct ErrorType(pub VariableList); @@ -344,22 +330,23 @@ impl OldBlocksByRangeRequest { } /// Request a number of beacon block bodies from a peer. -#[superstruct( - variants(V1, V2), - variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq)) -)] +#[superstruct(variants(V1, V2), variant_attributes(derive(Clone, Debug, PartialEq)))] #[derive(Clone, Debug, PartialEq)] pub struct BlocksByRootRequest { /// The list of beacon block bodies being requested. 
- pub block_roots: VariableList, + pub block_roots: RuntimeVariableList, } impl BlocksByRootRequest { - pub fn new(block_roots: VariableList) -> Self { + pub fn new(block_roots: Vec, spec: &ChainSpec) -> Self { + let block_roots = + RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize); Self::V2(BlocksByRootRequestV2 { block_roots }) } - pub fn new_v1(block_roots: VariableList) -> Self { + pub fn new_v1(block_roots: Vec, spec: &ChainSpec) -> Self { + let block_roots = + RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize); Self::V1(BlocksByRootRequestV1 { block_roots }) } } @@ -368,7 +355,15 @@ impl BlocksByRootRequest { #[derive(Clone, Debug, PartialEq)] pub struct BlobsByRootRequest { /// The list of beacon block roots being requested. - pub blob_ids: VariableList, + pub blob_ids: RuntimeVariableList, +} + +impl BlobsByRootRequest { + pub fn new(blob_ids: Vec, spec: &ChainSpec) -> Self { + let blob_ids = + RuntimeVariableList::from_vec(blob_ids, spec.max_request_blob_sidecars as usize); + Self { blob_ids } + } } /* RPC Handling and Grouping */ diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index d6686ff1b1..3606438fb9 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -7,7 +7,8 @@ use futures::future::FutureExt; use handler::RPCHandler; use libp2p::swarm::{ - handler::ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, ToSwarm, + handler::ConnectionHandler, CloseConnection, ConnectionId, NetworkBehaviour, NotifyHandler, + ToSwarm, }; use libp2p::swarm::{FromSwarm, SubstreamProtocol, THandlerInEvent}; use libp2p::PeerId; @@ -26,7 +27,7 @@ pub(crate) use protocol::InboundRequest; pub use handler::SubstreamId; pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, - MaxRequestBlocks, RPCResponseErrorCode, ResponseTermination, 
StatusMessage, MAX_REQUEST_BLOCKS, + RPCResponseErrorCode, ResponseTermination, StatusMessage, }; pub(crate) use outbound::OutboundRequest; pub use protocol::{max_rpc_size, Protocol, RPCError}; @@ -292,68 +293,78 @@ where conn_id: ConnectionId, event: ::ToBehaviour, ) { - if let HandlerEvent::Ok(RPCReceived::Request(ref id, ref req)) = event { - if let Some(limiter) = self.limiter.as_mut() { - // check if the request is conformant to the quota - match limiter.allows(&peer_id, req) { - Ok(()) => { - // send the event to the user - self.events.push(ToSwarm::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })) - } - Err(RateLimitedErr::TooLarge) => { - // we set the batch sizes, so this is a coding/config err for most protocols - let protocol = req.versioned_protocol().protocol(); - if matches!(protocol, Protocol::BlocksByRange) - || matches!(protocol, Protocol::BlobsByRange) - { - debug!(self.log, "By range request will never be processed"; "request" => %req, "protocol" => %protocol); - } else { - crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); + match event { + HandlerEvent::Ok(RPCReceived::Request(ref id, ref req)) => { + if let Some(limiter) = self.limiter.as_mut() { + // check if the request is conformant to the quota + match limiter.allows(&peer_id, req) { + Ok(()) => { + // send the event to the user + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })) } - // send an error code to the peer. - // the handler upon receiving the error code will send it back to the behaviour - self.send_response( - peer_id, - (conn_id, *id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, - "Rate limited. 
Request too large".into(), - ), - ); - } - Err(RateLimitedErr::TooSoon(wait_time)) => { - debug!(self.log, "Request exceeds the rate limit"; + Err(RateLimitedErr::TooLarge) => { + // we set the batch sizes, so this is a coding/config err for most protocols + let protocol = req.versioned_protocol().protocol(); + if matches!(protocol, Protocol::BlocksByRange) + || matches!(protocol, Protocol::BlobsByRange) + { + debug!(self.log, "By range request will never be processed"; "request" => %req, "protocol" => %protocol); + } else { + crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); + } + // send an error code to the peer. + // the handler upon receiving the error code will send it back to the behaviour + self.send_response( + peer_id, + (conn_id, *id), + RPCCodedResponse::Error( + RPCResponseErrorCode::RateLimited, + "Rate limited. Request too large".into(), + ), + ); + } + Err(RateLimitedErr::TooSoon(wait_time)) => { + debug!(self.log, "Request exceeds the rate limit"; "request" => %req, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); - // send an error code to the peer. - // the handler upon receiving the error code will send it back to the behaviour - self.send_response( - peer_id, - (conn_id, *id), - RPCCodedResponse::Error( - RPCResponseErrorCode::RateLimited, - format!("Wait {:?}", wait_time).into(), - ), - ); + // send an error code to the peer. + // the handler upon receiving the error code will send it back to the behaviour + self.send_response( + peer_id, + (conn_id, *id), + RPCCodedResponse::Error( + RPCResponseErrorCode::RateLimited, + format!("Wait {:?}", wait_time).into(), + ), + ); + } } + } else { + // No rate limiting, send the event to the user + self.events.push(ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id, + event, + })) } - } else { - // No rate limiting, send the event to the user + } + HandlerEvent::Close(_) => { + // Handle the close event here. 
+ self.events.push(ToSwarm::CloseConnection { + peer_id, + connection: CloseConnection::All, + }); + } + _ => { self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, event, - })) + })); } - } else { - self.events.push(ToSwarm::GenerateEvent(RPCMessage { - peer_id, - conn_id, - event, - })); } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 95fdc20838..9c174b8e42 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -2,7 +2,6 @@ use super::methods::*; use crate::rpc::{ codec::{base::BaseInboundCodec, ssz_snappy::SSZSnappyInboundCodec, InboundCodec}, methods::{MaxErrorLen, ResponseTermination, MAX_ERROR_LEN}, - MaxRequestBlocks, MAX_REQUEST_BLOCKS, }; use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; @@ -22,7 +21,7 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, - BlobSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, + BlobSidecar, ChainSpec, EmptyBlock, EthSpec, ForkContext, ForkName, MainnetEthSpec, Signature, SignedBeaconBlock, }; @@ -89,32 +88,6 @@ lazy_static! { + (::ssz_fixed_len() * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field. 
- pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = - VariableList::::from(Vec::::new()) - .as_ssz_bytes() - .len(); - pub static ref BLOCKS_BY_ROOT_REQUEST_MAX: usize = - VariableList::::from(vec![ - Hash256::zero(); - MAX_REQUEST_BLOCKS - as usize - ]) - .as_ssz_bytes() - .len(); - - pub static ref BLOBS_BY_ROOT_REQUEST_MIN: usize = - VariableList::::from(Vec::::new()) - .as_ssz_bytes() - .len(); - pub static ref BLOBS_BY_ROOT_REQUEST_MAX: usize = - VariableList::::from(vec![ - Hash256::zero(); - MAX_REQUEST_BLOB_SIDECARS - as usize - ]) - .as_ssz_bytes() - .len(); - pub static ref ERROR_TYPE_MIN: usize = VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -375,7 +348,7 @@ impl AsRef for ProtocolId { impl ProtocolId { /// Returns min and max size for messages of given protocol id requests. - pub fn rpc_request_limits(&self) -> RpcLimits { + pub fn rpc_request_limits(&self, spec: &ChainSpec) -> RpcLimits { match self.versioned_protocol.protocol() { Protocol::Status => RpcLimits::new( ::ssz_fixed_len(), @@ -390,16 +363,12 @@ impl ProtocolId { ::ssz_fixed_len(), ::ssz_fixed_len(), ), - Protocol::BlocksByRoot => { - RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) - } + Protocol::BlocksByRoot => RpcLimits::new(0, spec.max_blocks_by_root_request), Protocol::BlobsByRange => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), ), - Protocol::BlobsByRoot => { - RpcLimits::new(*BLOBS_BY_ROOT_REQUEST_MIN, *BLOBS_BY_ROOT_REQUEST_MAX) - } + Protocol::BlobsByRoot => RpcLimits::new(0, spec.max_blobs_by_root_request), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index b058fc0ff1..47c2c9e56a 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ 
-96,6 +96,9 @@ impl PeerScoreSettings { ip_colocation_factor_threshold: 8.0, // Allow up to 8 nodes per IP behaviour_penalty_threshold: 6.0, behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10), + slow_peer_decay: 0.1, + slow_peer_weight: -10.0, + slow_peer_threshold: 0.0, ..Default::default() }; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 3c2a3f5a95..e85cf75fd8 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -41,8 +41,7 @@ use std::{ }; use types::ForkName; use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, consts::deneb::BLOB_SIDECAR_SUBNET_COUNT, - EnrForkId, EthSpec, ForkContext, Slot, SubnetId, + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; @@ -128,6 +127,8 @@ pub struct Network { gossip_cache: GossipCache, /// This node's PeerId. pub local_peer_id: PeerId, + /// Flag to disable warning logs for duplicate gossip messages and log at DEBUG level instead. + pub disable_duplicate_warn_logs: bool, /// Logger for behaviour actions. 
log: slog::Logger, } @@ -224,7 +225,7 @@ impl Network { let max_topics = ctx.chain_spec.attestation_subnet_count as usize + SYNC_COMMITTEE_SUBNET_COUNT as usize - + BLOB_SIDECAR_SUBNET_COUNT as usize + + ctx.chain_spec.blob_sidecar_subnet_count as usize + BASE_CORE_TOPICS.len() + ALTAIR_CORE_TOPICS.len() + CAPELLA_CORE_TOPICS.len() @@ -237,7 +238,7 @@ impl Network { possible_fork_digests, ctx.chain_spec.attestation_subnet_count, SYNC_COMMITTEE_SUBNET_COUNT, - BLOB_SIDECAR_SUBNET_COUNT, + ctx.chain_spec.blob_sidecar_subnet_count, ), // during a fork we subscribe to both the old and new topics max_subscribed_topics: max_topics * 4, @@ -425,6 +426,7 @@ impl Network { update_gossipsub_scores, gossip_cache, local_peer_id, + disable_duplicate_warn_logs: config.disable_duplicate_warn_logs, log, }; @@ -636,7 +638,7 @@ impl Network { } // Subscribe to core topics for the new fork - for kind in fork_core_topics::(&new_fork) { + for kind in fork_core_topics::(&new_fork, &self.fork_context.spec) { let topic = GossipTopic::new(kind, GossipEncoding::default(), new_fork_digest); self.subscribe(topic); } @@ -743,7 +745,21 @@ impl Network { .gossipsub_mut() .publish(Topic::from(topic.clone()), message_data.clone()) { - slog::warn!(self.log, "Could not publish message"; "error" => ?e); + if self.disable_duplicate_warn_logs && matches!(e, PublishError::Duplicate) { + debug!( + self.log, + "Could not publish message"; + "error" => ?e, + "kind" => %topic.kind(), + ); + } else { + warn!( + self.log, + "Could not publish message"; + "error" => ?e, + "kind" => %topic.kind(), + ); + }; // add to metrics match topic.kind() { @@ -1248,6 +1264,32 @@ impl Network { "does_not_support_gossipsub", ); } + gossipsub::Event::SlowPeer { + peer_id, + failed_messages, + } => { + debug!(self.log, "Slow gossipsub peer"; "peer_id" => %peer_id, "publish" => failed_messages.publish, "forward" => failed_messages.forward, "priority" => failed_messages.priority, "non_priority" => failed_messages.non_priority); 
+ // Punish the peer if it cannot handle priority messages + if failed_messages.total_timeout() > 10 { + debug!(self.log, "Slow gossipsub peer penalized for priority failure"; "peer_id" => %peer_id); + self.peer_manager_mut().report_peer( + &peer_id, + PeerAction::HighToleranceError, + ReportSource::Gossipsub, + None, + "publish_timeout_penalty", + ); + } else if failed_messages.total_queue_full() > 10 { + debug!(self.log, "Slow gossipsub peer penalized for send queue full"; "peer_id" => %peer_id); + self.peer_manager_mut().report_peer( + &peer_id, + PeerAction::HighToleranceError, + ReportSource::Gossipsub, + None, + "queue_full_penalty", + ); + } + } } None } @@ -1452,8 +1494,7 @@ impl Network { self.build_response(id, peer_id, response) } HandlerEvent::Close(_) => { - let _ = self.swarm.disconnect_peer_id(peer_id); - // NOTE: we wait for the swarm to report the connection as actually closed + // NOTE: This is handled in the RPC behaviour. None } } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 5fe5946ce2..34dec1ca6c 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -50,8 +50,7 @@ pub fn build_transport( mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block); // yamux config - let mut yamux_config = yamux::Config::default(); - yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); + let yamux_config = yamux::Config::default(); // Creates the TCP transport layer let tcp = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default().nodelay(true)) .upgrade(core::upgrade::Version::V1) diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index b2b605e8ae..84a581d56d 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -118,7 +118,7 
@@ impl NetworkGlobals { use crate::CombinedKeyExt; let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); - let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); + let enr = discv5::enr::Enr::builder().build(&enr_key).unwrap(); NetworkGlobals::new( enr, MetaData::V2(MetaDataV2 { diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index e7e771e1ad..b774905174 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,8 +1,7 @@ use libp2p::gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use strum::AsRefStr; -use types::consts::deneb::BLOB_SIDECAR_SUBNET_COUNT; -use types::{EthSpec, ForkName, SubnetId, SyncSubnetId}; +use types::{ChainSpec, EthSpec, ForkName, SubnetId, SyncSubnetId}; use crate::Subnet; @@ -44,7 +43,7 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ pub const DENEB_CORE_TOPICS: [GossipKind; 0] = []; /// Returns the core topics associated with each fork that are new to the previous fork -pub fn fork_core_topics(fork_name: &ForkName) -> Vec { +pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> Vec { match fork_name { ForkName::Base => BASE_CORE_TOPICS.to_vec(), ForkName::Altair => ALTAIR_CORE_TOPICS.to_vec(), @@ -53,7 +52,7 @@ pub fn fork_core_topics(fork_name: &ForkName) -> Vec { ForkName::Deneb => { // All of deneb blob topics are core topics let mut deneb_blob_topics = Vec::new(); - for i in 0..BLOB_SIDECAR_SUBNET_COUNT { + for i in 0..spec.blob_sidecar_subnet_count { deneb_blob_topics.push(GossipKind::BlobSidecar(i)); } let mut deneb_topics = DENEB_CORE_TOPICS.to_vec(); @@ -65,10 +64,13 @@ pub fn fork_core_topics(fork_name: &ForkName) -> Vec { /// Returns all the topics that we need to subscribe to for a given fork /// including topics from 
older forks and new topics for the current fork. -pub fn core_topics_to_subscribe(mut current_fork: ForkName) -> Vec { - let mut topics = fork_core_topics::(¤t_fork); +pub fn core_topics_to_subscribe( + mut current_fork: ForkName, + spec: &ChainSpec, +) -> Vec { + let mut topics = fork_core_topics::(¤t_fork, spec); while let Some(previous_fork) = current_fork.previous_fork() { - let previous_fork_topics = fork_core_topics::(&previous_fork); + let previous_fork_topics = fork_core_topics::(&previous_fork, spec); topics.extend(previous_fork_topics); current_fork = previous_fork; } @@ -435,14 +437,18 @@ mod tests { #[test] fn test_core_topics_to_subscribe() { type E = MainnetEthSpec; + let spec = E::default_spec(); let mut all_topics = Vec::new(); - let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb); + let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb, &spec); all_topics.append(&mut deneb_core_topics); all_topics.extend(CAPELLA_CORE_TOPICS); all_topics.extend(ALTAIR_CORE_TOPICS); all_topics.extend(BASE_CORE_TOPICS); let latest_fork = *ForkName::list_all().last().unwrap(); - assert_eq!(core_topics_to_subscribe::(latest_fork), all_topics); + assert_eq!( + core_topics_to_subscribe::(latest_fork, &spec), + all_topics + ); } } diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 176f783c99..643c1231a0 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -743,15 +743,17 @@ fn test_tcp_blocks_by_root_chunked_rpc() { .await; // BlocksByRoot Request - let rpc_request = - Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![ + let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + vec![ Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), - ]))); + ], + &spec, + )); 
// BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); @@ -876,8 +878,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { .await; // BlocksByRoot Request - let rpc_request = - Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![ + let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + vec![ Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), @@ -888,7 +890,9 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), - ]))); + ], + &spec, + )); // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 430e0571b7..a731dea7c1 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -5,9 +5,7 @@ use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use beacon_processor::SendOnDrop; use itertools::process_results; -use lighthouse_network::rpc::methods::{ - BlobsByRangeRequest, BlobsByRootRequest, MAX_REQUEST_BLOB_SIDECARS, MAX_REQUEST_BLOCKS_DENEB, -}; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; @@ -222,12 +220,14 @@ impl NetworkBeaconProcessor { request_id: PeerRequestId, request: BlobsByRootRequest, ) { - let Some(requested_root) = request.blob_ids.first().map(|id| id.block_root) else { + let Some(requested_root) = request.blob_ids.as_slice().first().map(|id| id.block_root) + else { // No blob ids requested. 
return; }; let requested_indices = request .blob_ids + .as_slice() .iter() .map(|id| id.index) .collect::>(); @@ -235,9 +235,9 @@ impl NetworkBeaconProcessor { let send_response = true; let mut blob_list_results = HashMap::new(); - for id in request.blob_ids.into_iter() { + for id in request.blob_ids.as_slice() { // First attempt to get the blobs from the RPC cache. - if let Ok(Some(blob)) = self.chain.data_availability_checker.get_blob(&id) { + if let Ok(Some(blob)) = self.chain.data_availability_checker.get_blob(id) { self.send_response(peer_id, Response::BlobsByRoot(Some(blob)), request_id); send_blob_count += 1; } else { @@ -248,7 +248,7 @@ impl NetworkBeaconProcessor { let blob_list_result = match blob_list_results.entry(root) { Entry::Vacant(entry) => { - entry.insert(self.chain.get_blobs_checking_early_attester_cache(&root)) + entry.insert(self.chain.get_blobs_checking_early_attester_cache(root)) } Entry::Occupied(entry) => entry.into_mut(), }; @@ -256,7 +256,7 @@ impl NetworkBeaconProcessor { match blob_list_result.as_ref() { Ok(blobs_sidecar_list) => { 'inner: for blob_sidecar in blobs_sidecar_list.iter() { - if blob_sidecar.index == index { + if blob_sidecar.index == *index { self.send_response( peer_id, Response::BlobsByRoot(Some(blob_sidecar.clone())), @@ -346,14 +346,17 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request blocks - let max_request_size = self.chain.epoch().map_or(MAX_REQUEST_BLOCKS, |epoch| { - match self.chain.spec.fork_name_at_epoch(epoch) { - ForkName::Deneb => MAX_REQUEST_BLOCKS_DENEB, - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - MAX_REQUEST_BLOCKS - } - } - }); + let max_request_size = + self.chain + .epoch() + .map_or(self.chain.spec.max_request_blocks, |epoch| { + match self.chain.spec.fork_name_at_epoch(epoch) { + ForkName::Deneb => self.chain.spec.max_request_blocks_deneb, + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + 
self.chain.spec.max_request_blocks + } + } + }); if *req.count() > max_request_size { return self.send_error_response( peer_id, @@ -586,7 +589,7 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request blocks - if req.max_blobs_requested::() > MAX_REQUEST_BLOB_SIDECARS { + if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { return self.send_error_response( peer_id, RPCResponseErrorCode::InvalidRequest, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 95c1fa33e8..608d10d665 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -155,13 +155,12 @@ impl NetworkBeaconProcessor { // Checks if a block from this proposer is already known. let block_equivocates = || { - match self - .chain - .observed_block_producers - .read() - .proposer_has_been_observed(block.message(), block.canonical_root()) - { - Ok(seen_status) => seen_status.is_slashable(), + match self.chain.observed_slashable.read().is_slashable( + block.slot(), + block.message().proposer_index(), + block.canonical_root(), + ) { + Ok(is_slashable) => is_slashable, //Both of these blocks will be rejected, so reject them now rather // than re-queuing them. Err(ObserveError::FinalizedBlock { .. 
}) diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 844fc53ab1..48c5334357 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -18,7 +18,7 @@ use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::rpc::SubstreamId; use lighthouse_network::{ - discv5::enr::{CombinedKey, EnrBuilder}, + discv5::enr::{self, CombinedKey}, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, Client, MessageId, NetworkGlobals, PeerId, Response, @@ -203,7 +203,7 @@ impl TestRig { syncnets: EnrSyncCommitteeBitfield::::default(), }); let enr_key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); + let enr = enr::Enr::builder().build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new(enr, meta_data, vec![], false, &log)); let executor = harness.runtime.task_executor.clone(); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 17760cef59..01a7e1f989 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -716,9 +716,10 @@ impl NetworkService { } let mut subscribed_topics: Vec = vec![]; - for topic_kind in - core_topics_to_subscribe::(self.fork_context.current_fork()) - { + for topic_kind in core_topics_to_subscribe::( + self.fork_context.current_fork(), + &self.fork_context.spec, + ) { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new( topic_kind.clone(), @@ -945,7 +946,10 @@ impl NetworkService { } fn subscribed_core_topics(&self) -> bool { - let core_topics = core_topics_to_subscribe::(self.fork_context.current_fork()); + let core_topics = core_topics_to_subscribe::( + self.fork_context.current_fork(), + &self.fork_context.spec, 
+ ); let core_topics: HashSet<&GossipKind> = HashSet::from_iter(&core_topics); let subscriptions = self.network_globals.gossipsub_subscriptions.read(); let subscribed_topics: HashSet<&GossipKind> = diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 7a1be46e69..78b10473df 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -13,12 +13,11 @@ use beacon_chain::{get_block_root, BeaconChainTypes}; use lighthouse_network::rpc::methods::BlobsByRootRequest; use lighthouse_network::rpc::BlocksByRootRequest; use rand::prelude::IteratorRandom; -use ssz_types::VariableList; use std::ops::IndexMut; use std::sync::Arc; use std::time::Duration; use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; -use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock}; +use types::{BlobSidecar, ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; #[derive(Debug, Copy, Clone)] pub enum ResponseType { @@ -87,11 +86,14 @@ pub trait RequestState { /* Request building methods */ /// Construct a new request. - fn build_request(&mut self) -> Result<(PeerId, Self::RequestType), LookupRequestError> { + fn build_request( + &mut self, + spec: &ChainSpec, + ) -> Result<(PeerId, Self::RequestType), LookupRequestError> { // Verify and construct request. self.too_many_attempts()?; let peer = self.get_peer()?; - let request = self.new_request(); + let request = self.new_request(spec); Ok((peer, request)) } @@ -108,7 +110,7 @@ pub trait RequestState { } // Construct request. - let (peer_id, request) = self.build_request()?; + let (peer_id, request) = self.build_request(&cx.chain.spec)?; // Update request state. self.get_state_mut().state = State::Downloading { peer_id }; @@ -151,7 +153,7 @@ pub trait RequestState { } /// Initialize `Self::RequestType`. 
- fn new_request(&self) -> Self::RequestType; + fn new_request(&self, spec: &ChainSpec) -> Self::RequestType; /// Send the request to the network service. fn make_request( @@ -254,8 +256,8 @@ impl RequestState for BlockRequestState type VerifiedResponseType = Arc>; type ReconstructedResponseType = RpcBlock; - fn new_request(&self) -> BlocksByRootRequest { - BlocksByRootRequest::new(VariableList::from(vec![self.requested_block_root])) + fn new_request(&self, spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![self.requested_block_root], spec) } fn make_request( @@ -353,10 +355,9 @@ impl RequestState for BlobRequestState; type ReconstructedResponseType = FixedBlobSidecarList; - fn new_request(&self) -> BlobsByRootRequest { + fn new_request(&self, spec: &ChainSpec) -> BlobsByRootRequest { let blob_id_vec: Vec = self.requested_ids.clone().into(); - let blob_ids = VariableList::from(blob_id_vec); - BlobsByRootRequest { blob_ids } + BlobsByRootRequest::new(blob_id_vec, spec) } fn make_request( diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index e10e8328cd..4e29816294 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -575,7 +575,7 @@ mod tests { HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) .expect("store"); let da_checker = Arc::new( - DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec) + DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone()) .expect("data availability checker"), ); let mut sl = SingleBlockLookup::::new( @@ -587,6 +587,7 @@ mod tests { ); as RequestState>::build_request( &mut sl.block_request_state, + &spec, ) .unwrap(); sl.block_request_state.state.state = State::Downloading { peer_id }; @@ -616,7 +617,7 @@ mod tests { .expect("store"); let 
da_checker = Arc::new( - DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec) + DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone()) .expect("data availability checker"), ); @@ -630,6 +631,7 @@ mod tests { for _ in 1..TestLookup2::MAX_ATTEMPTS { as RequestState>::build_request( &mut sl.block_request_state, + &spec, ) .unwrap(); sl.block_request_state.state.register_failure_downloading(); @@ -638,6 +640,7 @@ mod tests { // Now we receive the block and send it for processing as RequestState>::build_request( &mut sl.block_request_state, + &spec, ) .unwrap(); sl.block_request_state.state.state = State::Downloading { peer_id }; @@ -654,7 +657,8 @@ mod tests { sl.block_request_state.state.register_failure_processing(); assert_eq!( as RequestState>::build_request( - &mut sl.block_request_state + &mut sl.block_request_state, + &spec ), Err(LookupRequestError::TooManyAttempts { cannot_process: false diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index bcb239aaa0..acb735ea44 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -52,7 +52,6 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, }; use futures::StreamExt; -use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; use lighthouse_network::rpc::RPCError; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; @@ -230,7 +229,7 @@ pub fn spawn( log: slog::Logger, ) { assert!( - MAX_REQUEST_BLOCKS >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, + beacon_chain.spec.max_request_blocks >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request" ); diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index c01bc3e428..04feb8fdc2 
100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -475,9 +475,15 @@ impl SyncNetworkContext { }; let request_id = RequestId::Sync(sync_id); - if let Some(block_root) = blob_request.blob_ids.first().map(|id| id.block_root) { + if let Some(block_root) = blob_request + .blob_ids + .as_slice() + .first() + .map(|id| id.block_root) + { let indices = blob_request .blob_ids + .as_slice() .iter() .map(|id| id.index) .collect::>(); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index d9d4fad5ce..347d37d667 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1003,6 +1003,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("180") ) + .arg( + Arg::with_name("allow-insecure-genesis-sync") + .long("allow-insecure-genesis-sync") + .help("Enable syncing from genesis, which is generally insecure and incompatible with data availability checks. \ + Checkpoint syncing is the preferred method for syncing a node. \ + Only use this flag when testing. DO NOT use on mainnet!") + .conflicts_with("checkpoint-sync-url") + .conflicts_with("checkpoint-state") + .takes_value(false) + ) .arg( Arg::with_name("reconstruct-historic-states") .long("reconstruct-historic-states") @@ -1179,32 +1189,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("builder-profit-threshold") .long("builder-profit-threshold") .value_name("WEI_VALUE") - .help("The minimum reward in wei provided to the proposer by a block builder for \ - an external payload to be considered for inclusion in a proposal. If this \ - threshold is not met, the local EE's payload will be used. This is currently \ - *NOT* in comparison to the value of the local EE's payload. It simply checks \ - whether the total proposer reward from an external payload is equal to or \ - greater than this value. In the future, a comparison to a local payload is \ - likely to be added. 
Example: Use 250000000000000000 to set the threshold to \ - 0.25 ETH.") - .default_value("0") - .takes_value(true) - ) - .arg( - Arg::with_name("ignore-builder-override-suggestion-threshold") - .long("ignore-builder-override-suggestion-threshold") - .value_name("PERCENTAGE") - .help("When the EE advises Lighthouse to ignore the builder payload, this flag \ - specifies a percentage threshold for the difference between the reward from \ - the builder payload and the local EE's payload. This threshold must be met \ - for Lighthouse to consider ignoring the EE's suggestion. If the reward from \ - the builder's payload doesn't exceed the local payload by at least this \ - percentage, the local payload will be used. The conditions under which the \ - EE may make this suggestion depend on the EE's implementation, with the \ - primary intent being to safeguard against potential censorship attacks \ - from builders. Setting this flag to 0 will cause Lighthouse to always \ - ignore the EE's suggestion. Default: 10.0 (equivalent to 10%).") - .default_value("10.0") + .help("This flag is deprecated and has no effect.") .takes_value(true) ) .arg( @@ -1256,12 +1241,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("always-prefer-builder-payload") .long("always-prefer-builder-payload") - .help("If set, the beacon node always uses the payload from the builder instead of the local payload.") - // The builder profit threshold flag is used to provide preference - // to local payloads, therefore it fundamentally conflicts with - // always using the builder. 
- .conflicts_with("builder-profit-threshold") - .conflicts_with("ignore-builder-override-suggestion-threshold") + .help("This flag is deprecated and has no effect.") ) .arg( Arg::with_name("invalid-gossip-verified-blocks-path") @@ -1342,5 +1322,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("64") .takes_value(true) ) + .arg( + Arg::with_name("disable-duplicate-warn-logs") + .long("disable-duplicate-warn-logs") + .help("Disable warning logs for duplicate gossip messages. The WARN level log is \ + useful for detecting a duplicate validator key running elsewhere. However, this may \ + result in excessive warning logs if the validator is broadcasting messages to \ + multiple beacon nodes via the validator client --broadcast flag. In this case, \ + disabling these warn logs may be useful.") + .takes_value(false) + ) .group(ArgGroup::with_name("enable_http").args(&["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index bd50c04d65..88dc74066f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -310,6 +310,21 @@ pub fn get_config( clap_utils::parse_optional(cli_args, "builder-user-agent")?; } + if cli_args.is_present("builder-profit-threshold") { + warn!( + log, + "Ignoring --builder-profit-threshold"; + "info" => "this flag is deprecated and will be removed" + ); + } + if cli_args.is_present("always-prefer-builder-payload") { + warn!( + log, + "Ignoring --always-prefer-builder-payload"; + "info" => "this flag is deprecated and will be removed" + ); + } + // Set config values from parse values. 
el_config.secret_files = vec![secret_file.clone()]; el_config.execution_endpoints = vec![execution_endpoint.clone()]; @@ -318,12 +333,6 @@ pub fn get_config( el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; el_config.default_datadir = client_config.data_dir().clone(); - el_config.builder_profit_threshold = - clap_utils::parse_required(cli_args, "builder-profit-threshold")?; - el_config.always_prefer_builder_payload = - cli_args.is_present("always-prefer-builder-payload"); - el_config.ignore_builder_override_suggestion_threshold = - clap_utils::parse_required(cli_args, "ignore-builder-override-suggestion-threshold")?; let execution_timeout_multiplier = clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); @@ -529,6 +538,8 @@ pub fn get_config( None }; + client_config.allow_insecure_genesis_sync = cli_args.is_present("allow-insecure-genesis-sync"); + client_config.genesis = if eth2_network_config.genesis_state_is_known() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. if let (Some(initial_state_path), Some(initial_block_path)) = ( @@ -1436,6 +1447,9 @@ pub fn set_network_config( Some(config_str.parse()?) } }; + + config.disable_duplicate_warn_logs = cli_args.is_present("disable-duplicate-warn-logs"); + Ok(()) } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 2fbef2f709..f1902c50ff 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -4,17 +4,19 @@ use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::io::Write; +use std::num::NonZeroUsize; +use types::non_zero_usize::new_non_zero_usize; use types::{EthSpec, Unsigned}; use zstd::Encoder; // Only used in tests. Mainnet sets a higher default on the CLI. 
pub const DEFAULT_EPOCHS_PER_STATE_DIFF: u64 = 8; -pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 64; -pub const DEFAULT_STATE_CACHE_SIZE: usize = 128; +pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(64); +pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); pub const DEFAULT_COMPRESSION_LEVEL: i32 = 1; -pub const DEFAULT_DIFF_BUFFER_CACHE_SIZE: usize = 16; +pub const DEFAULT_DIFF_BUFFER_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); const EST_COMPRESSION_FACTOR: usize = 2; -pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1; +pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1; pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; @@ -24,15 +26,15 @@ pub struct StoreConfig { /// Number of epochs between state diffs in the hot database. pub epochs_per_state_diff: u64, /// Maximum number of blocks to store in the in-memory block cache. - pub block_cache_size: usize, + pub block_cache_size: NonZeroUsize, /// Maximum number of states to store in the in-memory state cache. - pub state_cache_size: usize, + pub state_cache_size: NonZeroUsize, /// Compression level for blocks, state diffs and other compressed values. pub compression_level: i32, /// Maximum number of `HDiffBuffer`s to store in memory. - pub diff_buffer_cache_size: usize, + pub diff_buffer_cache_size: NonZeroUsize, /// Maximum number of states from freezer database to store in the in-memory state cache. - pub historic_state_cache_size: usize, + pub historic_state_cache_size: NonZeroUsize, /// Whether to compact the database on initialization. pub compact_on_init: bool, /// Whether to compact the database during database pruning. 
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index d796d06deb..400a0d2b66 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -42,8 +42,6 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; use types::blob_sidecar::BlobSidecarList; -use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; -use types::EthSpec; use types::*; use zstd::{Decoder, Encoder}; @@ -103,7 +101,7 @@ struct BlockCache { } impl BlockCache { - pub fn new(size: usize) -> Self { + pub fn new(size: NonZeroUsize) -> Self { Self { block_cache: LruCache::new(size), blob_cache: LruCache::new(size), @@ -199,14 +197,10 @@ impl HotColdDB, MemoryStore> { let hierarchy = config.hierarchy_config.to_moduli()?; - let block_cache_size = - NonZeroUsize::new(config.block_cache_size).ok_or(Error::ZeroCacheSize)?; - let state_cache_size = - NonZeroUsize::new(config.state_cache_size).ok_or(Error::ZeroCacheSize)?; - let historic_state_cache_size = - NonZeroUsize::new(config.historic_state_cache_size).ok_or(Error::ZeroCacheSize)?; - let diff_buffer_cache_size = - NonZeroUsize::new(config.diff_buffer_cache_size).ok_or(Error::ZeroCacheSize)?; + let block_cache_size = config.block_cache_size; + let state_cache_size = config.state_cache_size; + let historic_state_cache_size = config.historic_state_cache_size; + let diff_buffer_cache_size = config.diff_buffer_cache_size; let db = HotColdDB { split: RwLock::new(Split::default()), @@ -215,11 +209,11 @@ impl HotColdDB, MemoryStore> { cold_db: MemoryStore::open(), blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), - block_cache: Mutex::new(BlockCache::new(block_cache_size.get())), + block_cache: Mutex::new(BlockCache::new(block_cache_size)), state_cache: Mutex::new(StateCache::new(state_cache_size)), immutable_validators: Arc::new(RwLock::new(Default::default())), - historic_state_cache: 
Mutex::new(LruCache::new(historic_state_cache_size.get())), - diff_buffer_cache: Mutex::new(LruCache::new(diff_buffer_cache_size.get())), + historic_state_cache: Mutex::new(LruCache::new(historic_state_cache_size)), + diff_buffer_cache: Mutex::new(LruCache::new(diff_buffer_cache_size)), config, hierarchy, spec, @@ -249,14 +243,10 @@ impl HotColdDB, LevelDB> { let hierarchy = config.hierarchy_config.to_moduli()?; - let block_cache_size = - NonZeroUsize::new(config.block_cache_size).ok_or(Error::ZeroCacheSize)?; - let state_cache_size = - NonZeroUsize::new(config.state_cache_size).ok_or(Error::ZeroCacheSize)?; - let historic_state_cache_size = - NonZeroUsize::new(config.historic_state_cache_size).ok_or(Error::ZeroCacheSize)?; - let diff_buffer_cache_size = - NonZeroUsize::new(config.diff_buffer_cache_size).ok_or(Error::ZeroCacheSize)?; + let block_cache_size = config.block_cache_size; + let state_cache_size = config.state_cache_size; + let historic_state_cache_size = config.historic_state_cache_size; + let diff_buffer_cache_size = config.diff_buffer_cache_size; let db = HotColdDB { split: RwLock::new(Split::default()), @@ -265,11 +255,11 @@ impl HotColdDB, LevelDB> { cold_db: LevelDB::open(cold_path)?, blobs_db: LevelDB::open(blobs_db_path)?, hot_db: LevelDB::open(hot_path)?, - block_cache: Mutex::new(BlockCache::new(block_cache_size.get())), + block_cache: Mutex::new(BlockCache::new(block_cache_size)), state_cache: Mutex::new(StateCache::new(state_cache_size)), immutable_validators: Arc::new(RwLock::new(Default::default())), - historic_state_cache: Mutex::new(LruCache::new(historic_state_cache_size.get())), - diff_buffer_cache: Mutex::new(LruCache::new(diff_buffer_cache_size.get())), + historic_state_cache: Mutex::new(LruCache::new(historic_state_cache_size)), + diff_buffer_cache: Mutex::new(LruCache::new(diff_buffer_cache_size)), config, hierarchy, spec, @@ -2554,7 +2544,7 @@ impl, Cold: ItemStore> HotColdDB let min_current_epoch = 
self.get_split_slot().epoch(E::slots_per_epoch()) + 2; let min_data_availability_boundary = std::cmp::max( deneb_fork_epoch, - min_current_epoch.saturating_sub(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS), + min_current_epoch.saturating_sub(self.spec.min_epochs_for_blob_sidecars_requests), ); self.try_prune_blobs(force, min_data_availability_boundary) diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index ccd4c7b667..1bd73c53f8 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -50,7 +50,7 @@ impl StateCache { pub fn new(capacity: NonZeroUsize) -> Self { StateCache { finalized_state: None, - states: LruCache::new(capacity.get()), + states: LruCache::new(capacity), block_map: BlockMap::default(), capacity, max_epoch: Epoch::new(0), diff --git a/book/src/builders.md b/book/src/builders.md index d5cb4c61b2..e48cc0a884 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -176,31 +176,6 @@ By default, Lighthouse is strict with these conditions, but we encourage users t - `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder API will always be used for payload construction, regardless of recent chain conditions. -## Builder Profit Threshold - -If you are generally uneasy with the risks associated with outsourced payload production (liveness/censorship) but would -consider using it for the chance of out-sized rewards, this flag may be useful: - -`--builder-profit-threshold ` - -The number provided indicates the minimum reward that an external payload must provide the proposer for it to be considered -for inclusion in a proposal. For example, if you'd only like to use an external payload for a reward of >= 0.25 ETH, you -would provide your beacon node with `--builder-profit-threshold 250000000000000000`. 
If it's your turn to propose and the -most valuable payload offered by builders is only 0.1 ETH, the local execution engine's payload will be used. - -Since the [Capella](https://ethereum.org/en/history/#capella) upgrade, a comparison of the external payload and local payload will be made according to the [engine_getPayloadV2](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_getpayloadv2) API. The logic is as follows: - -``` -if local payload value >= builder payload value: - use local payload -else if builder payload value >= builder_profit_threshold or builder_profit_threshold == 0: - use builder payload -else: - use local payload -``` - -If you would like to always use the builder payload, you can add the flag `--always-prefer-builder-payload` to the beacon node. - ## Checking your builder config You can check that your builder is configured correctly by looking for these log messages. diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 2a08f90174..bb4d7a10c6 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -9,8 +9,11 @@ USAGE: lighthouse beacon_node [FLAGS] [OPTIONS] FLAGS: - --always-prefer-builder-payload If set, the beacon node always uses the payload from the builder instead - of the local payload. + --allow-insecure-genesis-sync Enable syncing from genesis, which is generally insecure and incompatible + with data availability checks. Checkpoint syncing is the preferred method + for syncing a node. Only use this flag when testing. DO NOT use on + mainnet! + --always-prefer-builder-payload This flag is deprecated and has no effect. --always-prepare-payload Send payload attributes with every fork choice update. This is intended for use by block builders, relays and developers. You should set a fee recipient on this BN and also consider adjusting the --prepare-payload- @@ -29,6 +32,11 @@ FLAGS: --disable-deposit-contract-sync Explictly disables syncing of deposit logs from the execution node. 
This overrides any previous option that depends on it. Useful if you intend to run a non-validating beacon node. + --disable-duplicate-warn-logs Disable warning logs for duplicate gossip messages. The WARN level log is + useful for detecting a duplicate validator key running elsewhere. + However, this may result in excessive warning logs if the validator is + broadcasting messages to multiple beacon nodes via the validator client + --broadcast flag. In this case, disabling these warn logs may be useful. -x, --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. This disables this feature, fixing the ENR's IP/PORT to those specified on boot. @@ -170,12 +178,8 @@ OPTIONS: `SLOTS_PER_EPOCH`, it will NOT query any connected builders, and will use the local execution engine for payload construction. [default: 8] --builder-profit-threshold - The minimum reward in wei provided to the proposer by a block builder for an external payload to be - considered for inclusion in a proposal. If this threshold is not met, the local EE's payload will be used. - This is currently *NOT* in comparison to the value of the local EE's payload. It simply checks whether the - total proposer reward from an external payload is equal to or greater than this value. In the future, a - comparison to a local payload is likely to be added. Example: Use 250000000000000000 to set the threshold to - 0.25 ETH. [default: 0] + This flag is deprecated and has no effect. + --builder-user-agent The HTTP user agent to send alongside requests to the builder URL. The default is Lighthouse's version string. @@ -308,14 +312,6 @@ OPTIONS: --http-tls-key The path of the private key to be used when serving the HTTP API server over TLS. Must not be password- protected. 
- --ignore-builder-override-suggestion-threshold - When the EE advises Lighthouse to ignore the builder payload, this flag specifies a percentage threshold for - the difference between the reward from the builder payload and the local EE's payload. This threshold must - be met for Lighthouse to consider ignoring the EE's suggestion. If the reward from the builder's payload - doesn't exceed the local payload by at least this percentage, the local payload will be used. The conditions - under which the EE may make this suggestion depend on the EE's implementation, with the primary intent being - to safeguard against potential censorship attacks from builders. Setting this flag to 0 will cause - Lighthouse to always ignore the EE's suggestion. Default: 10.0 (equivalent to 10%). [default: 10.0] --invalid-gossip-verified-blocks-path If a block succeeds gossip validation whilst failing full validation, store the block SSZ as a file at this path. This feature is only recommended for developers. This directory is not pruned, users should be careful diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 4471b0e104..62b64efd41 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -56,6 +56,10 @@ FLAGS: machine. Note that logs can often contain sensitive information about your validator and so this flag should be used with caution. For Windows users, the log file permissions will be inherited from the parent folder. --metrics Enable the Prometheus metrics HTTP server. Disabled by default. + --produce-block-v3 + Enable block production via the block v3 endpoint for this validator client. This should only be enabled + when paired with a beacon node that has this endpoint implemented. This flag will be enabled by default in + future. --unencrypted-http-transport This is a safety flag to ensure that the user is aware that the http transport is unencrypted and using a custom HTTP address is unsafe. 
diff --git a/book/src/redundancy.md b/book/src/redundancy.md index 93529295ae..8318aea21e 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -101,6 +101,10 @@ from this list: - `none`: Disable all broadcasting. This option only has an effect when provided alone, otherwise it is ignored. Not recommended except for expert tweakers. +Broadcasting attestation, blocks and sync committee messages may result in excessive warning logs in the beacon node +due to duplicate gossip messages. In this case, it may be desirable to disable warning logs for duplicates using the +beacon node `--disable-duplicate-warn-logs` flag. + The default is `--broadcast subscriptions`. To also broadcast blocks for example, use `--broadcast subscriptions,blocks`. diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 6cfa8f4cf7..6fb1ea9bf5 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -1,8 +1,7 @@ use beacon_node::{get_data_dir, set_network_config}; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; -use lighthouse_network::discovery::create_enr_builder_from_config; -use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr}; +use lighthouse_network::discv5::{self, enr::CombinedKey, Enr}; use lighthouse_network::{ discovery::{load_enr_from_disk, use_or_load_enr}, load_private_key, CombinedKeyExt, NetworkConfig, @@ -20,7 +19,7 @@ pub struct BootNodeConfig { pub boot_nodes: Vec, pub local_enr: Enr, pub local_key: CombinedKey, - pub discv5_config: Discv5Config, + pub discv5_config: discv5::Config, phantom: PhantomData, } @@ -130,8 +129,25 @@ impl BootNodeConfig { // Build the local ENR let mut local_enr = { - let enable_tcp = false; - let mut builder = create_enr_builder_from_config(&network_config, enable_tcp); + let (maybe_ipv4_address, maybe_ipv6_address) = network_config.enr_address; + let mut builder = discv5::Enr::builder(); + + if let Some(ip) = maybe_ipv4_address { + builder.ip4(ip); + } + + if let Some(ip) = 
maybe_ipv6_address { + builder.ip6(ip); + } + + if let Some(udp4_port) = network_config.enr_udp4_port { + builder.udp4(udp4_port.get()); + } + + if let Some(udp6_port) = network_config.enr_udp6_port { + builder.udp6(udp6_port.get()); + } + // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { builder.add_value("eth2", &enr_fork_bytes); @@ -157,7 +173,7 @@ impl BootNodeConfig { /// The set of configuration parameters that can safely be (de)serialized. /// -/// Its fields are a subset of the fields of `BootNodeConfig`, some of them are copied from `Discv5Config`. +/// Its fields are a subset of the fields of `BootNodeConfig`, some of them are copied from `discv5::Config`. #[derive(Serialize, Deserialize)] pub struct BootNodeConfigSerialization { pub ipv4_listen_socket: Option, diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 5a5729dc04..8260038a0b 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -5,7 +5,7 @@ use crate::config::BootNodeConfigSerialization; use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; use lighthouse_network::{ - discv5::{enr::NodeId, Discv5, Discv5Event}, + discv5::{self, enr::NodeId, Discv5}, EnrExt, Eth2Enr, }; use slog::info; @@ -144,17 +144,17 @@ pub async fn run( } Some(event) = event_stream.recv() => { match event { - Discv5Event::Discovered(_enr) => { + discv5::Event::Discovered(_enr) => { // An ENR has bee obtained by the server // Ignore these events here } - Discv5Event::EnrAdded { .. } => {} // Ignore - Discv5Event::TalkRequest(_) => {} // Ignore - Discv5Event::NodeInserted { .. } => {} // Ignore - Discv5Event::SocketUpdated(socket_addr) => { + discv5::Event::EnrAdded { .. } => {} // Ignore + discv5::Event::TalkRequest(_) => {} // Ignore + discv5::Event::NodeInserted { .. 
} => {} // Ignore + discv5::Event::SocketUpdated(socket_addr) => { info!(log, "Advertised socket address updated"; "socket_addr" => %socket_addr); } - Discv5Event::SessionEstablished{ .. } => {} // Ignore + discv5::Event::SessionEstablished{ .. } => {} // Ignore } } } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 8dc0888e6e..61c65a29ba 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -367,7 +367,8 @@ impl ValidatorDefinitions { pub fn save>(&self, validators_dir: P) -> Result<(), Error> { let config_path = validators_dir.as_ref().join(CONFIG_FILENAME); let temp_path = validators_dir.as_ref().join(CONFIG_TEMP_FILENAME); - let bytes = serde_yaml::to_vec(self).map_err(Error::UnableToEncodeFile)?; + let mut bytes = vec![]; + serde_yaml::to_writer(&mut bytes, self).map_err(Error::UnableToEncodeFile)?; write_file_via_temporary(&config_path, &temp_path, &bytes) .map_err(Error::UnableToWriteFile)?; diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 02460551a9..0f27bb6672 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -36,7 +36,7 @@ pretty_reqwest_error = { workspace = true } tokio = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] -psutil = { version = "3.2.2", optional = true } +psutil = { version = "3.3.0", optional = true } procfs = { version = "0.15.1", optional = true } [features] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index e045494c9d..c633a6c6c6 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -31,6 +31,7 @@ use serde::{de::DeserializeOwned, Serialize}; use ssz::Encode; use std::convert::TryFrom; use std::fmt; +use std::future::Future; use std::iter::Iterator; use std::path::PathBuf; use std::time::Duration; @@ -67,6 +68,8 @@ pub enum Error { InvalidJson(serde_json::Error), /// The server returned an invalid server-sent 
event. InvalidServerSentEvent(String), + /// The server sent invalid response headers. + InvalidHeaders(String), /// The server returned an invalid SSZ response. InvalidSsz(ssz::DecodeError), /// An I/O error occurred while loading an API token from disk. @@ -97,6 +100,7 @@ impl Error { Error::MissingSignatureHeader => None, Error::InvalidJson(_) => None, Error::InvalidServerSentEvent(_) => None, + Error::InvalidHeaders(_) => None, Error::InvalidSsz(_) => None, Error::TokenReadError(..) => None, Error::NoServerPubkey | Error::NoToken => None, @@ -124,7 +128,7 @@ pub struct Timeouts { pub get_beacon_blocks_ssz: Duration, pub get_debug_beacon_states: Duration, pub get_deposit_snapshot: Duration, - pub get_validator_block_ssz: Duration, + pub get_validator_block: Duration, } impl Timeouts { @@ -140,7 +144,7 @@ impl Timeouts { get_beacon_blocks_ssz: timeout, get_debug_beacon_states: timeout, get_deposit_snapshot: timeout, - get_validator_block_ssz: timeout, + get_validator_block: timeout, } } } @@ -273,27 +277,28 @@ impl BeaconNodeHttpClient { } /// Perform a HTTP GET request using an 'accept' header, returning `None` on a 404 error. 
- pub async fn get_bytes_response_with_response_headers( + pub async fn get_response_with_response_headers( &self, url: U, accept_header: Accept, timeout: Duration, - ) -> Result<(Option>, Option), Error> { + parser: impl FnOnce(Response, HeaderMap) -> F, + ) -> Result, Error> + where + F: Future>, + { let opt_response = self .get_response(url, |b| b.accept(accept_header).timeout(timeout)) .await .optional()?; - // let headers = opt_response.headers(); match opt_response { Some(resp) => { let response_headers = resp.headers().clone(); - Ok(( - Some(resp.bytes().await?.into_iter().collect::>()), - Some(response_headers), - )) + let parsed_response = parser(resp, response_headers).await?; + Ok(Some(parsed_response)) } - None => Ok((None, None)), + None => Ok(None), } } @@ -1816,12 +1821,13 @@ impl BeaconNodeHttpClient { } /// returns `GET v3/validator/blocks/{slot}` URL path - pub async fn get_validator_blocks_v3_path( + pub async fn get_validator_blocks_v3_path( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, + builder_booster_factor: Option, ) -> Result { let mut path = self.eth_path(V3)?; @@ -1844,6 +1850,11 @@ impl BeaconNodeHttpClient { .append_pair("skip_randao_verification", ""); } + if let Some(builder_booster_factor) = builder_booster_factor { + path.query_pairs_mut() + .append_pair("builder_boost_factor", &builder_booster_factor.to_string()); + } + Ok(path) } @@ -1853,12 +1864,14 @@ impl BeaconNodeHttpClient { slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result, Error> { + builder_booster_factor: Option, + ) -> Result<(JsonProduceBlockV3Response, ProduceBlockV3Metadata), Error> { self.get_validator_blocks_v3_modular( slot, randao_reveal, graffiti, SkipRandaoVerification::No, + builder_booster_factor, ) .await } @@ -1870,35 +1883,48 @@ impl BeaconNodeHttpClient { randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, 
skip_randao_verification: SkipRandaoVerification, - ) -> Result, Error> { + builder_booster_factor: Option, + ) -> Result<(JsonProduceBlockV3Response, ProduceBlockV3Metadata), Error> { let path = self - .get_validator_blocks_v3_path::( + .get_validator_blocks_v3_path( slot, randao_reveal, graffiti, skip_randao_verification, + builder_booster_factor, ) .await?; - let response = self.get_response(path, |b| b).await?; + let opt_result = self + .get_response_with_response_headers( + path, + Accept::Json, + self.timeouts.get_validator_block, + |response, headers| async move { + let header_metadata = ProduceBlockV3Metadata::try_from(&headers) + .map_err(Error::InvalidHeaders)?; + if header_metadata.execution_payload_blinded { + let blinded_response = response + .json::, + ProduceBlockV3Metadata>>() + .await? + .map_data(ProduceBlockV3Response::Blinded); + Ok((blinded_response, header_metadata)) + } else { + let full_block_response= response + .json::, + ProduceBlockV3Metadata>>() + .await? + .map_data(ProduceBlockV3Response::Full); + Ok((full_block_response, header_metadata)) + } + }, + ) + .await?; - let is_blinded_payload = response - .headers() - .get(EXECUTION_PAYLOAD_BLINDED_HEADER) - .map(|value| value.to_str().unwrap_or_default().to_lowercase() == "true") - .unwrap_or(false); - - if is_blinded_payload { - let blinded_payload = response - .json::>>() - .await?; - Ok(ForkVersionedBeaconBlockType::Blinded(blinded_payload)) - } else { - let full_payload = response - .json::>>() - .await?; - Ok(ForkVersionedBeaconBlockType::Full(full_payload)) - } + // Generic handler is optional but this route should never 404 unless unimplemented, so + // treat that as an error. 
+ opt_result.ok_or(Error::StatusCode(StatusCode::NOT_FOUND)) } /// `GET v3/validator/blocks/{slot}` in ssz format @@ -1907,12 +1933,14 @@ impl BeaconNodeHttpClient { slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result<(Option>, bool), Error> { + builder_booster_factor: Option, + ) -> Result<(ProduceBlockV3Response, ProduceBlockV3Metadata), Error> { self.get_validator_blocks_v3_modular_ssz::( slot, randao_reveal, graffiti, SkipRandaoVerification::No, + builder_booster_factor, ) .await } @@ -1924,33 +1952,55 @@ impl BeaconNodeHttpClient { randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result<(Option>, bool), Error> { + builder_booster_factor: Option, + ) -> Result<(ProduceBlockV3Response, ProduceBlockV3Metadata), Error> { let path = self - .get_validator_blocks_v3_path::( + .get_validator_blocks_v3_path( slot, randao_reveal, graffiti, skip_randao_verification, + builder_booster_factor, ) .await?; - let (response_content, response_headers) = self - .get_bytes_response_with_response_headers( + let opt_response = self + .get_response_with_response_headers( path, Accept::Ssz, - self.timeouts.get_validator_block_ssz, + self.timeouts.get_validator_block, + |response, headers| async move { + let metadata = ProduceBlockV3Metadata::try_from(&headers) + .map_err(Error::InvalidHeaders)?; + let response_bytes = response.bytes().await?; + + // Parse bytes based on metadata. 
+ let response = if metadata.execution_payload_blinded { + ProduceBlockV3Response::Blinded( + BlindedBeaconBlock::from_ssz_bytes_for_fork( + &response_bytes, + metadata.consensus_version, + ) + .map_err(Error::InvalidSsz)?, + ) + } else { + ProduceBlockV3Response::Full( + FullBlockContents::from_ssz_bytes_for_fork( + &response_bytes, + metadata.consensus_version, + ) + .map_err(Error::InvalidSsz)?, + ) + }; + + Ok((response, metadata)) + }, ) .await?; - let is_blinded_payload = match response_headers { - Some(headers) => headers - .get(EXECUTION_PAYLOAD_BLINDED_HEADER) - .map(|value| value.to_str().unwrap_or_default().to_lowercase() == "true") - .unwrap_or(false), - None => false, - }; - - Ok((response_content, is_blinded_payload)) + // Generic handler is optional but this route should never 404 unless unimplemented, so + // treat that as an error. + opt_response.ok_or(Error::StatusCode(StatusCode::NOT_FOUND)) } /// `GET v2/validator/blocks/{slot}` in ssz format @@ -1981,7 +2031,7 @@ impl BeaconNodeHttpClient { .get_validator_blocks_path::(slot, randao_reveal, graffiti, skip_randao_verification) .await?; - self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block) .await } @@ -2085,7 +2135,7 @@ impl BeaconNodeHttpClient { ) .await?; - self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block) .await } diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index cd405386b8..11706f3094 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -243,6 +243,8 @@ pub struct ProcessHealth { pub pid_mem_resident_set_size: u64, /// The total virtual memory used by this pid. pub pid_mem_virtual_memory_size: u64, + /// The total shared memory used by this pid. 
+ pub pid_mem_shared_memory_size: u64, /// Number of cpu seconds consumed by this pid. pub pid_process_seconds_total: u64, } @@ -277,6 +279,7 @@ impl ProcessHealth { pid_num_threads: stat.num_threads, pid_mem_resident_set_size: process_mem.rss(), pid_mem_virtual_memory_size: process_mem.vms(), + pid_mem_shared_memory_size: process_mem.shared(), pid_process_seconds_total: process_times.busy().as_secs() + process_times.children_system().as_secs() + process_times.children_system().as_secs(), diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index f19635894c..f25063ec6b 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1,9 +1,13 @@ //! This module exposes a superset of the `types` crate. It adds additional types that are only //! required for the HTTP API. -use crate::Error as ServerError; +use crate::{ + Error as ServerError, CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, + EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, +}; use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStatus}; use mediatype::{names, MediaType, MediaTypeList}; +use reqwest::header::HeaderMap; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use ssz::{Decode, DecodeError}; @@ -725,6 +729,7 @@ pub struct ValidatorBlocksQuery { pub randao_reveal: SignatureBytes, pub graffiti: Option, pub skip_randao_verification: SkipRandaoVerification, + pub builder_boost_factor: Option, } #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Deserialize)] @@ -1431,11 +1436,6 @@ pub mod serde_status_code { } } -pub enum ForkVersionedBeaconBlockType { - Full(ForkVersionedResponse>), - Blinded(ForkVersionedResponse>), -} - #[cfg(test)] mod tests { use super::*; @@ -1522,6 +1522,9 @@ pub enum ProduceBlockV3Response { Blinded(BlindedBeaconBlock), } +pub type JsonProduceBlockV3Response = + ForkVersionedResponse, ProduceBlockV3Metadata>; + /// A wrapper over a [`BeaconBlock`] or a [`BlockContents`]. 
#[derive(Debug, Encode, Serialize, Deserialize)] #[serde(untagged)] @@ -1536,6 +1539,28 @@ pub enum FullBlockContents { pub type BlockContentsTuple = (BeaconBlock, Option<(KzgProofs, BlobsList)>); +// This value should never be used +fn dummy_consensus_version() -> ForkName { + ForkName::Base +} + +/// Metadata about a `ProduceBlockV3Response` which is returned in the body & headers. +#[derive(Debug, Deserialize, Serialize)] +pub struct ProduceBlockV3Metadata { + // The consensus version is serialized & deserialized by `ForkVersionedResponse`. + #[serde( + skip_serializing, + skip_deserializing, + default = "dummy_consensus_version" + )] + pub consensus_version: ForkName, + pub execution_payload_blinded: bool, + #[serde(with = "serde_utils::u256_dec")] + pub execution_payload_value: Uint256, + #[serde(with = "serde_utils::u256_dec")] + pub consensus_block_value: Uint256, +} + impl FullBlockContents { pub fn new(block: BeaconBlock, blob_data: Option<(KzgProofs, BlobsList)>) -> Self { match blob_data { @@ -1557,13 +1582,19 @@ impl FullBlockContents { len: bytes.len(), expected: slot_len, })?; - let slot = Slot::from_ssz_bytes(slot_bytes)?; let fork_at_slot = spec.fork_name_at_slot::(slot); + Self::from_ssz_bytes_for_fork(bytes, fork_at_slot) + } - match fork_at_slot { + /// SSZ decode with fork variant passed in explicitly. 
+ pub fn from_ssz_bytes_for_fork( + bytes: &[u8], + fork_name: ForkName, + ) -> Result { + match fork_name { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BeaconBlock::from_ssz_bytes(bytes, spec) + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) .map(|block| FullBlockContents::Block(block)) } ForkName::Deneb => { @@ -1574,8 +1605,9 @@ impl FullBlockContents { builder.register_type::>()?; let mut decoder = builder.build()?; - let block = - decoder.decode_next_with(|bytes| BeaconBlock::from_ssz_bytes(bytes, spec))?; + let block = decoder.decode_next_with(|bytes| { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + })?; let kzg_proofs = decoder.decode_next()?; let blobs = decoder.decode_next()?; @@ -1644,6 +1676,52 @@ impl Into> for FullBlockContents { pub type SignedBlockContentsTuple = (SignedBeaconBlock, Option<(KzgProofs, BlobsList)>); +fn parse_required_header( + headers: &HeaderMap, + header_name: &str, + parse: impl FnOnce(&str) -> Result, +) -> Result { + let str_value = headers + .get(header_name) + .ok_or_else(|| format!("missing required header {header_name}"))? 
+ .to_str() + .map_err(|e| format!("invalid value in {header_name}: {e}"))?; + parse(str_value) +} + +impl TryFrom<&HeaderMap> for ProduceBlockV3Metadata { + type Error = String; + + fn try_from(headers: &HeaderMap) -> Result { + let consensus_version = parse_required_header(headers, CONSENSUS_VERSION_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {CONSENSUS_VERSION_HEADER}: {e:?}")) + })?; + let execution_payload_blinded = + parse_required_header(headers, EXECUTION_PAYLOAD_BLINDED_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {EXECUTION_PAYLOAD_BLINDED_HEADER}: {e:?}")) + })?; + let execution_payload_value = + parse_required_header(headers, EXECUTION_PAYLOAD_VALUE_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {EXECUTION_PAYLOAD_VALUE_HEADER}: {e:?}")) + })?; + let consensus_block_value = + parse_required_header(headers, CONSENSUS_BLOCK_VALUE_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {CONSENSUS_BLOCK_VALUE_HEADER}: {e:?}")) + })?; + + Ok(ProduceBlockV3Metadata { + consensus_version, + execution_payload_blinded, + execution_payload_value, + consensus_block_value, + }) + } +} + /// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBlockContents`]. 
#[derive(Clone, Debug, Encode, Serialize, Deserialize)] #[serde(untagged)] diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index d82a2c09b8..1928aeb309 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -1,16 +1,19 @@ # Prater config # Extends the mainnet preset -CONFIG_NAME: 'prater' PRESET_BASE: 'mainnet' +CONFIG_NAME: 'prater' + # Transition # --------------------------------------------------------------- +# Expected August 10, 2022 TERMINAL_TOTAL_DIFFICULTY: 10790000 # By default, don't use these params TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + # Genesis # --------------------------------------------------------------- # `2**14` (= 16,384) @@ -32,19 +35,15 @@ GENESIS_DELAY: 1919188 # Altair ALTAIR_FORK_VERSION: 0x01001020 ALTAIR_FORK_EPOCH: 36660 -# Merge +# Bellatrix BELLATRIX_FORK_VERSION: 0x02001020 BELLATRIX_FORK_EPOCH: 112260 # Capella CAPELLA_FORK_VERSION: 0x03001020 CAPELLA_FORK_EPOCH: 162304 -# Sharding -SHARDING_FORK_VERSION: 0x04001020 -SHARDING_FORK_EPOCH: 18446744073709551615 - -# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. 
-TRANSITION_TOTAL_DIFFICULTY: 4294967296 - +# DENEB +DENEB_FORK_VERSION: 0x04001020 +DENEB_FORK_EPOCH: 231680 # Time parameters # --------------------------------------------------------------- @@ -70,11 +69,10 @@ INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**3 (= 8) -MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 - +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # Fork choice # --------------------------------------------------------------- @@ -89,16 +87,41 @@ DEPOSIT_NETWORK_ID: 5 # Prater test deposit contract on Goerli Testnet DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b -# Network +# Networking # --------------------------------------------------------------- -SUBNETS_PER_NODE: 2 +# `10 * 2**20` (= 10485760, 10 MiB) GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) MAX_CHUNK_SIZE: 10485760 +# 5s TTFB_TIMEOUT: 5 +# 10s RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) ATTESTATION_SUBNET_COUNT: 64 ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index 
d93b74ca95..eb061c7526 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -14,6 +14,10 @@ lazy_static::lazy_static! { "process_virtual_memory_bytes", "Virtual memory used by the current process" ); + pub static ref PROCESS_SHR_MEM: Result = try_create_int_gauge( + "process_shared_memory_bytes", + "Shared memory used by the current process" + ); pub static ref PROCESS_SECONDS: Result = try_create_int_gauge( "process_cpu_seconds_total", "Total cpu time taken by the current process" @@ -90,6 +94,7 @@ pub fn scrape_process_health_metrics() { set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads); set_gauge(&PROCESS_RES_MEM, health.pid_mem_resident_set_size as i64); set_gauge(&PROCESS_VIRT_MEM, health.pid_mem_virtual_memory_size as i64); + set_gauge(&PROCESS_SHR_MEM, health.pid_mem_shared_memory_size as i64); set_gauge(&PROCESS_SECONDS, health.pid_process_seconds_total as i64); } } diff --git a/consensus/state_processing/src/per_block_processing/deneb.rs b/consensus/state_processing/src/per_block_processing/deneb.rs index 8f7cb0514f..217c2ea30b 100644 --- a/consensus/state_processing/src/per_block_processing/deneb.rs +++ b/consensus/state_processing/src/per_block_processing/deneb.rs @@ -1,6 +1,5 @@ use ethereum_hashing::hash_fixed; -use types::consts::deneb::VERSIONED_HASH_VERSION_KZG; -use types::{KzgCommitment, VersionedHash}; +use types::{KzgCommitment, VersionedHash, VERSIONED_HASH_VERSION_KZG}; pub fn kzg_commitment_to_versioned_hash(kzg_commitment: &KzgCommitment) -> VersionedHash { let mut hashed_commitment = hash_fixed(&kzg_commitment.0); diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index a8e013165a..081a1df654 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -112,12 +112,15 @@ impl> BeaconBlock { let slot = Slot::from_ssz_bytes(slot_bytes)?; let fork_at_slot = spec.fork_name_at_slot::(slot); + Self::from_ssz_bytes_for_fork(bytes, 
fork_at_slot) + } - Ok(map_fork_name!( - fork_at_slot, - Self, - <_>::from_ssz_bytes(bytes)? - )) + /// Custom SSZ decoder that takes a `ForkName` as context. + pub fn from_ssz_bytes_for_fork( + bytes: &[u8], + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!(fork_name, Self, <_>::from_ssz_bytes(bytes)?)) } /// Try decoding each beacon block variant in sequence. diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index a88fbe7a23..2a69103a3f 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -4,6 +4,7 @@ use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_utils::quoted_u64::MaybeQuoted; +use ssz::Encode; use std::fs::File; use std::path::Path; use std::time::Duration; @@ -172,22 +173,40 @@ pub struct ChainSpec { */ pub boot_nodes: Vec, pub network_id: u8, - pub attestation_propagation_slot_range: u64, - pub maximum_gossip_clock_disparity_millis: u64, pub target_aggregators_per_committee: u64, - pub attestation_subnet_count: u64, - pub subnets_per_node: u8, - pub epochs_per_subnet_subscription: u64, pub gossip_max_size: u64, + pub max_request_blocks: u64, + pub epochs_per_subnet_subscription: u64, pub min_epochs_for_block_requests: u64, pub max_chunk_size: u64, pub ttfb_timeout: u64, pub resp_timeout: u64, + pub attestation_propagation_slot_range: u64, + pub maximum_gossip_clock_disparity_millis: u64, pub message_domain_invalid_snappy: [u8; 4], pub message_domain_valid_snappy: [u8; 4], + pub subnets_per_node: u8, + pub attestation_subnet_count: u64, pub attestation_subnet_extra_bits: u8, pub attestation_subnet_prefix_bits: u8, + /* + * Networking Deneb + */ + pub max_request_blocks_deneb: u64, + pub max_request_blob_sidecars: u64, + pub min_epochs_for_blob_sidecars_requests: u64, + pub blob_sidecar_subnet_count: u64, + + /* + * Networking Derived + * + * When adding fields here, make sure any 
values are derived again during `apply_to_chain_spec`. + */ + pub max_blocks_by_root_request: usize, + pub max_blocks_by_root_request_deneb: usize, + pub max_blobs_by_root_request: usize, + /* * Application params */ @@ -509,6 +528,25 @@ impl ChainSpec { Duration::from_secs(self.resp_timeout) } + pub fn max_blocks_by_root_request(&self, fork_name: ForkName) -> usize { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + self.max_blocks_by_root_request + } + ForkName::Deneb => self.max_blocks_by_root_request_deneb, + } + } + + pub fn max_request_blocks(&self, fork_name: ForkName) -> usize { + let max_request_blocks = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + self.max_request_blocks + } + ForkName::Deneb => self.max_request_blocks_deneb, + }; + max_request_blocks as usize + } + /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. pub fn mainnet() -> Self { Self { @@ -670,12 +708,12 @@ impl ChainSpec { */ boot_nodes: vec![], network_id: 1, // mainnet network id - attestation_propagation_slot_range: 32, + attestation_propagation_slot_range: default_attestation_propagation_slot_range(), attestation_subnet_count: 64, subnets_per_node: 2, - maximum_gossip_clock_disparity_millis: 500, + maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - epochs_per_subnet_subscription: 256, + epochs_per_subnet_subscription: default_epochs_per_subnet_subscription(), gossip_max_size: default_gossip_max_size(), min_epochs_for_block_requests: default_min_epochs_for_block_requests(), max_chunk_size: default_max_chunk_size(), @@ -685,6 +723,23 @@ impl ChainSpec { message_domain_valid_snappy: default_message_domain_valid_snappy(), attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), + 
max_request_blocks: default_max_request_blocks(), + + /* + * Networking Deneb Specific + */ + max_request_blocks_deneb: default_max_request_blocks_deneb(), + max_request_blob_sidecars: default_max_request_blob_sidecars(), + min_epochs_for_blob_sidecars_requests: default_min_epochs_for_blob_sidecars_requests(), + blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + + /* + * Derived Deneb Specific + */ + max_blocks_by_root_request: default_max_blocks_by_root_request(), + max_blocks_by_root_request_deneb: default_max_blocks_by_root_request_deneb(), + max_blobs_by_root_request: default_max_blobs_by_root_request(), + /* * Application specific */ @@ -914,12 +969,12 @@ impl ChainSpec { */ boot_nodes: vec![], network_id: 100, // Gnosis Chain network id - attestation_propagation_slot_range: 32, + attestation_propagation_slot_range: default_attestation_propagation_slot_range(), attestation_subnet_count: 64, subnets_per_node: 4, // Make this larger than usual to avoid network damage - maximum_gossip_clock_disparity_millis: 500, + maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - epochs_per_subnet_subscription: 256, + epochs_per_subnet_subscription: default_epochs_per_subnet_subscription(), gossip_max_size: default_gossip_max_size(), min_epochs_for_block_requests: default_min_epochs_for_block_requests(), max_chunk_size: default_max_chunk_size(), @@ -929,6 +984,22 @@ impl ChainSpec { message_domain_valid_snappy: default_message_domain_valid_snappy(), attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), + max_request_blocks: default_max_request_blocks(), + + /* + * Networking Deneb Specific + */ + max_request_blocks_deneb: default_max_request_blocks_deneb(), + max_request_blob_sidecars: default_max_request_blob_sidecars(), + min_epochs_for_blob_sidecars_requests: 
default_min_epochs_for_blob_sidecars_requests(), + blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + + /* + * Derived Deneb Specific + */ + max_blocks_by_root_request: default_max_blocks_by_root_request(), + max_blocks_by_root_request_deneb: default_max_blocks_by_root_request_deneb(), + max_blobs_by_root_request: default_max_blobs_by_root_request(), /* * Application specific @@ -1054,6 +1125,12 @@ pub struct Config { #[serde(default = "default_gossip_max_size")] #[serde(with = "serde_utils::quoted_u64")] gossip_max_size: u64, + #[serde(default = "default_max_request_blocks")] + #[serde(with = "serde_utils::quoted_u64")] + max_request_blocks: u64, + #[serde(default = "default_epochs_per_subnet_subscription")] + #[serde(with = "serde_utils::quoted_u64")] + epochs_per_subnet_subscription: u64, #[serde(default = "default_min_epochs_for_block_requests")] #[serde(with = "serde_utils::quoted_u64")] min_epochs_for_block_requests: u64, @@ -1066,6 +1143,12 @@ pub struct Config { #[serde(default = "default_resp_timeout")] #[serde(with = "serde_utils::quoted_u64")] resp_timeout: u64, + #[serde(default = "default_attestation_propagation_slot_range")] + #[serde(with = "serde_utils::quoted_u64")] + attestation_propagation_slot_range: u64, + #[serde(default = "default_maximum_gossip_clock_disparity_millis")] + #[serde(with = "serde_utils::quoted_u64")] + maximum_gossip_clock_disparity_millis: u64, #[serde(default = "default_message_domain_invalid_snappy")] #[serde(with = "serde_utils::bytes_4_hex")] message_domain_invalid_snappy: [u8; 4], @@ -1078,6 +1161,18 @@ pub struct Config { #[serde(default = "default_attestation_subnet_prefix_bits")] #[serde(with = "serde_utils::quoted_u8")] attestation_subnet_prefix_bits: u8, + #[serde(default = "default_max_request_blocks_deneb")] + #[serde(with = "serde_utils::quoted_u64")] + max_request_blocks_deneb: u64, + #[serde(default = "default_max_request_blob_sidecars")] + #[serde(with = "serde_utils::quoted_u64")] + 
max_request_blob_sidecars: u64, + #[serde(default = "default_min_epochs_for_blob_sidecars_requests")] + #[serde(with = "serde_utils::quoted_u64")] + min_epochs_for_blob_sidecars_requests: u64, + #[serde(default = "default_blob_sidecar_subnet_count")] + #[serde(with = "serde_utils::quoted_u64")] + blob_sidecar_subnet_count: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1163,6 +1258,70 @@ const fn default_attestation_subnet_prefix_bits() -> u8 { 6 } +const fn default_max_request_blocks() -> u64 { + 1024 +} + +const fn default_max_request_blocks_deneb() -> u64 { + 128 +} + +const fn default_max_request_blob_sidecars() -> u64 { + 768 +} + +const fn default_min_epochs_for_blob_sidecars_requests() -> u64 { + 4096 +} + +const fn default_blob_sidecar_subnet_count() -> u64 { + 6 +} + +const fn default_epochs_per_subnet_subscription() -> u64 { + 256 +} + +const fn default_attestation_propagation_slot_range() -> u64 { + 32 +} + +const fn default_maximum_gossip_clock_disparity_millis() -> u64 { + 500 +} + +fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { + let max_request_blocks = max_request_blocks as usize; + RuntimeVariableList::::from_vec( + vec![Hash256::zero(); max_request_blocks], + max_request_blocks, + ) + .as_ssz_bytes() + .len() +} + +fn max_blobs_by_root_request_common(max_request_blob_sidecars: u64) -> usize { + let max_request_blob_sidecars = max_request_blob_sidecars as usize; + RuntimeVariableList::::from_vec( + vec![Hash256::zero(); max_request_blob_sidecars], + max_request_blob_sidecars, + ) + .as_ssz_bytes() + .len() +} + +fn default_max_blocks_by_root_request() -> usize { + max_blocks_by_root_request_common(default_max_request_blocks()) +} + +fn default_max_blocks_by_root_request_deneb() -> usize { + max_blocks_by_root_request_common(default_max_request_blocks_deneb()) +} + +fn default_max_blobs_by_root_request() -> usize { + max_blobs_by_root_request_common(default_max_request_blob_sidecars()) +} + impl Default for 
Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); @@ -1265,14 +1424,22 @@ impl Config { deposit_contract_address: spec.deposit_contract_address, gossip_max_size: spec.gossip_max_size, + max_request_blocks: spec.max_request_blocks, + epochs_per_subnet_subscription: spec.epochs_per_subnet_subscription, min_epochs_for_block_requests: spec.min_epochs_for_block_requests, max_chunk_size: spec.max_chunk_size, ttfb_timeout: spec.ttfb_timeout, resp_timeout: spec.resp_timeout, + attestation_propagation_slot_range: spec.attestation_propagation_slot_range, + maximum_gossip_clock_disparity_millis: spec.maximum_gossip_clock_disparity_millis, message_domain_invalid_snappy: spec.message_domain_invalid_snappy, message_domain_valid_snappy: spec.message_domain_valid_snappy, attestation_subnet_extra_bits: spec.attestation_subnet_extra_bits, attestation_subnet_prefix_bits: spec.attestation_subnet_prefix_bits, + max_request_blocks_deneb: spec.max_request_blocks_deneb, + max_request_blob_sidecars: spec.max_request_blob_sidecars, + min_epochs_for_blob_sidecars_requests: spec.min_epochs_for_blob_sidecars_requests, + blob_sidecar_subnet_count: spec.blob_sidecar_subnet_count, } } @@ -1329,6 +1496,14 @@ impl Config { message_domain_valid_snappy, attestation_subnet_extra_bits, attestation_subnet_prefix_bits, + max_request_blocks, + epochs_per_subnet_subscription, + attestation_propagation_slot_range, + maximum_gossip_clock_disparity_millis, + max_request_blocks_deneb, + max_request_blob_sidecars, + min_epochs_for_blob_sidecars_requests, + blob_sidecar_subnet_count, } = self; if preset_base != T::spec_name().to_string().as_str() { @@ -1378,6 +1553,22 @@ impl Config { message_domain_valid_snappy, attestation_subnet_extra_bits, attestation_subnet_prefix_bits, + max_request_blocks, + epochs_per_subnet_subscription, + attestation_propagation_slot_range, + maximum_gossip_clock_disparity_millis, + max_request_blocks_deneb, + max_request_blob_sidecars, + 
min_epochs_for_blob_sidecars_requests, + blob_sidecar_subnet_count, + + // We need to re-derive any values that might have changed in the config. + max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks), + max_blocks_by_root_request_deneb: max_blocks_by_root_request_common( + max_request_blocks_deneb, + ), + max_blobs_by_root_request: max_blobs_by_root_request_common(max_request_blob_sidecars), + ..chain_spec.clone() }) } diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index f93c75ee8d..a9377bc3e0 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -22,11 +22,3 @@ pub mod altair { pub mod merge { pub const INTERVALS_PER_SLOT: u64 = 3; } -pub mod deneb { - use crate::Epoch; - - pub const VERSIONED_HASH_VERSION_KZG: u8 = 1; - pub const BLOB_SIDECAR_SUBNET_COUNT: u64 = 6; - pub const MAX_BLOBS_PER_BLOCK: u64 = BLOB_SIDECAR_SUBNET_COUNT; - pub const MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: Epoch = Epoch::new(4096); -} diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 23163f0eec..9992892714 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -9,6 +9,7 @@ pub struct ForkContext { current_fork: RwLock, fork_to_digest: HashMap, digest_to_fork: HashMap<[u8; 4], ForkName>, + pub spec: ChainSpec, } impl ForkContext { @@ -73,6 +74,7 @@ impl ForkContext { current_fork: RwLock::new(spec.fork_name_at_slot::(current_slot)), fork_to_digest, digest_to_fork, + spec: spec.clone(), } } diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index 2d97dc1219..195c083e29 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ b/consensus/types/src/fork_versioned_response.rs @@ -4,47 +4,6 @@ use serde::{Deserialize, Deserializer, Serialize}; use serde_json::value::Value; use std::sync::Arc; -// Deserialize is only implemented for types that implement 
ForkVersionDeserialize -#[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ExecutionOptimisticFinalizedForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub execution_optimistic: Option, - pub finalized: Option, - pub data: T, -} - -impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticFinalizedForkVersionedResponse -where - F: ForkVersionDeserialize, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - struct Helper { - version: Option, - execution_optimistic: Option, - finalized: Option, - data: serde_json::Value, - } - - let helper = Helper::deserialize(deserializer)?; - let data = match helper.version { - Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, - None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, - }; - - Ok(ExecutionOptimisticFinalizedForkVersionedResponse { - version: helper.version, - execution_optimistic: helper.execution_optimistic, - finalized: helper.finalized, - data, - }) - } -} - pub trait ForkVersionDeserialize: Sized + DeserializeOwned { fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: Value, @@ -52,17 +11,41 @@ pub trait ForkVersionDeserialize: Sized + DeserializeOwned { ) -> Result; } -// Deserialize is only implemented for types that implement ForkVersionDeserialize +/// Deserialize is only implemented for types that implement ForkVersionDeserialize. +/// +/// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than +/// version. If you *do* care about adding other fields you can mix in any type that implements +/// `Deserialize`. 
#[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ForkVersionedResponse { +pub struct ForkVersionedResponse { #[serde(skip_serializing_if = "Option::is_none")] pub version: Option, + #[serde(flatten)] + pub metadata: M, pub data: T, } -impl<'de, F> serde::Deserialize<'de> for ForkVersionedResponse +/// Metadata type similar to unit (i.e. `()`) but deserializes from a map (`serde_json::Value`). +/// +/// Unfortunately the braces are semantically significant, i.e. `struct EmptyMetadata;` does not +/// work. +#[derive(Debug, PartialEq, Clone, Default, Deserialize, Serialize)] +pub struct EmptyMetadata {} + +/// Fork versioned response with extra information about finalization & optimistic execution. +pub type ExecutionOptimisticFinalizedForkVersionedResponse = + ForkVersionedResponse; + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct ExecutionOptimisticFinalizedMetadata { + pub execution_optimistic: Option, + pub finalized: Option, +} + +impl<'de, F, M> serde::Deserialize<'de> for ForkVersionedResponse where F: ForkVersionDeserialize, + M: DeserializeOwned, { fn deserialize(deserializer: D) -> Result where @@ -71,6 +54,8 @@ where #[derive(Deserialize)] struct Helper { version: Option, + #[serde(flatten)] + metadata: serde_json::Value, data: serde_json::Value, } @@ -79,9 +64,11 @@ where Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, }; + let metadata = serde_json::from_value(helper.metadata).map_err(serde::de::Error::custom)?; Ok(ForkVersionedResponse { version: helper.version, + metadata, data, }) } @@ -98,6 +85,22 @@ impl ForkVersionDeserialize for Arc { } } +impl ForkVersionedResponse { + /// Apply a function to the inner `data`, potentially changing its type. 
+ pub fn map_data(self, f: impl FnOnce(T) -> U) -> ForkVersionedResponse { + let ForkVersionedResponse { + version, + metadata, + data, + } = self; + ForkVersionedResponse { + version, + metadata, + data: f(data), + } + } +} + #[cfg(test)] mod fork_version_response_tests { use crate::{ @@ -112,6 +115,7 @@ mod fork_version_response_tests { let response_json = serde_json::to_string(&json!(ForkVersionedResponse::> { version: Some(ForkName::Merge), + metadata: Default::default(), data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), })) .unwrap(); @@ -129,6 +133,7 @@ mod fork_version_response_tests { let response_json = serde_json::to_string(&json!(ForkVersionedResponse::> { version: Some(ForkName::Capella), + metadata: Default::default(), data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), })) .unwrap(); diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index b2d5e2547f..6edd4a731d 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -101,6 +101,8 @@ pub mod sqlite; pub mod blob_sidecar; pub mod light_client_header; +pub mod non_zero_usize; +pub mod runtime_var_list; use ethereum_types::{H160, H256}; @@ -170,6 +172,7 @@ pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; +pub use crate::runtime_var_list::RuntimeVariableList; pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; @@ -217,7 +220,7 @@ pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; -pub use kzg::{KzgCommitment, KzgProof}; +pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; pub use milhouse::{self, Vector as 
FixedVector}; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, VariableList}; pub use superstruct::superstruct; diff --git a/consensus/types/src/non_zero_usize.rs b/consensus/types/src/non_zero_usize.rs new file mode 100644 index 0000000000..d61000c9a6 --- /dev/null +++ b/consensus/types/src/non_zero_usize.rs @@ -0,0 +1,8 @@ +use std::num::NonZeroUsize; + +pub const fn new_non_zero_usize(x: usize) -> NonZeroUsize { + match NonZeroUsize::new(x) { + Some(n) => n, + None => panic!("Expected a non zero usize."), + } +} diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs new file mode 100644 index 0000000000..84ad5d074e --- /dev/null +++ b/consensus/types/src/runtime_var_list.rs @@ -0,0 +1,137 @@ +use ssz::{Decode, Encode}; +use ssz_derive::Encode; + +#[derive(Debug, Clone, PartialEq, Encode)] +#[ssz(struct_behaviour = "transparent")] +pub struct RuntimeVariableList { + vec: Vec, + #[ssz(skip_serializing, skip_deserializing)] + max_len: usize, +} + +impl RuntimeVariableList { + pub fn new(vec: Vec, max_len: usize) -> Result { + if vec.len() <= max_len { + Ok(Self { vec, max_len }) + } else { + Err(ssz_types::Error::OutOfBounds { + i: vec.len(), + len: max_len, + }) + } + } + + pub fn from_vec(mut vec: Vec, max_len: usize) -> Self { + vec.truncate(max_len); + + Self { vec, max_len } + } + + pub fn to_vec(&self) -> Vec { + self.vec.clone() + } + + pub fn as_slice(&self) -> &[T] { + self.vec.as_slice() + } + + pub fn len(&self) -> usize { + self.vec.len() + } + + pub fn is_empty(&self) -> bool { + self.vec.is_empty() + } + + pub fn from_ssz_bytes(bytes: &[u8], max_len: usize) -> Result { + let vec = if bytes.is_empty() { + vec![] + } else if ::is_ssz_fixed_len() { + let num_items = bytes + .len() + .checked_div(::ssz_fixed_len()) + .ok_or(ssz::DecodeError::ZeroLengthItem)?; + + if num_items > max_len { + return Err(ssz::DecodeError::BytesInvalid(format!( + "VariableList of {} items exceeds maximum of {}", + 
num_items, max_len + ))); + } + + bytes + .chunks(::ssz_fixed_len()) + .try_fold(Vec::with_capacity(num_items), |mut vec, chunk| { + vec.push(::from_ssz_bytes(chunk)?); + Ok(vec) + }) + .map(Into::into)? + } else { + ssz::decode_list_of_variable_length_items(bytes, Some(max_len))? + }; + Ok(Self { vec, max_len }) + } +} + +#[cfg(test)] +mod test { + use ssz_types::{typenum::U4, VariableList}; + + use super::*; + + #[test] + fn new() { + let vec = vec![42; 5]; + let runtime_var_list: Result, _> = + RuntimeVariableList::new(vec, 4); + assert!(runtime_var_list.is_err()); + + let vec = vec![42; 3]; + let runtime_var_list: Result, _> = + RuntimeVariableList::new(vec, 4); + assert!(runtime_var_list.is_ok()); + + let vec = vec![42; 4]; + let runtime_var_list: Result, _> = + RuntimeVariableList::new(vec, 4); + assert!(runtime_var_list.is_ok()); + } + + #[test] + fn length() { + let vec = vec![42; 3]; + let runtime_var_list: RuntimeVariableList = + RuntimeVariableList::new(vec.clone(), 4).unwrap(); + let var_list: VariableList = VariableList::from(vec.clone()); + assert_eq!(&runtime_var_list.as_slice()[0..3], &vec[..]); + assert_eq!(runtime_var_list.as_slice(), &vec![42, 42, 42][..]); + assert_eq!(runtime_var_list.len(), var_list.len()); + + let vec = vec![]; + let runtime_var_list: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); + assert_eq!(runtime_var_list.as_slice(), &[] as &[u64]); + assert!(runtime_var_list.is_empty()); + } + + #[test] + fn encode() { + let runtime_var_list: RuntimeVariableList = + RuntimeVariableList::new(vec![0; 2], 2).unwrap(); + + assert_eq!(runtime_var_list.as_ssz_bytes(), vec![0, 0, 0, 0]); + assert_eq!( as Encode>::ssz_fixed_len(), 4); + } + + #[test] + fn round_trip() { + let item = RuntimeVariableList::::new(vec![42; 8], 8).unwrap(); + let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); + assert_eq!(RuntimeVariableList::from_ssz_bytes(encoded, 8), Ok(item)); + + let item = 
RuntimeVariableList::::new(vec![0; 8], 8).unwrap(); + let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); + assert_eq!(RuntimeVariableList::from_ssz_bytes(encoded, 8), Ok(item)); + } +} diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index cb5212aeaa..0e096ba55c 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -4,12 +4,15 @@ mod trusted_setup; use std::fmt::Debug; -pub use crate::{kzg_commitment::KzgCommitment, kzg_proof::KzgProof, trusted_setup::TrustedSetup}; +pub use crate::{ + kzg_commitment::{KzgCommitment, VERSIONED_HASH_VERSION_KZG}, + kzg_proof::KzgProof, + trusted_setup::TrustedSetup, +}; pub use c_kzg::{ Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, }; - #[derive(Debug)] pub enum Error { /// An error from the underlying kzg library. diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index cb2a91bf8c..708773e46a 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -19,6 +19,7 @@ use std::string::ToString; use std::time::Duration; use store::hdiff::HierarchyConfig; use tempfile::TempDir; +use types::non_zero_usize::new_non_zero_usize; use types::{ Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, ProgressiveBalancesMode, @@ -93,6 +94,22 @@ fn staking_flag() { }); } +#[test] +fn allow_insecure_genesis_sync() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.allow_insecure_genesis_sync, false); + }); + + CommandLineTest::new() + .flag("allow-insecure-genesis-sync", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.allow_insecure_genesis_sync, true); + }); +} + #[test] fn wss_checkpoint_flag() { let state = Some(Checkpoint { @@ -611,102 +628,6 @@ fn builder_fallback_flags() { assert_eq!(config.chain.builder_fallback_disable_checks, true); 
}, ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - Some("builder-profit-threshold"), - Some("1000000000000000000000000"), - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .builder_profit_threshold, - 1000000000000000000000000 - ); - }, - ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - None, - None, - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .builder_profit_threshold, - 0 - ); - }, - ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - Some("always-prefer-builder-payload"), - None, - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .always_prefer_builder_payload, - true - ); - }, - ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - None, - None, - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .always_prefer_builder_payload, - false - ); - }, - ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - Some("ignore-builder-override-suggestion-threshold"), - Some("53.4"), - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .ignore_builder_override_suggestion_threshold, - 53.4f32 - ); - }, - ); - run_payload_builder_flag_test_with_config( - "builder", - "http://meow.cats", - None, - None, - |config| { - assert_eq!( - config - .execution_layer - .as_ref() - .unwrap() - .ignore_builder_override_suggestion_threshold, - 10.0f32 - ); - }, - ); } #[test] @@ -1769,14 +1690,19 @@ fn block_cache_size_flag() { CommandLineTest::new() .flag("block-cache-size", Some("4")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.block_cache_size, 4_usize)); + .with_config(|config| assert_eq!(config.store.block_cache_size, new_non_zero_usize(4))); } #[test] fn historic_state_cache_size_flag() { CommandLineTest::new() 
.flag("historic-state-cache-size", Some("4")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.historic_state_cache_size, 4_usize)); + .with_config(|config| { + assert_eq!( + config.store.historic_state_cache_size, + new_non_zero_usize(4) + ) + }); } #[test] fn historic_state_cache_size_default() { @@ -2015,7 +1941,10 @@ fn slasher_attestation_cache_size_flag() { .slasher .as_ref() .expect("Unable to parse Slasher config"); - assert_eq!(slasher_config.attestation_root_cache_size, 10000); + assert_eq!( + slasher_config.attestation_root_cache_size, + new_non_zero_usize(10000) + ); }); } #[test] @@ -2572,3 +2501,22 @@ fn genesis_state_url_value() { assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42)); }); } + +#[test] +fn disable_duplicate_warn_logs_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.network.disable_duplicate_warn_logs, false); + }); +} + +#[test] +fn disable_duplicate_warn_logs() { + CommandLineTest::new() + .flag("disable-duplicate-warn-logs", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.network.disable_duplicate_warn_logs, true); + }); +} diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 4234de613d..025188fed4 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -421,6 +421,21 @@ fn no_doppelganger_protection_flag() { .run() .with_config(|config| assert!(!config.enable_doppelganger_protection)); } +#[test] +fn produce_block_v3_flag() { + CommandLineTest::new() + .flag("produce-block-v3", None) + .run() + .with_config(|config| assert!(config.produce_block_v3)); +} + +#[test] +fn no_produce_block_v3_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.produce_block_v3)); +} + #[test] fn no_gas_limit_flag() { CommandLineTest::new() diff --git a/scripts/cli.sh b/scripts/cli.sh index d9def7624f..768ec7b301 100755 
--- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -2,7 +2,7 @@ # IMPORTANT # This script should NOT be run directly. -# Run `make cli` from the root of the repository instead. +# Run `make cli` or `make cli-local` from the root of the repository instead. set -e @@ -90,7 +90,7 @@ rm -f help_general.md help_bn.md help_vc.md help_am.md help_vm.md help_vm_create # only exit at the very end if [[ $changes == true ]]; then - echo "Exiting with error to indicate changes occurred..." + echo "Exiting with error to indicate changes occurred. To fix, run 'make cli-local' or 'make cli' and commit the changes." exit 1 else echo "CLI help texts are up to date." diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 87565b0cae..2862fde075 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -187,10 +187,9 @@ Update the genesis time to now using: 1. Add builder URL to `BN_ARGS` in `./vars.env`, e.g. `--builder http://localhost:8650`. Some mock builder server options: - [`mock-relay`](https://github.com/realbigsean/mock-relay) - [`dummy-builder`](https://github.com/michaelsproul/dummy_builder) -2. (Optional) Add `--always-prefer-builder-payload` to `BN_ARGS`. -3. The above mock builders do not support non-mainnet presets as of now, and will require setting `SECONDS_PER_SLOT` and `SECONDS_PER_ETH1_BLOCK` to `12` in `./vars.env`. -4. Start the testnet with the following command (the `-p` flag enables the validator client `--builder-proposals` flag): +2. The above mock builders do not support non-mainnet presets as of now, and will require setting `SECONDS_PER_SLOT` and `SECONDS_PER_ETH1_BLOCK` to `12` in `./vars.env`. +3. Start the testnet with the following command (the `-p` flag enables the validator client `--builder-proposals` flag): ```bash ./start_local_testnet.sh -p genesis.json ``` -5. Block production using builder flow will start at epoch 4. +4. Block production using builder flow will start at epoch 4.
diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 894760d277..4fd74343e7 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -1,7 +1,9 @@ use crate::Error; use serde::{Deserialize, Serialize}; +use std::num::NonZeroUsize; use std::path::PathBuf; use strum::{Display, EnumString, EnumVariantNames}; +use types::non_zero_usize::new_non_zero_usize; use types::{Epoch, EthSpec, IndexedAttestation}; pub const DEFAULT_CHUNK_SIZE: usize = 16; @@ -10,7 +12,7 @@ pub const DEFAULT_HISTORY_LENGTH: usize = 4096; pub const DEFAULT_UPDATE_PERIOD: u64 = 12; pub const DEFAULT_SLOT_OFFSET: f64 = 10.5; pub const DEFAULT_MAX_DB_SIZE: usize = 256 * 1024; // 256 GiB -pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: usize = 100_000; +pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(100_000); pub const DEFAULT_BROADCAST: bool = false; #[cfg(all(feature = "mdbx", not(feature = "lmdb")))] @@ -38,7 +40,7 @@ pub struct Config { /// Maximum size of the database in megabytes. pub max_db_size_mbs: usize, /// Maximum size of the in-memory cache for attestation roots. - pub attestation_root_cache_size: usize, + pub attestation_root_cache_size: NonZeroUsize, /// Whether to broadcast slashings found to the network. pub broadcast: bool, /// Database backend to use. 
diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 42667f27c6..b0701e80a1 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -334,6 +334,7 @@ impl TestRig { builder_params, TEST_FORK, &self.spec, + None, BlockProductionVersion::FullV2, ) .await @@ -485,6 +486,7 @@ impl TestRig { builder_params, TEST_FORK, &self.spec, + None, BlockProductionVersion::FullV2, ) .await diff --git a/testing/network_testing/README.md b/testing/network_testing/README.md new file mode 100644 index 0000000000..3591514682 --- /dev/null +++ b/testing/network_testing/README.md @@ -0,0 +1,74 @@ +# Lighthouse live network testing + + +## DISCLAIMER + +This document describes how to run a lighthouse node with minimal resources and time on a live +network. + +This procedure should ONLY be used for testing networks and never in production and never with +attached validators. The Lighthouse node described in this state is only a partially functioning +node. + + +## Overview + +We are going to run a single lighthouse node connected to a live network, without syncing and +without an execution engine. This should only ever be done for testing. + +There are two main components needed. + +1. A lighthouse node that doesn't sync +2. A fake execution client that does nothing + +We will start with the second. + +## Mock-EL + +This is a service that runs and fakes an execution engine. We firstly need to install the lighthouse +`lcli` tool. + +``` +$ make install-lcli +``` + +Once installed, run the fake execution client: + +``` +$ lcli mock-el --jwt-output-path /tmp/mockel.jwt +``` + +This will create a server listening on localhost:8551 + +## Lighthouse no sync + +To create a lighthouse node that doesn't sync we need to compile it with a special flag.
+ +``` +$ cargo build --release --bin lighthouse --features network/disable-backfill +``` + +Once built, it can run via checkpoint sync on any network, making sure we point to our mock-el + +Prater testnet: + +``` +$ lighthouse --network prater bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url +https://prater.checkpoint.sigp.io --execution-endpoint http://localhost:8551 +``` + +Mainnet: + +``` +$ lighthouse --network mainnet bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url +https://checkpoint.sigp.io --execution-endpoint http://localhost:8551 +``` + +Additional flags, such as metrics may be added. + + +## Additional Notes + +The above is assuming that you have not run the command in the past. If you have a database in +existence for the network you are testing, checkpoint sync will not start. You may need to add the +`--purge-db` flag to remove any past database and force checkpoint sync to run. diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 00d9b2e86d..b65e301de8 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -28,7 +28,10 @@ use types::{ #[derive(Debug)] pub enum BlockError { + /// A recoverable error that can be retried, as the validator has not signed anything. Recoverable(String), + /// An irrecoverable error has occurred during block proposal and should not be retried, as a + /// block may have already been signed.
Irrecoverable(String), } @@ -320,174 +323,138 @@ impl BlockService { ) } - for validator_pubkey in proposers { - let builder_proposals = self - .validator_store - .get_builder_proposals(&validator_pubkey); - let service = self.clone(); - let log = log.clone(); - self.inner.context.executor.spawn( - async move { - if builder_proposals { - let result = service.publish_block(slot, validator_pubkey, true).await; + if self.validator_store.produce_block_v3() { + for validator_pubkey in proposers { + let builder_proposals = self + .validator_store + .get_builder_proposals(&validator_pubkey); + // Translate `builder_proposals` to a boost factor. Builder proposals set to `true` + // requires no boost factor, it just means "use a builder proposal if the BN returns + // one". On the contrary, `builder_proposals: false` indicates a preference for + // local payloads, so we set the builder boost factor to 0. + let builder_boost_factor = if !builder_proposals { Some(0) } else { None }; + let service = self.clone(); + let log = log.clone(); + self.inner.context.executor.spawn( + async move { + let result = service + .publish_block_v3(slot, validator_pubkey, builder_boost_factor) + .await; + match result { - Err(BlockError::Recoverable(e)) => { + Ok(_) => {} + Err(BlockError::Recoverable(e)) | Err(BlockError::Irrecoverable(e)) => { error!( log, "Error whilst producing block"; "error" => ?e, "block_slot" => ?slot, - "info" => "blinded proposal failed, attempting full block" + "info" => "block v3 proposal failed, this error may or may not result in a missed block" ); - if let Err(e) = - service.publish_block(slot, validator_pubkey, false).await - { - // Log a `crit` since a full block - // (non-builder) proposal failed. 
- crit!( + } + } + }, + "block service", + ) + } + } else { + for validator_pubkey in proposers { + let builder_proposals = self + .validator_store + .get_builder_proposals(&validator_pubkey); + let service = self.clone(); + let log = log.clone(); + self.inner.context.executor.spawn( + async move { + if builder_proposals { + let result = service + .publish_block(slot, validator_pubkey, true) + .await; + + match result { + Err(BlockError::Recoverable(e)) => { + error!( log, "Error whilst producing block"; "error" => ?e, "block_slot" => ?slot, - "info" => "full block attempted after a blinded failure", + "info" => "blinded proposal failed, attempting full block" ); + if let Err(e) = service + .publish_block(slot, validator_pubkey, false) + .await + { + // Log a `crit` since a full block + // (non-builder) proposal failed. + crit!( + log, + "Error whilst producing block"; + "error" => ?e, + "block_slot" => ?slot, + "info" => "full block attempted after a blinded failure", + ); + } } - } - Err(BlockError::Irrecoverable(e)) => { - // Only log an `error` since it's common for - // builders to timeout on their response, only - // to publish the block successfully themselves. - error!( + Err(BlockError::Irrecoverable(e)) => { + // Only log an `error` since it's common for + // builders to timeout on their response, only + // to publish the block successfully themselves. + error!( + log, + "Error whilst producing block"; + "error" => ?e, + "block_slot" => ?slot, + "info" => "this error may or may not result in a missed block", + ) + } + Ok(_) => {} + }; + } else if let Err(e) = service + .publish_block(slot, validator_pubkey, false) + .await + { + // Log a `crit` since a full block (non-builder) + // proposal failed. 
+ crit!( log, "Error whilst producing block"; - "error" => ?e, + "message" => ?e, "block_slot" => ?slot, - "info" => "this error may or may not result in a missed block", - ) + "info" => "proposal did not use a builder", + ); } - Ok(_) => {} - }; - } else if let Err(e) = - service.publish_block(slot, validator_pubkey, false).await - { - // Log a `crit` since a full block (non-builder) - // proposal failed. - crit!( - log, - "Error whilst producing block"; - "message" => ?e, - "block_slot" => ?slot, - "info" => "proposal did not use a builder", - ); - } - }, - "block service", - ); + }, + "block service", + ) + } } Ok(()) } - /// Produce a block at the given slot for validator_pubkey - async fn publish_block( + #[allow(clippy::too_many_arguments)] + async fn sign_and_publish_block( &self, + proposer_fallback: ProposerFallback, slot: Slot, - validator_pubkey: PublicKeyBytes, - builder_proposal: bool, + graffiti: Option, + validator_pubkey: &PublicKeyBytes, + unsigned_block: UnsignedBlock, ) -> Result<(), BlockError> { let log = self.context.log(); - let _timer = - metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK]); - - let current_slot = self.slot_clock.now().ok_or_else(|| { - BlockError::Recoverable("Unable to determine current slot from clock".to_string()) - })?; - - let randao_reveal = match self - .validator_store - .randao_reveal(validator_pubkey, slot.epoch(E::slots_per_epoch())) - .await - { - Ok(signature) => signature.into(), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently removed - // via the API. 
- warn!( - log, - "Missing pubkey for block randao"; - "info" => "a validator may have recently been removed from this VC", - "pubkey" => ?pubkey, - "slot" => ?slot - ); - return Ok(()); - } - Err(e) => { - return Err(BlockError::Recoverable(format!( - "Unable to produce randao reveal signature: {:?}", - e - ))) - } - }; - - let graffiti = determine_graffiti( - &validator_pubkey, - log, - self.graffiti_file.clone(), - self.validator_store.graffiti(&validator_pubkey), - self.graffiti, - ); - - let randao_reveal_ref = &randao_reveal; - let self_ref = &self; - let proposer_index = self.validator_store.validator_index(&validator_pubkey); - let validator_pubkey_ref = &validator_pubkey; - let proposer_fallback = ProposerFallback { - beacon_nodes: self.beacon_nodes.clone(), - proposer_nodes: self.proposer_nodes.clone(), - }; - - info!( - log, - "Requesting unsigned block"; - "slot" => slot.as_u64(), - ); - - // Request block from first responsive beacon node. - // - // Try the proposer nodes last, since it's likely that they don't have a - // great view of attestations on the network. 
- let unsigned_block = proposer_fallback - .request_proposers_last( - RequireSynced::No, - OfflineOnFailure::Yes, - move |beacon_node| { - Self::get_validator_block( - beacon_node, - slot, - randao_reveal_ref, - graffiti, - proposer_index, - builder_proposal, - log, - ) - }, - ) - .await?; - let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); let res = match unsigned_block { UnsignedBlock::Full(block_contents) => { let (block, maybe_blobs) = block_contents.deconstruct(); - self_ref - .validator_store - .sign_block(*validator_pubkey_ref, block, current_slot) + self.validator_store + .sign_block(*validator_pubkey, block, slot) .await .map(|b| SignedBlock::Full(PublishBlockRequest::new(b, maybe_blobs))) } - UnsignedBlock::Blinded(block) => self_ref + UnsignedBlock::Blinded(block) => self .validator_store - .sign_block(*validator_pubkey_ref, block, current_slot) + .sign_block(*validator_pubkey, block, slot) .await .map(SignedBlock::Blinded), }; @@ -549,6 +516,205 @@ impl BlockService { "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), "slot" => signed_block.slot().as_u64(), ); + Ok(()) + } + + async fn publish_block_v3( + self, + slot: Slot, + validator_pubkey: PublicKeyBytes, + builder_boost_factor: Option, + ) -> Result<(), BlockError> { + let log = self.context.log(); + let _timer = + metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK]); + + let randao_reveal = match self + .validator_store + .randao_reveal(validator_pubkey, slot.epoch(E::slots_per_epoch())) + .await + { + Ok(signature) => signature.into(), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently removed + // via the API. 
+ warn!( + log, + "Missing pubkey for block randao"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "slot" => ?slot + ); + return Ok(()); + } + Err(e) => { + return Err(BlockError::Recoverable(format!( + "Unable to produce randao reveal signature: {:?}", + e + ))) + } + }; + + let graffiti = determine_graffiti( + &validator_pubkey, + log, + self.graffiti_file.clone(), + self.validator_store.graffiti(&validator_pubkey), + self.graffiti, + ); + + let randao_reveal_ref = &randao_reveal; + let self_ref = &self; + let proposer_index = self.validator_store.validator_index(&validator_pubkey); + let proposer_fallback = ProposerFallback { + beacon_nodes: self.beacon_nodes.clone(), + proposer_nodes: self.proposer_nodes.clone(), + }; + + info!( + log, + "Requesting unsigned block"; + "slot" => slot.as_u64(), + ); + + // Request block from first responsive beacon node. + // + // Try the proposer nodes last, since it's likely that they don't have a + // great view of attestations on the network. 
+ let unsigned_block = proposer_fallback + .request_proposers_last( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + let block_response = Self::get_validator_block_v3( + beacon_node, + slot, + randao_reveal_ref, + graffiti, + proposer_index, + builder_boost_factor, + log, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + }); + + Ok::<_, BlockError>(block_response) + }, + ) + .await??; + + self_ref + .sign_and_publish_block( + proposer_fallback, + slot, + graffiti, + &validator_pubkey, + unsigned_block, + ) + .await?; + + Ok(()) + } + + /// Produce a block at the given slot for validator_pubkey + async fn publish_block( + &self, + slot: Slot, + validator_pubkey: PublicKeyBytes, + builder_proposal: bool, + ) -> Result<(), BlockError> { + let log = self.context.log(); + let _timer = + metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK]); + + let randao_reveal = match self + .validator_store + .randao_reveal(validator_pubkey, slot.epoch(E::slots_per_epoch())) + .await + { + Ok(signature) => signature.into(), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently removed + // via the API. 
+ warn!( + log, + "Missing pubkey for block"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "slot" => ?slot + ); + return Ok(()); + } + Err(e) => { + return Err(BlockError::Recoverable(format!( + "Unable to sign block: {:?}", + e + ))) + } + }; + + let graffiti = determine_graffiti( + &validator_pubkey, + log, + self.graffiti_file.clone(), + self.validator_store.graffiti(&validator_pubkey), + self.graffiti, + ); + + let randao_reveal_ref = &randao_reveal; + let self_ref = &self; + let proposer_index = self.validator_store.validator_index(&validator_pubkey); + let proposer_fallback = ProposerFallback { + beacon_nodes: self.beacon_nodes.clone(), + proposer_nodes: self.proposer_nodes.clone(), + }; + + info!( + log, + "Requesting unsigned block"; + "slot" => slot.as_u64(), + ); + + // Request block from first responsive beacon node. + // + // Try the proposer nodes last, since it's likely that they don't have a + // great view of attestations on the network. 
+ let unsigned_block = proposer_fallback + .request_proposers_last( + RequireSynced::No, + OfflineOnFailure::Yes, + move |beacon_node| { + Self::get_validator_block( + beacon_node, + slot, + randao_reveal_ref, + graffiti, + proposer_index, + builder_proposal, + log, + ) + }, + ) + .await?; + + self_ref + .sign_and_publish_block( + proposer_fallback, + slot, + graffiti, + &validator_pubkey, + unsigned_block, + ) + .await?; Ok(()) } @@ -585,6 +751,49 @@ impl BlockService { Ok::<_, BlockError>(()) } + async fn get_validator_block_v3( + beacon_node: &BeaconNodeHttpClient, + slot: Slot, + randao_reveal_ref: &SignatureBytes, + graffiti: Option, + proposer_index: Option, + builder_boost_factor: Option, + log: &Logger, + ) -> Result, BlockError> { + let (block_response, _) = beacon_node + .get_validator_blocks_v3::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })?; + + let unsigned_block = match block_response.data { + eth2::types::ProduceBlockV3Response::Full(block) => UnsignedBlock::Full(block), + eth2::types::ProduceBlockV3Response::Blinded(block) => UnsignedBlock::Blinded(block), + }; + + info!( + log, + "Received unsigned block"; + "slot" => slot.as_u64(), + ); + if proposer_index != Some(unsigned_block.proposer_index()) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. 
Beacon chain re-orged".to_string(), + )); + } + + Ok::<_, BlockError>(unsigned_block) + } + async fn get_validator_block( beacon_node: &BeaconNodeHttpClient, slot: Slot, diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 6957934fb8..cd3ad494da 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -136,6 +136,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("FEE-RECIPIENT") .takes_value(true) ) + .arg( + Arg::with_name("produce-block-v3") + .long("produce-block-v3") + .help("Enable block production via the block v3 endpoint for this validator client. \ + This should only be enabled when paired with a beacon node \ + that has this endpoint implemented. This flag will be enabled by default in \ + future.") + .takes_value(false) + ) /* REST API related arguments */ .arg( Arg::with_name("http") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 95d42d6d83..4b7da76428 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -75,6 +75,8 @@ pub struct Config { pub enable_latency_measurement_service: bool, /// Defines the number of validators per `validator/register_validator` request sent to the BN. pub validator_registration_batch_size: usize, + /// Enables block production via the block v3 endpoint. This configuration option can be removed post deneb. 
+ pub produce_block_v3: bool, } impl Default for Config { @@ -115,6 +117,7 @@ impl Default for Config { broadcast_topics: vec![ApiTopic::Subscriptions], enable_latency_measurement_service: true, validator_registration_batch_size: 500, + produce_block_v3: false, } } } @@ -339,6 +342,10 @@ impl Config { config.builder_proposals = true; } + if cli_args.is_present("produce-block-v3") { + config.produce_block_v3 = true; + } + config.gas_limit = cli_args .value_of("gas-limit") .map(|gas_limit| { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index d52247df4d..89fc037621 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -83,7 +83,7 @@ const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; -const HTTP_GET_VALIDATOR_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -311,8 +311,7 @@ impl ProductionValidatorClient { / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, - get_validator_block_ssz: slot_duration - / HTTP_GET_VALIDATOR_BLOCK_SSZ_TIMEOUT_QUOTIENT, + get_validator_block: slot_duration / HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT, } } else { Timeouts::set_all(slot_duration) diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 60155d8efb..19726c2aec 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -97,6 +97,7 @@ pub struct ValidatorStore { fee_recipient_process: Option
, gas_limit: Option, builder_proposals: bool, + produce_block_v3: bool, task_executor: TaskExecutor, _phantom: PhantomData, } @@ -128,6 +129,7 @@ impl ValidatorStore { fee_recipient_process: config.fee_recipient, gas_limit: config.gas_limit, builder_proposals: config.builder_proposals, + produce_block_v3: config.produce_block_v3, task_executor, _phantom: PhantomData, } @@ -336,6 +338,10 @@ impl ValidatorStore { self.spec.fork_at_epoch(epoch) } + pub fn produce_block_v3(&self) -> bool { + self.produce_block_v3 + } + /// Returns a `SigningMethod` for `validator_pubkey` *only if* that validator is considered safe /// by doppelganger protection. fn doppelganger_checked_signing_method( diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 67cbc3cc23..aaaf50aa40 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -21,7 +21,7 @@ types = { workspace = true } eth2 = { workspace = true } beacon_node = { workspace = true } tokio = { workspace = true } -axum = "0.6.18" +axum = "0.7" hyper = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } @@ -41,8 +41,7 @@ tokio-postgres = "0.7.5" http_api = { workspace = true } beacon_chain = { workspace = true } network = { workspace = true } -# TODO: update to 0.15 when released: https://github.com/testcontainers/testcontainers-rs/issues/497 -testcontainers = { git = "https://github.com/testcontainers/testcontainers-rs/", rev = "0f2c9851" } +testcontainers = "0.15" unused_port = { workspace = true } task_executor = { workspace = true } logging = { workspace = true } diff --git a/watch/src/cli.rs b/watch/src/cli.rs index a8e5f3716f..97dc217293 100644 --- a/watch/src/cli.rs +++ b/watch/src/cli.rs @@ -1,6 +1,5 @@ use crate::{config::Config, logger, server, updater}; use clap::{App, Arg}; -use tokio::sync::oneshot; pub const SERVE: &str = "serve"; pub const RUN_UPDATER: &str = "run-updater"; @@ -44,12 +43,9 @@ pub async fn run() -> Result<(), String> { (RUN_UPDATER, Some(_)) => 
updater::run_updater(config) .await .map_err(|e| format!("Failure: {:?}", e)), - (SERVE, Some(_)) => { - let (_shutdown_tx, shutdown_rx) = oneshot::channel(); - server::serve(config, shutdown_rx) - .await - .map_err(|e| format!("Failure: {:?}", e)) - } + (SERVE, Some(_)) => server::serve(config) + .await + .map_err(|e| format!("Failure: {:?}", e)), _ => Err("Unsupported subcommand. See --help".into()), } } diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs index d1542f7841..0db3df2a0d 100644 --- a/watch/src/server/error.rs +++ b/watch/src/server/error.rs @@ -3,12 +3,14 @@ use axum::Error as AxumError; use axum::{http::StatusCode, response::IntoResponse, Json}; use hyper::Error as HyperError; use serde_json::json; +use std::io::Error as IoError; #[derive(Debug)] pub enum Error { Axum(AxumError), Hyper(HyperError), Database(DbError), + IoError(IoError), BadRequest, NotFound, Other(String), @@ -43,6 +45,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: IoError) -> Self { + Error::IoError(e) + } +} + impl From for Error { fn from(e: String) -> Self { Error::Other(e) diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs index d8ae0eb6c6..25dd242aab 100644 --- a/watch/src/server/mod.rs +++ b/watch/src/server/mod.rs @@ -11,9 +11,8 @@ use axum::{ }; use eth2::types::ErrorMessage; use log::info; -use std::future::Future; -use std::net::SocketAddr; -use tokio::sync::oneshot; +use std::future::{Future, IntoFuture}; +use std::net::{SocketAddr, TcpListener}; pub use config::Config; pub use error::Error; @@ -22,7 +21,7 @@ mod config; mod error; mod handler; -pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Result<(), Error> { +pub async fn serve(config: FullConfig) -> Result<(), Error> { let db = database::build_connection_pool(&config.database)?; let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)? 
.ok_or_else(|| { @@ -32,9 +31,7 @@ pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Resul ) })?; - let server = start_server(&config, slots_per_epoch as u64, db, async { - let _ = shutdown.await; - })?; + let server = start_server(&config, slots_per_epoch as u64, db)?; server.await?; @@ -61,8 +58,7 @@ pub fn start_server( config: &FullConfig, slots_per_epoch: u64, pool: PgPool, - shutdown: impl Future + Send + Sync + 'static, -) -> Result> + 'static, Error> { +) -> Result> + 'static, Error> { let mut routes = Router::new() .route("/v1/slots", get(handler::get_slots_by_range)) .route("/v1/slots/:slot", get(handler::get_slot)) @@ -108,16 +104,13 @@ pub fn start_server( .layer(Extension(slots_per_epoch)); let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); - - let server = axum::Server::try_bind(&addr)?.serve(app.into_make_service()); - - let server = server.with_graceful_shutdown(async { - shutdown.await; - }); + let listener = TcpListener::bind(addr)?; + listener.set_nonblocking(true)?; + let serve = axum::serve(tokio::net::TcpListener::from_std(listener)?, app); info!("HTTP server listening on {}", addr); - Ok(server) + Ok(serve.into_future()) } // The default route indicating that no available routes matched the request. diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index dc0b8af6e3..0e29e7f0cd 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -17,7 +17,6 @@ use std::env; use std::net::SocketAddr; use std::time::Duration; use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; -use tokio::sync::oneshot; use tokio::{runtime, task::JoinHandle}; use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; use types::{Hash256, MainnetEthSpec, Slot}; @@ -188,11 +187,7 @@ impl TesterBuilder { /* * Spawn a Watch HTTP API. 
*/ - let (_watch_shutdown_tx, watch_shutdown_rx) = oneshot::channel(); - let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool, async { - let _ = watch_shutdown_rx.await; - }) - .unwrap(); + let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool).unwrap(); tokio::spawn(watch_server); let addr = SocketAddr::new( @@ -228,7 +223,6 @@ impl TesterBuilder { config: self.config, updater, _bn_network_rx: self._bn_network_rx, - _watch_shutdown_tx, } } async fn initialize_database(&self) -> PgPool { @@ -245,7 +239,6 @@ struct Tester { pub config: Config, pub updater: UpdateHandler, _bn_network_rx: NetworkReceivers, - _watch_shutdown_tx: oneshot::Sender<()>, } impl Tester {