diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f2ccaf438a..c3119db378 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -5,7 +5,6 @@ on: branches: - unstable - stable - - capella tags: - v* @@ -35,11 +34,6 @@ jobs: run: | echo "VERSION=latest" >> $GITHUB_ENV echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV - - name: Extract version (if capella) - if: github.event.ref == 'refs/heads/capella' - run: | - echo "VERSION=capella" >> $GITHUB_ENV - echo "VERSION_SUFFIX=" >> $GITHUB_ENV - name: Extract version (if tagged release) if: startsWith(github.event.ref, 'refs/tags') run: | diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml index 4d4e92ae14..8428c0a3b0 100644 --- a/.github/workflows/linkcheck.yml +++ b/.github/workflows/linkcheck.yml @@ -7,6 +7,7 @@ on: pull_request: paths: - 'book/**' + merge_group: jobs: linkcheck: diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index a522f2efb9..1ca1006c1f 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -6,6 +6,7 @@ on: branches: - unstable pull_request: + merge_group: jobs: run-local-testnet: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2e63b4d6c2..e6d79bd5ef 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -73,7 +73,7 @@ jobs: - uses: KyleMayes/install-llvm-action@v1 if: startsWith(matrix.arch, 'x86_64-windows') with: - version: "13.0" + version: "15.0" directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH if: startsWith(matrix.arch, 'x86_64-windows') diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 445f71fa09..27c91f2262 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -8,19 +8,20 @@ on: - trying - 'pr/*' pull_request: + merge_group: env: # Deny warnings in CI # Disable debug info (see 
https://github.com/sigp/lighthouse/issues/4005) RUSTFLAGS: "-D warnings -C debuginfo=0" # The Nightly version used for cargo-udeps, might need updating from time to time. - PINNED_NIGHTLY: nightly-2022-12-15 + PINNED_NIGHTLY: nightly-2023-04-16 # Prevent Github API rate limiting. LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} jobs: target-branch-check: name: target-branch-check runs-on: ubuntu-latest - if: github.event_name == 'pull_request' + if: github.event_name == 'pull_request' || github.event_name == 'merge_group' steps: - name: Check that the pull request is not targeting the stable branch run: test ${{ github.base_ref }} != "stable" @@ -83,7 +84,7 @@ jobs: run: choco install -y make - uses: KyleMayes/install-llvm-action@v1 with: - version: "13.0" + version: "15.0" directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ef23e1ed57..a408fcdd52 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,8 +45,8 @@ questions. 2. **Work in a feature branch** of your personal fork (github.com/YOUR_NAME/lighthouse) of the main repository (github.com/sigp/lighthouse). -3. Once you feel you have addressed the issue, **create a pull-request** to merge - your changes into the main repository. +3. Once you feel you have addressed the issue, **create a pull-request** with + `unstable` as the base branch to merge your changes into the main repository. 4. Wait for the repository maintainers to **review your changes** to ensure the issue is addressed satisfactorily. Optionally, mention your PR on [discord](https://discord.gg/cyAszAh). 
diff --git a/Cargo.lock b/Cargo.lock index 93d0a8c9bd..901c2e8aff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -90,6 +90,16 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + [[package]] name = "aes" version = "0.6.0" @@ -115,17 +125,14 @@ dependencies = [ ] [[package]] -name = "aes-gcm" -version = "0.8.0" +name = "aes" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" +checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" dependencies = [ - "aead 0.3.2", - "aes 0.6.0", - "cipher 0.2.5", - "ctr 0.6.0", - "ghash 0.3.1", - "subtle", + "cfg-if", + "cipher 0.4.4", + "cpufeatures", ] [[package]] @@ -142,6 +149,20 @@ dependencies = [ "subtle", ] +[[package]] +name = "aes-gcm" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" +dependencies = [ + "aead 0.5.2", + "aes 0.8.2", + "cipher 0.4.4", + "ctr 0.9.2", + "ghash 0.5.0", + "subtle", +] + [[package]] name = "aes-soft" version = "0.6.4" @@ -207,14 +228,14 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arbitrary" -version = "1.2.2" -source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba" +version = "1.3.0" +source = 
"git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" dependencies = [ "derive_arbitrary", ] @@ -227,9 +248,9 @@ checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -250,14 +271,14 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] name = "asn1-rs" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf6690c370453db30743b373a60ba498fc0d6d83b11f4abfd87a84a075db5dd4" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ "asn1-rs-derive 0.4.0", "asn1-rs-impl", @@ -266,7 +287,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -277,7 +298,7 @@ checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -289,7 +310,7 @@ checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -301,7 +322,7 @@ checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -312,64 +333,64 @@ checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "async-io" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg 1.1.0", + "cfg-if", "concurrent-queue", "futures-lite", - "libc", "log", "parking", "polling", + "rustix", "slab", - "socket2", + "socket2 0.4.9", "waker-fn", - "windows-sys 0.42.0", ] [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite 0.2.9", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -404,13 +425,14 @@ checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" [[package]] name = "attohttpc" -version = "0.10.1" 
+version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf13118df3e3dce4b5ac930641343b91b656e4e72c8f8325838b01a4b1c9d45" +checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" dependencies = [ "http", "log", "url", + "wildmatch", ] [[package]] @@ -433,7 +455,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -539,14 +561,14 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client#53690a711e33614d59d4d44fb09762b4699e2a4e" +source = "git+https://github.com/ralexstokes/beacon-api-client#30679e9e25d61731cde54e14cd8a3688a39d8e5b" dependencies = [ "ethereum-consensus", "http", @@ -571,10 +593,9 @@ dependencies = [ "environment", "eth1", "eth2", - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", "execution_layer", "exit-future", "fork_choice", @@ -605,6 +626,7 @@ dependencies = [ "sloggers", "slot_clock", "smallvec", + "ssz_types", "state_processing", "store", "strum", @@ -620,7 +642,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "4.0.1-rc.0" +version = "4.1.0" dependencies = [ "beacon_chain", "clap", @@ -731,9 +753,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = 
"3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -760,10 +782,10 @@ version = "0.2.0" dependencies = [ "arbitrary", "blst", - "eth2_hashing", - "eth2_serde_utils", - "eth2_ssz", "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", "hex", "milagro_bls", "rand 0.7.3", @@ -786,15 +808,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "bollard-stubs" +version = "1.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8" +dependencies = [ + "chrono", + "serde", + "serde_with", +] + [[package]] name = "boot_node" -version = "4.0.1-rc.0" +version = "4.1.0" dependencies = [ "beacon_node", "clap", "clap_utils", "eth2_network_config", - "eth2_ssz", + "ethereum_ssz", "hex", "lighthouse_network", "log", @@ -833,6 +866,7 @@ name = "builder_client" version = "0.1.0" dependencies = [ "eth2", + "lighthouse_version", "reqwest", "sensitive_url", "serde", @@ -891,14 +925,14 @@ dependencies = [ name = "cached_tree_hash" version = "0.1.0" dependencies = [ - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", "quickcheck", "quickcheck_macros", "smallvec", + "ssz_types", "tree_hash", ] @@ -967,14 +1001,15 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", "num-integer", "num-traits", + "serde", "time 0.1.45", "wasm-bindgen", "winapi", @@ -999,10 +1034,20 @@ dependencies = [ ] [[package]] -name = "clang-sys" -version = "1.4.0" +name = "cipher" +version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + +[[package]] +name = "clang-sys" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -1031,8 +1076,8 @@ dependencies = [ "clap", "dirs", "eth2_network_config", - "eth2_ssz", "ethereum-types 0.14.1", + "ethereum_ssz", "hex", "serde", "serde_json", @@ -1075,7 +1120,7 @@ dependencies = [ "state_processing", "store", "task_executor", - "time 0.3.17", + "time 0.3.20", "timer", "tokio", "types", @@ -1083,9 +1128,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.49" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] @@ -1112,7 +1157,7 @@ name = "compare_fields_derive" version = "0.2.0" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1124,21 +1169,11 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" -dependencies = [ - "cfg-if", - "wasm-bindgen", -] - [[package]] name = "const-oid" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "convert_case" @@ -1158,9 +1193,9 @@ dependencies = [ 
[[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "core2" @@ -1173,19 +1208,13 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" dependencies = [ "libc", ] -[[package]] -name = "cpuid-bool" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" - [[package]] name = "crc" version = "3.0.1" @@ -1248,9 +1277,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1258,9 +1287,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -1269,22 +1298,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = 
"46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg 1.1.0", "cfg-if", "crossbeam-utils", - "memoffset 0.7.1", + "memoffset 0.8.0", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] @@ -1314,6 +1343,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core 0.6.4", "typenum", ] @@ -1327,16 +1357,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "crypto-mac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "crypto-mac" version = "0.11.1" @@ -1349,9 +1369,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" +checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" dependencies = [ "csv-core", "itoa", @@ -1368,15 +1388,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" -dependencies = [ - "cipher 0.2.5", -] - [[package]] name = "ctr" version = "0.8.0" @@ -1386,6 +1397,15 @@ dependencies = [ "cipher 0.3.0", ] +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "ctrlc" version = "3.2.5" @@ -1411,9 +1431,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.0" +version = "4.0.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da00a7a9a4eb92a0a0f8e75660926d48f0d0f3c537e455c457bcdaa1e16b1ac" +checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" dependencies = [ "cfg-if", "fiat-crypto", @@ -1425,9 +1445,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.90" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8" +checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" dependencies = [ "cc", "cxxbridge-flags", @@ -1437,9 +1457,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.90" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38" +checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" dependencies = [ "cc", "codespan-reporting", @@ -1447,24 +1467,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn", + "syn 2.0.13", ] [[package]] name = "cxxbridge-flags" -version = "1.0.90" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03" +checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.90" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263" +checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" 
dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -1479,12 +1499,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ - "darling_core 0.14.3", - "darling_macro 0.14.3", + "darling_core 0.14.4", + "darling_macro 0.14.4", ] [[package]] @@ -1498,21 +1518,21 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] name = "darling_core" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] @@ -1523,18 +1543,18 @@ checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "darling_macro" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ - "darling_core 0.14.3", + "darling_core 0.14.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1580,7 +1600,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" dependencies = [ "data-encoding", - "syn", + "syn 1.0.109", ] [[package]] @@ -1621,8 +1641,8 @@ dependencies = [ name = "deposit_contract" version = "0.2.0" dependencies 
= [ - "eth2_ssz", "ethabi 16.0.0", + "ethereum_ssz", "hex", "reqwest", "serde_json", @@ -1658,11 +1678,11 @@ dependencies = [ [[package]] name = "der-parser" -version = "8.1.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs 0.5.2", "displaydoc", "nom 7.1.3", "num-bigint", @@ -1678,18 +1698,17 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "derive_arbitrary" -version = "1.2.2" -source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba" +version = "1.3.0" +source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce62e4cf2887b0f8860ae991#f002b99989b561ddce62e4cf2887b0f8860ae991" dependencies = [ - "darling 0.14.3", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1707,10 +1726,10 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" dependencies = [ - "darling 0.14.3", + "darling 0.14.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1720,7 +1739,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" dependencies = [ "derive_builder_core", - "syn", + "syn 1.0.109", ] [[package]] @@ -1733,7 +1752,44 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "diesel" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4391a22b19c916e50bec4d6140f29bdda3e3bb187223fe6e3ea0b6e4d1021c04" +dependencies = [ + "bitflags", + "byteorder", + "diesel_derives", + "itoa", + "pq-sys", + "r2d2", +] + +[[package]] +name = "diesel_derives" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad74fdcf086be3d4fdd142f67937678fe60ed431c3b2f08599e7687269410c4" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "diesel_migrations" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9ae22beef5e9d6fab9225ddb073c1c6c1a7a6ded5019d5da11d1e5c5adc34e2" +dependencies = [ + "diesel", + "migrations_internals", + "migrations_macros", ] [[package]] @@ -1751,7 +1807,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1830,7 +1886,7 @@ dependencies = [ "rand 0.8.5", "rlp", "smallvec", - "socket2", + "socket2 0.4.9", "tokio", "tokio-stream", "tokio-util 0.6.10", @@ -1848,14 +1904,14 @@ checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "dtoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c00704156a7de8df8da0911424e30c2049957b0a714542a44e05fe693dd85313" +checksum = "65d09067bfacaa79114679b279d7f5885b53295b1e2cfb4e79c8e4bd3d633169" [[package]] name = "ecdsa" @@ -1902,9 +1958,9 @@ dependencies = [ "compare_fields", "compare_fields_derive", "derivative", - "eth2_ssz", - "eth2_ssz_derive", "ethereum-types 0.14.1", + "ethereum_ssz", + "ethereum_ssz_derive", "execution_layer", "fork_choice", "fs2", @@ -2008,7 +2064,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn", + 
"syn 1.0.109", ] [[package]] @@ -2056,6 +2112,27 @@ dependencies = [ "types", ] +[[package]] +name = "errno" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.45.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "error-chain" version = "0.12.4" @@ -2073,8 +2150,8 @@ dependencies = [ "environment", "eth1_test_rig", "eth2", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "execution_layer", "futures", "hex", @@ -2117,9 +2194,9 @@ dependencies = [ "account_utils", "bytes", "eth2_keystore", - "eth2_serde_utils", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", "futures", "futures-util", "libsecp256k1", @@ -2146,25 +2223,13 @@ dependencies = [ "types", ] -[[package]] -name = "eth2_hashing" -version = "0.3.0" -dependencies = [ - "cpufeatures", - "lazy_static", - "ring", - "rustc-hex", - "sha2 0.10.6", - "wasm-bindgen-test", -] - [[package]] name = "eth2_interop_keypairs" version = "0.2.0" dependencies = [ "base64 0.13.1", "bls", - "eth2_hashing", + "ethereum_hashing", "hex", "lazy_static", "num-bigint", @@ -2213,62 +2278,13 @@ version = "0.2.0" dependencies = [ "discv5", "eth2_config", - "eth2_ssz", + "ethereum_ssz", "serde_yaml", "tempfile", "types", "zip", ] -[[package]] -name = "eth2_serde_utils" -version = "0.1.1" -dependencies = [ - "ethereum-types 0.14.1", - "hex", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "eth2_ssz" -version = "0.4.1" -dependencies = [ - "eth2_ssz_derive", - "ethereum-types 0.14.1", - "itertools", - "smallvec", -] - -[[package]] -name = 
"eth2_ssz_derive" -version = "0.3.1" -dependencies = [ - "darling 0.13.4", - "eth2_ssz", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "eth2_ssz_types" -version = "0.2.2" -dependencies = [ - "arbitrary", - "derivative", - "eth2_serde_utils", - "eth2_ssz", - "serde", - "serde_derive", - "serde_json", - "smallvec", - "tree_hash", - "tree_hash_derive", - "typenum", -] - [[package]] name = "eth2_wallet" version = "0.1.0" @@ -2366,7 +2382,7 @@ dependencies = [ "hex", "integer-sqrt", "multiaddr 0.14.0", - "multihash", + "multihash 0.16.3", "rand 0.8.5", "serde", "serde_json", @@ -2407,6 +2423,54 @@ dependencies = [ "uint", ] +[[package]] +name = "ethereum_hashing" +version = "1.0.0-beta.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233dc6f434ce680dbabf4451ee3380cec46cb3c45d66660445a435619710dd35" +dependencies = [ + "cpufeatures", + "lazy_static", + "ring", + "sha2 0.10.6", +] + +[[package]] +name = "ethereum_serde_utils" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f8cb04ea380a33e9c269fa5f8df6f2d63dee19728235f3e639e7674e038686a" +dependencies = [ + "ethereum-types 0.14.1", + "hex", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "ethereum_ssz" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32749e96305376af40d7a7ee8ea4c4c64c68d09ff94a81ab78c8d9bc7153c221" +dependencies = [ + "ethereum-types 0.14.1", + "itertools", + "smallvec", +] + +[[package]] +name = "ethereum_ssz_derive" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9cac7ef2107926cea34c0064056f9bb134d2085eef882388d151d2e59174cf0" +dependencies = [ + "darling 0.13.4", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "ethers-core" version = "1.0.2" @@ -2509,10 +2573,9 @@ dependencies = [ "bytes", "environment", "eth2", - "eth2_serde_utils", - "eth2_ssz", - 
"eth2_ssz_types", "ethereum-consensus", + "ethereum_serde_utils", + "ethereum_ssz", "ethers-core", "exit-future", "fork_choice", @@ -2535,6 +2598,7 @@ dependencies = [ "slog", "slot_clock", "ssz-rs", + "ssz_types", "state_processing", "strum", "superstruct 0.6.0", @@ -2598,18 +2662,18 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a214f5bb88731d436478f3ae1f8a277b62124089ba9fb67f4f93fb100ef73c90" +checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "field-offset" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e1c54951450cbd39f3dbcf1005ac413b49487dabf18a720ad2383eccfeffb92" +checksum = "a3cf3a800ff6e860c863ca6d4b16fd999db8b752819c1606884047b73e468535" dependencies = [ - "memoffset 0.6.5", - "rustc_version 0.3.3", + "memoffset 0.8.0", + "rustc_version 0.4.0", ] [[package]] @@ -2688,8 +2752,8 @@ name = "fork_choice" version = "0.1.0" dependencies = [ "beacon_chain", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "proto_array", "slog", "state_processing", @@ -2731,9 +2795,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -2746,9 +2810,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" 
+checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -2756,15 +2820,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -2774,9 +2838,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" @@ -2795,13 +2859,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -2817,15 +2881,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.26" 
+version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" @@ -2835,9 +2899,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -2862,9 +2926,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -2877,8 +2941,8 @@ dependencies = [ "environment", "eth1", "eth1_test_rig", - "eth2_hashing", - "eth2_ssz", + "ethereum_hashing", + "ethereum_ssz", "futures", "int_to_bytes", "merkle_proof", @@ -2917,16 +2981,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "ghash" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" -dependencies = [ - "opaque-debug", - "polyval 0.4.5", -] - [[package]] name = "ghash" version = "0.4.4" @@ -2938,10 +2992,20 @@ dependencies = [ ] [[package]] -name = "gimli" -version = "0.27.1" +name = "ghash" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" +checksum = 
"d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +dependencies = [ + "opaque-debug", + "polyval 0.6.0", +] + +[[package]] +name = "gimli" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "git-version" @@ -2962,7 +3026,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2984,9 +3048,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", @@ -3116,6 +3180,12 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -3147,16 +3217,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" -dependencies = [ - "crypto-mac 0.10.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.11.0" @@ -3200,9 +3260,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -3236,8 +3296,8 @@ dependencies = [ "environment", "eth1", "eth2", - "eth2_serde_utils", - "eth2_ssz", + "ethereum_serde_utils", + "ethereum_ssz", 
"execution_layer", "futures", "genesis", @@ -3313,9 +3373,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -3328,7 +3388,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.9", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -3363,16 +3423,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0c17cc76786e99f8d2f055c11159e7f0091c42474dcc3189fbab96072e873e6d" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows 0.46.0", ] [[package]] @@ -3445,9 +3505,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7abdbb86e485125dad06c2691e1e393bf3b08c7b743b43aa162a00fd39062e" +checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" dependencies = [ "async-io", "core-foundation", @@ -3459,18 +3519,18 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows", + "windows 0.34.0", ] [[package]] name = "igd" -version = "0.11.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd32c880165b2f776af0b38d206d1cabaebcf46c166ac6ae004a5d45f7d48ef" +checksum = "556b5a75cd4adb7c4ea21c64af1c48cefb2ce7d43dc4352c720a1fe47c21f355" dependencies = [ "attohttpc", "log", - "rand 0.7.3", + "rand 0.8.5", 
"url", "xmltree", ] @@ -3528,19 +3588,28 @@ checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", ] +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + [[package]] name = "instant" version = "0.1.12" @@ -3590,13 +3659,24 @@ dependencies = [ "webrtc-util", ] +[[package]] +name = "io-lifetimes" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" +dependencies = [ + "hermit-abi 0.3.1", + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "ipconfig" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ - "socket2", + "socket2 0.4.9", "widestring 0.5.1", "winapi", "winreg", @@ -3604,9 +3684,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itertools" @@ -3619,9 +3699,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jemalloc-ctl" @@ -3680,11 +3760,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.2.0" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.13.1", + "base64 0.21.0", "pem", "ring", "serde", @@ -3741,7 +3821,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.0.1-rc.0" +version = "4.1.0" dependencies = [ "account_utils", "beacon_chain", @@ -3755,8 +3835,8 @@ dependencies = [ "eth1_test_rig", "eth2", "eth2_network_config", - "eth2_ssz", "eth2_wallet", + "ethereum_ssz", "genesis", "int_to_bytes", "lighthouse_network", @@ -3801,15 +3881,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libflate" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05605ab2bce11bcfc0e9c635ff29ef8b2ea83f29be257ee7d730cac3ee373093" +checksum = "97822bf791bd4d5b403713886a5fbe8bf49520fe78e323b0dc480ca1a03e50b0" dependencies = [ "adler32", "crc32fast", @@ -3818,9 +3898,9 @@ dependencies = [ [[package]] name = "libflate_lz77" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a734c0493409afcd49deee13c006a04e3586b9761a03543c6272c9c51f2f5a" +checksum = "a52d3a8bfc85f250440e4424db7d857e241a3aebbbe301f3eb606ab15c39acbf" 
dependencies = [ "rle-decode-fast", ] @@ -3864,9 +3944,9 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.50.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e0a0d2f693675f49ded13c5d510c48b78069e23cbd9108d7ccd59f6dc568819" +checksum = "9c7b0104790be871edcf97db9bd2356604984e623a08d825c3f27852290266b8" dependencies = [ "bytes", "futures", @@ -3912,7 +3992,7 @@ dependencies = [ "libsecp256k1", "log", "multiaddr 0.14.0", - "multihash", + "multihash 0.16.3", "multistream-select 0.11.0", "p256", "parking_lot 0.12.1", @@ -3946,7 +4026,7 @@ dependencies = [ "libsecp256k1", "log", "multiaddr 0.16.0", - "multihash", + "multihash 0.16.3", "multistream-select 0.12.1", "once_cell", "p256", @@ -3965,6 +4045,34 @@ dependencies = [ "zeroize", ] +[[package]] +name = "libp2p-core" +version = "0.39.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b7f8b7d65c070a5a1b5f8f0510648189da08f787b8963f8e21219e0710733af" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "libp2p-identity", + "log", + "multiaddr 0.17.1", + "multihash 0.17.0", + "multistream-select 0.12.1", + "once_cell", + "parking_lot 0.12.1", + "pin-project", + "quick-protobuf", + "rand 0.8.5", + "rw-stream-sink", + "smallvec", + "thiserror", + "unsigned-varint 0.7.1", + "void", +] + [[package]] name = "libp2p-dns" version = "0.38.0" @@ -4030,6 +4138,24 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-identity" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a8ea433ae0cea7e3315354305237b9897afe45278b2118a7a57ca744e70fd27" +dependencies = [ + "bs58", + "ed25519-dalek", + "log", + "multiaddr 0.17.1", + "multihash 0.17.0", + "prost", + "quick-protobuf", + "rand 0.8.5", + "thiserror", + "zeroize", +] + [[package]] name = "libp2p-mdns" version = "0.42.0" @@ -4044,7 +4170,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2", + 
"socket2 0.4.9", "tokio", "trust-dns-proto", "void", @@ -4172,7 +4298,7 @@ checksum = "9d527d5827582abd44a6d80c07ff8b50b4ee238a8979e05998474179e79dc400" dependencies = [ "heck", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4187,19 +4313,20 @@ dependencies = [ "libc", "libp2p-core 0.38.0", "log", - "socket2", + "socket2 0.4.9", "tokio", ] [[package]] name = "libp2p-tls" -version = "0.1.0-alpha" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7905ce0d040576634e8a3229a7587cc8beab83f79db6023800f1792895defa8" +checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.38.0", + "libp2p-core 0.39.1", + "libp2p-identity", "rcgen 0.10.0", "ring", "rustls 0.20.8", @@ -4225,7 +4352,7 @@ dependencies = [ "libp2p-core 0.38.0", "libp2p-noise", "log", - "multihash", + "multihash 0.16.3", "prost", "prost-build", "prost-codec", @@ -4345,7 +4472,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.0.1-rc.0" +version = "4.1.0" dependencies = [ "account_manager", "account_utils", @@ -4360,8 +4487,8 @@ dependencies = [ "environment", "eth1", "eth2", - "eth2_hashing", "eth2_network_config", + "ethereum_hashing", "futures", "lazy_static", "lighthouse_metrics", @@ -4402,9 +4529,8 @@ dependencies = [ "dirs", "discv5", "error-chain", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", + "ethereum_ssz", + "ethereum_ssz_derive", "exit-future", "fnv", "futures", @@ -4429,6 +4555,7 @@ dependencies = [ "slog-term", "smallvec", "snap", + "ssz_types", "strum", "superstruct 0.5.0", "task_executor", @@ -4469,6 +4596,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" + [[package]] name = "lmdb-rkv" version = "0.14.0" @@ -4653,9 +4786,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg 1.1.0", ] @@ -4664,8 +4797,8 @@ dependencies = [ name = "merkle_proof" version = "0.2.0" dependencies = [ - "eth2_hashing", "ethereum-types 0.14.1", + "ethereum_hashing", "lazy_static", "quickcheck", "quickcheck_macros", @@ -4692,7 +4825,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -4712,6 +4845,27 @@ dependencies = [ "tracing", ] +[[package]] +name = "migrations_internals" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c493c09323068c01e54c685f7da41a9ccf9219735c3766fbfd6099806ea08fbc" +dependencies = [ + "serde", + "toml", +] + +[[package]] +name = "migrations_macros" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a8ff27a350511de30cdabb77147501c36ef02e0451d957abea2f30caffb2b58" +dependencies = [ + "migrations_internals", + "proc-macro2", + "quote", +] + [[package]] name = "milagro_bls" version = "1.4.2" @@ -4726,9 +4880,9 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -4803,7 +4957,7 @@ dependencies = [ "bs58", "byteorder", "data-encoding", - "multihash", + "multihash 0.16.3", "percent-encoding", "serde", "static_assertions", @@ -4821,7 +4975,26 @@ 
dependencies = [ "byteorder", "data-encoding", "multibase", - "multihash", + "multihash 0.16.3", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.7.1", + "url", +] + +[[package]] +name = "multiaddr" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "log", + "multibase", + "multihash 0.17.0", "percent-encoding", "serde", "static_assertions", @@ -4853,6 +5026,19 @@ dependencies = [ "unsigned-varint 0.7.1", ] +[[package]] +name = "multihash" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" +dependencies = [ + "core2", + "digest 0.10.6", + "multihash-derive", + "sha2 0.10.6", + "unsigned-varint 0.7.1", +] + [[package]] name = "multihash-derive" version = "0.8.1" @@ -4863,7 +5049,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -4992,9 +5178,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260e21fbb6f3d253a14df90eb0000a6066780a15dd901a7519ce02d77a94985b" +checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" dependencies = [ "bytes", "futures", @@ -5012,9 +5198,8 @@ dependencies = [ "derivative", "environment", "error-chain", - "eth2_ssz", - "eth2_ssz_types", "ethereum-types 0.14.1", + "ethereum_ssz", "execution_layer", "exit-future", "fnv", @@ -5040,6 +5225,7 @@ dependencies = [ "sloggers", "slot_clock", "smallvec", + "ssz_types", "store", "strum", "task_executor", @@ -5245,7 +5431,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs 0.5.2", ] [[package]] @@ -5295,14 +5481,14 @@ dependencies = [ "bytes", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "4d2f106ab837a24e03672c59b1239669a0596406ff657c3c0835b6b7f0f35a33" dependencies = [ "bitflags", "cfg-if", @@ -5315,13 +5501,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -5332,20 +5518,19 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.0+1.1.1t" +version = "111.25.2+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3173cd3626c43e3854b1b727422a276e568d9ec5fe8cec197822cf52cfb743d6" +checksum = "320708a054ad9b3bf314688b5db87cf4d6683d64cfc835e2337924ae62bf4431" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "3a20eace9dc2d82904039cb76dcf50fb1a0bba071cfd1629720b5d6f1ddba0fa" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "openssl-src", @@ -5360,8 +5545,8 @@ dependencies = [ "beacon_chain", "bitvec 1.0.1", "derivative", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "itertools", "lazy_static", 
"lighthouse_metrics", @@ -5452,7 +5637,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5464,7 +5649,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5503,7 +5688,7 @@ dependencies = [ "cfg-if", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi", ] @@ -5516,16 +5701,16 @@ checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "windows-sys 0.45.0", ] [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" @@ -5575,16 +5760,6 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" -[[package]] -name = "pest" -version = "2.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" -dependencies = [ - "thiserror", - "ucd-trie", -] - [[package]] name = "petgraph" version = "0.6.3" @@ -5605,6 +5780,24 @@ dependencies = [ "rustc_version 0.4.0", ] +[[package]] +name = "phf" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = 
"1.0.12" @@ -5622,7 +5815,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5701,16 +5894,18 @@ dependencies = [ [[package]] name = "polling" -version = "2.5.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg 1.1.0", + "bitflags", "cfg-if", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite 0.2.9", + "windows-sys 0.45.0", ] [[package]] @@ -5721,18 +5916,7 @@ checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" -dependencies = [ - "cpuid-bool", - "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", ] [[package]] @@ -5744,7 +5928,48 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", +] + +[[package]] +name = "polyval" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash 0.5.0", +] + +[[package]] +name = "postgres-protocol" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" +dependencies = [ + "base64 0.21.0", + "byteorder", + "bytes", + "fallible-iterator", + "hmac 0.12.1", + "md-5", + "memchr", + "rand 0.8.5", + "sha2 0.10.6", + 
"stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f028f05971fe20f512bcc679e2c10227e57809a3af86a7606304435bc8896cd6" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", ] [[package]] @@ -5754,13 +5979,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] -name = "prettyplease" -version = "0.1.23" +name = "pq-sys" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" +checksum = "3b845d6d8ec554f972a2c5298aad68953fd64e7441e846075450b44656a016d1" +dependencies = [ + "vcpkg", +] + +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -5809,7 +6043,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -5832,9 +6066,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "1d0dd4be24fcdcfeaa12a432d588dc59bbad6cad3510c67e74a2b6b2fc950564" dependencies = [ "unicode-ident", ] @@ -5886,14 +6120,14 @@ checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698" +checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537" dependencies = [ "bytes", "prost-derive", @@ -5901,9 +6135,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" +checksum = "2c828f93f5ca4826f97fedcbd3f9a536c16b12cff3dbbb4a007f932bbad95b12" dependencies = [ "bytes", "heck", @@ -5916,7 +6150,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn", + "syn 1.0.109", "tempfile", "which", ] @@ -5936,24 +6170,23 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d" +checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost-types" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788" +checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88" dependencies = [ - "bytes", "prost", ] @@ -5961,12 +6194,13 @@ dependencies = [ name = "proto_array" version = "0.2.0" dependencies = [ - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "safe_arith", "serde", "serde_derive", "serde_yaml", + "superstruct 0.5.0", "types", ] @@ -6001,6 +6235,15 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + [[package]] name = "quickcheck" version = "0.9.2" @@ -6021,7 +6264,7 @@ checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6037,9 +6280,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4ced82a24bb281af338b9e8f94429b6eca01b4e66d899f40031f074e74c9" +checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" dependencies = [ "bytes", "rand 0.8.5", @@ -6055,9 +6298,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -6177,9 +6420,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -6187,9 +6430,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -6205,7 +6448,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" 
dependencies = [ "pem", "ring", - "time 0.3.17", + "time 0.3.20", "x509-parser 0.13.2", "yasna", ] @@ -6218,7 +6461,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", - "time 0.3.17", + "time 0.3.20", "yasna", ] @@ -6231,6 +6474,15 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -6238,15 +6490,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom 0.2.8", - "redox_syscall", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -6264,24 +6516,15 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" dependencies = [ "base64 0.21.0", "bytes", @@ -6381,7 +6624,7 @@ checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6450,9 +6693,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" [[package]] name = "rustc-hash" @@ -6475,22 +6718,13 @@ dependencies = [ "semver 0.9.0", ] -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", -] - [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.16", + "semver 1.0.17", ] [[package]] @@ -6502,6 +6736,20 @@ dependencies = [ "nom 7.1.3", ] +[[package]] +name = "rustix" +version = "0.37.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d097081ed288dfe45699b72f5b5d648e5f15d64d900c7080273baa20c16a6849" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", +] + [[package]] name = "rustls" version = "0.19.1" @@ -6538,9 +6786,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = 
"4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "rw-stream-sink" @@ -6555,9 +6803,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safe_arith" @@ -6589,9 +6837,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" +checksum = "0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" dependencies = [ "cfg-if", "derive_more", @@ -6601,14 +6849,14 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" +checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6622,9 +6870,9 @@ dependencies = [ [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot 0.12.1", ] @@ -6643,9 +6891,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = 
"1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "scrypt" @@ -6752,23 +7000,14 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser 0.7.0", + "semver-parser", ] [[package]] name = "semver" -version = "0.11.0" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser 0.10.2", -] - -[[package]] -name = "semver" -version = "1.0.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "semver-parser" @@ -6776,15 +7015,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "send_wrapper" version = "0.6.0" @@ -6801,9 +7031,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.152" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" dependencies = [ "serde_derive", ] @@ -6830,20 +7060,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" dependencies = [ "itoa", "ryu", @@ -6852,13 +7082,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -6892,7 +7122,7 @@ dependencies = [ "darling 0.13.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -7031,7 +7261,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7053,10 +7283,16 @@ dependencies = [ ] [[package]] -name = "slab" -version = "0.4.7" +name = "siphasher" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" + +[[package]] +name = "slab" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg 1.1.0", ] @@ -7067,8 +7303,8 @@ version = "0.1.0" dependencies = [ "bincode", "byteorder", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "filesystem", "flate2", "lazy_static", @@ -7116,7 
+7352,7 @@ name = "slashing_protection" version = "0.1.0" dependencies = [ "arbitrary", - "eth2_serde_utils", + "ethereum_serde_utils", "filesystem", "lazy_static", "r2d2", @@ -7157,7 +7393,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7202,7 +7438,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -7253,14 +7489,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ba5f4d4ff12bdb6a169ed51b7c48c0e0ac4b0b4b31012b2571e97d78d3201d" +checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.0", + "curve25519-dalek 4.0.0-rc.2", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -7270,14 +7506,24 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] +[[package]] +name = "socket2" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc8d618c6641ae355025c449427f9e96b98abf99a772be3cef6708d15c77147a" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "soketto" version = "0.7.1" @@ -7331,7 +7577,25 @@ source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28e dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "ssz_types" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8052a1004e979c0be24b9e55940195553103cc57d0b34f7e2c4e32793325e402" +dependencies = [ + "arbitrary", + "derivative", + "ethereum_serde_utils", + "ethereum_ssz", + "itertools", + "serde", + "serde_derive", + "smallvec", + "tree_hash", + "typenum", ] [[package]] @@ -7343,10 +7607,9 @@ dependencies = [ "bls", "derivative", "env_logger 0.9.3", - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", "int_to_bytes", "integer-sqrt", "itertools", @@ -7356,6 +7619,7 @@ dependencies = [ "rayon", "safe_arith", "smallvec", + "ssz_types", "tokio", "tree_hash", "types", @@ -7366,7 +7630,7 @@ name = "state_transition_vectors" version = "0.1.0" dependencies = [ "beacon_chain", - "eth2_ssz", + "ethereum_ssz", "lazy_static", "state_processing", "tokio", @@ -7386,8 +7650,8 @@ dependencies = [ "beacon_chain", "db-key", "directory", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "itertools", "lazy_static", "leveldb", @@ -7404,6 +7668,16 @@ dependencies = [ "types", ] +[[package]] +name = "stringprep" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "strsim" version = "0.8.0" @@ -7435,7 +7709,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn", + "syn 1.0.109", ] [[package]] @@ -7483,7 +7757,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -7497,7 +7771,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -7505,15 +7779,26 @@ name = "swap_or_not_shuffle" version = "0.2.0" dependencies = [ "criterion", - "eth2_hashing", "ethereum-types 0.14.1", + "ethereum_hashing", ] [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c9da457c5285ac1f936ebd076af6dac17a61cfe7826f2076b4d015cf47bc8ec" dependencies = [ "proc-macro2", "quote", @@ -7534,7 +7819,7 @@ checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unicode-xid", ] @@ -7627,16 +7912,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.3.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", + "redox_syscall 0.3.5", + "rustix", + "windows-sys 0.45.0", ] [[package]] @@ -7672,7 +7956,24 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "testcontainers" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e2b1567ca8a2b819ea7b28c92be35d9f76fb9edb214321dcc86eb96023d1f87" +dependencies = [ + "bollard-stubs", + "futures", + "hex", + "hmac 0.12.1", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.10.6", ] [[package]] @@ -7686,22 +7987,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = 
"978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -7736,9 +8037,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", "libc", @@ -7756,9 +8057,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -7829,22 +8130,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.25.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" dependencies = [ "autocfg 1.1.0", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", "pin-project-lite 0.2.9", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -7859,13 +8159,13 @@ dependencies = [ [[package]] name = 
"tokio-macros" -version = "1.8.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -7878,6 +8178,30 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e89f6234aa8fd43779746012fcf53603cdb91fdd8399aa0de868c2d56b6dde1" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot 0.12.1", + "percent-encoding", + "phf", + "pin-project-lite 0.2.9", + "postgres-protocol", + "postgres-types", + "socket2 0.5.1", + "tokio", + "tokio-util 0.7.7", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -7902,9 +8226,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite 0.2.9", @@ -8050,7 +8374,7 @@ checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8118,31 +8442,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "tree_hash" -version = "0.4.1" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8488e272d45adc36db8f6c99d09613f58a7cd06c7b347546c87d9a29ca11e8" 
dependencies = [ - "beacon_chain", - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", "ethereum-types 0.14.1", - "rand 0.8.5", + "ethereum_hashing", "smallvec", - "tree_hash_derive", - "types", ] [[package]] name = "tree_hash_derive" -version = "0.4.0" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83baa26594d96889e5fef7638dfb0f41e16070301a5cf6da99b9a6a0804cec89" dependencies = [ "darling 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8173,7 +8495,7 @@ dependencies = [ "lazy_static", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "thiserror", "tinyvec", "tokio", @@ -8293,13 +8615,12 @@ dependencies = [ "compare_fields_derive", "criterion", "derivative", - "eth2_hashing", "eth2_interop_keypairs", - "eth2_serde_utils", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", "hex", "int_to_bytes", "itertools", @@ -8322,6 +8643,7 @@ dependencies = [ "serde_yaml", "slog", "smallvec", + "ssz_types", "state_processing", "superstruct 0.6.0", "swap_or_not_shuffle", @@ -8332,12 +8654,6 @@ dependencies = [ "tree_hash_derive", ] -[[package]] -name = "ucd-trie" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" - [[package]] name = "uint" version = "0.9.5" @@ -8368,15 +8684,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -8409,6 +8725,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "universal-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "unsigned-varint" version = "0.6.0" @@ -8438,6 +8764,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "unused_port" version = "0.1.0" +dependencies = [ + "lazy_static", + "lru_cache", + "parking_lot 0.12.1", +] [[package]] name = "url" @@ -8490,7 +8821,7 @@ dependencies = [ "environment", "eth2", "eth2_keystore", - "eth2_serde_utils", + "ethereum_serde_utils", "exit-future", "filesystem", "futures", @@ -8560,8 +8891,8 @@ dependencies = [ "eth2", "eth2_keystore", "eth2_network_config", - "eth2_serde_utils", "eth2_wallet", + "ethereum_serde_utils", "hex", "regex", "serde", @@ -8620,12 +8951,11 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -8726,7 +9056,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -8760,7 +9090,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8771,30 +9101,6 @@ version = "0.2.84" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" -[[package]] -name = "wasm-bindgen-test" -version = "0.3.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db36fc0f9fb209e88fb3642590ae0205bb5a56216dabd963ba15879fe53a30b" -dependencies = [ - "console_error_panic_hook", - "js-sys", - "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0734759ae6b3b1717d661fe4f016efcfb9828f5edb4520c18eaee05af3b43be9" -dependencies = [ - "proc-macro2", - "quote", -] - [[package]] name = "wasm-streams" version = "0.2.3" @@ -8823,6 +9129,39 @@ dependencies = [ "web-sys", ] +[[package]] +name = "watch" +version = "0.1.0" +dependencies = [ + "axum", + "beacon_chain", + "beacon_node", + "bls", + "byteorder", + "clap", + "diesel", + "diesel_migrations", + "env_logger 0.9.3", + "eth2", + "hex", + "http_api", + "hyper", + "log", + "network", + "r2d2", + "rand 0.7.3", + "reqwest", + "serde", + "serde_json", + "serde_yaml", + "testcontainers", + "tokio", + "tokio-postgres", + "types", + "unused_port", + "url", +] + [[package]] name = "web-sys" version = "0.3.61" @@ -8890,6 +9229,8 @@ dependencies = [ "eth2_network_config", "exit-future", "futures", + "lazy_static", + "parking_lot 0.12.1", "reqwest", "serde", "serde_derive", @@ -8960,7 +9301,7 @@ dependencies = [ "sha2 0.10.6", "stun", "thiserror", - "time 0.3.17", + "time 0.3.20", "tokio", "turn", "url", @@ -8992,22 +9333,22 @@ dependencies = [ [[package]] name = "webrtc-dtls" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7021987ae0a2ed6c8cd33f68e98e49bb6e74ffe9543310267b48a1bbe3900e5f" +checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" dependencies 
= [ "aes 0.6.0", - "aes-gcm 0.8.0", + "aes-gcm 0.10.1", "async-trait", "bincode", "block-modes", "byteorder", "ccm", "curve25519-dalek 3.2.0", - "der-parser 8.1.0", + "der-parser 8.2.0", "elliptic-curve", "hkdf", - "hmac 0.10.1", + "hmac 0.12.1", "log", "oid-registry 0.6.1", "p256", @@ -9019,15 +9360,15 @@ dependencies = [ "rustls 0.19.1", "sec1", "serde", - "sha-1 0.9.8", - "sha2 0.9.9", + "sha1", + "sha2 0.10.6", "signature", "subtle", "thiserror", "tokio", "webpki 0.21.4", "webrtc-util", - "x25519-dalek 2.0.0-pre.1", + "x25519-dalek 2.0.0-rc.2", "x509-parser 0.13.2", ] @@ -9062,7 +9403,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" dependencies = [ "log", - "socket2", + "socket2 0.4.9", "thiserror", "tokio", "webrtc-util", @@ -9146,15 +9487,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" version = "4.4.0" @@ -9178,6 +9510,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +[[package]] +name = "wildmatch" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f44b95f62d34113cf558c93511ac93027e03e9c29a60dd0fd70e6e025c7270a" + [[package]] name = "winapi" version = "0.3.9" @@ -9222,6 +9560,15 @@ dependencies = [ "windows_x86_64_msvc 0.34.0", ] +[[package]] +name = "windows" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdacb41e6a96a052c6cb63a144f24900236121c6f63f4f8219fef5977ecb0c25" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-acl" version = "0.3.0" @@ -9241,12 +9588,12 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.1", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -9260,24 +9607,24 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.1", + "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" @@ -9287,9 +9634,9 @@ checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = 
"e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" @@ -9299,9 +9646,9 @@ checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" @@ -9311,9 +9658,9 @@ checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" @@ -9323,15 +9670,15 @@ checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" @@ -9341,9 +9688,9 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "winreg" @@ -9401,12 +9748,13 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-pre.1" +version = "2.0.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.0.0-rc.2", "rand_core 0.6.4", + "serde", "zeroize", ] @@ -9426,7 +9774,7 @@ dependencies = [ "ring", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -9435,16 +9783,16 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs 0.5.2", "base64 0.13.1", "data-encoding", - "der-parser 8.1.0", + "der-parser 8.2.0", "lazy_static", "nom 7.1.3", "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -9491,28 +9839,27 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aed2e7a52e3744ab4d0c05c20aa065258e84c49fd4226f5191b2ed29712710b4" dependencies = [ - "time 0.3.17", + "time 0.3.20", ] [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.13", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 4ffb54c198..1c84d55287 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,17 +53,10 @@ members = [ "consensus/fork_choice", "consensus/proto_array", "consensus/safe_arith", - "consensus/ssz", - "consensus/ssz_derive", - "consensus/ssz_types", - "consensus/serde_utils", "consensus/state_processing", "consensus/swap_or_not_shuffle", - "consensus/tree_hash", - "consensus/tree_hash_derive", "crypto/bls", - "crypto/eth2_hashing", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", @@ -89,20 +82,15 @@ members = [ "validator_client/slashing_protection", "validator_manager", + + "watch", ] resolver = "2" [patch] [patch.crates-io] warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } -eth2_ssz = { path = "consensus/ssz" } -eth2_ssz_derive = { path = "consensus/ssz_derive" } -eth2_ssz_types = { path = "consensus/ssz_types" } -eth2_hashing = { path = "crypto/eth2_hashing" } -tree_hash = { path = "consensus/tree_hash" } -tree_hash_derive = { path = "consensus/tree_hash_derive" } -eth2_serde_utils = { path = "consensus/serde_utils" } -arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" } +arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } [patch."https://github.com/ralexstokes/mev-rs"] mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } diff --git a/Dockerfile b/Dockerfile index 25ca075387..be01ad7c57 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,10 @@ -FROM rust:1.66.0-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get 
update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES +ARG PROFILE=release ENV FEATURES $FEATURES +ENV PROFILE $PROFILE RUN cd lighthouse && make FROM ubuntu:22.04 diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 9e5b57a297..5755a355f3 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -27,7 +27,6 @@ pub const PASSWORD_PROMPT: &str = "Enter the keystore password"; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; pub const CONFIRMATION_PHRASE: &str = "Exit my validator"; pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/voluntary-exit.html"; -pub const PROMPT: &str = "WARNING: WITHDRAWING STAKED ETH IS NOT CURRENTLY POSSIBLE"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new("exit") @@ -161,7 +160,6 @@ async fn publish_voluntary_exit( ); if !no_confirmation { eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n"); - eprintln!("{}\n", PROMPT); eprintln!( "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.", WEBSITE_URL diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index fed3b96ca1..95f145a557 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.0.1-rc.0" +version = "4.1.0" authors = ["Paul Hauner ", "Age Manning ; @@ -128,12 +128,6 @@ pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1) /// The timeout for the eth1 finalization cache pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); -/// The latest delay from the start of the slot at which to attempt a 1-slot re-org. -fn max_re_org_slot_delay(seconds_per_slot: u64) -> Duration { - // Allow at least half of the attestation deadline for the block to propagate. 
- Duration::from_secs(seconds_per_slot) / INTERVALS_PER_SLOT as u32 / 2 -} - // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -359,7 +353,7 @@ pub struct BeaconChain { /// in recent epochs. pub(crate) observed_sync_aggregators: RwLock>, /// Maintains a record of which validators have proposed blocks for each slot. - pub(crate) observed_block_producers: RwLock>, + pub observed_block_producers: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. pub(crate) observed_voluntary_exits: Mutex>, /// Maintains a record of which validators we've seen proposer slashings for. @@ -427,11 +421,53 @@ pub struct BeaconChain { pub slasher: Option>>, /// Provides monitoring of a set of explicitly defined validators. pub validator_monitor: RwLock>, + /// The slot at which blocks are downloaded back to. + pub genesis_backfill_slot: Slot, } type BeaconBlockAndState = (BeaconBlock, BeaconState); impl BeaconChain { + /// Checks if a block is finalized. + /// The finalization check is done with the block slot. The block root is used to verify that + /// the finalized slot is in the canonical chain. + pub fn is_finalized_block( + &self, + block_root: &Hash256, + block_slot: Slot, + ) -> Result { + let finalized_slot = self + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let is_canonical = self + .block_root_at_slot(block_slot, WhenSlotSkipped::None)? + .map_or(false, |canonical_root| block_root == &canonical_root); + Ok(block_slot <= finalized_slot && is_canonical) + } + + /// Checks if a state is finalized. + /// The finalization check is done with the slot. The state root is used to verify that + /// the finalized state is in the canonical chain. 
+ pub fn is_finalized_state( + &self, + state_root: &Hash256, + state_slot: Slot, + ) -> Result { + let finalized_slot = self + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let is_canonical = self + .state_root_at_slot(state_slot)? + .map_or(false, |canonical_root| state_root == &canonical_root); + Ok(state_slot <= finalized_slot && is_canonical) + } + /// Persists the head tracker and fork choice. /// /// We do it atomically even though no guarantees need to be made about blocks from @@ -1010,7 +1046,7 @@ impl BeaconChain { .execution_layer .as_ref() .ok_or(Error::ExecutionLayerMissing)? - .get_payload_by_block_hash(exec_block_hash, fork) + .get_payload_for_header(&execution_payload_header, fork) .await .map_err(|e| { Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e)) @@ -2173,12 +2209,14 @@ impl BeaconChain { &self, exit: SignedVoluntaryExit, ) -> Result, Error> { - // NOTE: this could be more efficient if it avoided cloning the head state - let wall_clock_state = self.wall_clock_state()?; + let head_snapshot = self.head().snapshot; + let head_state = &head_snapshot.beacon_state; + let wall_clock_epoch = self.epoch()?; + Ok(self .observed_voluntary_exits .lock() - .verify_and_observe(exit, &wall_clock_state, &self.spec) + .verify_and_observe_at(exit, wall_clock_epoch, head_state, &self.spec) .map(|exit| { // this method is called for both API and gossip exits, so this covers all exit events if let Some(event_handler) = self.event_handler.as_ref() { @@ -2853,7 +2891,7 @@ impl BeaconChain { metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); let block_delay = self .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .ok_or(Error::UnableToComputeTimeAtSlot)?; fork_choice @@ -3584,7 +3622,7 @@ impl BeaconChain { let (state, state_root_opt) = self .task_executor .spawn_blocking_handle( - move || 
chain.load_state_for_block_production::(slot), + move || chain.load_state_for_block_production(slot), "produce_partial_beacon_block", ) .ok_or(BlockProductionError::ShuttingDown)? @@ -3607,7 +3645,7 @@ impl BeaconChain { /// Load a beacon state from the database for block production. This is a long-running process /// that should not be performed in an `async` context. - fn load_state_for_block_production>( + fn load_state_for_block_production( self: &Arc, slot: Slot, ) -> Result<(BeaconState, Option), BlockProductionError> { @@ -3706,7 +3744,7 @@ impl BeaconChain { let slot_delay = self .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .or_else(|| { warn!( self.log, @@ -3721,7 +3759,7 @@ impl BeaconChain { // 1. It seems we have time to propagate and still receive the proposer boost. // 2. The current head block was seen late. // 3. The `get_proposer_head` conditions from fork choice pass. - let proposing_on_time = slot_delay < max_re_org_slot_delay(self.spec.seconds_per_slot); + let proposing_on_time = slot_delay < self.config.re_org_cutoff(self.spec.seconds_per_slot); if !proposing_on_time { debug!( self.log, @@ -3751,6 +3789,7 @@ impl BeaconChain { slot, canonical_head, re_org_threshold, + &self.config.re_org_disallowed_offsets, self.config.re_org_max_epochs_since_finalization, ) .map_err(|e| match e { @@ -4029,6 +4068,7 @@ impl BeaconChain { .get_preliminary_proposer_head( head_block_root, re_org_threshold, + &self.config.re_org_disallowed_offsets, self.config.re_org_max_epochs_since_finalization, ) .map_err(|e| e.map_inner_error(Error::ProposerHeadForkChoiceError))?; @@ -4039,7 +4079,7 @@ impl BeaconChain { let re_org_block_slot = head_slot + 1; let fork_choice_slot = info.current_slot; - // If a re-orging proposal isn't made by the `max_re_org_slot_delay` then we give up + // If a re-orging proposal isn't made by the `re_org_cutoff` then we give up // and allow the fork choice update for the 
canonical head through so that we may attest // correctly. let current_slot_ok = if head_slot == fork_choice_slot { @@ -4050,7 +4090,7 @@ impl BeaconChain { .and_then(|slot_start| { let now = self.slot_clock.now_duration()?; let slot_delay = now.saturating_sub(slot_start); - Some(slot_delay <= max_re_org_slot_delay(self.spec.seconds_per_slot)) + Some(slot_delay <= self.config.re_org_cutoff(self.spec.seconds_per_slot)) }) .unwrap_or(false) } else { @@ -4612,6 +4652,7 @@ impl BeaconChain { &mut state, &block, signature_strategy, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &self.spec, diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 71160fcb63..9b2edbd8b5 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -218,7 +218,6 @@ where finalized_checkpoint: self.finalized_checkpoint, justified_checkpoint: self.justified_checkpoint, justified_balances: self.justified_balances.effective_balances.clone(), - best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT, unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, proposer_boost_root: self.proposer_boost_root, @@ -355,24 +354,62 @@ where } } +pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV17; + /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. 
-#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] +#[superstruct( + variants(V11, V17), + variant_attributes(derive(Encode, Decode)), + no_enum +)] pub struct PersistedForkChoiceStore { - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec, + #[superstruct(only(V11))] pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub unrealized_finalized_checkpoint: Checkpoint, - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub proposer_boost_root: Hash256, - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub equivocating_indices: BTreeSet, } -pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV11; +impl Into for PersistedForkChoiceStoreV11 { + fn into(self) -> PersistedForkChoiceStore { + PersistedForkChoiceStore { + balances_cache: self.balances_cache, + time: self.time, + finalized_checkpoint: self.finalized_checkpoint, + justified_checkpoint: self.justified_checkpoint, + justified_balances: self.justified_balances, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + proposer_boost_root: self.proposer_boost_root, + equivocating_indices: self.equivocating_indices, + } + } +} + +impl Into for PersistedForkChoiceStore { + fn into(self) -> PersistedForkChoiceStoreV11 { + PersistedForkChoiceStoreV11 { + balances_cache: self.balances_cache, + time: self.time, + finalized_checkpoint: self.finalized_checkpoint, + justified_checkpoint: self.justified_checkpoint, + justified_balances: self.justified_balances, + best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT, + 
unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + proposer_boost_root: self.proposer_boost_root, + equivocating_indices: self.equivocating_indices, + } + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 5102381a1a..ca4df864db 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -78,7 +78,7 @@ use state_processing::{ per_block_processing, per_slot_processing, state_advance::partial_state_advance, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - VerifyBlockRoot, + StateProcessingStrategy, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs; @@ -1400,6 +1400,7 @@ impl ExecutionPendingBlock { &block, // Signatures were verified earlier in this function. BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut consensus_context, &chain.spec, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 8ad874ea09..ca377635d6 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -22,7 +22,7 @@ use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; -use proto_array::ReOrgThreshold; +use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; use slog::{crit, error, info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; @@ -175,6 +175,15 @@ where self } + /// Sets the proposer re-org disallowed offsets list. 
+ pub fn proposer_re_org_disallowed_offsets( + mut self, + disallowed_offsets: DisallowedReOrgOffsets, + ) -> Self { + self.chain_config.re_org_disallowed_offsets = disallowed_offsets; + self + } + /// Sets the store (database). /// /// Should generally be called early in the build chain. @@ -763,6 +772,29 @@ where let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; + // Calculate the weak subjectivity point in which to backfill blocks to. + let genesis_backfill_slot = if self.chain_config.genesis_backfill { + Slot::new(0) + } else { + let backfill_epoch_range = (self.spec.min_validator_withdrawability_delay + + self.spec.churn_limit_quotient) + .as_u64() + / 2; + match slot_clock.now() { + Some(current_slot) => { + let genesis_backfill_epoch = current_slot + .epoch(TEthSpec::slots_per_epoch()) + .saturating_sub(backfill_epoch_range); + genesis_backfill_epoch.start_slot(TEthSpec::slots_per_epoch()) + } + None => { + // The slot clock cannot derive the current slot. We therefore assume we are + // at or prior to genesis and backfill should sync all the way to genesis. 
+ Slot::new(0) + } + } + }; + let beacon_chain = BeaconChain { spec: self.spec, config: self.chain_config, @@ -830,6 +862,7 @@ where graffiti: self.graffiti, slasher: self.slasher.clone(), validator_monitor: RwLock::new(validator_monitor), + genesis_backfill_slot, }; let head = beacon_chain.head_snapshot(); @@ -990,7 +1023,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { mod test { use super::*; use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; - use eth2_hashing::hash; + use ethereum_hashing::hash; use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, }; diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 1a53942562..a74fdced1f 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,10 +1,12 @@ -pub use proto_array::ReOrgThreshold; +pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde_derive::{Deserialize, Serialize}; use std::time::Duration; use types::{Checkpoint, Epoch}; pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); +/// Default to 1/12th of the slot, which is 1 second on mainnet. +pub const DEFAULT_RE_ORG_CUTOFF_DENOMINATOR: u32 = 12; pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250; /// Default fraction of a slot lookahead for payload preparation (12/3 = 4 seconds on mainnet). @@ -34,6 +36,13 @@ pub struct ChainConfig { pub re_org_threshold: Option, /// Maximum number of epochs since finalization for attempting a proposer re-org. pub re_org_max_epochs_since_finalization: Epoch, + /// Maximum delay after the start of the slot at which to propose a reorging block. + pub re_org_cutoff_millis: Option, + /// Additional epoch offsets at which re-orging block proposals are not permitted. 
+ /// + /// By default this list is empty, but it can be useful for reacting to network conditions, e.g. + /// slow gossip of re-org blocks at slot 1 in the epoch. + pub re_org_disallowed_offsets: DisallowedReOrgOffsets, /// Number of milliseconds to wait for fork choice before proposing a block. /// /// If set to 0 then block proposal will not wait for fork choice at all. @@ -64,10 +73,15 @@ pub struct ChainConfig { pub optimistic_finalized_sync: bool, /// The size of the shuffling cache, pub shuffling_cache_size: usize, + /// If using a weak-subjectivity sync, whether we should download blocks all the way back to + /// genesis. + pub genesis_backfill: bool, /// Whether to send payload attributes every slot, regardless of connected proposers. /// /// This is useful for block builders and testing. pub always_prepare_payload: bool, + /// Whether backfill sync processing should be rate-limited. + pub enable_backfill_rate_limiting: bool, } impl Default for ChainConfig { @@ -80,6 +94,8 @@ impl Default for ChainConfig { max_network_size: 10 * 1_048_576, // 10M re_org_threshold: Some(DEFAULT_RE_ORG_THRESHOLD), re_org_max_epochs_since_finalization: DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + re_org_cutoff_millis: None, + re_org_disallowed_offsets: DisallowedReOrgOffsets::default(), fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, // Builder fallback configs that are set in `clap` will override these. builder_fallback_skips: 3, @@ -93,7 +109,20 @@ impl Default for ChainConfig { // This value isn't actually read except in tests. optimistic_finalized_sync: true, shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, + genesis_backfill: false, always_prepare_payload: false, + enable_backfill_rate_limiting: true, } } } + +impl ChainConfig { + /// The latest delay from the start of the slot at which to attempt a 1-slot re-org. 
+ pub fn re_org_cutoff(&self, seconds_per_slot: u64) -> Duration { + self.re_org_cutoff_millis + .map(Duration::from_millis) + .unwrap_or_else(|| { + Duration::from_secs(seconds_per_slot) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR + }) + } +} diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 25971bf85b..8b6c6b3740 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,7 +1,7 @@ use crate::metrics; use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2::lighthouse::Eth1SyncStatusData; -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes32; use slog::{debug, error, trace, Logger}; use ssz::{Decode, Encode}; @@ -88,7 +88,7 @@ fn get_sync_status( let period = T::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (current_slot / period) * period; - let period_start = slot_start_seconds::( + let period_start = slot_start_seconds( genesis_time, spec.seconds_per_slot, voting_period_start_slot, @@ -470,7 +470,7 @@ impl Eth1ChainBackend for CachingEth1Backend { fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { let period = T::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (state.slot() / period) * period; - let voting_period_start_seconds = slot_start_seconds::( + let voting_period_start_seconds = slot_start_seconds( state.genesis_time(), spec.seconds_per_slot, voting_period_start_slot, @@ -658,11 +658,7 @@ fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option { } /// Returns the unix-epoch seconds at the start of the given `slot`. 
-fn slot_start_seconds( - genesis_unix_seconds: u64, - seconds_per_slot: u64, - slot: Slot, -) -> u64 { +fn slot_start_seconds(genesis_unix_seconds: u64, seconds_per_slot: u64, slot: Slot) -> u64 { genesis_unix_seconds + slot.as_u64() * seconds_per_slot } @@ -698,7 +694,7 @@ mod test { fn get_voting_period_start_seconds(state: &BeaconState, spec: &ChainSpec) -> u64 { let period = ::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (state.slot() / period) * period; - slot_start_seconds::( + slot_start_seconds( state.genesis_time(), spec.seconds_per_slot, voting_period_start_slot, @@ -708,23 +704,23 @@ mod test { #[test] fn slot_start_time() { let zero_sec = 0; - assert_eq!(slot_start_seconds::(100, zero_sec, Slot::new(2)), 100); + assert_eq!(slot_start_seconds(100, zero_sec, Slot::new(2)), 100); let one_sec = 1; - assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(1)), 101); - assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(2)), 102); + assert_eq!(slot_start_seconds(100, one_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds(100, one_sec, Slot::new(1)), 101); + assert_eq!(slot_start_seconds(100, one_sec, Slot::new(2)), 102); let three_sec = 3; - assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(1)), 103); - assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(2)), 106); + assert_eq!(slot_start_seconds(100, three_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds(100, three_sec, Slot::new(1)), 103); + assert_eq!(slot_start_seconds(100, three_sec, Slot::new(2)), 106); let five_sec = 5; - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(1)), 105); - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(2)), 110); - assert_eq!(slot_start_seconds::(100, five_sec, Slot::new(3)), 115); + 
assert_eq!(slot_start_seconds(100, five_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(1)), 105); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(2)), 110); + assert_eq!(slot_start_seconds(100, five_sec, Slot::new(3)), 115); } fn get_eth1_block(timestamp: u64, number: u64) -> Eth1Block { diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index ef23248aba..ccd17af243 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -5,7 +5,7 @@ use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext, - VerifyBlockRoot, + StateProcessingStrategy, VerifyBlockRoot, }; use std::sync::Arc; use std::time::Duration; @@ -177,6 +177,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It &mut state, &block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index cc45a6bb9a..5f59073500 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -189,13 +189,17 @@ impl BeaconChain { oldest_block_parent: expected_block_root, ..anchor_info }; - let backfill_complete = new_anchor.block_backfill_complete(); + let backfill_complete = new_anchor.block_backfill_complete(self.genesis_backfill_slot); self.store .compare_and_set_anchor_info_with_write(Some(anchor_info), Some(new_anchor))?; // If backfill has completed and the chain is configured to reconstruct historic states, // send a message to the background migrator instructing it to begin reconstruction. 
- if backfill_complete && self.config.reconstruct_historic_states { + // This can only happen if we have backfilled all the way to genesis. + if backfill_complete + && self.genesis_backfill_slot == Slot::new(0) + && self.config.reconstruct_historic_states + { self.store_migrator.process_reconstruction(); } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index af4780e46e..be1522a3b8 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -32,7 +32,7 @@ pub mod migrate; mod naive_aggregation_pool; mod observed_aggregates; mod observed_attesters; -mod observed_block_producers; +pub mod observed_block_producers; pub mod observed_operations; pub mod otb_verification_service; mod persisted_beacon_chain; diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 6e53373939..4121111b3e 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,11 +1,11 @@ use derivative::Derivative; use smallvec::{smallvec, SmallVec}; use ssz::{Decode, Encode}; -use state_processing::{SigVerifiedOp, VerifyOperation}; +use state_processing::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; use std::collections::HashSet; use std::marker::PhantomData; use types::{ - AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing, + AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, ForkName, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, }; @@ -87,12 +87,16 @@ impl ObservableOperation for SignedBlsToExecutionChange { } impl, E: EthSpec> ObservedOperations { - pub fn verify_and_observe( + pub fn verify_and_observe_parametric( &mut self, op: T, + validate: F, head_state: &BeaconState, spec: &ChainSpec, - ) -> Result, T::Error> { + ) -> Result, T::Error> + where + F: Fn(T) -> Result, T::Error>, + { 
self.reset_at_fork_boundary(head_state.slot(), spec); let observed_validator_indices = &mut self.observed_validator_indices; @@ -112,7 +116,7 @@ impl, E: EthSpec> ObservedOperations { } // Validate the op using operation-specific logic (`verify_attester_slashing`, etc). - let verified_op = op.validate(head_state, spec)?; + let verified_op = validate(op)?; // Add the relevant indices to the set of known indices to prevent processing of duplicates // in the future. @@ -121,6 +125,16 @@ impl, E: EthSpec> ObservedOperations { Ok(ObservationOutcome::New(verified_op)) } + pub fn verify_and_observe( + &mut self, + op: T, + head_state: &BeaconState, + spec: &ChainSpec, + ) -> Result, T::Error> { + let validate = |op: T| op.validate(head_state, spec); + self.verify_and_observe_parametric(op, validate, head_state, spec) + } + /// Reset the cache when crossing a fork boundary. /// /// This prevents an attacker from crafting a self-slashing which is only valid before the fork @@ -140,3 +154,16 @@ impl, E: EthSpec> ObservedOperations { } } } + +impl + VerifyOperationAt, E: EthSpec> ObservedOperations { + pub fn verify_and_observe_at( + &mut self, + op: T, + verify_at_epoch: Epoch, + head_state: &BeaconState, + spec: &ChainSpec, + ) -> Result, T::Error> { + let validate = |op: T| op.validate_at(head_state, verify_at_epoch, spec); + self.verify_and_observe_parametric(op, validate, head_state, spec) + } +} diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index 829dc2a8a7..8297ea9345 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,17 +1,41 @@ -use crate::beacon_fork_choice_store::PersistedForkChoiceStoreV11; +use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV11, PersistedForkChoiceStoreV17}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error, StoreItem}; use 
superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. -pub type PersistedForkChoice = PersistedForkChoiceV11; +pub type PersistedForkChoice = PersistedForkChoiceV17; -#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] +#[superstruct( + variants(V11, V17), + variant_attributes(derive(Encode, Decode)), + no_enum +)] pub struct PersistedForkChoice { pub fork_choice: fork_choice::PersistedForkChoice, #[superstruct(only(V11))] pub fork_choice_store: PersistedForkChoiceStoreV11, + #[superstruct(only(V17))] + pub fork_choice_store: PersistedForkChoiceStoreV17, +} + +impl Into for PersistedForkChoiceV11 { + fn into(self) -> PersistedForkChoice { + PersistedForkChoice { + fork_choice: self.fork_choice, + fork_choice_store: self.fork_choice_store.into(), + } + } +} + +impl Into for PersistedForkChoice { + fn into(self) -> PersistedForkChoiceV11 { + PersistedForkChoiceV11 { + fork_choice: self.fork_choice, + fork_choice_store: self.fork_choice_store.into(), + } + } } macro_rules! impl_store_item { @@ -33,3 +57,4 @@ macro_rules! 
impl_store_item { } impl_store_item!(PersistedForkChoiceV11); +impl_store_item!(PersistedForkChoiceV17); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 5808e648a2..7b398db2f5 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -4,6 +4,7 @@ mod migration_schema_v13; mod migration_schema_v14; mod migration_schema_v15; mod migration_schema_v16; +mod migration_schema_v17; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -141,6 +142,14 @@ pub fn migrate_schema( let ops = migration_schema_v16::downgrade_from_v16::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(16), SchemaVersion(17)) => { + let ops = migration_schema_v17::upgrade_to_v17::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(17), SchemaVersion(16)) => { + let ops = migration_schema_v17::downgrade_from_v17::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs new file mode 100644 index 0000000000..770cbb8ab5 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs @@ -0,0 +1,88 @@ +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; +use crate::persisted_fork_choice::{PersistedForkChoiceV11, PersistedForkChoiceV17}; +use proto_array::core::{SszContainerV16, SszContainerV17}; +use slog::{debug, Logger}; +use ssz::{Decode, Encode}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_fork_choice( + mut fork_choice: PersistedForkChoiceV11, +) -> Result { + let ssz_container_v16 = SszContainerV16::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + Error::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + let ssz_container_v17: SszContainerV17 = ssz_container_v16.try_into().map_err(|e| { + Error::SchemaMigrationError(format!( + "Missing checkpoint during schema migration: {:?}", + e + )) + })?; + fork_choice.fork_choice.proto_array_bytes = ssz_container_v17.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +pub fn downgrade_fork_choice( + mut fork_choice: PersistedForkChoiceV17, +) -> Result { + let ssz_container_v17 = SszContainerV17::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + Error::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + let ssz_container_v16: SszContainerV16 = ssz_container_v17.into(); + fork_choice.fork_choice.proto_array_bytes = ssz_container_v16.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +pub fn upgrade_to_v17( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Get 
persisted_fork_choice. + let v11 = db + .get_item::(&FORK_CHOICE_DB_KEY)? + .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; + + let v17 = upgrade_fork_choice(v11)?; + + debug!( + log, + "Removing unused best_justified_checkpoint from fork choice store." + ); + + Ok(vec![v17.as_kv_store_op(FORK_CHOICE_DB_KEY)]) +} + +pub fn downgrade_from_v17( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Get persisted_fork_choice. + let v17 = db + .get_item::(&FORK_CHOICE_DB_KEY)? + .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; + + let v11 = downgrade_fork_choice(v17)?; + + debug!( + log, + "Adding junk best_justified_checkpoint to fork choice store." + ); + + Ok(vec![v11.as_kv_store_op(FORK_CHOICE_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 3c5d1fd3b1..c5615b6185 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,3 +1,4 @@ +use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, @@ -26,6 +27,7 @@ use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use merkle_proof::MerkleTree; +use operation_pool::ReceivedPreCapella; use parking_lot::Mutex; use parking_lot::RwLockWriteGuard; use rand::rngs::StdRng; @@ -38,7 +40,7 @@ use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::{ state_advance::{complete_state_advance, partial_state_advance}, - StateRootStrategy, + StateProcessingStrategy, }; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; @@ -62,7 +64,7 @@ const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // 
// You should mutate the `ChainSpec` prior to initialising the harness if you would like to use // a different value. -pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::max_value(); +pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::MAX; pub type BaseHarnessType = Witness, TEthSpec, THotStore, TColdStore>; @@ -83,7 +85,7 @@ pub type AddBlocksResult = ( BeaconState, ); -/// Deprecated: Indicates how the `BeaconChainHarness` should produce blocks. +/// Indicates how the `BeaconChainHarness` should produce blocks. #[derive(Clone, Copy, Debug)] pub enum BlockStrategy { /// Produce blocks upon the canonical head (normal case). @@ -99,7 +101,7 @@ pub enum BlockStrategy { }, } -/// Deprecated: Indicates how the `BeaconChainHarness` should produce attestations. +/// Indicates how the `BeaconChainHarness` should produce attestations. #[derive(Clone, Debug)] pub enum AttestationStrategy { /// All validators attest to whichever block the `BeaconChainHarness` has produced. @@ -709,7 +711,7 @@ where pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option> { self.chain .store - .load_hot_state(&state_hash.into(), StateRootStrategy::Accurate) + .load_hot_state(&state_hash.into(), StateProcessingStrategy::Accurate) .unwrap() } @@ -732,6 +734,7 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } + /// Returns a newly created block, signed by the proposer for the given slot. 
pub async fn make_block( &self, mut state: BeaconState, @@ -938,31 +941,31 @@ where head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, ) -> Vec> { - self.make_unaggregated_attestations_with_limit( + let fork = self + .spec + .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch())); + self.make_unaggregated_attestations_with_opts( attesting_validators, state, state_root, head_block_root, attestation_slot, - None, + MakeAttestationOptions { limit: None, fork }, ) .0 } - pub fn make_unaggregated_attestations_with_limit( + pub fn make_unaggregated_attestations_with_opts( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, - limit: Option, + opts: MakeAttestationOptions, ) -> (Vec>, Vec) { + let MakeAttestationOptions { limit, fork } = opts; let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); - let fork = self - .spec - .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch())); - let attesters = Mutex::new(vec![]); let attestations = state @@ -1095,8 +1098,6 @@ where .collect() } - /// Deprecated: Use make_unaggregated_attestations() instead. - /// /// A list of attestations for each committee for the given slot. /// /// The first layer of the Vec is organised per committee. 
For example, if the return value is @@ -1154,16 +1155,35 @@ where slot: Slot, limit: Option, ) -> (HarnessAttestations, Vec) { - let (unaggregated_attestations, attesters) = self - .make_unaggregated_attestations_with_limit( - attesting_validators, - state, - state_root, - block_hash, - slot, - limit, - ); let fork = self.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch())); + self.make_attestations_with_opts( + attesting_validators, + state, + state_root, + block_hash, + slot, + MakeAttestationOptions { limit, fork }, + ) + } + + pub fn make_attestations_with_opts( + &self, + attesting_validators: &[usize], + state: &BeaconState, + state_root: Hash256, + block_hash: SignedBeaconBlockHash, + slot: Slot, + opts: MakeAttestationOptions, + ) -> (HarnessAttestations, Vec) { + let MakeAttestationOptions { fork, .. } = opts; + let (unaggregated_attestations, attesters) = self.make_unaggregated_attestations_with_opts( + attesting_validators, + state, + state_root, + block_hash, + slot, + opts, + ); let aggregated_attestations: Vec>> = unaggregated_attestations @@ -1495,6 +1515,26 @@ where .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } + pub fn add_bls_to_execution_change( + &self, + validator_index: u64, + address: Address, + ) -> Result<(), String> { + let signed_bls_change = self.make_bls_to_execution_change(validator_index, address); + if let ObservationOutcome::New(verified_bls_change) = self + .chain + .verify_bls_to_execution_change_for_gossip(signed_bls_change) + .expect("should verify BLS to execution change for gossip") + { + self.chain + .import_bls_to_execution_change(verified_bls_change, ReceivedPreCapella::No) + .then_some(()) + .ok_or("should import BLS to execution change to the op pool".to_string()) + } else { + Err("should observe new BLS to execution change".to_string()) + } + } + pub fn make_bls_to_execution_change( &self, validator_index: u64, @@ -2001,9 +2041,6 @@ where .collect() } - /// Deprecated: Do not modify the slot clock 
manually; rely on add_attested_blocks_at_slots() - /// instead - /// /// Advance the slot of the `BeaconChain`. /// /// Does not produce blocks or attestations. @@ -2017,18 +2054,6 @@ where self.chain.slot_clock.set_current_time(time); } - /// Deprecated: Use make_block() instead - /// - /// Returns a newly created block, signed by the proposer for the given slot. - pub async fn build_block( - &self, - state: BeaconState, - slot: Slot, - _block_strategy: BlockStrategy, - ) -> (SignedBeaconBlock, BeaconState) { - self.make_block(state, slot).await - } - /// Uses `Self::extend_chain` to build the chain out to the `target_slot`. pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { @@ -2064,8 +2089,6 @@ where .await } - /// Deprecated: Use add_attested_blocks_at_slots() instead - /// /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the /// last-produced block (the head of the chain). /// @@ -2219,3 +2242,10 @@ impl fmt::Debug for BeaconChainHarness { write!(f, "BeaconChainHarness") } } + +pub struct MakeAttestationOptions { + /// Produce exactly `limit` attestations. + pub limit: Option, + /// Fork to use for signing attestations. 
+ pub fork: Fork, +} diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index d79a56df6b..396aac71b0 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -199,6 +199,7 @@ pub struct ValidatorMetrics { pub attestation_head_misses: u64, pub attestation_target_hits: u64, pub attestation_target_misses: u64, + pub latest_attestation_inclusion_distance: u64, } impl ValidatorMetrics { @@ -225,6 +226,10 @@ impl ValidatorMetrics { pub fn increment_head_misses(&mut self) { self.attestation_head_misses += 1; } + + pub fn set_latest_inclusion_distance(&mut self, distance: u64) { + self.latest_attestation_inclusion_distance = distance; + } } /// A validator that is being monitored by the `ValidatorMonitor`. @@ -568,7 +573,6 @@ impl ValidatorMonitor { } else { validator_metrics.increment_misses() } - drop(validator_metrics); // Indicates if any attestation made it on-chain. // @@ -693,8 +697,10 @@ impl ValidatorMonitor { &[id], inclusion_delay as i64, ); + validator_metrics.set_latest_inclusion_distance(inclusion_delay); } } + drop(validator_metrics); // Indicates the number of sync committee signatures that made it into // a sync aggregate in the current_epoch (state.epoch - 1). 
diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 6a9e604793..1040521e5a 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1,5 +1,9 @@ #![cfg(not(debug_assertions))] +use beacon_chain::attestation_verification::{ + batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, Error, +}; +use beacon_chain::test_utils::{MakeAttestationOptions, HARNESS_GENESIS_TIME}; use beacon_chain::{ attestation_verification::Error as AttnError, test_utils::{ @@ -7,6 +11,7 @@ use beacon_chain::{ }, BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, }; +use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use lazy_static::lazy_static; use state_processing::{ @@ -14,9 +19,9 @@ use state_processing::{ }; use tree_hash::TreeHash; use types::{ - test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, BeaconStateError, - BitList, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, - SignedAggregateAndProof, Slot, SubnetId, Unsigned, + test_utils::generate_deterministic_keypair, Address, AggregateSignature, Attestation, + BeaconStateError, BitList, ChainSpec, Epoch, EthSpec, ForkName, Hash256, Keypair, + MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, Unsigned, }; pub type E = MainnetEthSpec; @@ -25,6 +30,8 @@ pub type E = MainnetEthSpec; /// have committees where _some_ validators are aggregators but not _all_. pub const VALIDATOR_COUNT: usize = 256; +pub const CAPELLA_FORK_EPOCH: usize = 1; + lazy_static! { /// A cached set of keys. 
static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); @@ -50,6 +57,50 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness (BeaconChainHarness>, ChainSpec) { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH as u64)); + + let validator_keypairs = KEYPAIRS[0..validator_count].to_vec(); + let genesis_state = interop_genesis_state( + &validator_keypairs, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + &spec, + ) + .unwrap(); + + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec.clone()) + .keypairs(validator_keypairs) + .withdrawal_keypairs( + KEYPAIRS[0..validator_count] + .iter() + .cloned() + .map(Some) + .collect(), + ) + .genesis_state_ephemeral_store(genesis_state) + .mock_execution_layer() + .build(); + + harness + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + + harness.advance_slot(); + + (harness, spec) +} + /// Returns an attestation that is valid for some slot in the given `chain`. /// /// Also returns some info about who created it. @@ -998,6 +1049,100 @@ async fn attestation_that_skips_epochs() { .expect("should gossip verify attestation that skips slots"); } +/// Ensures that an attestation can be processed when a validator receives proposer reward +/// in an epoch _and_ is scheduled for a withdrawal. This is a regression test for a scenario where +/// inconsistent state lookup could cause withdrawal root mismatch. +#[tokio::test] +async fn attestation_validator_receive_proposer_reward_and_withdrawals() { + let (harness, _) = get_harness_capella_spec(VALIDATOR_COUNT); + + // Advance to a Capella block. Make sure the blocks have attestations. 
+ let two_thirds = (VALIDATOR_COUNT / 3) * 2; + let attesters = (0..two_thirds).collect(); + harness + .extend_chain( + // To trigger the bug we need the proposer attestation reward to be signed at a block + // that isn't the first in the epoch. + MainnetEthSpec::slots_per_epoch() as usize + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; + + // Add BLS change for the block proposer at slot 33. This sets up a withdrawal for the block proposer. + let proposer_index = harness + .chain + .block_at_slot(harness.get_current_slot(), WhenSlotSkipped::None) + .expect("should not error getting block at slot") + .expect("should find block at slot") + .message() + .proposer_index(); + harness + .add_bls_to_execution_change(proposer_index, Address::from_low_u64_be(proposer_index)) + .unwrap(); + + // Apply two blocks: one to process the BLS change, and another to process the withdrawal. + harness.advance_slot(); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + let earlier_slot = harness.get_current_slot(); + let earlier_block = harness + .chain + .block_at_slot(earlier_slot, WhenSlotSkipped::None) + .expect("should not error getting block at slot") + .expect("should find block at slot"); + + // Extend the chain out a few epochs so we have some chain depth to play with. 
+ harness.advance_slot(); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + let current_slot = harness.get_current_slot(); + let mut state = harness + .chain + .get_state(&earlier_block.state_root(), Some(earlier_slot)) + .expect("should not error getting state") + .expect("should find state"); + + while state.slot() < current_slot { + per_slot_processing(&mut state, None, &harness.spec).expect("should process slot"); + } + + let state_root = state.update_tree_hash_cache().unwrap(); + + // Get an attestation pointed to an old block (where we do not have its shuffling cached). + // Verifying the attestation triggers an inconsistent state replay. + let remaining_attesters = (two_thirds..VALIDATOR_COUNT).collect(); + let (attestation, subnet_id) = harness + .get_unaggregated_attestations( + &AttestationStrategy::SomeValidators(remaining_attesters), + &state, + state_root, + earlier_block.canonical_root(), + current_slot, + ) + .first() + .expect("should have at least one committee") + .first() + .cloned() + .expect("should have at least one attestation in committee"); + + harness + .chain + .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)) + .expect("should gossip verify attestation without checking withdrawals root"); +} + #[tokio::test] async fn attestation_to_finalized_block() { let harness = get_harness(VALIDATOR_COUNT); @@ -1189,3 +1334,198 @@ async fn verify_attestation_for_gossip_doppelganger_detection() { .validator_has_been_observed(epoch, index) .expect("should check if gossip aggregator was observed")); } + +#[tokio::test] +async fn attestation_verification_use_head_state_fork() { + let (harness, spec) = get_harness_capella_spec(VALIDATOR_COUNT); + + // Advance to last block of the pre-Capella fork epoch. Capella is at slot 32. 
+ harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * CAPELLA_FORK_EPOCH - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + // Assert our head is a block at slot 31 in the pre-Capella fork epoch. + let pre_capella_slot = harness.get_current_slot(); + let pre_capella_block = harness + .chain + .block_at_slot(pre_capella_slot, WhenSlotSkipped::Prev) + .expect("should not error getting block at slot") + .expect("should find block at slot"); + assert_eq!(pre_capella_block.fork_name(&spec).unwrap(), ForkName::Merge); + + // Advance slot clock to Capella fork. + harness.advance_slot(); + let first_capella_slot = harness.get_current_slot(); + assert_eq!( + spec.fork_name_at_slot::(first_capella_slot), + ForkName::Capella + ); + + let (state, state_root) = harness.get_current_state_and_root(); + + // Scenario 1: other node signed attestation using the Capella fork epoch. + { + let attesters = (0..VALIDATOR_COUNT / 2).collect::>(); + let capella_fork = spec.fork_for_name(ForkName::Capella).unwrap(); + let committee_attestations = harness + .make_unaggregated_attestations_with_opts( + attesters.as_slice(), + &state, + state_root, + pre_capella_block.canonical_root().into(), + first_capella_slot, + MakeAttestationOptions { + fork: capella_fork, + limit: None, + }, + ) + .0 + .first() + .cloned() + .expect("should have at least one committee"); + let attestations_and_subnets = committee_attestations + .iter() + .map(|(attestation, subnet_id)| (attestation, Some(*subnet_id))); + + assert!( + batch_verify_unaggregated_attestations(attestations_and_subnets, &harness.chain).is_ok(), + "should accept attestations with `data.slot` >= first capella slot signed using the Capella fork" + ); + } + + // Scenario 2: other node forgot to update their node and signed attestations using bellatrix fork + { + let attesters = (VALIDATOR_COUNT / 2..VALIDATOR_COUNT).collect::>(); + let merge_fork = 
spec.fork_for_name(ForkName::Merge).unwrap(); + let committee_attestations = harness + .make_unaggregated_attestations_with_opts( + attesters.as_slice(), + &state, + state_root, + pre_capella_block.canonical_root().into(), + first_capella_slot, + MakeAttestationOptions { + fork: merge_fork, + limit: None, + }, + ) + .0 + .first() + .cloned() + .expect("should have at least one committee"); + let attestations_and_subnets = committee_attestations + .iter() + .map(|(attestation, subnet_id)| (attestation, Some(*subnet_id))); + + let results = + batch_verify_unaggregated_attestations(attestations_and_subnets, &harness.chain) + .expect("should return attestation results"); + let error = results + .into_iter() + .collect::, _>>() + .err() + .expect("should return an error"); + assert!( + matches!(error, Error::InvalidSignature), + "should reject attestations with `data.slot` >= first capella slot signed using the pre-Capella fork" + ); + } +} + +#[tokio::test] +async fn aggregated_attestation_verification_use_head_state_fork() { + let (harness, spec) = get_harness_capella_spec(VALIDATOR_COUNT); + + // Advance to last block of the pre-Capella fork epoch. Capella is at slot 32. + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * CAPELLA_FORK_EPOCH - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + // Assert our head is a block at slot 31 in the pre-Capella fork epoch. + let pre_capella_slot = harness.get_current_slot(); + let pre_capella_block = harness + .chain + .block_at_slot(pre_capella_slot, WhenSlotSkipped::Prev) + .expect("should not error getting block at slot") + .expect("should find block at slot"); + assert_eq!(pre_capella_block.fork_name(&spec).unwrap(), ForkName::Merge); + + // Advance slot clock to Capella fork. 
+ harness.advance_slot(); + let first_capella_slot = harness.get_current_slot(); + assert_eq!( + spec.fork_name_at_slot::(first_capella_slot), + ForkName::Capella + ); + + let (state, state_root) = harness.get_current_state_and_root(); + + // Scenario 1: other node signed attestation using the Capella fork epoch. + { + let attesters = (0..VALIDATOR_COUNT / 2).collect::>(); + let capella_fork = spec.fork_for_name(ForkName::Capella).unwrap(); + let aggregates = harness + .make_attestations_with_opts( + attesters.as_slice(), + &state, + state_root, + pre_capella_block.canonical_root().into(), + first_capella_slot, + MakeAttestationOptions { + fork: capella_fork, + limit: None, + }, + ) + .0 + .into_iter() + .map(|(_, aggregate)| aggregate.expect("should have signed aggregate and proof")) + .collect::>(); + + assert!( + batch_verify_aggregated_attestations(aggregates.iter(), &harness.chain).is_ok(), + "should accept aggregates with `data.slot` >= first capella slot signed using the Capella fork" + ); + } + + // Scenario 2: other node forgot to update their node and signed attestations using bellatrix fork + { + let attesters = (VALIDATOR_COUNT / 2..VALIDATOR_COUNT).collect::>(); + let merge_fork = spec.fork_for_name(ForkName::Merge).unwrap(); + let aggregates = harness + .make_attestations_with_opts( + attesters.as_slice(), + &state, + state_root, + pre_capella_block.canonical_root().into(), + first_capella_slot, + MakeAttestationOptions { + fork: merge_fork, + limit: None, + }, + ) + .0 + .into_iter() + .map(|(_, aggregate)| aggregate.expect("should have signed aggregate and proof")) + .collect::>(); + + let results = batch_verify_aggregated_attestations(aggregates.iter(), &harness.chain) + .expect("should return attestation results"); + let error = results + .into_iter() + .collect::, _>>() + .err() + .expect("should return an error"); + assert!( + matches!(error, Error::InvalidSignature), + "should reject aggregates with `data.slot` >= first capella slot signed 
using the pre-Capella fork" + ); + } +} diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 38a55e2212..c66ed60a9c 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -11,7 +11,8 @@ use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ common::get_indexed_attestation, per_block_processing::{per_block_processing, BlockSignatureStrategy}, - per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, + per_slot_processing, BlockProcessingError, ConsensusContext, StateProcessingStrategy, + VerifyBlockRoot, }; use std::marker::PhantomData; use std::sync::Arc; @@ -1167,6 +1168,7 @@ async fn add_base_block_to_altair_chain() { &mut state, &base_block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &harness.chain.spec, @@ -1305,6 +1307,7 @@ async fn add_altair_block_to_base_chain() { &mut state, &altair_block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &harness.chain.spec, diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 48ac0300c9..b79fc5e407 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -10,3 +10,4 @@ sensitive_url = { path = "../../common/sensitive_url" } eth2 = { path = "../../common/eth2" } serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" +lighthouse_version = { path = "../../common/lighthouse_version" } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index fecf6512ac..255c2fdd19 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -17,6 +17,9 @@ pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; /// This timeout is in accordance with v0.2.0 of 
the [builder specs](https://github.com/flashbots/mev-boost/pull/20). pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000; +/// Default user agent for HTTP requests. +pub const DEFAULT_USER_AGENT: &str = lighthouse_version::VERSION; + #[derive(Clone)] pub struct Timeouts { get_header: Duration, @@ -41,23 +44,23 @@ pub struct BuilderHttpClient { client: reqwest::Client, server: SensitiveUrl, timeouts: Timeouts, + user_agent: String, } impl BuilderHttpClient { - pub fn new(server: SensitiveUrl) -> Result { + pub fn new(server: SensitiveUrl, user_agent: Option) -> Result { + let user_agent = user_agent.unwrap_or(DEFAULT_USER_AGENT.to_string()); + let client = reqwest::Client::builder().user_agent(&user_agent).build()?; Ok(Self { - client: reqwest::Client::new(), + client, server, timeouts: Timeouts::default(), + user_agent, }) } - pub fn new_with_timeouts(server: SensitiveUrl, timeouts: Timeouts) -> Result { - Ok(Self { - client: reqwest::Client::new(), - server, - timeouts, - }) + pub fn get_user_agent(&self) -> &str { + &self.user_agent } async fn get_with_timeout( diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 5fa2fddc3e..5ef1f28fb4 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -250,6 +250,12 @@ where genesis_state_bytes, } => { info!(context.log(), "Starting checkpoint sync"); + if config.chain.genesis_backfill { + info!( + context.log(), + "Blocks will downloaded all the way back to genesis" + ); + } let anchor_state = BeaconState::from_ssz_bytes(&anchor_state_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; @@ -271,6 +277,12 @@ where "Starting checkpoint sync"; "remote_url" => %url, ); + if config.chain.genesis_backfill { + info!( + context.log(), + "Blocks will be downloaded all the way back to genesis" + ); + } let remote = BeaconNodeHttpClient::new( url, @@ -347,12 +359,6 @@ where while block.slot() % slots_per_epoch != 0 { block_slot 
= (block_slot / slots_per_epoch - 1) * slots_per_epoch; - debug!( - context.log(), - "Searching for aligned checkpoint block"; - "block_slot" => block_slot, - ); - debug!( context.log(), "Searching for aligned checkpoint block"; diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1105bc41f6..1ff469fe30 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -142,7 +142,8 @@ pub fn spawn_notifier( .get_anchor_info() .map(|ai| ai.oldest_block_slot) { - sync_distance = current_anchor_slot; + sync_distance = current_anchor_slot + .saturating_sub(beacon_chain.genesis_backfill_slot); speedo // For backfill sync use a fake slot which is the distance we've progressed from the starting `oldest_block_slot`. .observe( @@ -207,14 +208,14 @@ pub fn spawn_notifier( "Downloading historical blocks"; "distance" => distance, "speed" => sync_speed_pretty(speed), - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot))), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))), ); } else { info!( log, "Downloading historical blocks"; "distance" => distance, - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot))), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))), ); } } else if !is_backfilling && last_backfill_log_slot.is_some() { diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index e0dd797bfa..1148f063d8 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -20,9 +20,9 @@ serde = { version = "1.0.116", features = ["derive"] } hex = "0.4.2" types = { path = "../../consensus/types"} merkle_proof = { path = 
"../../consensus/merkle_proof"} -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" parking_lot = "0.12.0" slog = "2.5.2" superstruct = "0.5.0" diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 1b687a8b60..3ed7ba65d6 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -13,7 +13,7 @@ slog = "2.5.2" futures = "0.3.7" sensitive_url = { path = "../../common/sensitive_url" } reqwest = { version = "0.11.0", features = ["json","stream"] } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } warp = { version = "0.3.2", features = ["tls"] } @@ -22,15 +22,15 @@ environment = { path = "../../lighthouse/environment" } bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" -eth2_ssz = "0.4.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ssz_types = "0.5.0" eth2 = { path = "../../common/eth2" } state_processing = { path = "../../consensus/state_processing" } superstruct = "0.6.0" lru = "0.7.1" exit-future = "0.2.0" -tree_hash = "0.4.1" -tree_hash_derive = { path = "../../consensus/tree_hash_derive"} +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" parking_lot = "0.12.0" slot_clock = { path = "../../common/slot_clock" } tempfile = "3.1.0" diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 3ecb36d093..4d2eb565e1 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -127,11 +127,11 @@ pub enum BlockByNumberQuery<'a> { pub struct ExecutionBlock { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, - #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + #[serde(rename = "number", with = "serde_utils::u64_hex_be")] pub block_number: 
u64, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, } @@ -157,13 +157,13 @@ pub struct ExecutionBlockWithTransactions { pub logs_bloom: FixedVector, #[serde(alias = "mixHash")] pub prev_randao: Hash256, - #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + #[serde(rename = "number", with = "serde_utils::u64_hex_be")] pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 993957450b..029866d95b 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -917,7 +917,7 @@ impl HttpJsonRpc { ) -> Result>>, Error> { #[derive(Serialize)] #[serde(transparent)] - struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] u64); + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); let params = json!([Quantity(start), Quantity(count)]); let response: Vec>> = self @@ -1187,7 +1187,7 @@ mod test { transactions, ..<_>::default() }); - let json = serde_json::to_value(&ep)?; + let json = serde_json::to_value(ep)?; Ok(json.get("transactions").unwrap().clone()) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 6d33bbabe2..d85d294c83 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ 
b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -35,7 +35,7 @@ pub struct JsonResponseBody { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(transparent)] -pub struct TransparentJsonPayloadId(#[serde(with = "eth2_serde_utils::bytes_8_hex")] pub PayloadId); +pub struct TransparentJsonPayloadId(#[serde(with = "serde_utils::bytes_8_hex")] pub PayloadId); impl From for TransparentJsonPayloadId { fn from(id: PayloadId) -> Self { @@ -56,7 +56,7 @@ pub type JsonPayloadIdRequest = TransparentJsonPayloadId; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonPayloadIdResponse { - #[serde(with = "eth2_serde_utils::bytes_8_hex")] + #[serde(with = "serde_utils::bytes_8_hex")] pub payload_id: PayloadId, } @@ -79,17 +79,17 @@ pub struct JsonExecutionPayload { #[serde(with = "serde_logs_bloom")] pub logs_bloom: FixedVector, pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] @@ -226,7 +226,7 @@ pub struct JsonGetPayloadResponse { pub execution_payload: JsonExecutionPayloadV1, #[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))] pub execution_payload: JsonExecutionPayloadV2, - #[serde(with = "eth2_serde_utils::u256_hex_be")] + 
#[serde(with = "serde_utils::u256_hex_be")] pub block_value: Uint256, } @@ -252,12 +252,12 @@ impl From> for GetPayloadResponse { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonWithdrawal { - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub index: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub validator_index: u64, pub address: Address, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub amount: u64, } @@ -295,7 +295,7 @@ impl From for Withdrawal { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(untagged)] pub struct JsonPayloadAttributes { - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, pub prev_randao: Hash256, pub suggested_fee_recipient: Address, @@ -520,18 +520,18 @@ impl From> for ExecutionPayloadBodyV1< #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { - #[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub terminal_block_number: u64, } /// Serializes the `logs_bloom` field of an `ExecutionPayload`. 
pub mod serde_logs_bloom { use super::*; - use eth2_serde_utils::hex::PrefixedHexVisitor; use serde::{Deserializer, Serializer}; + use serde_utils::hex::PrefixedHexVisitor; pub fn serialize(bytes: &FixedVector, serializer: S) -> Result where diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2c2d8c7dce..16a7f3665f 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -103,6 +103,8 @@ pub enum Error { transactions_root: Hash256, }, InvalidJWTSecret(String), + InvalidForkForPayload, + InvalidPayloadBody(String), BeaconStateError(BeaconStateError), } @@ -228,6 +230,8 @@ pub struct Config { pub execution_endpoints: Vec, /// Endpoint urls for services providing the builder api. pub builder_url: Option, + /// User agent to send with requests to the builder API. + pub builder_user_agent: Option, /// JWT secrets for the above endpoints running the engine api. pub secret_files: Vec, /// The default fee recipient to use on the beacon node if none if provided from @@ -258,6 +262,7 @@ impl ExecutionLayer { let Config { execution_endpoints: urls, builder_url, + builder_user_agent, secret_files, suggested_fee_recipient, jwt_id, @@ -318,12 +323,17 @@ impl ExecutionLayer { let builder = builder_url .map(|url| { - let builder_client = BuilderHttpClient::new(url.clone()).map_err(Error::Builder); - info!(log, - "Connected to external block builder"; + let builder_client = BuilderHttpClient::new(url.clone(), builder_user_agent) + .map_err(Error::Builder)?; + + info!( + log, + "Using external block builder"; "builder_url" => ?url, - "builder_profit_threshold" => builder_profit_threshold); - builder_client + "builder_profit_threshold" => builder_profit_threshold, + "local_user_agent" => builder_client.get_user_agent(), + ); + Ok::<_, Error>(builder_client) }) .transpose()?; @@ -1602,14 +1612,59 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - pub async fn get_payload_by_block_hash( + 
/// Fetch a full payload from the execution node. + /// + /// This will fail if the payload is not from the finalized portion of the chain. + pub async fn get_payload_for_header( + &self, + header: &ExecutionPayloadHeader, + fork: ForkName, + ) -> Result>, Error> { + let hash = header.block_hash(); + let block_number = header.block_number(); + + // Handle default payload body. + if header.block_hash() == ExecutionBlockHash::zero() { + let payload = match fork { + ForkName::Merge => ExecutionPayloadMerge::default().into(), + ForkName::Capella => ExecutionPayloadCapella::default().into(), + ForkName::Base | ForkName::Altair => { + return Err(Error::InvalidForkForPayload); + } + }; + return Ok(Some(payload)); + } + + // Use efficient payload bodies by range method if supported. + let capabilities = self.get_engine_capabilities(None).await?; + if capabilities.get_payload_bodies_by_range_v1 { + let mut payload_bodies = self.get_payload_bodies_by_range(block_number, 1).await?; + + if payload_bodies.len() != 1 { + return Ok(None); + } + + let opt_payload_body = payload_bodies.pop().flatten(); + opt_payload_body + .map(|body| { + body.to_payload(header.clone()) + .map_err(Error::InvalidPayloadBody) + }) + .transpose() + } else { + // Fall back to eth_blockByHash. 
+ self.get_payload_by_hash_legacy(hash, fork).await + } + } + + pub async fn get_payload_by_hash_legacy( &self, hash: ExecutionBlockHash, fork: ForkName, ) -> Result>, Error> { self.engine() .request(|engine| async move { - self.get_payload_by_block_hash_from_engine(engine, hash, fork) + self.get_payload_by_hash_from_engine(engine, hash, fork) .await }) .await @@ -1617,7 +1672,7 @@ impl ExecutionLayer { .map_err(Error::EngineError) } - async fn get_payload_by_block_hash_from_engine( + async fn get_payload_by_hash_from_engine( &self, engine: &Engine, hash: ExecutionBlockHash, @@ -1630,7 +1685,7 @@ impl ExecutionLayer { ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())), ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant( - format!("called get_payload_by_block_hash_from_engine with {}", fork), + format!("called get_payload_by_hash_from_engine with {}", fork), )), }; } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index bda0c782dc..e3c58cfc27 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -362,7 +362,7 @@ pub async fn handle_rpc( ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1 => { #[derive(Deserialize)] #[serde(transparent)] - struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] pub u64); + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] pub u64); let start = get_param::(params, 0) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? 
diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 87c56d360b..8a7d224963 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -16,9 +16,9 @@ eth1 = { path = "../eth1"} rayon = "1.4.1" state_processing = { path = "../../consensus/state_processing" } merkle_proof = { path = "../../consensus/merkle_proof" } -eth2_ssz = "0.4.1" -eth2_hashing = "0.3.0" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +ethereum_hashing = "1.0.0-beta.2" +tree_hash = "0.5.0" tokio = { version = "1.14.0", features = ["full"] } slog = "2.5.2" int_to_bytes = { path = "../../consensus/int_to_bytes" } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index 122ca8eda6..d012983430 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -1,5 +1,5 @@ use crate::common::genesis_deposits; -use eth2_hashing::hash; +use ethereum_hashing::hash; use rayon::prelude::*; use ssz::Encode; use state_processing::initialize_beacon_state_from_eth1; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 0aa626be0c..8f253e2f24 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -24,7 +24,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" warp_utils = { path = "../../common/warp_utils" } slot_clock = { path = "../../common/slot_clock" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" bs58 = "0.4.0" futures = "0.3.8" execution_layer = {path = "../execution_layer"} @@ -32,21 +32,21 @@ parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" -tree_hash = "0.4.1" +tree_hash = "0.5.0" sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" operation_pool = { path = 
"../operation_pool" } +sensitive_url = { path = "../../common/sensitive_url" } +unused_port = {path = "../../common/unused_port"} +logging = { path = "../../common/logging" } +store = { path = "../store" } [dev-dependencies] -store = { path = "../store" } environment = { path = "../../lighthouse/environment" } -sensitive_url = { path = "../../common/sensitive_url" } -logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } -unused_port = {path = "../../common/unused_port"} genesis = { path = "../genesis" } [[test]] diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index ca68d4d04c..3e7d8d5e31 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -77,8 +77,8 @@ pub fn get_attestation_performance( // query is within permitted bounds to prevent potential OOM errors. if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS { return Err(custom_bad_request(format!( - "end_epoch must not exceed start_epoch by more than 100 epochs. start: {}, end: {}", - query.start_epoch, query.end_epoch + "end_epoch must not exceed start_epoch by more than {} epochs. start: {}, end: {}", + MAX_REQUEST_RANGE_EPOCHS, query.start_epoch, query.end_epoch ))); } diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 9febae5b19..5c3e420839 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -114,8 +114,10 @@ fn compute_historic_attester_duties( )?; (state, execution_optimistic) } else { - StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) - .state(chain)? 
+ let (state, execution_optimistic, _finalized) = + StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(chain)?; + (state, execution_optimistic) }; // Sanity-check the state lookup. diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 5c785fe651..f1a42b8744 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -4,13 +4,15 @@ use eth2::types::BlockId as CoreBlockId; use std::fmt; use std::str::FromStr; use std::sync::Arc; -use types::{Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; +use types::{EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. #[derive(Debug)] pub struct BlockId(pub CoreBlockId); +type Finalized = bool; + impl BlockId { pub fn from_slot(slot: Slot) -> Self { Self(CoreBlockId::Slot(slot)) @@ -24,7 +26,7 @@ impl BlockId { pub fn root( &self, chain: &BeaconChain, - ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { + ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -34,22 +36,23 @@ impl BlockId { Ok(( cached_head.head_block_root(), execution_status.is_optimistic_or_invalid(), + false, )) } - CoreBlockId::Genesis => Ok((chain.genesis_block_root, false)), + CoreBlockId::Genesis => Ok((chain.genesis_block_root, false, true)), CoreBlockId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); let (_slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; - Ok((finalized_checkpoint.root, execution_optimistic)) + Ok((finalized_checkpoint.root, execution_optimistic, true)) } CoreBlockId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); let 
(_slot, execution_optimistic) = checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; - Ok((justified_checkpoint.root, execution_optimistic)) + Ok((justified_checkpoint.root, execution_optimistic, false)) } CoreBlockId::Slot(slot) => { let execution_optimistic = chain @@ -66,7 +69,14 @@ impl BlockId { )) }) })?; - Ok((root, execution_optimistic)) + let finalized = *slot + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + Ok((root, execution_optimistic, finalized)) } CoreBlockId::Root(root) => { // This matches the behaviour of other consensus clients (e.g. Teku). @@ -88,7 +98,20 @@ impl BlockId { .is_optimistic_or_invalid_block(root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - Ok((*root, execution_optimistic)) + let blinded_block = chain + .get_blinded_block(root) + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + })?; + let block_slot = blinded_block.slot(); + let finalized = chain + .is_finalized_block(root, block_slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok((*root, execution_optimistic, finalized)) } else { Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -103,7 +126,14 @@ impl BlockId { pub fn blinded_block( &self, chain: &BeaconChain, - ) -> Result<(SignedBlindedBeaconBlock, ExecutionOptimistic), warp::Rejection> { + ) -> Result< + ( + SignedBlindedBeaconBlock, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -113,10 +143,11 @@ impl BlockId { Ok(( cached_head.snapshot.beacon_block.clone_as_blinded(), execution_status.is_optimistic_or_invalid(), + false, )) } CoreBlockId::Slot(slot) => { - let (root, execution_optimistic) = self.root(chain)?; + let 
(root, execution_optimistic, finalized) = self.root(chain)?; chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -128,7 +159,7 @@ impl BlockId { slot ))); } - Ok((block, execution_optimistic)) + Ok((block, execution_optimistic, finalized)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -137,7 +168,7 @@ impl BlockId { }) } _ => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; let block = chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -149,7 +180,7 @@ impl BlockId { )) }) })?; - Ok((block, execution_optimistic)) + Ok((block, execution_optimistic, finalized)) } } } @@ -158,7 +189,14 @@ impl BlockId { pub async fn full_block( &self, chain: &BeaconChain, - ) -> Result<(Arc>, ExecutionOptimistic), warp::Rejection> { + ) -> Result< + ( + Arc>, + ExecutionOptimistic, + Finalized, + ), + warp::Rejection, + > { match &self.0 { CoreBlockId::Head => { let (cached_head, execution_status) = chain @@ -168,10 +206,11 @@ impl BlockId { Ok(( cached_head.snapshot.beacon_block.clone(), execution_status.is_optimistic_or_invalid(), + false, )) } CoreBlockId::Slot(slot) => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_block(&root) .await @@ -184,7 +223,7 @@ impl BlockId { slot ))); } - Ok((Arc::new(block), execution_optimistic)) + Ok((Arc::new(block), execution_optimistic, finalized)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -193,14 +232,14 @@ impl BlockId { }) } _ => { - let (root, execution_optimistic) = self.root(chain)?; + let (root, execution_optimistic, finalized) = self.root(chain)?; chain .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|block_opt| { block_opt - .map(|block| (Arc::new(block), execution_optimistic)) + 
.map(|block| (Arc::new(block), execution_optimistic, finalized)) .ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "beacon block with root {}", diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index d3d99c5c9f..096d99f3f1 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -18,6 +18,7 @@ mod standard_block_rewards; mod state_id; mod sync_committee_rewards; mod sync_committees; +pub mod test_utils; mod ui; mod validator_inclusion; mod version; @@ -30,7 +31,8 @@ use beacon_chain::{ pub use block_id::BlockId; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ - self as api_types, EndpointVersion, SkipRandaoVerification, ValidatorId, ValidatorStatus, + self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SkipRandaoVerification, + ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -62,7 +64,7 @@ use types::{ SyncCommitteeMessage, SyncContributionData, }; use version::{ - add_consensus_version_header, execution_optimistic_fork_versioned_response, + add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response, fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2, }; use warp::http::StatusCode; @@ -521,12 +523,13 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (root, execution_optimistic) = state_id.root(&chain)?; - + let (root, execution_optimistic, finalized) = state_id.root(&chain)?; Ok(root) .map(api_types::RootData::from) .map(api_types::GenericResponse::from) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }); @@ -537,11 +540,12 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: 
Arc>| { blocking_json_task(move || { - let (fork, execution_optimistic) = - state_id.fork_and_execution_optimistic(&chain)?; - Ok(api_types::ExecutionOptimisticResponse { + let (fork, execution_optimistic, finalized) = + state_id.fork_and_execution_optimistic_and_finalized(&chain)?; + Ok(api_types::ExecutionOptimisticFinalizedResponse { data: fork, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }); @@ -553,23 +557,26 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id.map_state_and_execution_optimistic( - &chain, - |state, execution_optimistic| { - Ok(( - api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }, - execution_optimistic, - )) - }, - )?; + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + finalized, + )) + }, + )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }); @@ -586,10 +593,10 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { Ok(( state 
.validators() @@ -617,13 +624,15 @@ pub fn serve( }) .collect::>(), execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -641,10 +650,10 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let epoch = state.current_epoch(); let far_future_epoch = chain.spec.far_future_epoch; @@ -694,13 +703,15 @@ pub fn serve( }) .collect::>(), execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -719,10 +730,10 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { state.validators().iter().position(|v| v.pubkey == *pubkey) @@ -756,13 +767,15 @@ pub fn serve( )) })?, execution_optimistic, + finalized, )) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -777,10 +790,10 @@ pub fn serve( .and_then( |state_id: StateId, chain: 
Arc>, query: api_types::CommitteesQuery| { blocking_json_task(move || { - let (data, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let current_epoch = state.current_epoch(); let epoch = query.epoch.unwrap_or(current_epoch); @@ -936,12 +949,13 @@ pub fn serve( } } - Ok((response, execution_optimistic)) + Ok((response, execution_optimistic, finalized)) }, )?; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { data, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }) }) }, @@ -958,10 +972,10 @@ pub fn serve( chain: Arc>, query: api_types::SyncCommitteesQuery| { blocking_json_task(move || { - let (sync_committee, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (sync_committee, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let current_epoch = state.current_epoch(); let epoch = query.epoch.unwrap_or(current_epoch); Ok(( @@ -971,9 +985,10 @@ pub fn serve( .map_err(|e| match e { BeaconStateError::SyncCommitteeNotKnown { .. 
} => { warp_utils::reject::custom_bad_request(format!( - "state at epoch {} has no sync committee for epoch {}", - current_epoch, epoch - )) + "state at epoch {} has no \ + sync committee for epoch {}", + current_epoch, epoch + )) } BeaconStateError::IncorrectStateVariant => { warp_utils::reject::custom_bad_request(format!( @@ -984,6 +999,7 @@ pub fn serve( e => warp_utils::reject::beacon_state_error(e), })?, execution_optimistic, + finalized, )) }, )?; @@ -1005,7 +1021,7 @@ pub fn serve( }; Ok(api_types::GenericResponse::from(response) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); @@ -1019,23 +1035,23 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, query: api_types::RandaoQuery| { blocking_json_task(move || { - let (randao, execution_optimistic) = state_id - .map_state_and_execution_optimistic( + let (randao, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); let randao = *state.get_randao_mix(epoch).map_err(|e| { warp_utils::reject::custom_bad_request(format!( "epoch out of range: {e:?}" )) })?; - Ok((randao, execution_optimistic)) + Ok((randao, execution_optimistic, finalized)) }, )?; Ok( api_types::GenericResponse::from(api_types::RandaoMix { randao }) - .add_execution_optimistic(execution_optimistic), + .add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }, @@ -1057,72 +1073,73 @@ pub fn serve( .and_then( |query: api_types::HeadersQuery, chain: Arc>| { blocking_json_task(move || { - let (root, block, execution_optimistic) = match (query.slot, query.parent_root) - { - // No query parameters, return the canonical head block. 
- (None, None) => { - let (cached_head, execution_status) = chain - .canonical_head - .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; - ( - cached_head.head_block_root(), - cached_head.snapshot.beacon_block.clone_as_blinded(), - execution_status.is_optimistic_or_invalid(), - ) - } - // Only the parent root parameter, do a forwards-iterator lookup. - (None, Some(parent_root)) => { - let (parent, execution_optimistic) = - BlockId::from_root(parent_root).blinded_block(&chain)?; - let (root, _slot) = chain - .forwards_iter_block_roots(parent.slot()) - .map_err(warp_utils::reject::beacon_chain_error)? - // Ignore any skip-slots immediately following the parent. - .find(|res| { - res.as_ref().map_or(false, |(root, _)| *root != parent_root) - }) - .transpose() - .map_err(warp_utils::reject::beacon_chain_error)? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "child of block with root {}", - parent_root - )) - })?; - - BlockId::from_root(root) - .blinded_block(&chain) - // Ignore this `execution_optimistic` since the first value has - // more information about the original request. - .map(|(block, _execution_optimistic)| { - (root, block, execution_optimistic) - })? - } - // Slot is supplied, search by slot and optionally filter by - // parent root. - (Some(slot), parent_root_opt) => { - let (root, execution_optimistic) = - BlockId::from_slot(slot).root(&chain)?; - // Ignore the second `execution_optimistic`, the first one is the - // most relevant since it knows that we queried by slot. - let (block, _execution_optimistic) = - BlockId::from_root(root).blinded_block(&chain)?; - - // If the parent root was supplied, check that it matches the block - // obtained via a slot lookup. 
- if let Some(parent_root) = parent_root_opt { - if block.parent_root() != parent_root { - return Err(warp_utils::reject::custom_not_found(format!( - "no canonical block at slot {} with parent root {}", - slot, parent_root - ))); - } + let (root, block, execution_optimistic, finalized) = + match (query.slot, query.parent_root) { + // No query parameters, return the canonical head block. + (None, None) => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + ( + cached_head.head_block_root(), + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + false, + ) } + // Only the parent root parameter, do a forwards-iterator lookup. + (None, Some(parent_root)) => { + let (parent, execution_optimistic, _parent_finalized) = + BlockId::from_root(parent_root).blinded_block(&chain)?; + let (root, _slot) = chain + .forwards_iter_block_roots(parent.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? + // Ignore any skip-slots immediately following the parent. + .find(|res| { + res.as_ref().map_or(false, |(root, _)| *root != parent_root) + }) + .transpose() + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "child of block with root {}", + parent_root + )) + })?; - (root, block, execution_optimistic) - } - }; + BlockId::from_root(root) + .blinded_block(&chain) + // Ignore this `execution_optimistic` since the first value has + // more information about the original request. + .map(|(block, _execution_optimistic, finalized)| { + (root, block, execution_optimistic, finalized) + })? + } + // Slot is supplied, search by slot and optionally filter by + // parent root. 
+ (Some(slot), parent_root_opt) => { + let (root, execution_optimistic, finalized) = + BlockId::from_slot(slot).root(&chain)?; + // Ignore the second `execution_optimistic`, the first one is the + // most relevant since it knows that we queried by slot. + let (block, _execution_optimistic, _finalized) = + BlockId::from_root(root).blinded_block(&chain)?; + + // If the parent root was supplied, check that it matches the block + // obtained via a slot lookup. + if let Some(parent_root) = parent_root_opt { + if block.parent_root() != parent_root { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); + } + } + + (root, block, execution_optimistic, finalized) + } + }; let data = api_types::BlockHeaderData { root, @@ -1134,7 +1151,7 @@ pub fn serve( }; Ok(api_types::GenericResponse::from(vec![data]) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); @@ -1152,10 +1169,10 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (root, execution_optimistic) = block_id.root(&chain)?; + let (root, execution_optimistic, finalized) = block_id.root(&chain)?; // Ignore the second `execution_optimistic` since the first one has more // information about the original request. 
- let (block, _execution_optimistic) = + let (block, _execution_optimistic, _finalized) = BlockId::from_root(root).blinded_block(&chain)?; let canonical = chain @@ -1172,8 +1189,9 @@ pub fn serve( }, }; - Ok(api_types::ExecutionOptimisticResponse { + Ok(api_types::ExecutionOptimisticFinalizedResponse { execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), data, }) }) @@ -1262,7 +1280,8 @@ pub fn serve( chain: Arc>, accept_header: Option| { async move { - let (block, execution_optimistic) = block_id.full_block(&chain).await?; + let (block, execution_optimistic, finalized) = + block_id.full_block(&chain).await?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1278,10 +1297,11 @@ pub fn serve( e )) }), - _ => execution_optimistic_fork_versioned_response( + _ => execution_optimistic_finalized_fork_versioned_response( endpoint_version, fork_name, execution_optimistic, + finalized, block, ) .map(|res| warp::reply::json(&res).into_response()), @@ -1298,12 +1318,11 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; - + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; Ok(api_types::GenericResponse::from(api_types::RootData::from( block.canonical_root(), )) - .add_execution_optimistic(execution_optimistic)) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }); @@ -1314,11 +1333,10 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; - + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; Ok( api_types::GenericResponse::from(block.message().body().attestations().clone()) - .add_execution_optimistic(execution_optimistic), + 
.add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }); @@ -1336,7 +1354,8 @@ pub fn serve( chain: Arc>, accept_header: Option| { blocking_response_task(move || { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1354,10 +1373,11 @@ pub fn serve( }), _ => { // Post as a V2 endpoint so we return the fork version. - execution_optimistic_fork_versioned_response( + execution_optimistic_finalized_fork_versioned_response( V2, fork_name, execution_optimistic, + finalized, block, ) .map(|res| warp::reply::json(&res).into_response()) @@ -1898,11 +1918,13 @@ pub fn serve( .and(warp::path::end()) .and_then(|chain: Arc>, block_id: BlockId| { blocking_json_task(move || { - let (rewards, execution_optimistic) = + let (rewards, execution_optimistic, finalized) = standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; Ok(rewards) .map(api_types::GenericResponse::from) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }); @@ -1981,14 +2003,16 @@ pub fn serve( validators: Vec, log: Logger| { blocking_json_task(move || { - let (rewards, execution_optimistic) = + let (rewards, execution_optimistic, finalized) = sync_committee_rewards::compute_sync_committee_rewards( chain, block_id, validators, log, )?; Ok(rewards) .map(api_types::GenericResponse::from) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); @@ -2071,7 +2095,7 @@ pub fn serve( // We can ignore the optimistic status for the "fork" since it's a // specification constant that doesn't change across competing heads of the // beacon chain. 
- let (state, _execution_optimistic) = state_id.state(&chain)?; + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -2089,16 +2113,17 @@ pub fn serve( )) }) } - _ => state_id.map_state_and_execution_optimistic( + _ => state_id.map_state_and_execution_optimistic_and_finalized( &chain, - |state, execution_optimistic| { + |state, execution_optimistic, finalized| { let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - let res = execution_optimistic_fork_versioned_response( + let res = execution_optimistic_finalized_fork_versioned_response( endpoint_version, fork_name, execution_optimistic, + finalized, &state, )?; Ok(add_consensus_version_header( @@ -2148,6 +2173,54 @@ pub fn serve( }, ); + // GET debug/fork_choice + let get_debug_fork_choice = eth_v1 + .and(warp::path("debug")) + .and(warp::path("fork_choice")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); + + let proto_array = beacon_fork_choice.proto_array().core_proto_array(); + + let fork_choice_nodes = proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; + + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node.justified_checkpoint.epoch, + finalized_epoch: node.finalized_checkpoint.epoch, + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + } + }) + .collect::>(); + Ok(ForkChoice { + justified_checkpoint: proto_array.justified_checkpoint, + 
finalized_checkpoint: proto_array.finalized_checkpoint, + fork_choice_nodes, + }) + }) + }); + /* * node */ @@ -3430,7 +3503,7 @@ pub fn serve( .and_then(|state_id: StateId, chain: Arc>| { blocking_response_task(move || { // This debug endpoint provides no indication of optimistic status. - let (state, _execution_optimistic) = state_id.state(&chain)?; + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; Response::builder() .status(200) .header("Content-Type", "application/ssz") @@ -3676,6 +3749,7 @@ pub fn serve( .uor(get_config_deposit_contract) .uor(get_debug_beacon_states) .uor(get_debug_beacon_heads) + .uor(get_debug_fork_choice) .uor(get_node_identity) .uor(get_node_version) .uor(get_node_syncing) diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 877d64e20f..7e946b89e7 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -209,7 +209,9 @@ fn compute_historic_proposer_duties( .map_err(warp_utils::reject::beacon_chain_error)?; (state, execution_optimistic) } else { - StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + let (state, execution_optimistic, _finalized) = + StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?; + (state, execution_optimistic) }; // Ensure the state lookup was correct. 
diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs index b3c90d08a4..de7e5eb7d3 100644 --- a/beacon_node/http_api/src/standard_block_rewards.rs +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -10,8 +10,8 @@ use warp_utils::reject::beacon_chain_error; pub fn compute_beacon_block_rewards( chain: Arc>, block_id: BlockId, -) -> Result<(StandardBlockReward, ExecutionOptimistic), warp::Rejection> { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; +) -> Result<(StandardBlockReward, ExecutionOptimistic, bool), warp::Rejection> { + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; let block_ref = block.message(); @@ -23,5 +23,5 @@ pub fn compute_beacon_block_rewards( .compute_beacon_block_reward(block_ref, block_root, &mut state) .map_err(beacon_chain_error)?; - Ok((rewards, execution_optimistic)) + Ok((rewards, execution_optimistic, finalized)) } diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 44354217bc..9e4aadef17 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -10,6 +10,9 @@ use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; #[derive(Debug)] pub struct StateId(pub CoreStateId); +// More clarity when returning if the state is finalized or not in the root function. 
+type Finalized = bool; + impl StateId { pub fn from_slot(slot: Slot) -> Self { Self(CoreStateId::Slot(slot)) @@ -19,8 +22,8 @@ impl StateId { pub fn root( &self, chain: &BeaconChain, - ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { - let (slot, execution_optimistic) = match &self.0 { + ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> { + let (slot, execution_optimistic, finalized) = match &self.0 { CoreStateId::Head => { let (cached_head, execution_status) = chain .canonical_head @@ -29,24 +32,36 @@ impl StateId { return Ok(( cached_head.head_state_root(), execution_status.is_optimistic_or_invalid(), + false, )); } - CoreStateId::Genesis => return Ok((chain.genesis_state_root, false)), + CoreStateId::Genesis => return Ok((chain.genesis_state_root, false, true)), CoreStateId::Finalized => { let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); - checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)? + let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; + (slot, execution_optimistic, true) } CoreStateId::Justified => { let justified_checkpoint = chain.canonical_head.cached_head().justified_checkpoint(); - checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)? 
+ let (slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; + (slot, execution_optimistic, false) } CoreStateId::Slot(slot) => ( *slot, chain .is_optimistic_or_invalid_head() .map_err(warp_utils::reject::beacon_chain_error)?, + *slot + <= chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), ), CoreStateId::Root(root) => { if let Some(hot_summary) = chain @@ -61,7 +76,10 @@ impl StateId { .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - return Ok((*root, execution_optimistic)); + let finalized = chain + .is_finalized_state(root, hot_summary.slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic, finalized)); } else if let Some(_cold_state_slot) = chain .store .load_cold_state_slot(root) @@ -77,7 +95,7 @@ impl StateId { .is_optimistic_or_invalid_block_no_fallback(&finalized_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; - return Ok((*root, execution_optimistic)); + return Ok((*root, execution_optimistic, true)); } else { return Err(warp_utils::reject::custom_not_found(format!( "beacon state for state root {}", @@ -94,7 +112,7 @@ impl StateId { warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) })?; - Ok((root, execution_optimistic)) + Ok((root, execution_optimistic, finalized)) } /// Return the `fork` field of the state identified by `self`. 
@@ -103,9 +121,25 @@ impl StateId { &self, chain: &BeaconChain, ) -> Result<(Fork, bool), warp::Rejection> { - self.map_state_and_execution_optimistic(chain, |state, execution_optimistic| { - Ok((state.fork(), execution_optimistic)) - }) + self.map_state_and_execution_optimistic_and_finalized( + chain, + |state, execution_optimistic, _finalized| Ok((state.fork(), execution_optimistic)), + ) + } + + /// Return the `fork` field of the state identified by `self`. + /// Also returns the `execution_optimistic` value of the state. + /// Also returns the `finalized` value of the state. + pub fn fork_and_execution_optimistic_and_finalized( + &self, + chain: &BeaconChain, + ) -> Result<(Fork, bool, bool), warp::Rejection> { + self.map_state_and_execution_optimistic_and_finalized( + chain, + |state, execution_optimistic, finalized| { + Ok((state.fork(), execution_optimistic, finalized)) + }, + ) } /// Convenience function to compute `fork` when `execution_optimistic` isn't desired. @@ -121,8 +155,8 @@ impl StateId { pub fn state( &self, chain: &BeaconChain, - ) -> Result<(BeaconState, ExecutionOptimistic), warp::Rejection> { - let ((state_root, execution_optimistic), slot_opt) = match &self.0 { + ) -> Result<(BeaconState, ExecutionOptimistic, Finalized), warp::Rejection> { + let ((state_root, execution_optimistic, finalized), slot_opt) = match &self.0 { CoreStateId::Head => { let (cached_head, execution_status) = chain .canonical_head @@ -134,6 +168,7 @@ impl StateId { .beacon_state .clone_with_only_committee_caches(), execution_status.is_optimistic_or_invalid(), + false, )); } CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), @@ -152,24 +187,25 @@ impl StateId { }) })?; - Ok((state, execution_optimistic)) + Ok((state, execution_optimistic, finalized)) } /// Map a function across the `BeaconState` identified by `self`. /// - /// The optimistic status of the requested state is also provided to the `func` closure. 
+ /// The optimistic and finalization status of the requested state is also provided to the `func` + /// closure. /// /// This function will avoid instantiating/copying a new state when `self` points to the head /// of the chain. - pub fn map_state_and_execution_optimistic( + pub fn map_state_and_execution_optimistic_and_finalized( &self, chain: &BeaconChain, func: F, ) -> Result where - F: Fn(&BeaconState, bool) -> Result, + F: Fn(&BeaconState, bool, bool) -> Result, { - let (state, execution_optimistic) = match &self.0 { + let (state, execution_optimistic, finalized) = match &self.0 { CoreStateId::Head => { let (head, execution_status) = chain .canonical_head @@ -178,12 +214,13 @@ impl StateId { return func( &head.snapshot.beacon_state, execution_status.is_optimistic_or_invalid(), + false, ); } _ => self.state(chain)?, }; - func(&state, execution_optimistic) + func(&state, execution_optimistic, finalized) } } diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index cefa98db41..68a06b1ce8 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -13,8 +13,8 @@ pub fn compute_sync_committee_rewards( block_id: BlockId, validators: Vec, log: Logger, -) -> Result<(Option>, ExecutionOptimistic), warp::Rejection> { - let (block, execution_optimistic) = block_id.blinded_block(&chain)?; +) -> Result<(Option>, ExecutionOptimistic, bool), warp::Rejection> { + let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; let mut state = get_state_before_applying_block(chain.clone(), &block)?; @@ -44,7 +44,7 @@ pub fn compute_sync_committee_rewards( ) }; - Ok((data, execution_optimistic)) + Ok((data, execution_optimistic, finalized)) } pub fn get_state_before_applying_block( diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/src/test_utils.rs similarity index 96% rename from 
beacon_node/http_api/tests/common.rs rename to beacon_node/http_api/src/test_utils.rs index 3e34bafe84..8dc9be7dd4 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,3 +1,4 @@ +use crate::{Config, Context}; use beacon_chain::{ test_utils::{ BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType, @@ -6,7 +7,6 @@ use beacon_chain::{ }; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; -use http_api::{Config, Context}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, libp2p::{ @@ -155,6 +155,7 @@ pub async fn create_api_server_on_port( None, meta_data, vec![], + false, &log, )); @@ -182,7 +183,7 @@ pub async fn create_api_server_on_port( let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); - let context = Arc::new(Context { + let ctx = Arc::new(Context { config: Config { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -193,19 +194,19 @@ pub async fn create_api_server_on_port( data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, }, - chain: Some(chain.clone()), + chain: Some(chain), network_senders: Some(network_senders), network_globals: Some(network_globals), eth1_service: Some(eth1_service), log, }); - let ctx = context.clone(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); let server_shutdown = async { // It's not really interesting why this triggered, just that it happened. 
let _ = shutdown_rx.await; }; - let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap(); ApiServer { server, diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index e8280a796a..616745dbef 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -75,15 +75,15 @@ pub fn get_validator_count( #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorInfoRequestData { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] indices: Vec, } #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorInfoValues { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] epoch: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] total_balance: u64, } @@ -165,6 +165,7 @@ pub struct ValidatorMetrics { attestation_target_hits: u64, attestation_target_misses: u64, attestation_target_hit_percentage: f64, + latest_attestation_inclusion_distance: u64, } #[derive(PartialEq, Serialize, Deserialize)] @@ -210,6 +211,8 @@ pub fn post_validator_monitor_metrics( let attestation_head_misses = val_metrics.attestation_head_misses; let attestation_target_hits = val_metrics.attestation_target_hits; let attestation_target_misses = val_metrics.attestation_target_misses; + let latest_attestation_inclusion_distance = + val_metrics.latest_attestation_inclusion_distance; drop(val_metrics); let attestations = attestation_hits + attestation_misses; @@ -242,6 +245,7 @@ pub fn post_validator_monitor_metrics( attestation_target_hits, attestation_target_misses, attestation_target_hit_percentage, + latest_attestation_inclusion_distance, }; validators.insert(id.clone(), metrics); diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index 
917e85e649..f22ced1e69 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -18,7 +18,7 @@ fn end_of_epoch_state( let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); // The execution status is not returned, any functions which rely upon this method might return // optimistic information without explicitly declaring so. - let (state, _execution_status) = StateId::from_slot(target_slot).state(chain)?; + let (state, _execution_status, _finalized) = StateId::from_slot(target_slot).state(chain)?; Ok(state) } diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index e7fd8910b1..e01ff98220 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,9 +1,8 @@ +use crate::api_types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; use crate::api_types::EndpointVersion; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; -use types::{ - ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork, -}; +use types::{ForkName, ForkVersionedResponse, InconsistentFork}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); @@ -27,12 +26,13 @@ pub fn fork_versioned_response( }) } -pub fn execution_optimistic_fork_versioned_response( +pub fn execution_optimistic_finalized_fork_versioned_response( endpoint_version: EndpointVersion, fork_name: ForkName, execution_optimistic: bool, + finalized: bool, data: T, -) -> Result, warp::reject::Rejection> { +) -> Result, warp::reject::Rejection> { let fork_name = if endpoint_version == V1 { None } else if endpoint_version == V2 { @@ -40,9 +40,10 @@ pub fn execution_optimistic_fork_versioned_response( } else { return Err(unsupported_version_rejection(endpoint_version)); }; - Ok(ExecutionOptimisticForkVersionedResponse { + Ok(ExecutionOptimisticFinalizedForkVersionedResponse { version: 
fork_name, execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), data, }) } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 6144123565..8a3ba887b3 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,11 +1,11 @@ //! Tests for API behaviour across fork boundaries. -use crate::common::*; use beacon_chain::{ test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, StateSkipConfig, }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use http_api::test_utils::*; use std::collections::HashSet; use types::{ test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 7db1b22d67..da92419744 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,11 +1,11 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` -use crate::common::*; use beacon_chain::{ - chain_config::ReOrgThreshold, + chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, }; use eth2::types::DepositContractData; use execution_layer::{ForkchoiceState, PayloadAttributes}; +use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; use slot_clock::SlotClock; use state_processing::{ @@ -110,6 +110,8 @@ pub struct ReOrgTest { misprediction: bool, /// Whether to expect withdrawals to change on epoch boundaries. expect_withdrawals_change_on_epoch: bool, + /// Epoch offsets to avoid proposing reorg blocks at. 
+ disallowed_offsets: Vec, } impl Default for ReOrgTest { @@ -127,6 +129,7 @@ impl Default for ReOrgTest { should_re_org: true, misprediction: false, expect_withdrawals_change_on_epoch: false, + disallowed_offsets: vec![], } } } @@ -238,6 +241,32 @@ pub async fn proposer_boost_re_org_head_distance() { .await; } +// Check that a re-org at a disallowed offset fails. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_disallowed_offset() { + let offset = 4; + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(E::slots_per_epoch() + offset - 1), + disallowed_offsets: vec![offset], + should_re_org: false, + ..Default::default() + }) + .await; +} + +// Check that a re-org at the *only* allowed offset succeeds. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_disallowed_offset_exact() { + let offset = 4; + let disallowed_offsets = (0..E::slots_per_epoch()).filter(|o| *o != offset).collect(); + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(E::slots_per_epoch() + offset - 1), + disallowed_offsets, + ..Default::default() + }) + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn proposer_boost_re_org_very_unhealthy() { proposer_boost_re_org_test(ReOrgTest { @@ -286,6 +315,7 @@ pub async fn proposer_boost_re_org_test( should_re_org, misprediction, expect_withdrawals_change_on_epoch, + disallowed_offsets, }: ReOrgTest, ) { assert!(head_slot > 0); @@ -320,6 +350,9 @@ pub async fn proposer_boost_re_org_test( .proposer_re_org_max_epochs_since_finalization(Epoch::new( max_epochs_since_finalization, )) + .proposer_re_org_disallowed_offsets( + DisallowedReOrgOffsets::new::(disallowed_offsets).unwrap(), + ) })), ) .await; diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs index 88e0032ecd..342b72cc7d 100644 --- a/beacon_node/http_api/tests/main.rs +++ b/beacon_node/http_api/tests/main.rs @@ -1,6 
+1,5 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. -pub mod common; pub mod fork_tests; pub mod interactive_tests; pub mod tests; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 977c737fd0..fc78b2a9bf 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,4 +1,3 @@ -use crate::common::{create_api_server, create_api_server_on_port, ApiServer}; use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, @@ -8,7 +7,7 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, + types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; use execution_layer::test_utils::TestingBuilder; @@ -18,7 +17,10 @@ use execution_layer::test_utils::{ }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; -use http_api::{BlockId, StateId}; +use http_api::{ + test_utils::{create_api_server, create_api_server_on_port, ApiServer}, + BlockId, StateId, +}; use lighthouse_network::{Enr, EnrExt, PeerId}; use network::NetworkReceivers; use proto_array::ExecutionStatus; @@ -462,6 +464,264 @@ impl ApiTester { self } + // finalization tests + pub async fn test_beacon_states_root_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. 
+ if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_root(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_fork_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_fork(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_finality_checkpoints_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. 
those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. + if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_states_finality_checkpoints(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_headers_block_id_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. 
+ if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_headers_block_id(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blocks_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .full_block fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_block + // occurs after those calls, and that they were correct. + if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_blocks::(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { + for block_id in self.interesting_block_ids() { + let block_root = block_id.root(&self.chain); + let block = block_id.full_block(&self.chain).await; + + // if .root or .full_block fail, skip the test. 
those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_block + // occurs after those calls, and that they were correct. + if block_root.is_err() || block.is_err() { + continue; + } + + // now that we know the block is valid, we can unwrap() everything we need + let result = self + .client + .get_beacon_blinded_blocks::(block_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (block_root, _, _) = block_root.unwrap(); + let (block, _, _) = block.unwrap(); + let block_slot = block.slot(); + let expected = self + .chain + .is_finalized_block(&block_root, block_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_debug_beacon_states_finalized(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_root = state_id.root(&self.chain); + let state = state_id.state(&self.chain); + + // if .root or .state fail, skip the test. those would be errors outside the scope + // of this test, here we're testing the finalized field assuming the call to .is_finalized_state + // occurs after the state_root and state calls, and that the state_root and state calls + // were correct. 
+ if state_root.is_err() || state.is_err() { + continue; + } + + // now that we know the state is valid, we can unwrap() everything we need + let result = self + .client + .get_debug_beacon_states::(state_id.0) + .await + .unwrap() + .unwrap() + .finalized + .unwrap(); + + let (state_root, _, _) = state_root.unwrap(); + let (state, _, _) = state.unwrap(); + let state_slot = state.slot(); + let expected = self + .chain + .is_finalized_state(&state_root, state_slot) + .unwrap(); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + pub async fn test_beacon_states_root(self) -> Self { for state_id in self.interesting_state_ids() { let result = self @@ -474,7 +734,7 @@ impl ApiTester { let expected = state_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); assert_eq!(result, expected, "{:?}", state_id); } @@ -508,15 +768,13 @@ impl ApiTester { .unwrap() .map(|res| res.data); - let expected = - state_id - .state(&self.chain) - .ok() - .map(|(state, _execution_optimistic)| FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }); + let expected = state_id.state(&self.chain).ok().map( + |(state, _execution_optimistic, _finalized)| FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + ); assert_eq!(result, expected, "{:?}", state_id); } @@ -529,7 +787,9 @@ impl ApiTester { for validator_indices in self.interesting_validator_indices() { let state_opt = state_id.state(&self.chain).ok(); let validators: Vec = match state_opt.as_ref() { - Some((state, _execution_optimistic)) => state.validators().clone().into(), + Some((state, _execution_optimistic, _finalized)) => { + state.validators().clone().into() + } 
None => vec![], }; let validator_index_ids = validator_indices @@ -568,7 +828,7 @@ impl ApiTester { .unwrap() .map(|res| res.data); - let expected = state_opt.map(|(state, _execution_optimistic)| { + let expected = state_opt.map(|(state, _execution_optimistic, _finalized)| { let mut validators = Vec::with_capacity(validator_indices.len()); for i in validator_indices { @@ -598,7 +858,7 @@ impl ApiTester { let state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let validators: Vec = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -688,7 +948,7 @@ impl ApiTester { let state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let validators = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -743,7 +1003,7 @@ impl ApiTester { let mut state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let results = self @@ -790,7 +1050,7 @@ impl ApiTester { let mut state_opt = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let result = self @@ -900,7 +1160,7 @@ impl ApiTester { let block_root_opt = block_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); if let CoreBlockId::Slot(slot) = block_id.0 { if block_root_opt.is_none() { @@ -914,7 +1174,7 @@ impl ApiTester { .full_block(&self.chain) .await .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, 
_execution_optimistic, _finalized)| block); if block_opt.is_none() && result.is_none() { continue; @@ -960,7 +1220,7 @@ impl ApiTester { let expected = block_id .root(&self.chain) .ok() - .map(|(root, _execution_optimistic)| root); + .map(|(root, _execution_optimistic, _finalized)| root); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); @@ -1007,7 +1267,7 @@ impl ApiTester { .full_block(&self.chain) .await .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { @@ -1091,7 +1351,7 @@ impl ApiTester { let expected = block_id .blinded_block(&self.chain) .ok() - .map(|(block, _execution_optimistic)| block); + .map(|(block, _execution_optimistic, _finalized)| block); if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { @@ -1172,7 +1432,7 @@ impl ApiTester { .map(|res| res.data); let expected = block_id.full_block(&self.chain).await.ok().map( - |(block, _execution_optimistic)| { + |(block, _execution_optimistic, _finalized)| { block.message().body().attestations().clone().into() }, ); @@ -1593,7 +1853,7 @@ impl ApiTester { let mut expected = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); expected.as_mut().map(|state| state.drop_all_caches()); if let (Some(json), Some(expected)) = (&result_json, &expected) { @@ -1615,21 +1875,6 @@ impl ApiTester { .unwrap(); assert_eq!(result_ssz, expected, "{:?}", state_id); - // Check legacy v1 API. 
- let result_v1 = self - .client - .get_debug_beacon_states_v1(state_id.0) - .await - .unwrap(); - - if let (Some(json), Some(expected)) = (&result_v1, &expected) { - assert_eq!(json.version, None); - assert_eq!(json.data, *expected, "{:?}", state_id); - } else { - assert_eq!(result_v1, None); - assert_eq!(expected, None); - } - // Check that version headers are provided. let url = self .client @@ -1679,6 +1924,59 @@ impl ApiTester { self } + pub async fn test_get_debug_fork_choice(self) -> Self { + let result = self.client.get_debug_fork_choice().await.unwrap(); + + let beacon_fork_choice = self.chain.canonical_head.fork_choice_read_lock(); + + let expected_proto_array = beacon_fork_choice.proto_array().core_proto_array(); + + assert_eq!( + result.justified_checkpoint, + expected_proto_array.justified_checkpoint + ); + assert_eq!( + result.finalized_checkpoint, + expected_proto_array.finalized_checkpoint + ); + + let expected_fork_choice_nodes: Vec = expected_proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| expected_proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node.justified_checkpoint.epoch, + finalized_epoch: node.finalized_checkpoint.epoch, + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + } + }) + .collect(); + + assert_eq!(result.fork_choice_nodes, expected_fork_choice_nodes); + + // need to drop beacon_fork_choice here, else borrow checker will complain + // that self cannot be moved out since beacon_fork_choice borrowed self.chain + // and might still live after self is moved out + drop(beacon_fork_choice); + self + } + fn validator_count(&self) -> usize { 
self.chain.head_snapshot().beacon_state.validators().len() } @@ -3604,7 +3902,7 @@ impl ApiTester { let mut expected = state_id .state(&self.chain) .ok() - .map(|(state, _execution_optimistic)| state); + .map(|(state, _execution_optimistic, _finalized)| state); expected.as_mut().map(|state| state.drop_all_caches()); assert_eq!(result, expected, "{:?}", state_id); @@ -4012,6 +4310,20 @@ async fn beacon_get() { .await .test_beacon_genesis() .await + .test_beacon_states_root_finalized() + .await + .test_beacon_states_fork_finalized() + .await + .test_beacon_states_finality_checkpoints_finalized() + .await + .test_beacon_headers_block_id_finalized() + .await + .test_beacon_blocks_finalized::() + .await + .test_beacon_blinded_blocks_finalized::() + .await + .test_debug_beacon_states_finalized() + .await .test_beacon_states_root() .await .test_beacon_states_fork() @@ -4148,6 +4460,8 @@ async fn debug_get() { .test_get_debug_beacon_states() .await .test_get_debug_beacon_heads() + .await + .test_get_debug_fork_choice() .await; } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index dda797187b..c1b4d72174 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -8,13 +8,13 @@ edition = "2021" discv5 = { version = "0.2.2", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } -eth2_ssz_types = "0.2.2" +ssz_types = "0.5.0" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" -tree_hash = "0.4.1" -tree_hash_derive = "0.4.0" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } tokio = { version = "1.14.0", features = ["time", "macros"] } diff --git 
a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 79041f6d90..f4b3b78d04 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -101,6 +101,9 @@ pub struct Config { /// List of trusted libp2p nodes which are not scored. pub trusted_peers: Vec, + /// Disables peer scoring altogether. + pub disable_peer_scoring: bool, + /// Client version pub client_version: String, @@ -131,6 +134,9 @@ pub struct Config { /// List of extra topics to initially subscribe to as strings. pub topics: Vec, + /// Whether we are running a block proposer only node. + pub proposer_only: bool, + /// Whether metrics are enabled. pub metrics_enabled: bool, @@ -309,6 +315,7 @@ impl Default for Config { boot_nodes_multiaddr: vec![], libp2p_nodes: vec![], trusted_peers: vec![], + disable_peer_scoring: false, client_version: lighthouse_version::version_with_platform(), disable_discovery: false, upnp_enabled: true, @@ -318,6 +325,7 @@ impl Default for Config { import_all_attestations: false, shutdown_after_sync: false, topics: Vec::new(), + proposer_only: false, metrics_enabled: false, enable_light_client_server: false, outbound_rate_limiter_config: None, diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index dda68aff95..13fdf8ed57 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -207,7 +207,7 @@ impl Discovery { let local_node_id = local_enr.node_id(); info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), - "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp6() + "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6() ); let listen_socket = match config.listen_addrs() { 
crate::listen_addr::ListenAddress::V4(v4_addr) => v4_addr.udp_socket_addr(), @@ -1162,6 +1162,7 @@ mod tests { syncnets: Default::default(), }), vec![], + false, &log, ); Discovery::new(&keypair, &config, Arc::new(globals), &log) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 3d5c862e8b..b2096013bf 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -290,11 +290,20 @@ impl PeerManager { // If a peer is being banned, this trumps any temporary ban the peer might be // under. We no longer track it in the temporary ban list. - self.temporary_banned_peers.raw_remove(peer_id); - - // Inform the Swarm to ban the peer - self.events - .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); + if !self.temporary_banned_peers.raw_remove(peer_id) { + // If the peer is not already banned, inform the Swarm to ban the peer + self.events + .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); + // If the peer was in the process of being un-banned, remove it (a rare race + // condition) + self.events.retain(|event| { + if let PeerManagerEvent::UnBanned(unbanned_peer_id, _) = event { + unbanned_peer_id != peer_id // Remove matching peer ids + } else { + true + } + }); + } } } } @@ -552,8 +561,8 @@ impl PeerManager { Protocol::BlocksByRoot => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, - Protocol::MetaData => PeerAction::LowToleranceError, - Protocol::Status => PeerAction::LowToleranceError, + Protocol::MetaData => PeerAction::Fatal, + Protocol::Status => PeerAction::Fatal, } } RPCError::StreamTimeout => match direction { @@ -931,6 +940,10 @@ impl PeerManager { /// MIN_SYNC_COMMITTEE_PEERS /// number should be set low as an absolute lower bound to maintain peers on the sync /// committees. + /// - Do not prune trusted peers. 
NOTE: This means if a user has more trusted peers than the + /// excess peer limit, all of the following logic is subverted as we will not prune any peers. + /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage + /// its peers across the subnets. /// /// Prune peers in the following order: /// 1. Remove worst scoring peers @@ -961,7 +974,9 @@ impl PeerManager { .read() .worst_connected_peers() .iter() - .filter(|(_, info)| !info.has_future_duty() && $filter(*info)) + .filter(|(_, info)| { + !info.has_future_duty() && !info.is_trusted() && $filter(*info) + }) { if peers_to_prune.len() >= connected_peer_count.saturating_sub(self.target_peers) @@ -1011,8 +1026,8 @@ impl PeerManager { > = HashMap::new(); for (peer_id, info) in self.network_globals.peers.read().connected_peers() { - // Ignore peers we are already pruning - if peers_to_prune.contains(peer_id) { + // Ignore peers we trust or that we are already pruning + if info.is_trusted() || peers_to_prune.contains(peer_id) { continue; } @@ -1309,25 +1324,47 @@ mod tests { ..Default::default() }; let log = build_log(slog::Level::Debug, false); - let globals = NetworkGlobals::new_test_globals(&log); + let globals = NetworkGlobals::new_test_globals(vec![], &log); + PeerManager::new(config, Arc::new(globals), &log).unwrap() + } + + async fn build_peer_manager_with_trusted_peers( + trusted_peers: Vec, + target_peer_count: usize, + ) -> PeerManager { + let config = config::Config { + target_peer_count, + discovery_enabled: false, + ..Default::default() + }; + let log = build_log(slog::Level::Debug, false); + let globals = NetworkGlobals::new_test_globals(trusted_peers, &log); PeerManager::new(config, Arc::new(globals), &log).unwrap() } #[tokio::test] async fn test_peer_manager_disconnects_correctly_during_heartbeat() { - let mut peer_manager = build_peer_manager(3).await; - - // Create 5 peers to connect to. + // Create 6 peers to connect to with a target of 3. 
// 2 will be outbound-only, and have the lowest score. + // 1 will be a trusted peer. + // The other 3 will be ingoing peers. + + // We expect this test to disconnect from 3 peers. 1 from the outbound peer (the other must + // remain due to the outbound peer limit) and 2 from the ingoing peers (the trusted peer + // should remain connected). let peer0 = PeerId::random(); let peer1 = PeerId::random(); let peer2 = PeerId::random(); let outbound_only_peer1 = PeerId::random(); let outbound_only_peer2 = PeerId::random(); + let trusted_peer = PeerId::random(); + + let mut peer_manager = build_peer_manager_with_trusted_peers(vec![trusted_peer], 3).await; peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None); peer_manager.inject_connect_outgoing( &outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap(), @@ -1357,7 +1394,7 @@ mod tests { .add_to_score(-2.0); // Check initial connected peers. - assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 5); + assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 6); peer_manager.heartbeat(); @@ -1376,8 +1413,22 @@ mod tests { .read() .is_connected(&outbound_only_peer2)); + // The trusted peer remains connected + assert!(peer_manager + .network_globals + .peers + .read() + .is_connected(&trusted_peer)); + peer_manager.heartbeat(); + // The trusted peer remains connected, even after subsequent heartbeats. + assert!(peer_manager + .network_globals + .peers + .read() + .is_connected(&trusted_peer)); + // Check that if we are at target number of peers, we do not disconnect any. 
assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 3); } @@ -2122,7 +2173,7 @@ mod tests { #[cfg(test)] mod property_based_tests { use crate::peer_manager::config::DEFAULT_TARGET_PEERS; - use crate::peer_manager::tests::build_peer_manager; + use crate::peer_manager::tests::build_peer_manager_with_trusted_peers; use crate::rpc::MetaData; use libp2p::PeerId; use quickcheck::{Arbitrary, Gen, TestResult}; @@ -2133,10 +2184,12 @@ mod tests { #[derive(Clone, Debug)] struct PeerCondition { + peer_id: PeerId, outgoing: bool, attestation_net_bitfield: Vec, sync_committee_net_bitfield: Vec, score: f64, + trusted: bool, gossipsub_score: f64, } @@ -2161,10 +2214,12 @@ mod tests { }; PeerCondition { + peer_id: PeerId::random(), outgoing: bool::arbitrary(g), attestation_net_bitfield, sync_committee_net_bitfield, score: f64::arbitrary(g), + trusted: bool::arbitrary(g), gossipsub_score: f64::arbitrary(g), } } @@ -2176,26 +2231,36 @@ mod tests { if peer_conditions.len() < target_peer_count { return TestResult::discard(); } + let trusted_peers: Vec<_> = peer_conditions + .iter() + .filter_map(|p| if p.trusted { Some(p.peer_id) } else { None }) + .collect(); + // If we have a high percentage of trusted peers, it is very difficult to reason about + // the expected results of the pruning. + if trusted_peers.len() > peer_conditions.len() / 3_usize { + return TestResult::discard(); + } let rt = Runtime::new().unwrap(); rt.block_on(async move { - let mut peer_manager = build_peer_manager(target_peer_count).await; + // Collect all the trusted peers + let mut peer_manager = + build_peer_manager_with_trusted_peers(trusted_peers, target_peer_count).await; // Create peers based on the randomly generated conditions. 
for condition in &peer_conditions { - let peer = PeerId::random(); let mut attnets = crate::types::EnrAttestationBitfield::::new(); let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); if condition.outgoing { peer_manager.inject_connect_outgoing( - &peer, + &condition.peer_id, "/ip4/0.0.0.0".parse().unwrap(), None, ); } else { peer_manager.inject_connect_ingoing( - &peer, + &condition.peer_id, "/ip4/0.0.0.0".parse().unwrap(), None, ); @@ -2216,22 +2281,51 @@ mod tests { }; let mut peer_db = peer_manager.network_globals.peers.write(); - let peer_info = peer_db.peer_info_mut(&peer).unwrap(); + let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap(); peer_info.set_meta_data(MetaData::V2(metadata)); peer_info.set_gossipsub_score(condition.gossipsub_score); peer_info.add_to_score(condition.score); for subnet in peer_info.long_lived_subnets() { - peer_db.add_subscription(&peer, subnet); + peer_db.add_subscription(&condition.peer_id, subnet); } } // Perform the heartbeat. peer_manager.heartbeat(); - TestResult::from_bool( + // The minimum number of connected peers cannot be less than the target peer count + // or submitted peers. + + let expected_peer_count = target_peer_count.min(peer_conditions.len()); + // Trusted peers could make this larger however. + let no_of_trusted_peers = peer_conditions + .iter() + .filter(|condition| condition.trusted) + .count(); + let expected_peer_count = expected_peer_count.max(no_of_trusted_peers); + + let target_peer_condition = peer_manager.network_globals.connected_or_dialing_peers() - == target_peer_count.min(peer_conditions.len()), + == expected_peer_count; + + // It could be that we reach our target outbound limit and are unable to prune any + // extra, which violates the target_peer_condition. 
+ let outbound_peers = peer_manager.network_globals.connected_outbound_only_peers(); + let hit_outbound_limit = outbound_peers == peer_manager.target_outbound_peers(); + + // No trusted peers should be disconnected + let trusted_peer_disconnected = peer_conditions.iter().any(|condition| { + condition.trusted + && !peer_manager + .network_globals + .peers + .read() + .is_connected(&condition.peer_id) + }); + + TestResult::from_bool( + (target_peer_condition || hit_outbound_limit) && !trusted_peer_disconnected, ) }) } diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index a29f243c9e..24de83a61d 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -156,8 +156,10 @@ impl PeerManager { BanResult::BadScore => { // This is a faulty state error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id); - // Reban the peer + // Disconnect the peer. self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); + // Re-ban the peer to prevent repeated errors. + self.events.push(PeerManagerEvent::Banned(peer_id, vec![])); return; } BanResult::BannedIp(ip_addr) => { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 70d3399d6a..52f0bbd9df 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -41,12 +41,14 @@ pub struct PeerDB { disconnected_peers: usize, /// Counts banned peers in total and per ip banned_peers_count: BannedPeersCount, + /// Specifies if peer scoring is disabled. 
+ disable_peer_scoring: bool, /// PeerDB's logger log: slog::Logger, } impl PeerDB { - pub fn new(trusted_peers: Vec, log: &slog::Logger) -> Self { + pub fn new(trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger) -> Self { // Initialize the peers hashmap with trusted peers let peers = trusted_peers .into_iter() @@ -56,6 +58,7 @@ impl PeerDB { log: log.clone(), disconnected_peers: 0, banned_peers_count: BannedPeersCount::default(), + disable_peer_scoring, peers, } } @@ -704,7 +707,11 @@ impl PeerDB { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); } - PeerInfo::default() + if self.disable_peer_scoring { + PeerInfo::trusted_peer_info() + } else { + PeerInfo::default() + } }); // Ban the peer if the score is not already low enough. @@ -1055,7 +1062,7 @@ impl PeerDB { if let Some(to_drop) = self .peers .iter() - .filter(|(_, info)| info.is_disconnected()) + .filter(|(_, info)| info.is_disconnected() && !info.is_trusted()) .filter_map(|(id, info)| match info.connection_status() { PeerConnectionStatus::Disconnected { since } => Some((id, since)), _ => None, @@ -1300,7 +1307,7 @@ mod tests { fn get_db() -> PeerDB { let log = build_log(slog::Level::Debug, false); - PeerDB::new(vec![], &log) + PeerDB::new(vec![], false, &log) } #[test] @@ -1999,7 +2006,7 @@ mod tests { fn test_trusted_peers_score() { let trusted_peer = PeerId::random(); let log = build_log(slog::Level::Debug, false); - let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], &log); + let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], false, &log); pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None); @@ -2018,4 +2025,28 @@ mod tests { Score::max_score().score() ); } + + #[test] + fn test_disable_peer_scoring() { + let peer = PeerId::random(); + let log = build_log(slog::Level::Debug, false); + let mut pdb: PeerDB = PeerDB::new(vec![], true, &log); + + pdb.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); + 
+ // Check trusted status and score + assert!(pdb.peer_info(&peer).unwrap().is_trusted()); + assert_eq!( + pdb.peer_info(&peer).unwrap().score().score(), + Score::max_score().score() + ); + + // Adding/Subtracting score should have no effect on a trusted peer + add_score(&mut pdb, &peer, -50.0); + + assert_eq!( + pdb.peer_info(&peer).unwrap().score().score(), + Score::max_score().score() + ); + } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 5cdcdeaf85..f815e3bd36 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -171,6 +171,7 @@ impl Network { .iter() .map(|x| PeerId::from(x.clone())) .collect(), + config.disable_peer_scoring, &log, ); Arc::new(globals) @@ -1119,7 +1120,7 @@ impl Network { debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); self.peer_manager_mut().report_peer( &peer_id, - PeerAction::LowToleranceError, + PeerAction::Fatal, ReportSource::Gossipsub, Some(GoodbyeReason::Unknown), "does_not_support_gossipsub", diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index ee2b300e20..295616f36b 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -39,6 +39,7 @@ impl NetworkGlobals { listen_port_tcp6: Option, local_metadata: MetaData, trusted_peers: Vec, + disable_peer_scoring: bool, log: &slog::Logger, ) -> Self { NetworkGlobals { @@ -48,7 +49,7 @@ impl NetworkGlobals { listen_port_tcp4, listen_port_tcp6, local_metadata: RwLock::new(local_metadata), - peers: RwLock::new(PeerDB::new(trusted_peers, log)), + peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)), gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), @@ -128,7 +129,10 
@@ impl NetworkGlobals { } /// TESTING ONLY. Build a dummy NetworkGlobals instance. - pub fn new_test_globals(log: &slog::Logger) -> NetworkGlobals { + pub fn new_test_globals( + trusted_peers: Vec, + log: &slog::Logger, + ) -> NetworkGlobals { use crate::CombinedKeyExt; let keypair = libp2p::identity::Keypair::generate_secp256k1(); let enr_key: discv5::enr::CombinedKey = @@ -143,7 +147,8 @@ impl NetworkGlobals { attnets: Default::default(), syncnets: Default::default(), }), - vec![], + trusted_peers, + false, log, ) } diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index 5f09aec27a..b82e63bd9c 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -13,7 +13,7 @@ pub enum SyncState { /// The node is undertaking a backfill sync. This occurs when a user has specified a trusted /// state. The node first syncs "forward" by downloading blocks up to the current head as /// specified by its peers. Once completed, the node enters this sync state and attempts to - /// download all required historical blocks to complete its chain. + /// download all required historical blocks. BackFillSyncing { completed: usize, remaining: usize }, /// The node has completed syncing a finalized chain and is in the process of re-evaluating /// which sync state to progress to. 
diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index d068a20079..a234165d11 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -21,8 +21,8 @@ types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } hex = "0.4.2" -eth2_ssz = "0.4.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ssz_types = "0.5.0" futures = "0.3.7" error-chain = "0.12.4" tokio = { version = "1.14.0", features = ["full"] } @@ -35,7 +35,7 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } logging = { path = "../../common/logging" } task_executor = { path = "../../common/task_executor" } -igd = "0.11.1" +igd = "0.12.1" itertools = "0.10.0" num_cpus = "1.13.0" lru_cache = { path = "../../common/lru_cache" } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 61e3367e2f..9603205228 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -61,6 +61,7 @@ use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; +use tokio::sync::mpsc::error::TrySendError; use types::{ Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, @@ -77,7 +78,9 @@ mod tests; mod work_reprocessing_queue; mod worker; -use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; +use crate::beacon_processor::work_reprocessing_queue::{ + QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage, +}; pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; /// The maximum size of the channel for work events to the `BeaconProcessor`. 
@@ -218,6 +221,7 @@ pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_upd pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; +pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; @@ -738,6 +742,9 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::BackfillSync(QueuedBackfillBatch { process_id, blocks }) => { + WorkEvent::chain_segment(process_id, blocks) + } } } } @@ -893,6 +900,10 @@ impl Work { Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, + Work::ChainSegment { + process_id: ChainSegmentProcessId::BackSyncBatchId { .. }, + .. + } => CHAIN_SEGMENT_BACKFILL, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, @@ -1054,23 +1065,23 @@ impl BeaconProcessor { FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); + + let chain = match self.beacon_chain.upgrade() { + Some(chain) => chain, + // No need to proceed any further if the beacon chain has been dropped, the client + // is shutting down. + None => return, + }; + // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). 
let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN); - let work_reprocessing_tx = { - if let Some(chain) = self.beacon_chain.upgrade() { - spawn_reprocess_scheduler( - ready_work_tx, - &self.executor, - chain.slot_clock.clone(), - self.log.clone(), - ) - } else { - // No need to proceed any further if the beacon chain has been dropped, the client - // is shutting down. - return; - } - }; + let work_reprocessing_tx = spawn_reprocess_scheduler( + ready_work_tx, + &self.executor, + chain.slot_clock.clone(), + self.log.clone(), + ); let executor = self.executor.clone(); @@ -1083,12 +1094,55 @@ impl BeaconProcessor { reprocess_work_rx: ready_work_rx, }; + let enable_backfill_rate_limiting = chain.config.enable_backfill_rate_limiting; + loop { let work_event = match inbound_events.next().await { Some(InboundEvent::WorkerIdle) => { self.current_workers = self.current_workers.saturating_sub(1); None } + Some(InboundEvent::WorkEvent(event)) if enable_backfill_rate_limiting => { + match QueuedBackfillBatch::try_from(event) { + Ok(backfill_batch) => { + match work_reprocessing_tx + .try_send(ReprocessQueueMessage::BackfillSync(backfill_batch)) + { + Err(e) => { + warn!( + self.log, + "Unable to queue backfill work event. Will try to process now."; + "error" => %e + ); + match e { + TrySendError::Full(reprocess_queue_message) + | TrySendError::Closed(reprocess_queue_message) => { + match reprocess_queue_message { + ReprocessQueueMessage::BackfillSync( + backfill_batch, + ) => Some(backfill_batch.into()), + other => { + crit!( + self.log, + "Unexpected queue message type"; + "message_type" => other.as_ref() + ); + // This is an unhandled exception, drop the message. + continue; + } + } + } + } + } + Ok(..) => { + // backfill work sent to "reprocessing" queue. Process the next event. 
+ continue; + } + } + } + Err(event) => Some(event), + } + } Some(InboundEvent::WorkEvent(event)) | Some(InboundEvent::ReprocessingWork(event)) => Some(event), None => { diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index eb66e434c9..4b0a159eb4 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -9,7 +9,7 @@ use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use beacon_chain::{BeaconChain, ChainConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, @@ -23,8 +23,8 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, - SignedVoluntaryExit, SubnetId, + Attestation, AttesterSlashing, Epoch, EthSpec, MainnetEthSpec, ProposerSlashing, + SignedBeaconBlock, SignedVoluntaryExit, SubnetId, }; type E = MainnetEthSpec; @@ -70,6 +70,10 @@ impl Drop for TestRig { impl TestRig { pub async fn new(chain_length: u64) -> Self { + Self::new_with_chain_config(chain_length, ChainConfig::default()).await + } + + pub async fn new_with_chain_config(chain_length: u64, chain_config: ChainConfig) -> Self { // This allows for testing voluntary exits without building out a massive chain. 
let mut spec = E::default_spec(); spec.shard_committee_period = 2; @@ -78,6 +82,7 @@ impl TestRig { .spec(spec) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() + .chain_config(chain_config) .build(); harness.advance_slot(); @@ -180,6 +185,7 @@ impl TestRig { None, meta_data, vec![], + false, &log, )); @@ -261,6 +267,14 @@ impl TestRig { self.beacon_processor_tx.try_send(event).unwrap(); } + pub fn enqueue_backfill_batch(&self) { + let event = WorkEvent::chain_segment( + ChainSegmentProcessId::BackSyncBatchId(Epoch::default()), + Vec::default(), + ); + self.beacon_processor_tx.try_send(event).unwrap(); + } + pub fn enqueue_unaggregated_attestation(&self) { let (attestation, subnet_id) = self.attestations.first().unwrap().clone(); self.beacon_processor_tx @@ -873,3 +887,49 @@ async fn test_rpc_block_reprocessing() { // cache handle was dropped. assert_eq!(next_block_root, rig.head_root()); } + +/// Ensure that backfill batches get rate-limited and processing is scheduled at specified intervals. +#[tokio::test] +async fn test_backfill_sync_processing() { + let mut rig = TestRig::new(SMALL_CHAIN).await; + // Note: to verify the exact event times in an integration test is not straight forward here + // (not straight forward to manipulate `TestingSlotClock` due to cloning of `SlotClock` in code) + // and makes the test very slow, hence timing calculation is unit tested separately in + // `work_reprocessing_queue`. + for _ in 0..1 { + rig.enqueue_backfill_batch(); + // ensure queued batch is not processed until later + rig.assert_no_events_for(Duration::from_millis(100)).await; + // A new batch should be processed within a slot. + rig.assert_event_journal_with_timeout( + &[CHAIN_SEGMENT_BACKFILL, WORKER_FREED, NOTHING_TO_DO], + rig.chain.slot_clock.slot_duration(), + ) + .await; + } +} + +/// Ensure that backfill batches get processed as fast as they can when rate-limiting is disabled. 
+#[tokio::test] +async fn test_backfill_sync_processing_rate_limiting_disabled() { + let chain_config = ChainConfig { + enable_backfill_rate_limiting: false, + ..Default::default() + }; + let mut rig = TestRig::new_with_chain_config(SMALL_CHAIN, chain_config).await; + + for _ in 0..3 { + rig.enqueue_backfill_batch(); + } + + // ensure all batches are processed + rig.assert_event_journal_with_timeout( + &[ + CHAIN_SEGMENT_BACKFILL, + CHAIN_SEGMENT_BACKFILL, + CHAIN_SEGMENT_BACKFILL, + ], + Duration::from_millis(100), + ) + .await; +} diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 21fc2b6416..427be6d513 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -11,21 +11,25 @@ //! Aggregated and unaggregated attestations that failed verification due to referencing an unknown //! block will be re-queued until their block is imported, or until they expire. 
use super::MAX_SCHEDULED_WORK_QUEUE_LEN; +use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent}; use crate::metrics; use crate::sync::manager::BlockProcessType; use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use fnv::FnvHashMap; use futures::task::Poll; use futures::{Stream, StreamExt}; +use itertools::Itertools; use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; +use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::Context; use std::time::Duration; +use strum::AsRefStr; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; @@ -52,7 +56,7 @@ pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); pub const QUEUED_LIGHT_CLIENT_UPDATE_DELAY: Duration = Duration::from_secs(12); /// For how long to queue rpc blocks before sending them back for reprocessing. -pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); +pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(4); /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. The fact that /// we signature-verify blocks before putting them in the queue *should* protect against this, but @@ -65,7 +69,21 @@ const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; /// How many light client updates we keep before new ones get dropped. const MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES: usize = 128; +// Process backfill batch 50%, 60%, 80% through each slot. +// +// Note: use caution to set these fractions in a way that won't cause panic-y +// arithmetic. +pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [ + // One half: 6s on mainnet, 2.5s on Gnosis. + (1, 2), + // Three fifths: 7.2s on mainnet, 3s on Gnosis. + (3, 5), + // Four fifths: 9.6s on mainnet, 4s on Gnosis. 
+ (4, 5), +]; + /// Messages that the scheduler can receive. +#[derive(AsRefStr)] pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. EarlyBlock(QueuedGossipBlock), @@ -84,6 +102,8 @@ pub enum ReprocessQueueMessage { UnknownBlockAggregate(QueuedAggregate), /// A light client optimistic update that references a parent root that has not been seen as a parent. UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), + /// A new backfill batch that needs to be scheduled for processing. + BackfillSync(QueuedBackfillBatch), } /// Events sent by the scheduler once they are ready for re-processing. @@ -93,6 +113,7 @@ pub enum ReadyWork { Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), LightClientUpdate(QueuedLightClientUpdate), + BackfillSync(QueuedBackfillBatch), } /// An Attestation for which the corresponding block was not seen while processing, queued for @@ -144,6 +165,40 @@ pub struct QueuedRpcBlock { pub should_process: bool, } +/// A backfill batch work that has been queued for processing later. +#[derive(Clone)] +pub struct QueuedBackfillBatch { + pub process_id: ChainSegmentProcessId, + pub blocks: Vec>>, +} + +impl TryFrom> for QueuedBackfillBatch { + type Error = WorkEvent; + + fn try_from(event: WorkEvent) -> Result> { + match event { + WorkEvent { + work: + Work::ChainSegment { + process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_), + blocks, + }, + .. + } => Ok(QueuedBackfillBatch { process_id, blocks }), + _ => Err(event), + } + } +} + +impl From> for WorkEvent { + fn from(queued_backfill_batch: QueuedBackfillBatch) -> WorkEvent { + WorkEvent::chain_segment( + queued_backfill_batch.process_id, + queued_backfill_batch.blocks, + ) + } +} + /// Unifies the different messages processed by the block delay queue. enum InboundEvent { /// A gossip block that was queued for later processing and is ready for import. 
@@ -155,6 +210,8 @@ enum InboundEvent { ReadyAttestation(QueuedAttestationId), /// A light client update that is ready for re-processing. ReadyLightClientUpdate(QueuedLightClientUpdateId), + /// A backfill batch that was queued is ready for processing. + ReadyBackfillSync(QueuedBackfillBatch), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` @@ -191,6 +248,8 @@ struct ReprocessQueue { queued_lc_updates: FnvHashMap, DelayKey)>, /// Light Client Updates per parent_root. awaiting_lc_updates_per_parent_root: HashMap>, + /// Queued backfill batches + queued_backfill_batches: Vec>, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations @@ -200,6 +259,8 @@ struct ReprocessQueue { rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, lc_update_delay_debounce: TimeLatch, + next_backfill_batch_event: Option>>, + slot_clock: Pin>, } pub type QueuedLightClientUpdateId = usize; @@ -287,6 +348,20 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } + if let Some(next_backfill_batch_event) = self.next_backfill_batch_event.as_mut() { + match next_backfill_batch_event.as_mut().poll(cx) { + Poll::Ready(_) => { + let maybe_batch = self.queued_backfill_batches.pop(); + self.recompute_next_backfill_batch_event(); + + if let Some(batch) = maybe_batch { + return Poll::Ready(Some(InboundEvent::ReadyBackfillSync(batch))); + } + } + Poll::Pending => (), + } + } + // Last empty the messages channel. 
match self.work_reprocessing_rx.poll_recv(cx) { Poll::Ready(Some(message)) => return Poll::Ready(Some(InboundEvent::Msg(message))), @@ -323,12 +398,15 @@ pub fn spawn_reprocess_scheduler( queued_unaggregates: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), awaiting_lc_updates_per_parent_root: HashMap::new(), + queued_backfill_batches: Vec::new(), next_attestation: 0, next_lc_update: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), lc_update_delay_debounce: TimeLatch::default(), + next_backfill_batch_event: None, + slot_clock: Box::pin(slot_clock.clone()), }; executor.spawn( @@ -443,7 +521,7 @@ impl ReprocessQueue { return; } - // Queue the block for 1/4th of a slot + // Queue the block for 1/3rd of a slot self.rpc_block_delay_queue .insert(rpc_block, QUEUED_RPC_BLOCK_DELAY); } @@ -679,6 +757,14 @@ impl ReprocessQueue { } } } + InboundEvent::Msg(BackfillSync(queued_backfill_batch)) => { + self.queued_backfill_batches + .insert(0, queued_backfill_batch); + // only recompute if there is no `next_backfill_batch_event` already scheduled + if self.next_backfill_batch_event.is_none() { + self.recompute_next_backfill_batch_event(); + } + } // A block that was queued for later processing is now ready to be processed. 
InboundEvent::ReadyGossipBlock(ready_block) => { let block_root = ready_block.block.block_root; @@ -786,6 +872,33 @@ impl ReprocessQueue { } } } + InboundEvent::ReadyBackfillSync(queued_backfill_batch) => { + let millis_from_slot_start = slot_clock + .millis_from_current_slot_start() + .map_or("null".to_string(), |duration| { + duration.as_millis().to_string() + }); + + debug!( + log, + "Sending scheduled backfill work"; + "millis_from_slot_start" => millis_from_slot_start + ); + + if self + .ready_work_tx + .try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone())) + .is_err() + { + error!( + log, + "Failed to send scheduled backfill work"; + "info" => "sending work back to queue" + ); + self.queued_backfill_batches + .insert(0, queued_backfill_batch); + } + } } metrics::set_gauge_vec( @@ -809,4 +922,95 @@ impl ReprocessQueue { self.lc_updates_delay_queue.len() as i64, ); } + + fn recompute_next_backfill_batch_event(&mut self) { + // only recompute the `next_backfill_batch_event` if there are backfill batches in the queue + if !self.queued_backfill_batches.is_empty() { + self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep( + ReprocessQueue::::duration_until_next_backfill_batch_event(&self.slot_clock), + ))); + } else { + self.next_backfill_batch_event = None + } + } + + /// Returns duration until the next scheduled processing time. The schedule ensure that backfill + /// processing is done in windows of time that aren't critical + fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration { + let slot_duration = slot_clock.slot_duration(); + slot_clock + .millis_from_current_slot_start() + .and_then(|duration_from_slot_start| { + BACKFILL_SCHEDULE_IN_SLOT + .into_iter() + // Convert fractions to seconds from slot start. 
+ .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier) + .find_or_first(|&event_duration_from_slot_start| { + event_duration_from_slot_start > duration_from_slot_start + }) + .map(|next_event_time| { + if duration_from_slot_start >= next_event_time { + // event is in the next slot, add duration to next slot + let duration_to_next_slot = slot_duration - duration_from_slot_start; + duration_to_next_slot + next_event_time + } else { + next_event_time - duration_from_slot_start + } + }) + }) + // If we can't read the slot clock, just wait another slot. + .unwrap_or(slot_duration) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::builder::Witness; + use beacon_chain::eth1_chain::CachingEth1Backend; + use slot_clock::TestingSlotClock; + use store::MemoryStore; + use types::MainnetEthSpec as E; + use types::Slot; + + type TestBeaconChainType = + Witness, E, MemoryStore, MemoryStore>; + + #[test] + fn backfill_processing_schedule_calculation() { + let slot_duration = Duration::from_secs(12); + let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), slot_duration); + let current_slot_start = slot_clock.start_of(Slot::new(100)).unwrap(); + slot_clock.set_current_time(current_slot_start); + + let event_times = BACKFILL_SCHEDULE_IN_SLOT + .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier); + + for &event_duration_from_slot_start in event_times.iter() { + let duration_to_next_event = + ReprocessQueue::::duration_until_next_backfill_batch_event( + &slot_clock, + ); + + let current_time = slot_clock.millis_from_current_slot_start().unwrap(); + + assert_eq!( + duration_to_next_event, + event_duration_from_slot_start - current_time + ); + + slot_clock.set_current_time(current_slot_start + event_duration_from_slot_start) + } + + // check for next event beyond the current slot + let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap(); + let duration_to_next_event = + 
ReprocessQueue::::duration_until_next_backfill_batch_event( + &slot_clock, + ); + assert_eq!( + duration_to_next_event, + duration_to_next_slot + event_times[0] + ); + } } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index e8182a1d5a..ca2095348a 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -9,12 +9,15 @@ use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::CountUnrealized; use beacon_chain::{ + observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; +use slot_clock::SlotClock; use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -83,6 +86,66 @@ impl Worker { return; } }; + + // Returns `true` if the time now is after the 4s attestation deadline. + let block_is_late = SystemTime::now() + .duration_since(UNIX_EPOCH) + // If we can't read the system time clock then indicate that the + // block is late (and therefore should *not* be requeued). This + // avoids infinite loops. + .map_or(true, |now| { + get_block_delay_ms(now, block.message(), &self.chain.slot_clock) + > self.chain.slot_clock.unagg_attestation_production_delay() + }); + + // Checks if a block from this proposer is already known. + let proposal_already_known = || { + match self + .chain + .observed_block_producers + .read() + .proposer_has_been_observed(block.message()) + { + Ok(is_observed) => is_observed, + // Both of these blocks will be rejected, so reject them now rather + // than re-queuing them. 
+ Err(ObserveError::FinalizedBlock { .. }) + | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, + } + }; + + // If we've already seen a block from this proposer *and* the block + // arrived before the attestation deadline, requeue it to ensure it is + // imported late enough that it won't receive a proposer boost. + if !block_is_late && proposal_already_known() { + debug!( + self.log, + "Delaying processing of duplicate RPC block"; + "block_root" => ?block_root, + "proposer" => block.message().proposer_index(), + "slot" => block.slot() + ); + + // Send message to work reprocess queue to retry the block + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + block_root, + block: block.clone(), + process_type, + seen_timestamp, + should_process: true, + }); + + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!( + self.log, + "Failed to inform block import"; + "source" => "rpc", + "block_root" => %block_root + ); + } + return; + } + let slot = block.slot(); let parent_root = block.message().parent_root(); let result = self diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 3e86d2099f..d630cf9c39 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -13,6 +13,7 @@ use futures::future::OptionFuture; use futures::prelude::*; use futures::StreamExt; use lighthouse_network::service::Network; +use lighthouse_network::types::GossipKind; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, @@ -23,7 +24,7 @@ use lighthouse_network::{ MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; -use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; +use std::{collections::HashSet, net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; use strum::IntoStaticStr; use 
task_executor::ShutdownReason; @@ -671,6 +672,10 @@ impl NetworkService { source, } => self.libp2p.goodbye_peer(&peer_id, reason, source), NetworkMessage::SubscribeCoreTopics => { + if self.subscribed_core_topics() { + return; + } + if self.shutdown_after_sync { if let Err(e) = shutdown_sender .send(ShutdownReason::Success( @@ -909,6 +914,16 @@ impl NetworkService { crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); } } + + fn subscribed_core_topics(&self) -> bool { + let core_topics = core_topics_to_subscribe(self.fork_context.current_fork()); + let core_topics: HashSet<&GossipKind> = HashSet::from_iter(&core_topics); + let subscriptions = self.network_globals.gossipsub_subscriptions.read(); + let subscribed_topics: HashSet<&GossipKind> = + subscriptions.iter().map(|topic| topic.kind()).collect(); + + core_topics.is_subset(&subscribed_topics) + } } /// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 70ba1c8170..e46a52cfb2 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -112,6 +112,9 @@ pub struct AttestationService { #[cfg(feature = "deterministic_long_lived_attnets")] next_long_lived_subscription_event: Pin>, + /// Whether this node is a block proposer-only node. + proposer_only: bool, + /// The logger for the attestation service. 
log: slog::Logger, } @@ -155,6 +158,7 @@ impl AttestationService { known_validators: HashSetDelay::new(last_seen_val_timeout), waker: None, discovery_disabled: config.disable_discovery, + proposer_only: config.proposer_only, subscribe_all_subnets: config.subscribe_all_subnets, long_lived_subnet_subscription_slots, log, @@ -256,6 +260,11 @@ impl AttestationService { &mut self, subscriptions: Vec, ) -> Result<(), String> { + // If the node is in a proposer-only state, we ignore all subnet subscriptions. + if self.proposer_only { + return Ok(()); + } + // Maps each subnet_id subscription to it's highest slot let mut subnets_to_discover: HashMap = HashMap::new(); for subscription in subscriptions { @@ -450,6 +459,10 @@ impl AttestationService { subnet: SubnetId, attestation: &Attestation, ) -> bool { + // Proposer-only mode does not need to process attestations + if self.proposer_only { + return false; + } self.aggregate_validators_on_subnet .as_ref() .map(|tracked_vals| { diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs index 0b27ff527f..eda7ce8efb 100644 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ b/beacon_node/network/src/subnet_service/sync_subnets.rs @@ -54,6 +54,9 @@ pub struct SyncCommitteeService { /// We are always subscribed to all subnets. subscribe_all_subnets: bool, + /// Whether this node is a block proposer-only node. + proposer_only: bool, + /// The logger for the attestation service. 
log: slog::Logger, } @@ -82,6 +85,7 @@ impl SyncCommitteeService { waker: None, subscribe_all_subnets: config.subscribe_all_subnets, discovery_disabled: config.disable_discovery, + proposer_only: config.proposer_only, log, } } @@ -110,6 +114,11 @@ impl SyncCommitteeService { &mut self, subscriptions: Vec, ) -> Result<(), String> { + // A proposer-only node does not subscribe to any sync-committees + if self.proposer_only { + return Ok(()); + } + let mut subnets_to_discover = Vec::new(); for subscription in subscriptions { metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS); diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index d36bbbc79b..460c8b1ee9 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -159,20 +159,20 @@ impl BackFillSync { // If, for some reason a backfill has already been completed (or we've used a trusted // genesis root) then backfill has been completed. 
- let (state, current_start) = if let Some(anchor_info) = beacon_chain.store.get_anchor_info() - { - if anchor_info.block_backfill_complete() { - (BackFillState::Completed, Epoch::new(0)) - } else { - ( - BackFillState::Paused, - anchor_info - .oldest_block_slot - .epoch(T::EthSpec::slots_per_epoch()), - ) + let (state, current_start) = match beacon_chain.store.get_anchor_info() { + Some(anchor_info) => { + if anchor_info.block_backfill_complete(beacon_chain.genesis_backfill_slot) { + (BackFillState::Completed, Epoch::new(0)) + } else { + ( + BackFillState::Paused, + anchor_info + .oldest_block_slot + .epoch(T::EthSpec::slots_per_epoch()), + ) + } } - } else { - (BackFillState::NotRequired, Epoch::new(0)) + None => (BackFillState::NotRequired, Epoch::new(0)), }; let bfs = BackFillSync { @@ -287,6 +287,7 @@ impl BackFillSync { remaining: self .current_start .start_slot(T::EthSpec::slots_per_epoch()) + .saturating_sub(self.beacon_chain.genesis_backfill_slot) .as_usize(), }) } @@ -1097,7 +1098,12 @@ impl BackFillSync { match self.batches.entry(batch_id) { Entry::Occupied(_) => { // this batch doesn't need downloading, let this same function decide the next batch - if batch_id == 0 { + if batch_id + == self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + { self.last_batch_downloaded = true; } @@ -1108,7 +1114,12 @@ impl BackFillSync { } Entry::Vacant(entry) => { entry.insert(BatchInfo::new(&batch_id, BACKFILL_EPOCHS_PER_BATCH)); - if batch_id == 0 { + if batch_id + == self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + { self.last_batch_downloaded = true; } self.to_be_downloaded = self @@ -1125,7 +1136,7 @@ impl BackFillSync { /// not required. 
fn reset_start_epoch(&mut self) -> Result<(), ResetEpochError> { if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { - if anchor_info.block_backfill_complete() { + if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { Err(ResetEpochError::SyncCompleted) } else { self.current_start = anchor_info @@ -1140,12 +1151,17 @@ impl BackFillSync { /// Checks with the beacon chain if backfill sync has completed. fn check_completed(&mut self) -> bool { - if self.current_start == 0 { + if self.current_start + == self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + { // Check that the beacon chain agrees if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { // Conditions that we have completed a backfill sync - if anchor_info.block_backfill_complete() { + if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { return true; } else { error!(self.log, "Backfill out of sync with beacon chain"); diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 8ade622f8d..5a70944f6c 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -50,7 +50,7 @@ impl TestRig { }; let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups"))); let cx = { - let globals = Arc::new(NetworkGlobals::new_test_globals(&log)); + let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); SyncNetworkContext::new( network_tx, globals, diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 2531454387..0f1c00e509 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -599,7 +599,7 @@ mod tests { log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); - let 
globals = Arc::new(NetworkGlobals::new_test_globals(&log)); + let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); let cx = SyncNetworkContext::new( network_tx, globals.clone(), diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index cc4eacde89..fdbecb656f 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -12,8 +12,8 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } parking_lot = "0.12.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" rayon = "1.5.0" serde = "1.0.116" serde_derive = "1.0.116" diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index c5be4f0a61..24c0623f5c 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -497,7 +497,8 @@ impl OperationPool { |exit| { filter(exit.as_inner()) && exit.signature_is_still_valid(&state.fork()) - && verify_exit(state, exit.as_inner(), VerifySignatures::False, spec).is_ok() + && verify_exit(state, None, exit.as_inner(), VerifySignatures::False, spec) + .is_ok() }, |exit| exit.as_inner().clone(), T::MaxVoluntaryExits::to_usize(), diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 1e18493474..633cbf0438 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -123,7 +123,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("target-peers") .long("target-peers") .help("The target number of peers.") - .default_value("80") .takes_value(true), ) .arg( @@ -233,6 +232,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Disables the discv5 discovery protocol. 
The node will not search for new peers or participate in the discovery protocol.") .takes_value(false), ) + .arg( + Arg::with_name("disable-peer-scoring") + .long("disable-peer-scoring") + .help("Disables peer scoring in lighthouse. WARNING: This is a dev only flag is only meant to be used in local testing scenarios \ + Using this flag on a real network may cause your node to become eclipsed and see a different view of the network") + .takes_value(false) + .hidden(true), + ) .arg( Arg::with_name("trusted-peers") .long("trusted-peers") @@ -240,6 +247,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system.") .takes_value(true), ) + .arg( + Arg::with_name("genesis-backfill") + .long("genesis-backfill") + .help("Attempts to download blocks all the way back to genesis when checkpoint syncing.") + .takes_value(false), + ) .arg( Arg::with_name("enable-private-discovery") .long("enable-private-discovery") @@ -261,6 +274,23 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .min_values(0) .hidden(true) ) + .arg( + Arg::with_name("proposer-only") + .long("proposer-only") + .help("Sets this beacon node at be a block proposer only node. \ + This will run the beacon node in a minimal configuration that is sufficient for block publishing only. This flag should be used \ + for a beacon node being referenced by validator client using the --proposer-node flag. This configuration is for enabling more secure setups.") + .takes_value(false), + ) + + .arg( + Arg::with_name("disable-backfill-rate-limiting") + .long("disable-backfill-rate-limiting") + .help("Disable the backfill sync rate-limiting. This allow users to just sync the entire chain as fast \ + as possible, however it can result in resource contention which degrades staking performance. 
Stakers \ + should generally choose to avoid this flag since backfill sync is not required for staking.") + .takes_value(false), + ) /* REST API related arguments */ .arg( Arg::with_name("http") @@ -495,6 +525,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Specifies how many blocks the database should cache in memory [default: 5]") .takes_value(true) ) + .arg( + Arg::with_name("historic-state-cache-size") + .long("historic-state-cache-size") + .value_name("SIZE") + .help("Specifies how many states from the freezer database should cache in memory [default: 1]") + .takes_value(true) + ) /* * Execution Layer Integration */ @@ -800,7 +837,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("reconstruct-historic-states") .long("reconstruct-historic-states") - .help("After a checkpoint sync, reconstruct historic states in the database.") + .help("After a checkpoint sync, reconstruct historic states in the database. This requires syncing all the way back to genesis.") .takes_value(false) ) .arg( @@ -869,6 +906,28 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { allowed. Default: 2") .conflicts_with("disable-proposer-reorgs") ) + .arg( + Arg::with_name("proposer-reorg-cutoff") + .long("proposer-reorg-cutoff") + .value_name("MILLISECONDS") + .help("Maximum delay after the start of the slot at which to propose a reorging \ + block. Lower values can prevent failed reorgs by ensuring the block has \ + ample time to propagate and be processed by the network. The default is \ + 1/12th of a slot (1 second on mainnet)") + .conflicts_with("disable-proposer-reorgs") + ) + .arg( + Arg::with_name("proposer-reorg-disallowed-offsets") + .long("proposer-reorg-disallowed-offsets") + .value_name("N1,N2,...") + .help("Comma-separated list of integer offsets which can be used to avoid \ + proposing reorging blocks at certain slots. An offset of N means that \ + reorging proposals will not be attempted at any slot such that \ + `slot % SLOTS_PER_EPOCH == N`. 
By default only re-orgs at offset 0 will be \ + avoided. Any offsets supplied with this flag will impose additional \ + restrictions.") + .conflicts_with("disable-proposer-reorgs") + ) .arg( Arg::with_name("prepare-payload-lookahead") .long("prepare-payload-lookahead") @@ -962,6 +1021,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("0") .takes_value(true) ) + .arg( + Arg::with_name("builder-user-agent") + .long("builder-user-agent") + .value_name("STRING") + .help("The HTTP user agent to send alongside requests to the builder URL. The \ + default is Lighthouse's version string.") + .requires("builder") + .takes_value(true) + ) .arg( Arg::with_name("count-unrealized") .long("count-unrealized") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c77fa49b12..f05fea2db1 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,5 @@ use beacon_chain::chain_config::{ - ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, + DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; use clap::ArgMatches; @@ -329,6 +329,9 @@ pub fn get_config( let payload_builder = parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; el_config.builder_url = Some(payload_builder); + + el_config.builder_user_agent = + clap_utils::parse_optional(cli_args, "builder-user-agent")?; } // Set config values from parse values. 
@@ -380,6 +383,12 @@ pub fn get_config( .map_err(|_| "block-cache-size is not a valid integer".to_string())?; } + if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") { + client_config.store.historic_state_cache_size = historic_state_cache_size + .parse() + .map_err(|_| "historic-state-cache-size is not a valid integer".to_string())?; + } + client_config.store.compact_on_init = cli_args.is_present("compact-db"); if let Some(compact_on_prune) = cli_args.value_of("auto-compact-db") { client_config.store.compact_on_prune = compact_on_prune @@ -499,6 +508,7 @@ pub fn get_config( if cli_args.is_present("reconstruct-historic-states") { client_config.chain.reconstruct_historic_states = true; + client_config.chain.genesis_backfill = true; } let raw_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { @@ -686,6 +696,23 @@ pub fn get_config( client_config.chain.re_org_max_epochs_since_finalization = clap_utils::parse_optional(cli_args, "proposer-reorg-epochs-since-finalization")? .unwrap_or(DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION); + client_config.chain.re_org_cutoff_millis = + clap_utils::parse_optional(cli_args, "proposer-reorg-cutoff")?; + + if let Some(disallowed_offsets_str) = + clap_utils::parse_optional::(cli_args, "proposer-reorg-disallowed-offsets")? + { + let disallowed_offsets = disallowed_offsets_str + .split(',') + .map(|s| { + s.parse() + .map_err(|e| format!("invalid disallowed-offsets: {e:?}")) + }) + .collect::, _>>()?; + client_config.chain.re_org_disallowed_offsets = + DisallowedReOrgOffsets::new::(disallowed_offsets) + .map_err(|e| format!("invalid disallowed-offsets: {e:?}"))?; + } } // Note: This overrides any previous flags that enable this option. 
@@ -754,11 +781,18 @@ pub fn get_config( client_config.chain.optimistic_finalized_sync = !cli_args.is_present("disable-optimistic-finalized-sync"); + if cli_args.is_present("genesis-backfill") { + client_config.chain.genesis_backfill = true; + } // Payload selection configs if cli_args.is_present("always-prefer-builder-payload") { client_config.always_prefer_builder_payload = true; } + // Backfill sync rate-limiting + client_config.chain.enable_backfill_rate_limiting = + !cli_args.is_present("disable-backfill-rate-limiting"); + Ok(client_config) } @@ -955,10 +989,13 @@ pub fn set_network_config( config.set_listening_addr(parse_listening_addresses(cli_args, log)?); + // A custom target-peers command will overwrite the --proposer-only default. if let Some(target_peers_str) = cli_args.value_of("target-peers") { config.target_peers = target_peers_str .parse::() .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; + } else { + config.target_peers = 80; // default value } if let Some(value) = cli_args.value_of("network-load") { @@ -1004,6 +1041,10 @@ pub fn set_network_config( .collect::, _>>()?; } + if cli_args.is_present("disable-peer-scoring") { + config.disable_peer_scoring = true; + } + if let Some(trusted_peers_str) = cli_args.value_of("trusted-peers") { config.trusted_peers = trusted_peers_str .split(',') @@ -1013,6 +1054,9 @@ pub fn set_network_config( .map_err(|_| format!("Invalid trusted peer id: {}", peer_id)) }) .collect::, _>>()?; + if config.trusted_peers.len() >= config.target_peers { + slog::warn!(log, "More trusted peers than the target peer limit. 
This will prevent efficient peer selection criteria."; "target_peers" => config.target_peers, "trusted_peers" => config.trusted_peers.len()); + } } if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { @@ -1190,6 +1234,20 @@ pub fn set_network_config( config.outbound_rate_limiter_config = Some(Default::default()); } + // Proposer-only mode overrides a number of previous configuration parameters. + // Specifically, we avoid subscribing to long-lived subnets and wish to maintain a minimal set + // of peers. + if cli_args.is_present("proposer-only") { + config.subscribe_all_subnets = false; + + if cli_args.value_of("target-peers").is_none() { + // If a custom value is not set, change the default to 15 + config.target_peers = 15; + } + config.proposer_only = true; + warn!(log, "Proposer-only mode enabled"; "info"=> "Do not connect a validator client to this node unless via the --proposer-nodes flag"); + } + Ok(()) } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7ec2af9f9d..a1c65bd26d 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -13,8 +13,8 @@ db-key = "0.0.5" leveldb = { version = "0.8.6", default-features = false } parking_lot = "0.12.0" itertools = "0.10.0" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } slog = "2.5.2" diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 027b8152ee..581003b4fa 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -7,6 +7,7 @@ use types::{EthSpec, MinimalEthSpec}; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; +pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1; /// Database configuration parameters. 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -17,6 +18,8 @@ pub struct StoreConfig { pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: usize, + /// Maximum number of states from freezer database to store in the in-memory state cache. + pub historic_state_cache_size: usize, /// Whether to compact the database on initialization. pub compact_on_init: bool, /// Whether to compact the database during database pruning. @@ -43,6 +46,7 @@ impl Default for StoreConfig { slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, slots_per_restore_point_set_explicitly: false, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, + historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, compact_on_init: false, compact_on_prune: true, prune_payloads: true, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 3255006b55..7695ea520e 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -30,7 +30,7 @@ use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ - BlockProcessingError, BlockReplayer, SlotProcessingError, StateRootStrategy, + BlockProcessingError, BlockReplayer, SlotProcessingError, StateProcessingStrategy, }; use std::cmp::min; use std::convert::TryInto; @@ -62,6 +62,8 @@ pub struct HotColdDB, Cold: ItemStore> { pub hot_db: Hot, /// LRU cache of deserialized blocks. Updated whenever a block is loaded. block_cache: Mutex>>, + /// LRU cache of replayed states. + state_cache: Mutex>>, /// Chain spec. pub(crate) spec: ChainSpec, /// Logger. 
@@ -129,6 +131,7 @@ impl HotColdDB, MemoryStore> { cold_db: MemoryStore::open(), hot_db: MemoryStore::open(), block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, log, @@ -162,6 +165,7 @@ impl HotColdDB, LevelDB> { cold_db: LevelDB::open(cold_path)?, hot_db: LevelDB::open(hot_path)?, block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, log, @@ -527,10 +531,10 @@ impl, Cold: ItemStore> HotColdDB // chain. This way we avoid returning a state that doesn't match `state_root`. self.load_cold_state(state_root) } else { - self.load_hot_state(state_root, StateRootStrategy::Accurate) + self.load_hot_state(state_root, StateProcessingStrategy::Accurate) } } else { - match self.load_hot_state(state_root, StateRootStrategy::Accurate)? { + match self.load_hot_state(state_root, StateProcessingStrategy::Accurate)? { Some(state) => Ok(Some(state)), None => self.load_cold_state(state_root), } @@ -568,7 +572,7 @@ impl, Cold: ItemStore> HotColdDB } .into()) } else { - self.load_hot_state(state_root, StateRootStrategy::Inconsistent) + self.load_hot_state(state_root, StateProcessingStrategy::Inconsistent) } } @@ -658,10 +662,13 @@ impl, Cold: ItemStore> HotColdDB { // NOTE: minor inefficiency here because we load an unnecessary hot state summary // - // `StateRootStrategy` should be irrelevant here since we never replay blocks for an epoch + // `StateProcessingStrategy` should be irrelevant here since we never replay blocks for an epoch // boundary state in the hot DB. let state = self - .load_hot_state(&epoch_boundary_state_root, StateRootStrategy::Accurate)? + .load_hot_state( + &epoch_boundary_state_root, + StateProcessingStrategy::Accurate, + )? 
.ok_or(HotColdDBError::MissingEpochBoundaryState( epoch_boundary_state_root, ))?; @@ -830,7 +837,7 @@ impl, Cold: ItemStore> HotColdDB pub fn load_hot_state( &self, state_root: &Hash256, - state_root_strategy: StateRootStrategy, + state_processing_strategy: StateProcessingStrategy, ) -> Result>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); @@ -863,7 +870,7 @@ impl, Cold: ItemStore> HotColdDB blocks, slot, no_state_root_iter(), - state_root_strategy, + state_processing_strategy, )? }; @@ -977,40 +984,70 @@ impl, Cold: ItemStore> HotColdDB /// Load a frozen state that lies between restore points. fn load_cold_intermediate_state(&self, slot: Slot) -> Result, Error> { + if let Some(state) = self.state_cache.lock().get(&slot) { + return Ok(state.clone()); + } + // 1. Load the restore points either side of the intermediate state. let low_restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point; let high_restore_point_idx = low_restore_point_idx + 1; + // Use low restore point as the base state. + let mut low_slot: Slot = + Slot::new(low_restore_point_idx * self.config.slots_per_restore_point); + let mut low_state: Option> = None; + + // Try to get a more recent state from the cache to avoid massive blocks replay. + for (s, state) in self.state_cache.lock().iter() { + if s.as_u64() / self.config.slots_per_restore_point == low_restore_point_idx + && *s < slot + && low_slot < *s + { + low_slot = *s; + low_state = Some(state.clone()); + } + } + + // If low_state is still None, use load_restore_point_by_index to load the state. + let low_state = match low_state { + Some(state) => state, + None => self.load_restore_point_by_index(low_restore_point_idx)?, + }; + // Acquire the read lock, so that the split can't change while this is happening. 
let split = self.split.read_recursive(); - let low_restore_point = self.load_restore_point_by_index(low_restore_point_idx)?; let high_restore_point = self.get_restore_point(high_restore_point_idx, &split)?; - // 2. Load the blocks from the high restore point back to the low restore point. + // 2. Load the blocks from the high restore point back to the low point. let blocks = self.load_blocks_to_replay( - low_restore_point.slot(), + low_slot, slot, self.get_high_restore_point_block_root(&high_restore_point, slot)?, )?; - // 3. Replay the blocks on top of the low restore point. + // 3. Replay the blocks on top of the low point. // Use a forwards state root iterator to avoid doing any tree hashing. // The state root of the high restore point should never be used, so is safely set to 0. let state_root_iter = self.forwards_state_roots_iterator_until( - low_restore_point.slot(), + low_slot, slot, || (high_restore_point, Hash256::zero()), &self.spec, )?; - self.replay_blocks( - low_restore_point, + let state = self.replay_blocks( + low_state, blocks, slot, Some(state_root_iter), - StateRootStrategy::Accurate, - ) + StateProcessingStrategy::Accurate, + )?; + + // If state is not error, put it in the cache. + self.state_cache.lock().put(slot, state.clone()); + + Ok(state) } /// Get the restore point with the given index, or if it is out of bounds, the split state. 
@@ -1096,10 +1133,10 @@ impl, Cold: ItemStore> HotColdDB blocks: Vec>>, target_slot: Slot, state_root_iter: Option>>, - state_root_strategy: StateRootStrategy, + state_processing_strategy: StateProcessingStrategy, ) -> Result, Error> { let mut block_replayer = BlockReplayer::new(state, &self.spec) - .state_root_strategy(state_root_strategy) + .state_processing_strategy(state_processing_strategy) .no_signature_verification() .minimal_block_root_verification(); @@ -1741,7 +1778,7 @@ fn no_state_root_iter() -> Option bool { - self.oldest_block_slot == 0 + /// This is a comparison between the oldest block slot and the target backfill slot (which is + /// likely to be the closest WSP). + pub fn block_backfill_complete(&self, target_slot: Slot) -> bool { + self.oldest_block_slot <= target_slot } } diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index c939fd3f51..cd50babdb0 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -1,11 +1,11 @@ //! Implementation of historic state reconstruction (given complete block history). 
use crate::hot_cold_store::{HotColdDB, HotColdDBError}; -use crate::{Error, ItemStore, KeyValueStore}; +use crate::{Error, ItemStore}; use itertools::{process_results, Itertools}; use slog::info; use state_processing::{ per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, - VerifyBlockRoot, + StateProcessingStrategy, VerifyBlockRoot, }; use std::sync::Arc; use types::{EthSpec, Hash256}; @@ -13,8 +13,8 @@ use types::{EthSpec, Hash256}; impl HotColdDB where E: EthSpec, - Hot: KeyValueStore + ItemStore, - Cold: KeyValueStore + ItemStore, + Hot: ItemStore, + Cold: ItemStore, { pub fn reconstruct_historic_states(self: &Arc) -> Result<(), Error> { let mut anchor = if let Some(anchor) = self.get_anchor_info() { @@ -96,6 +96,7 @@ where &mut state, &block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &self.spec, diff --git a/book/src/LaTeX/full-withdrawal.tex b/book/src/LaTeX/full-withdrawal.tex new file mode 100644 index 0000000000..2447ba0974 --- /dev/null +++ b/book/src/LaTeX/full-withdrawal.tex @@ -0,0 +1,66 @@ +% To compile the file using PdfLaTeX, you may use the latex+dvips+ps2pdf compilation. If you are using TeXstudio, this is builtin and you can choose this option by going to Options > Configure TeXstudio under Build & View, choose DVI -> PS -> PDF Chain + +% Alternatively, you may use XeLaTeX with --shell-escape command. To do so in TeXstuidio, go to Options > Configure TeXstudio > Build. Under "Add Commands", enter a user of your choice, and in the right empty space, insert: txs:///xelatex/[--shell-escape]. When compile, go to Tools > User and select the user you just inserted. + +\documentclass[]{article} +\usepackage{pst-all} +\pagestyle{empty} + + + +\begin{document} + + +\begin{figure} + \psscalebox{1.0 1.0} % Change this value to rescale the drawing. 
+ { + \begin{pspicture}(0,-9.09)(11.8,6.13) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](7.3,6.13)(4.2,5.21) + \rput[bl](4.6,5.51){Voluntary exit} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{-<}(5.8,5.21)(5.8,3.71)(5.8,3.81) + \psline[linecolor=black, linewidth=0.04](1.7,3.61)(9.8,3.61) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,3.61)(1.7,2.61) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](2.9,2.63)(0.8,1.55) + \rput[bl](1.0,1.91){Type 0x00} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,2.63)(8.6,1.55) + \rput[bl](8.8,1.91){Type 0x01} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,3.61)(9.8,2.61) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,1.51)(1.7,0.61) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,0.61)(0.0,-1.19) + \rput[bl](0.6,-0.19){Funds locked in} + \rput[bl](0.7,-0.79){Beacon chain} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.8,0.73)(7.9,-1.39) + \rput[bl](9.0,-0.59){Exit queue} + \rput[bl](8.8,0.01){Varying time} + \rput[bl](8.3,-1.09){32 minutes to weeks} + \rput[bl](9.0,-2.89){Fixed time} + \rput[bl](9.0,-3.49){27.3 hours} + \rput[bl](8.8,-5.49){Varying time} + \rput[bl](8.7,-5.99){validator sweep} + \rput[bl](8.9,-6.59){up to 5 days} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.6,-2.19)(8.0,-3.89) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-4.79)(7.9,-6.89) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-2.49)(0.0,-4.29) + \rput[bl](1.3,-3.29){BLS to} + \rput[bl](0.6,-3.89){execution change} + \psline[linecolor=black, linewidth=0.04, linestyle=dashed, dash=0.17638889cm 0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,-1.19)(1.7,-2.49) + 
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,1.51)(9.8,0.71) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-1.39)(9.8,-2.19) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-3.89)(9.8,-4.79) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm](3.7,-3.39)(5.8,-3.39) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(5.8,-0.39)(7.9,-0.39) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(8.0,-3.39) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(5.8,-6.09)(7.9,-6.09) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-7.79)(7.9,-9.09) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-6.89)(9.8,-7.79) + \rput[bl](8.1,-8.59){\Large{Full withdrawal}} + \rput[bl](1.8,-2.09){\textit{\Large{anytime}}} + \rput[bl](4.0,-3.19){\textit{\Large{either}}} + \rput[bl](4.2,-3.89){\textit{\Large{one}}} + \end{pspicture} + } +\end{figure} + + + +\end{document} diff --git a/book/src/LaTeX/partial-withdrawal.tex b/book/src/LaTeX/partial-withdrawal.tex new file mode 100644 index 0000000000..05db3b6888 --- /dev/null +++ b/book/src/LaTeX/partial-withdrawal.tex @@ -0,0 +1,50 @@ +% To compile the file using PdfLaTeX, you may use the latex+dvips+ps2pdf compilation. 
If you are using TeXstudio, this is builtin and you can choose this option by going to Options > Configure TeXstudio under Build & View, choose DVI -> PS -> PDF Chain + +% Alternatively, you may use XeLaTeX with --shell-escape command. To do so in TeXstuidio, go to Options > Configure TeXstudio > Build. Under "Add Commands", enter a user of your choice, and in the right empty space, insert: txs:///xelatex/[--shell-escape]. When compile, go to Tools > User and select the user you just inserted. + + +\documentclass[]{article} +\usepackage{pst-all} +\pagestyle{empty} + + + +\begin{document} + +\begin{figure} + \psscalebox{1.0 1.0} % Change this value to rescale the drawing. + { + \begin{pspicture}(0,-8.09)(10.7,5.53) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](7.14,5.53)(3.6,4.45) + \rput[bl](3.8,4.81){Partial withdrawals} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{-<}(5.2,4.41)(5.2,2.91)(5.2,3.01) + \psline[linecolor=black, linewidth=0.04](1.8,2.81)(8.9,2.81) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.8,2.81)(1.8,1.81) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](2.7,1.83)(0.6,0.75) + \rput[bl](0.8,1.09){Type 0x00} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](9.8,1.83)(7.7,0.75) + \rput[bl](7.92,1.07){Type 0x01} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,2.81)(8.9,1.81) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,0.71)(1.7,-0.19) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-0.19)(0.0,-1.99) + \rput[bl](0.66,-0.99){Funds locked in} + \rput[bl](0.9,-1.59){Beacon chain} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-3.29)(6.8,-5.09) + \rput[bl](7.6,-3.99){validator sweep} + \rput[bl](7.5,-4.69){$\sim$ every 5 days} + \psframe[linecolor=black, 
linewidth=0.04, dimen=outer](3.7,-3.29)(0.0,-5.09) + \rput[bl](1.3,-4.09){BLS to} + \rput[bl](0.5,-4.69){execution change} + \psline[linecolor=black, linewidth=0.04, linestyle=dashed, dash=0.17638889cm 0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,-1.99)(1.7,-3.29) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,0.71)(8.9,-3.29) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(3.7,-4.19)(6.7,-4.19) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-6.29)(6.9,-8.09) + \rput[bl](7.0,-6.99){Balance above 32 ETH} + \rput[bl](7.9,-7.59){withdrawn} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,-5.09)(8.9,-6.29) + \rput[bl](1.8,-2.89){\textit{\Large{anytime}}} + \end{pspicture} + } +\end{figure} + +\end{document} \ No newline at end of file diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7def1821dd..bfd5a02a6f 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,7 +2,6 @@ * [Introduction](./intro.md) * [Installation](./installation.md) - * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) * [Docker](./docker.md) * [Build from Source](./installation-source.md) @@ -12,15 +11,10 @@ * [Update Priorities](./installation-priorities.md) * [Run a Node](./run_a_node.md) * [Become a Validator](./mainnet-validator.md) - * [Become a Testnet Validator](./testnet-validator.md) -* [Key Management](./key-management.md) - * [Create a wallet](./wallet-create.md) - * [Create a validator](./validator-create.md) - * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) - * [Importing from the Staking Launchpad](./validator-import-launchpad.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) + * [Partial 
Withdrawals](./partial-withdrawal.md) * [Validator Monitoring](./validator-monitoring.md) * [Doppelganger Protection](./validator-doppelganger.md) * [Suggested Fee Recipient](./suggested-fee-recipient.md) @@ -35,6 +29,7 @@ * [Prometheus Metrics](./advanced_metrics.md) * [Lighthouse UI (Siren)](./lighthouse-ui.md) * [Installation](./ui-installation.md) + * [Authentication](./ui-authentication.md) * [Configuration](./ui-configuration.md) * [Usage](./ui-usage.md) * [FAQs](./ui-faqs.md) @@ -42,9 +37,12 @@ * [Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) * [Validator Graffiti](./graffiti.md) + * [Proposer Only Beacon Nodes](./advanced-proposer-only.md) * [Remote Signing with Web3Signer](./validator-web3signer.md) * [Database Configuration](./advanced_database.md) * [Database Migrations](./database-migrations.md) + * [Key Management](./key-management.md) + * [Key Recovery](./key-recovery.md) * [Advanced Networking](./advanced_networking.md) * [Running a Slasher](./slasher.md) * [Redundancy](./redundancy.md) diff --git a/book/src/advanced-proposer-only.md b/book/src/advanced-proposer-only.md new file mode 100644 index 0000000000..c3347e044b --- /dev/null +++ b/book/src/advanced-proposer-only.md @@ -0,0 +1,71 @@ +# Advanced Proposer-Only Beacon Nodes + +Lighthouse allows for more exotic setups that can minimize attack vectors by +adding redundant beacon nodes and dividing the roles of attesting and block +production between them. + +The purpose of this is to minimize attack vectors +where malicious users obtain the network identities (IP addresses) of beacon +nodes corresponding to individual validators and subsequently perform Denial Of Service +attacks on the beacon nodes when they are due to produce a block on the +network. 
By splitting the duties of attestation and block production across +different beacon nodes, an attacker may not know which node is the block +production node, especially if the user rotates IP addresses of the block +production beacon node in between block proposals (this is in-frequent with +networks with large validator counts). + +## The Beacon Node + +A Lighthouse beacon node can be configured with the `--proposer-only` flag +(i.e. `lighthouse bn --proposer-only`). +Setting a beacon node with this flag will limit its use as a beacon node for +normal activities such as performing attestations, but it will make the node +harder to identify as a potential node to attack and will also consume less +resources. + +Specifically, this flag reduces the default peer count (to a safe minimal +number as maintaining peers on attestation subnets do not need to be considered), +prevents the node from subscribing to any attestation-subnets or +sync-committees which is a primary way for attackers to de-anonymize +validators. + +> Note: Beacon nodes that have set the `--proposer-only` flag should not be connected +> to validator clients unless via the `--proposer-nodes` flag. If connected as a +> normal beacon node, the validator may fail to handle its duties correctly and +> result in a loss of income. + + +## The Validator Client + +The validator client can be given a list of HTTP API endpoints representing +beacon nodes that will be solely used for block propagation on the network, via +the CLI flag `--proposer-nodes`. These nodes can be any working beacon nodes +and do not specifically have to be proposer-only beacon nodes that have been +executed with the `--proposer-only` (although we do recommend this flag for +these nodes for added security). + +> Note: The validator client still requires at least one other beacon node to +> perform its duties and must be specified in the usual `--beacon-nodes` flag. 
+ +> Note: The validator client will attempt to get a block to propose from the +> beacon nodes specified in `--beacon-nodes` before trying `--proposer-nodes`. +> This is because the nodes subscribed to subnets have a higher chance of +> producing a more profitable block. Any block builders should therefore be +> attached to the `--beacon-nodes` and not necessarily the `--proposer-nodes`. + + +## Setup Overview + +The intended set-up to take advantage of this mechanism is to run one (or more) +normal beacon nodes in conjunction with one (or more) proposer-only beacon +nodes. See the [Redundancy](./redundancy.md) section for more information about +setting up redundant beacon nodes. The proposer-only beacon nodes should be +setup to use a different IP address than the primary (non proposer-only) nodes. +For added security, the IP addresses of the proposer-only nodes should be +rotated occasionally such that a new IP-address is used per block proposal. + +A single validator client can then connect to all of the above nodes via the +`--beacon-nodes` and `--proposer-nodes` flags. The resulting setup will allow +the validator client to perform its regular duties on the standard beacon nodes +and when the time comes to propose a block, it will send this block via the +specified proposer-only nodes. diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 397d9a28b5..57e49531ca 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -58,6 +58,16 @@ the `--slots-per-restore-point` flag: lighthouse beacon_node --slots-per-restore-point 32 ``` +### Historic state cache + +Lighthouse includes a cache to avoid repeatedly replaying blocks when loading historic states. Lighthouse will cache a limited number of reconstructed states and will re-use them when serving requests for subsequent states at higher slots. 
This greatly reduces the cost of requesting several states in order, and we recommend that applications like block explorers take advantage of this cache. + +The historical state cache size can be specified with the flag `--historic-state-cache-size` (default value is 1): + +```bash +lighthouse beacon_node --historic-state-cache-size 4 +``` + ## Glossary * _Freezer DB_: part of the database storing finalized states. States are stored in a sparser diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 481c001694..b86e593bf1 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -72,8 +72,7 @@ specification][OpenAPI]. Returns the block header at the head of the canonical chain. ```bash -curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: -application/json" +curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: application/json" | jq ``` ```json @@ -100,7 +99,7 @@ application/json" Shows the status of validator at index `1` at the `head` state. ```bash -curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" +curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" | jq ``` ```json @@ -159,8 +158,7 @@ The API is now being served at `https://localhost:5052`. To test connectivity, you can run the following: ```bash -curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem - +curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem | jq ``` ### Connecting a validator client In order to connect a validator client to a beacon node over TLS, the validator @@ -203,7 +201,7 @@ Ensure the `--http` flag has been supplied at the CLI. 
You can quickly check that the HTTP endpoint is up using `curl`: ```bash -curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" +curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" | jq ``` The beacon node should respond with its version: diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 2848180970..e67a79c8f0 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -141,7 +141,8 @@ curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indic "attestation_head_hit_percentage": 100, "attestation_target_hits": 5, "attestation_target_misses": 5, - "attestation_target_hit_percentage": 50 + "attestation_target_hit_percentage": 50, + "latest_attestation_inclusion_distance": 1 } } } @@ -455,6 +456,7 @@ curl "http://localhost:5052/lighthouse/database/info" | jq "config": { "slots_per_restore_point": 2048, "block_cache_size": 5, + "historic_state_cache_size": 1, "compact_on_init": false, "compact_on_prune": true }, diff --git a/book/src/builders.md b/book/src/builders.md index f2a4b3936a..fc42f9b743 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -178,7 +178,7 @@ You can check that your builder is configured correctly by looking for these log On start-up, the beacon node will log if a builder is configured: ``` -INFO Connected to external block builder +INFO Using external block builder ``` At regular intervals the validator client will log that it successfully registered its validators diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index ef7e95cc7a..5e0b896359 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -28,6 +28,7 @@ validator client or the slasher**. | v3.3.0 | Nov 2022 | v13 | yes | | v3.4.0 | Jan 2023 | v13 | yes | | v3.5.0 | Feb 2023 | v15 | yes before Capella | +| v4.0.1 | Mar 2023 | v16 | yes before Capella | > **Note**: All point releases (e.g. 
v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). @@ -91,6 +92,7 @@ curl "http://localhost:5052/lighthouse/database/info" "slots_per_restore_point": 8192, "slots_per_restore_point_set_explicitly": true, "block_cache_size": 5, + "historic_state_cache_size": 1, "compact_on_init": false, "compact_on_prune": true } diff --git a/book/src/docker.md b/book/src/docker.md index 7484f9f525..d67b084da6 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -16,21 +16,18 @@ way to run Lighthouse without building the image yourself. Obtain the latest image with: ```bash -$ docker pull sigp/lighthouse +docker pull sigp/lighthouse ``` Download and test the image with: ```bash -$ docker run sigp/lighthouse lighthouse --version +docker run sigp/lighthouse lighthouse --version ``` If you can see the latest [Lighthouse release](https://github.com/sigp/lighthouse/releases) version (see example below), then you've successfully installed Lighthouse via Docker. -> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker -> Images](#available-docker-images) below. - ### Example Version Output ``` @@ -38,6 +35,9 @@ Lighthouse vx.x.xx-xxxxxxxxx BLS Library: xxxx-xxxxxxx ``` +> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker +> Images](#available-docker-images) below. + ### Available Docker Images There are several images available on Docker Hub. @@ -47,11 +47,10 @@ Lighthouse with optimizations enabled. If you are running on older hardware then `latest` image bundles a _portable_ version of Lighthouse which is slower but with better hardware compatibility (see [Portability](./installation-binaries.md#portability)). 
-To install a specific tag (in this case `latest-modern`) add the tag name to your `docker` commands -like so: +To install a specific tag (in this case `latest-modern`), add the tag name to your `docker` commands: ``` -$ docker pull sigp/lighthouse:latest-modern +docker pull sigp/lighthouse:latest-modern ``` Image tags follow this format: @@ -65,17 +64,17 @@ The `version` is: * `vX.Y.Z` for a tagged Lighthouse release, e.g. `v2.1.1` * `latest` for the `stable` branch (latest release) or `unstable` branch -The `stability` is: - -* `-unstable` for the `unstable` branch -* empty for a tagged release or the `stable` branch - The `arch` is: * `-amd64` for x86_64, e.g. Intel, AMD * `-arm64` for aarch64, e.g. Raspberry Pi 4 * empty for a multi-arch image (works on either `amd64` or `arm64` platforms) +The `stability` is: + +* `-unstable` for the `unstable` branch +* empty for a tagged release or the `stable` branch + The `modernity` is: * `-modern` for optimized builds @@ -99,13 +98,13 @@ To build the image from source, navigate to the root of the repository and run: ```bash -$ docker build . -t lighthouse:local +docker build . -t lighthouse:local ``` The build will likely take several minutes. Once it's built, test it with: ```bash -$ docker run lighthouse:local lighthouse --help +docker run lighthouse:local lighthouse --help ``` ## Using the Docker image @@ -113,12 +112,12 @@ $ docker run lighthouse:local lighthouse --help You can run a Docker beacon node with the following command: ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 ``` -> To join the Prater testnet, use `--network prater` instead. 
+> To join the Goerli testnet, use `--network goerli` instead. -> The `-p` and `-v` and values are described below. +> The `-v` (Volumes) and `-p` (Ports) and values are described below. ### Volumes @@ -131,7 +130,7 @@ The following example runs a beacon node with the data directory mapped to the users home directory: ```bash -$ docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon +docker run -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon ``` ### Ports @@ -140,14 +139,14 @@ In order to be a good peer and serve other peers you should expose port `9000` f Use the `-p` flag to do this: ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon +docker run -p 9000:9000/tcp -p 9000:9000/udp sigp/lighthouse lighthouse beacon ``` If you use the `--http` flag you may also want to expose the HTTP port with `-p 127.0.0.1:5052:5052`. ```bash -$ docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 +docker run -p 9000:9000/tcp -p 9000:9000/udp -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 ``` [docker_hub]: https://hub.docker.com/repository/docker/sigp/lighthouse/ diff --git a/book/src/faq.md b/book/src/faq.md index 43de40eee3..b42e197a00 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -9,6 +9,11 @@ - [What is "Syncing deposit contract block cache"?](#what-is-syncing-deposit-contract-block-cache) - [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup) - [How can I monitor my validators?](#how-can-i-monitor-my-validators) +- [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#i-see-beacon-logs-showing-warn-execution-engine-called-failed-what-should-i-do) +- [How do I check or update my withdrawal credentials?](#how-do-i-check-or-update-my-withdrawal-credentials) +- [I am missing attestations. 
Why?](#i-am-missing-attestations-why) +- [Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?](#sometimes-i-miss-the-attestation-head-vote-resulting-in-penalty-is-this-normal) +- [My beacon node is stuck at downloading historical block using checkpoing sync. What can I do?](#my-beacon-node-is-stuck-at-downloading-historical-block-using-checkpoing-sync-what-can-i-do) ### Why does it take so long for a validator to be activated? @@ -185,4 +190,47 @@ However, there are some components which can be configured with redundancy. See Apart from using block explorers, you may use the "Validator Monitor" built into Lighthouse which provides logging and Prometheus/Grafana metrics for individual validators. See [Validator -Monitoring](./validator-monitoring.md) for more information. +Monitoring](./validator-monitoring.md) for more information. Lighthouse has also developed Lighthouse UI (Siren) to monitor performance, see [Lighthouse UI (Siren)](./lighthouse-ui.md). + +### I see beacon logs showing `WARN: Execution engine called failed`, what should I do? + +The `WARN Execution engine called failed` log is shown when the beacon node cannot reach the execution engine. When this warning occurs, it will be followed by a detailed message. A frequently encountered example of the error message is: + +`error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec` + +which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. There are a few reasons why this can occur: +1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. 
You will see the beacon node logs `INFO Execution engine online` when it is synced.
+1. The computer is overloaded. Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage.
+1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync with the network and you may want to consider upgrading to a better SSD.
+
+If the reason for the error message is caused by no. 1 above, you may want to look further. If the execution engine is out of sync suddenly, it is usually caused by ungraceful shutdown. The common causes for ungraceful shutdown are:
+- Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services.
+- The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates.
+- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. When this occurs, the log file will show `Main process exited, code=killed, status=9/KILL`. You can also run `sudo journalctl -a --since "18 hours ago" | grep -i "killed process"` to confirm that the execution client has been killed due to oom. If you are using geth as the execution client, a short term solution is to reduce the resources used, for example: (1) reduce the cache by adding the flag `--cache 2048` (2) connect to fewer peers using the flag `--maxpeers 10`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer.
+
+
+### How do I check or update my withdrawal credentials?
+Withdrawals will be available after the Capella/Shanghai upgrades on 12th April 2023. To check if you are eligible for withdrawals, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`:
+- `withdrawals enabled` means you will automatically receive withdrawals to the withdrawal address that you set.
+- `withdrawals not enabled` means you will need to update your withdrawal credentials from `0x00` type to `0x01` type. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys).
+
+For the case of `withdrawals not enabled`, you can update your withdrawal credentials **anytime**, and there is no deadline for that. The catch is that as long as you do not update your withdrawal credentials, your rewards in the beacon chain will continue to be locked in the beacon chain. Only after you update the withdrawal credentials, will the rewards be withdrawn to the withdrawal address.
+
+
+### I am missing attestations. Why?
+The first thing is to ensure both consensus and execution clients are synced with the network. If they are synced, there may still be some issues with the node setup itself that is causing the missed attestations. Check the setup to ensure that:
+- the clock is synced
+- the computer has sufficient resources and is not overloaded
+- the internet is working well
+- you have sufficient peers
+
+You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missing attestations should be a rare occurrence.
+
+### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?
+
+In general it is unavoidable to have some penalties occasionally. 
This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone else's performance. + + +### My beacon node is stuck at downloading historical block using checkpoing sync. What can I do? + +Check the number of peers you are connected to. If you have low peers (less than 50), try to do port forwarding on the port 9000 TCP/UDP to increase peer count. \ No newline at end of file diff --git a/book/src/imgs/full-withdrawal.png b/book/src/imgs/full-withdrawal.png new file mode 100644 index 0000000000..6fa2db6a91 Binary files /dev/null and b/book/src/imgs/full-withdrawal.png differ diff --git a/book/src/imgs/partial-withdrawal.png b/book/src/imgs/partial-withdrawal.png new file mode 100644 index 0000000000..0bf90b91db Binary files /dev/null and b/book/src/imgs/partial-withdrawal.png differ diff --git a/book/src/imgs/ui-autoconnect-auth.png b/book/src/imgs/ui-autoconnect-auth.png new file mode 100644 index 0000000000..4121f56cab Binary files /dev/null and b/book/src/imgs/ui-autoconnect-auth.png differ diff --git a/book/src/imgs/ui-exit.png b/book/src/imgs/ui-exit.png new file mode 100644 index 0000000000..7061fab388 Binary files /dev/null and b/book/src/imgs/ui-exit.png differ diff --git a/book/src/imgs/ui-fail-auth.png b/book/src/imgs/ui-fail-auth.png new file mode 100644 index 0000000000..dece7b707a Binary files /dev/null and b/book/src/imgs/ui-fail-auth.png differ diff --git a/book/src/imgs/ui-session-auth.png b/book/src/imgs/ui-session-auth.png new file mode 100644 index 0000000000..c66b92af74 Binary files /dev/null and b/book/src/imgs/ui-session-auth.png differ diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 2365ea7ed7..30bf03e14e 100644 --- a/book/src/installation-binaries.md +++ 
b/book/src/installation-binaries.md @@ -23,21 +23,24 @@ For details, see [Portability](#portability). ## Usage Each binary is contained in a `.tar.gz` archive. For this example, lets assume the user needs -a portable `x86_64` binary. +a `x86_64` binary. ### Steps 1. Go to the [Releases](https://github.com/sigp/lighthouse/releases) page and select the latest release. -1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu-portable.tar.gz` binary. -1. Extract the archive: - 1. `cd Downloads` - 1. `tar -xvf lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` +1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` binary. For example, to obtain the binary file for v4.0.1 (the latest version at the time of writing), a user can run the following commands in a linux terminal: + ```bash + cd ~ + curl -LO https://github.com/sigp/lighthouse/releases/download/v4.0.1/lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz + tar -xvf lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz + ``` 1. Test the binary with `./lighthouse --version` (it should print the version). -1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. - - E.g., `cp lighthouse /usr/bin` +1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. For example, to copy `lighthouse` from the current directory to `usr/bin`, run `sudo cp lighthouse /usr/bin`. -> Windows users will need to execute the commands in Step 3 from PowerShell. + + +> Windows users will need to execute the commands in Step 2 from PowerShell. ## Portability @@ -64,4 +67,4 @@ WARN CPU seems incompatible with optimized Lighthouse build, advice: If you get On some VPS providers, the virtualization can make it appear as if CPU features are not available, even when they are. 
In this case you might see the warning above, but so long as the client -continues to function it's nothing to worry about. +continues to function, it's nothing to worry about. diff --git a/book/src/installation-source.md b/book/src/installation-source.md index c89dd1add4..b9c9df163d 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -5,8 +5,20 @@ the instructions below, and then proceed to [Building Lighthouse](#build-lightho ## Dependencies -First, **install Rust** using [rustup](https://rustup.rs/). The rustup installer provides an easy way -to update the Rust compiler, and works on all platforms. +First, **install Rust** using [rustup](https://rustup.rs/): + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +The rustup installer provides an easy way to update the Rust compiler, and works on all platforms. + +> Tips: +> +> - During installation, when prompted, enter `1` for the default installation. +> - After Rust installation completes, try running `cargo version` . If it cannot +> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. +> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. With Rust installed, follow the instructions below to install dependencies relevant to your operating system. @@ -19,10 +31,17 @@ Install the following packages: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler ``` +> Tips: +> +> - If there are difficulties, try updating the package manager with `sudo apt +> update`. + > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories > of Ubuntu 18.04 or earlier. On these distributions CMake can still be installed via PPA: > [https://apt.kitware.com/](https://apt.kitware.com) +After this, you are ready to [build Lighthouse](#build-lighthouse). + #### macOS 1. 
Install the [Homebrew][] package manager. @@ -39,10 +58,19 @@ brew install protobuf [Homebrew]: https://brew.sh/ +After this, you are ready to [build Lighthouse](#build-lighthouse). + #### Windows -1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). +1. Install [Git](https://git-scm.com/download/win). 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. + > Tips: + > - Use PowerShell to install. In Windows, search for PowerShell and run as administrator. + > - You must ensure `Get-ExecutionPolicy` is not Restricted. To test this, run `Get-ExecutionPolicy` in PowerShell. If it returns `restricted`, then run `Set-ExecutionPolicy AllSigned`, and then run + ```bash + Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + ``` + > - To verify that Chocolatey is ready, run `choco` and it should return the version. 1. Install Make, CMake, LLVM and protoc using Chocolatey: ``` @@ -67,6 +95,8 @@ should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)]( [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about +After this, you are ready to [build Lighthouse](#build-lighthouse). + ## Build Lighthouse Once you have Rust and the build dependencies you're ready to build Lighthouse: @@ -136,7 +166,7 @@ Commonly used features include: * `spec-minimal`: support for the minimal preset (useful for testing). Default features (e.g. `slasher-mdbx`) may be opted out of using the `--no-default-features` -argument for `cargo`, which can plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. +argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. E.g. 
``` @@ -171,12 +201,11 @@ PROFILE=maxperf make Lighthouse will be installed to `CARGO_HOME` or `$HOME/.cargo`. This directory needs to be on your `PATH` before you can run `$ lighthouse`. -See ["Configuring the `PATH` environment variable" -(rust-lang.org)](https://www.rust-lang.org/tools/install) for more information. +See ["Configuring the `PATH` environment variable"](https://www.rust-lang.org/tools/install) for more information. ### Compilation error -Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `rustup update`. +Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply run `rustup update`. If you can't install the latest version of Rust you can instead compile using the Minimum Supported Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's @@ -185,7 +214,7 @@ Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of memory during compilation. If you are on a resource-constrained device you can look into [cross compilation](./cross-compiling.md), or use a [pre-built -binary](./installation-binaries.md). +binary](https://github.com/sigp/lighthouse/releases). If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`. diff --git a/book/src/installation.md b/book/src/installation.md index bc546e0987..4adaf8da76 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -8,24 +8,31 @@ There are three core methods to obtain the Lighthouse application: - [Docker images](./docker.md). - [Building from source](./installation-source.md). -Community-maintained additional installation methods: - -- [Homebrew package](./homebrew.md). 
-- Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), - [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). - Additionally, there are two extra guides for specific uses: - [Raspberry Pi 4 guide](./pi.md). - [Cross-compiling guide for developers](./cross-compiling.md). -## Minimum System Requirements +There are also community-maintained installation methods: -* Dual-core CPU, 2015 or newer -* 8 GB RAM -* 128 GB solid state storage -* 10 Mb/s download, 5 Mb/s upload broadband connection +- [Homebrew package](./homebrew.md). +- Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), + [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). -For more information see [System Requirements](./system-requirements.md). -[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about + +## Recommended System Requirements + +Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. + +After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/)). The following system requirements listed are therefore for running a Lighthouse beacon node combined with an execution client , and a validator client with a modest number of validator keys (less than 100): + + +* CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer +* Memory: 32 GB RAM* +* Storage: 2 TB solid state drive +* Network: 100 Mb/s download, 20 Mb/s upload broadband connection + +> *Note: 16 GB RAM is becoming rather limited due to the increased resources required. 
16 GB RAM would likely result in out of memory errors in the case of a spike in computing demand (e.g., caused by a bug) or during periods of non-finality of the beacon chain. Users with 16 GB RAM also have a limited choice when it comes to selecting an execution client, which does not help with the [client diversity](https://clientdiversity.org/). We therefore recommend users to have at least 32 GB RAM for long term health of the node, while also giving users the flexibility to change client should the need arise.
+
+Last update: April 2023
diff --git a/book/src/key-management.md b/book/src/key-management.md
index bb1751be16..084b1fbe4c 100644
--- a/book/src/key-management.md
+++ b/book/src/key-management.md
@@ -3,12 +3,12 @@
 [launchpad]: https://launchpad.ethereum.org/
 >
-> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.**
+> **Note: While Lighthouse is able to generate the validator keys and the deposit data file to submit to the deposit contract, we strongly recommend using the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) to create validator keys and the deposit data file. This is because the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) has the option to assign a withdrawal address during the key generation process, while the Lighthouse wallet will always generate keys with withdrawal credentials of type 0x00. This means that users who created keys using Lighthouse will have to update their withdrawal credentials in the future to enable withdrawals. In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, which cannot be uploaded to the [Staking launchpad][launchpad] that accepts only `*.json` files. This means that users have to directly interact with the deposit contract to be able to submit the deposit if they were to generate the files using Lighthouse.**
 
 Lighthouse uses a _hierarchical_ key management system for producing validator keys. 
It is hierarchical because each validator key can be _derived_ from a master key, making the validators keys _children_ of the master key. This -scheme means that a single 24-word mnemonic can be used to backup all of your +scheme means that a single 24-word mnemonic can be used to back up all of your validator keys without providing any observable link between them (i.e., it is privacy-retaining). Hierarchical key derivation schemes are common-place in cryptocurrencies, they are already used by most hardware and software wallets @@ -30,37 +30,63 @@ We defined some terms in the context of validator key management: keypair. - Defined in EIP-2335. - **Voting Keypair**: a BLS public and private keypair which is used for - signing blocks, attestations and other messages on regular intervals, - whilst staking in Phase 0. + signing blocks, attestations and other messages on regular intervals in the beacon chain. - **Withdrawal Keypair**: a BLS public and private keypair which will be required _after_ Phase 0 to manage ETH once a validator has exited. -## Overview +## Create a validator +There are 2 steps involved to create a validator key using Lighthouse: + 1. [Create a wallet](#step-1-create-a-wallet-and-record-the-mnemonic) + 1. [Create a validator](#step-2-create-a-validator) -The key management system in Lighthouse involves moving down the above list of -items, starting at one easy-to-backup mnemonic and ending with multiple -keypairs. Creating a single validator looks like this: +The following example demonstrates how to create a single validator key. -1. Create a **wallet** and record the **mnemonic**: - - `lighthouse --network prater account wallet create --name wally --password-file wally.pass` -1. 
Create the voting and withdrawal **keystores** for one validator: - - `lighthouse --network prater account validator create --wallet-name wally --wallet-password wally.pass --count 1` +### Step 1: Create a wallet and record the mnemonic +A wallet allows for generating practically unlimited validators from an +easy-to-remember 24-word string (a mnemonic). As long as that mnemonic is +backed up, all validator keys can be trivially re-generated. + +Whilst the wallet stores the mnemonic, it does not store it in plain-text: the +mnemonic is encrypted with a password. It is the responsibility of the user to +define a strong password. The password is only required for interacting with +the wallet, it is not required for recovering keys from a mnemonic. + +To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Goerli testnet named `wally` and saves it in `~/.lighthouse/goerli/wallets` with a randomly generated password saved +to `./wallet.pass`: + +```bash +lighthouse --network goerli account wallet create --name wally --password-file wally.pass +``` +Using the above command, a wallet will be created in `~/.lighthouse/goerli/wallets` with the name +`wally`. It is encrypted using the password defined in the +`wally.pass` file. + +During the wallet creation process, a 24-word mnemonic will be displayed. Record the mnemonic because it allows you to recreate the files in the case of data loss. +> Notes: +> - When navigating to the directory `~/.lighthouse/goerli/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used. +> - The password is not `wally.pass`, it is the _content_ of the +> `wally.pass` file. +> - If `wally.pass` already exists, the wallet password will be set to the content +> of that file. 
+ +### Step 2: Create a validator +Validators are fundamentally represented by a BLS keypair. In Lighthouse, we use a wallet to generate these keypairs. Once a wallet exists, the `lighthouse account validator create` command can be used to generate the BLS keypair and all necessary information to submit a validator deposit. With the `wally` wallet created in [Step 1](#step-1-create-a-wallet-and-record-the-mnemonic), we can create a validator with the command: + +```bash +lighthouse --network goerli account validator create --wallet-name wally --wallet-password wally.pass --count 1 +``` +This command will: + +- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/goerli/wallets`, updating it so that it generates a new key next time. +- Create a new directory `~/.lighthouse/goerli/validators` containing: + - An encrypted keystore file `voting-keystore.json` containing the validator's voting keypair. + - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH`) which can be submitted to the deposit + contract for the Goerli testnet. Other networks can be set via the + `--network` parameter. +- Create a new directory `~/.lighthouse/goerli/secrets` which stores a password to the validator's voting keypair. -In step (1), we created a wallet in `~/.lighthouse/{network}/wallets` with the name -`wally`. We encrypted this using a pre-defined password in the -`wally.pass` file. Then, in step (2), we created one new validator in the -`~/.lighthouse/{network}/validators` directory using `wally` (unlocking it with -`wally.pass`) and storing the passwords to the validators voting key in -`~/.lighthouse/{network}/secrets`. - -Thanks to the hierarchical key derivation scheme, we can delete all of the -aforementioned directories and then regenerate them as long as we remembered -the 24-word mnemonic (we don't recommend doing this, though). - -Creating another validator is easy, it's just a matter of repeating step (2). 
-The wallet keeps track of how many validators it has generated and ensures that -a new validator is generated each time. +If you want to create another validator in the future, repeat [Step 2](#step-2-create-a-validator). The wallet keeps track of how many validators it has generated and ensures that a new validator is generated each time. The important thing is to keep the 24-word mnemonic safe so that it can be used to generate new validator keys if needed. ## Detail @@ -76,36 +102,17 @@ There are three important directories in Lighthouse validator key management: - Defaults to `~/.lighthouse/{network}/validators` - `secrets/`: since the validator signing keys are "hot", the validator process needs access to the passwords to decrypt the keystores in the validators - dir. These passwords are stored here. - - Defaults to `~/.lighthouse/{network}/secrets` where `network` is the name of the network passed in the `--network` parameter (default is `mainnet`). + directory. These passwords are stored here. + - Defaults to `~/.lighthouse/{network}/secrets` + +where `{network}` is the name of the network passed in the `--network` parameter. When the validator client boots, it searches the `validators/` for directories containing voting keystores. When it discovers a keystore, it searches the -`secrets/` dir for a file with the same name as the 0x-prefixed hex -representation of the keystore public key. If it finds this file, it attempts +`secrets/` directory for a file with the same name as the 0x-prefixed validator public key. If it finds this file, it attempts to decrypt the keystore using the contents of this file as the password. If it fails, it logs an error and moves onto the next keystore. The `validators/` and `secrets/` directories are kept separate to allow for ease-of-backup; you can safely backup `validators/` without worrying about leaking private key data. 
- -### Withdrawal Keypairs - -In Ethereum consensus Phase 0, withdrawal keypairs do not serve any immediate purpose. -However, they become very important _after_ Phase 0: they will provide the -ultimate control of the ETH of withdrawn validators. - -This presents an interesting key management scenario: withdrawal keys are very -important, but not right now. Considering this, Lighthouse has adopted a -strategy where **we do not save withdrawal keypairs to disk by default** (it is -opt-in). Instead, we assert that since the withdrawal keys can be regenerated -from a mnemonic, having them lying around on the file-system only presents risk -and complexity. - -At the time of writing, we do not expose the commands to regenerate keys from -mnemonics. However, key regeneration is tested on the public Lighthouse -repository and will be exposed prior to mainnet launch. - -So, in summary, withdrawal keypairs can be trivially regenerated from the -mnemonic via EIP-2333 so they are not saved to disk like the voting keypairs. diff --git a/book/src/key-recovery.md b/book/src/key-recovery.md index 2474d123ca..a996e95cbc 100644 --- a/book/src/key-recovery.md +++ b/book/src/key-recovery.md @@ -1,8 +1,8 @@ -# Key recovery +# Key Recovery Generally, validator keystore files are generated alongside a *mnemonic*. If -the keystore and/or the keystore password are lost this mnemonic can +the keystore and/or the keystore password are lost, this mnemonic can regenerate a new, equivalent keystore with a new password. There are two ways to recover keys using the `lighthouse` CLI: @@ -48,7 +48,7 @@ which contains all the information necessary to run a validator using the `lighthouse vc` command. The password to this new keystore will be placed in the `--secrets-dir` (default `~/.lighthouse/{network}/secrets`). -where `network` is the name of the consensus layer network passed in the `--network` parameter (default is `mainnet`). 
+where `{network}` is the name of the consensus layer network passed in the `--network` parameter (default is `mainnet`). ## Recover a EIP-2386 wallet diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index 0014af8f15..fc4530589d 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -14,6 +14,15 @@ There are three flags which control the re-orging behaviour: * `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled. * `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, meaning re-orgs will only be attempted when the chain is finalizing optimally. +* `--proposer-reorg-cutoff T`: only attempt to re-org late blocks when the proposal is being made + before T milliseconds into the slot. Delays between the validator client and the beacon node can + cause some blocks to be requested later than the start of the slot, which makes them more likely + to fail. The default cutoff is 1000ms on mainnet, which gives blocks 3000ms to be signed and + propagated before the attestation deadline at 4000ms. +* `--proposer-reorg-disallowed-offsets N1,N2,N3...`: Prohibit Lighthouse from attempting to reorg at + specific offsets in each epoch. A disallowed offset `N` prevents reorging blocks from being + proposed at any `slot` such that `slot % SLOTS_PER_EPOCH == N`. The value to this flag is a + comma-separated list of integer offsets. All flags should be applied to `lighthouse bn`. 
The default configuration is recommended as it balances the chance of the re-org succeeding against the chance of failure due to attestations diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md index 225f293f97..4182314da1 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/lighthouse-ui.md @@ -11,7 +11,7 @@ _Documentation for Siren users and developers._ Siren is a user interface built for Lighthouse that connects to a Lighthouse Beacon Node and a Lighthouse Validator Client to monitor performance and display key validator -metrics. +metrics. The UI is currently in active development. It resides in the [Siren](https://github.com/sigp/siren) repository. @@ -24,7 +24,8 @@ information: - [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. - [Configuration Guide](./ui-configuration.md) - Explanation of how to setup and configure Siren. -- [Usage](./ui-usage.md) - Details various Siren components. +- [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. +- [Usage](./ui-usage.md) - Details various Siren components. - [FAQs](./ui-faqs.md) - Frequently Asked Questions. ## Contributing diff --git a/book/src/mainnet-validator.md b/book/src/mainnet-validator.md index 41735f85bb..377e5ebaa4 100644 --- a/book/src/mainnet-validator.md +++ b/book/src/mainnet-validator.md @@ -2,7 +2,6 @@ [launchpad]: https://launchpad.ethereum.org/ [lh-book]: https://lighthouse-book.sigmaprime.io/ -[testnet-validator]: ./testnet-validator.md [advanced-datadir]: ./advanced-datadir.md [license]: https://github.com/sigp/lighthouse/blob/stable/LICENSE [slashing]: ./slashing-protection.md @@ -12,25 +11,13 @@ Becoming an Ethereum consensus validator is rewarding, but it's not for the fain familiar with the rules of staking (e.g., rewards, penalties, etc.) and also configuring and managing servers. You'll also need at least 32 ETH!
-For those with an understanding of Ethereum consensus and server maintenance, you'll find that running Lighthouse -is easy. Install it, start it, monitor it and keep it updated. You shouldn't need to interact -with it on a day-to-day basis. +Being educated is critical to a validator's success. Before submitting your mainnet deposit, we recommend: -Being educated is critical to validator success. Before submitting your mainnet deposit, we -recommend: - -- Thoroughly exploring the [Staking Launchpad][launchpad] website - - Try running through the deposit process *without* actually submitting a deposit. +- Thoroughly exploring the [Staking Launchpad][launchpad] website, try running through the deposit process using a testnet launchpad such as the [Goerli staking launchpad](https://goerli.launchpad.ethereum.org/en/). +- Running a testnet validator. - Reading through this documentation, especially the [Slashing Protection][slashing] section. -- Running a [testnet validator][testnet-validator]. - Performing a web search and doing your own research. -By far, the best technical learning experience is to run a [Testnet Validator][testnet-validator]. -You can get hands-on experience with all the tools and it's a great way to test your staking -hardware. We recommend *all* mainnet validators to run a testnet validator initially; 32 ETH is a -significant outlay and joining a testnet is a great way to "try before you buy". - -Remember, if you get stuck you can always reach out on our [Discord][discord]. > > **Please note**: the Lighthouse team does not take any responsibility for losses or damages @@ -40,116 +27,187 @@ Remember, if you get stuck you can always reach out on our [Discord][discord]. > due to the actions of other actors on the consensus layer or software bugs. See the > [software license][license] for more detail on liability. -## Using Lighthouse for Mainnet -When using Lighthouse, the `--network` flag selects a network. 
E.g., +## Become a validator -- `lighthouse` (no flag): Mainnet. -- `lighthouse --network mainnet`: Mainnet. -- `lighthouse --network prater`: Prater (testnet). +There are five primary steps to become a validator: -Using the correct `--network` flag is very important; using the wrong flag can -result in penalties, slashings or lost deposits. As a rule of thumb, always -provide a `--network` flag instead of relying on the default. +1. [Create validator keys](#step-1-create-validator-keys) +1. [Start an execution client and Lighthouse beacon node](#step-2-start-an-execution-client-and-lighthouse-beacon-node) +1. [Import validator keys into Lighthouse](#step-3-import-validator-keys-to-lighthouse) +1. [Start Lighthouse validator client](#step-4-start-lighthouse-validator-client) +1. [Submit deposit](#step-5-submit-deposit-32eth-per-validator) -## Joining a Testnet +> **Important note**: The guide below contains both mainnet and testnet instructions. We highly recommend *all* users to **run a testnet validator** prior to staking mainnet ETH. By far, the best technical learning experience is to run a testnet validator. You can get hands-on experience with all the tools and it's a great way to test your staking +hardware. 32 ETH is a significant outlay and joining a testnet is a great way to "try before you buy". -There are five primary steps to become a testnet validator: + -1. Create validator keys and submit deposits. -1. Start an execution client. -1. Install Lighthouse. -1. Import the validator keys into Lighthouse. -1. Start Lighthouse. -1. Leave Lighthouse running. -Each of these primary steps has several intermediate steps, so we recommend -setting aside one or two hours for this process. +> **Never use real ETH to join a testnet!** Testnets such as the Goerli testnet use Goerli ETH which is worthless. This allows experimentation without real-world costs. ### Step 1.
Create validator keys -The Ethereum Foundation provides a "Staking Launchpad" for creating validator keypairs and submitting -deposits: - -- [Staking Launchpad][launchpad] - -Please follow the steps on the launch pad site to generate validator keys and submit deposits. Make -sure you select "Lighthouse" as your client. - -Move to the next step once you have completed the steps on the launch pad, -including generating keys via the Python CLI and submitting gETH/ETH deposits. - -### Step 2. Start an execution client - -Since the consensus chain relies upon the execution chain for validator on-boarding, all consensus validators must have a -connection to an execution client. - -We provide instructions for using Geth, but you could use any client that implements the JSON RPC -via HTTP. A fast-synced node is sufficient. - -#### Installing Geth - -If you're using a Mac, follow the instructions [listed -here](https://github.com/ethereum/go-ethereum/wiki/Installation-Instructions-for-Mac) to install -geth. Otherwise [see here](https://github.com/ethereum/go-ethereum/wiki/Installing-Geth). - -#### Starting Geth - -Once you have geth installed, use this command to start your execution node: - +The Ethereum Foundation provides the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases) for creating validator keys. Download and run the `staking-deposit-cli` with the command: ```bash - geth --http +./deposit new-mnemonic +``` +and follow the instructions to generate the keys. When prompted for a network, select `mainnet` if you want to run a mainnet validator, or select `goerli` if you want to run a Goerli testnet validator. A new mnemonic will be generated in the process. + +> **Important note:** A mnemonic (or seed phrase) is a 24-word string randomly generated in the process. It is highly recommended to write down the mnemonic and keep it safe offline. 
It is important to ensure that the mnemonic is never stored in any digital form (computers, mobile phones, etc) connected to the internet. Please also make one or more backups of the mnemonic to ensure your ETH is not lost in the case of data loss. It is very important to keep your mnemonic private as it represents the ultimate control of your ETH. + +Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. The keys that are generated from staking-deposit-cli can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other. + + +> Lighthouse also supports creating validator keys, see [Key management](./key-management.md) for more info. + +### Step 2. Start an execution client and Lighthouse beacon node + +Start an execution client and Lighthouse beacon node according to the [Run a Node](./run_a_node.md) guide. Make sure that both execution client and consensus client are synced. + +### Step 3. Import validator keys to Lighthouse + +In [Step 1](#step-1-create-validator-keys), the staking-deposit-cli will generate the validator keys into a `validator_keys` directory. Let's assume that +this directory is `$HOME/staking-deposit-cli/validator_keys`. Using the default `validators` directory in Lighthouse (`~/.lighthouse/mainnet/validators`), run the following command to import validator keys: + +Mainnet: +```bash +lighthouse --network mainnet account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` -### Step 3. Install Lighthouse - -*Note: Lighthouse only supports Windows via WSL.* - -Follow the [Lighthouse Installation Instructions](./installation.md) to install -Lighthouse from one of the available options. - -Proceed to the next step once you've successfully installed Lighthouse and viewed -its `--version` info. 
- -> Note: Some of the instructions vary when using Docker, ensure you follow the -> appropriate sections later in this guide. - -### Step 4. Import validator keys to Lighthouse - -When Lighthouse is installed, follow the [Importing from the Ethereum Staking Launch -pad](./validator-import-launchpad.md) instructions so the validator client can -perform your validator duties. - -Proceed to the next step once you've successfully imported all validators. - -### Step 5. Start Lighthouse - -For staking, one needs to run two Lighthouse processes: - -- `lighthouse bn`: the "beacon node" which connects to the P2P network and - verifies blocks. -- `lighthouse vc`: the "validator client" which manages validators, using data - obtained from the beacon node via a HTTP API. - -Starting these processes is different for binary and docker users: - -#### Binary users - -Those using the pre- or custom-built binaries can start the two processes with: - +Goerli testnet: ```bash -lighthouse --network mainnet bn --staking +lighthouse --network goerli account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` -```bash -lighthouse --network mainnet vc +> Note: The user must specify the consensus client network that they are importing the keys by using the `--network` flag. + +> Note: If the validator_keys directory is in a different location, modify the path accordingly. + +> Note: `~/.lighthouse/mainnet` is the default directory which contains the keys and database. To specify a custom directory, see [Custom Directories][advanced-datadir]. + +> Docker users should use the command from the [Docker](#docker-users) documentation. 
+ + +The user will be prompted for a password for each keystore discovered: + +``` +Keystore found at "/home/{username}/staking-deposit-cli/validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json": + + - Public key: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 + - UUID: 8ea4cf99-8719-43c5-9eda-e97b8a4e074f + +If you enter the password it will be stored as plain text in validator_definitions.yml so that it is not required each time the validator client starts. + +Enter the keystore password, or press enter to omit it: ``` -> Note: `~/.lighthouse/mainnet` is the default directory which contains the keys and databases. -> To specify a custom dir, see [Custom Directories][advanced-datadir]. +The user can choose whether or not they'd like to store the validator password +in the [`validator_definitions.yml`](./validator-management.md) file. If the +password is *not* stored here, the validator client (`lighthouse vc`) +application will ask for the password each time it starts. This might be nice +for some users from a security perspective (i.e., if it is a shared computer), +however it means that if the validator client restarts, the user will be subject +to offline penalties until they can enter the password. If the user trusts the +computer that is running the validator client and they are seeking maximum +validator rewards, we recommend entering a password at this point. -#### Docker users +Once the process is done the user will see: +``` +Successfully imported keystore. +Successfully updated validator_definitions.yml. + +Successfully imported 1 validators (0 skipped). + +WARNING: DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH ANOTHER CLIENT, OR YOU WILL GET SLASHED. +``` + +Once you see the above message, you have successfully imported the validator keys. You can now proceed to the next step to start the validator client. + + +### Step 4. 
Start Lighthouse validator client + +After the keys are imported, the user can start performing their validator duties +by starting the Lighthouse validator client `lighthouse vc`: + +Mainnet: + +```bash +lighthouse vc --network mainnet --suggested-fee-recipient YourFeeRecipientAddress +``` + +Goerli testnet: +```bash +lighthouse vc --network goerli --suggested-fee-recipient YourFeeRecipientAddress +``` + +The `validator client` manages validators using data obtained from the beacon node via a HTTP API. You are highly recommended to enter a fee-recipient by changing `YourFeeRecipientAddress` to an Ethereum address under your control. + +When `lighthouse vc` starts, check that the validator public key appears +as a `voting_pubkey` as shown below: + +``` +INFO Enabled validator voting_pubkey: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 +``` + +Once this log appears (and there are no errors) the `lighthouse vc` application +will ensure that the validator starts performing its duties and being rewarded +by the protocol. + +### Step 5: Submit deposit (32ETH per validator) + +After you have successfully run and synced the execution client, beacon node and validator client, you can now proceed to submit the deposit. Go to the mainnet [Staking launchpad](https://launchpad.ethereum.org/en/) (or [Goerli staking launchpad](https://goerli.launchpad.ethereum.org/en/) for testnet validator) and carefully go through the steps to becoming a validator. Once you are ready, you can submit the deposit by sending 32ETH per validator to the deposit contract. Upload the `deposit_data-*.json` file generated in [Step 1](#step-1-create-validator-keys) to the Staking launchpad. + +> **Important note:** Double check that the deposit contract for mainnet is `0x00000000219ab540356cBB839Cbe05303d7705Fa` before you confirm the transaction. 
+ +Once the deposit transaction is confirmed, it will take a minimum of ~16 hours to a few days/weeks for the beacon chain to process and activate your validator, depending on the queue. Refer to our [FAQ - Why does it take so long for a validator to be activated](./faq.md#why-does-it-take-so-long-for-a-validator-to-be-activated) for more info. + +Once your validator is activated, the validator client will start to publish attestations each epoch: + +``` +Dec 03 08:49:40.053 INFO Successfully published attestation slot: 98, committee_index: 0, head_block: 0xa208…7fd5, +``` + +If you propose a block, the log will look like: + +``` +Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block +``` + +Congratulations! Your validator is now performing its duties and you will receive rewards for securing the Ethereum network. + +### What is next? +After the validator is running and performing its duties, it is important to keep the validator online to continue accumulating rewards. However, there could be problems with the computer, the internet or other factors that cause the validator to be offline. For this, it is best to subscribe to notifications, e.g., via [beaconcha.in](https://beaconcha.in/) which will send notifications about missed attestations and/or proposals. You will be notified about the validator's offline status and will be able to react promptly. + +The next important thing is to stay up to date with updates to Lighthouse and the execution client. Updates are released from time to time, typically once or twice a month. For Lighthouse updates, you can subscribe to notifications on [Github](https://github.com/sigp/lighthouse) by clicking on `Watch`. If you only want to receive notification on new releases, select `Custom`, then `Releases`. You could also join [Lighthouse Discord](https://discord.gg/cyAszAh) where we will make an announcement when there is a new release. 
+ +You may also want to try out [Siren](./lighthouse-ui.md), a UI developed by Lighthouse to monitor validator performance. + +Once you are familiar with running a validator and server maintenance, you'll find that running Lighthouse is easy. Install it, start it, monitor it and keep it updated. You shouldn't need to interact with it on a day-to-day basis. Happy staking! + +## Docker users + +### Import validator keys + +The `import` command is a little more complex for Docker users, but the example +in this document can be substituted with: + +```bash +docker run -it \ + -v $HOME/.lighthouse:/root/.lighthouse \ + -v $(pwd)/validator_keys:/root/validator_keys \ + sigp/lighthouse \ + lighthouse --network mainnet account validator import --directory /root/validator_keys +``` + +Here we use two `-v` volumes to attach: + +- `~/.lighthouse` on the host to `/root/.lighthouse` in the Docker container. +- The `validator_keys` directory in the present working directory of the host + to the `/root/validator_keys` directory of the Docker container. + +### Start Lighthouse beacon node and validator client Those using Docker images can start the processes with: ```bash @@ -167,29 +225,8 @@ $ docker run \ lighthouse --network mainnet vc ``` -### Step 6. Leave Lighthouse running -Leave your beacon node and validator client running and you'll see logs as the -beacon node stays synced with the network while the validator client produces -blocks and attestations. - -It will take 4-8+ hours for the beacon chain to process and activate your -validator, however you'll know you're active when the validator client starts -successfully publishing attestations each epoch: - -``` -Dec 03 08:49:40.053 INFO Successfully published attestation slot: 98, committee_index: 0, head_block: 0xa208…7fd5, -``` - -Although you'll produce an attestation each epoch, it's less common to produce a -block. 
Watch for the block production logs too: - -``` -Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block -``` - -If you see any `ERRO` (error) logs, please reach out on -[Discord](https://discord.gg/cyAszAh) or [create an +If you get stuck you can always reach out on our [Discord][discord] or [create an issue](https://github.com/sigp/lighthouse/issues/new). -Happy staking! + diff --git a/book/src/partial-withdrawal.md b/book/src/partial-withdrawal.md new file mode 100644 index 0000000000..db722d729e --- /dev/null +++ b/book/src/partial-withdrawal.md @@ -0,0 +1,23 @@ +# Partial Withdrawals + +After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023: + + - if a validator has a withdrawal credential type `0x00`, the rewards will continue to accumulate and will be locked in the beacon chain. + - if a validator has a withdrawal credential type `0x01`, any rewards above 32ETH will be periodically withdrawn to the withdrawal address. This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. At the time of writing, with 560,000+ validators on the Ethereum mainnet, you shall expect to receive the rewards approximately every 5 days. + +### FAQ +1. How to know if I have the withdrawal credentials type `0x00` or `0x01`? + + Refer [here](./voluntary-exit.md#1-how-to-know-if-i-have-the-withdrawal-credentials-type-0x01). + +2. My validator has withdrawal credentials type `0x00`, is there a deadline to update my withdrawal credentials? + + No. You can update your withdrawal credentials **anytime**. The catch is that as long as you do not update your withdrawal credentials, your rewards in the beacon chain will continue to be locked in the beacon chain. Only after you update the withdrawal credentials, will the rewards be withdrawn to the withdrawal address. + +3. 
Do I have to do anything to get my rewards after I update the withdrawal credentials to type `0x01`? + + No. The "validator sweep" occurs automatically and you can expect to receive the rewards every few days. + + The figure below summarizes partial withdrawals. + + ![partial](./imgs/partial-withdrawal.png) \ No newline at end of file diff --git a/book/src/pi.md b/book/src/pi.md index 24796d394e..d8d154d765 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -12,18 +12,18 @@ desktop) may be convenient.* ### 1. Install Ubuntu -Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). - -**A 64-bit version is required** and latest version is recommended (Ubuntu -20.04 LTS was the latest at the time of writing). +Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). **A 64-bit version is required.** A graphical environment is not required in order to use Lighthouse. Only the terminal and an Internet connection are necessary. ### 2. Install Packages -Install the [Ubuntu Dependencies](installation-source.md#ubuntu). -(I.e., run the `sudo apt install ...` command at that link). +Install the Ubuntu dependencies: + +```bash +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler +``` > Tips: > @@ -32,15 +32,18 @@ Install the [Ubuntu Dependencies](installation-source.md#ubuntu). ### 3. Install Rust -Install Rust as per [rustup](https://rustup.rs/). (I.e., run the `curl ... ` -command). +Install Rust as per [rustup](https://rustup.rs/): + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` > Tips: > -> - When prompted, enter `1` for the default installation. -> - Try running `cargo version` after Rust installation completes. If it cannot -> be found, run `source $HOME/.cargo/env`. -> - It's generally advised to append `source $HOME/.cargo/env` to `~/.bashrc`.
+> - During installation, when prompted, enter `1` for the default installation. +> - After Rust installation completes, try running `cargo version` . If it cannot +> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. +> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. ### 4. Install Lighthouse diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index fb112c3675..a31aedf785 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -1,77 +1,43 @@ # Run a Node -This document provides detail for users who want to run a Lighthouse beacon node. +This section provides the detail for users who want to run a Lighthouse beacon node. You should be finished with one [Installation](./installation.md) method of your choice to continue with the following steps: -1. Set up an [execution node](#step-1-set-up-an-execution-node); -1. Enable [checkpoint sync](#step-2-choose-a-checkpoint-sync-provider); -1. Run [Lighthouse](#step-3-run-lighthouse); -1. [Check logs](#step-4-check-logs); and -1. [Further readings](#step-5-further-readings). +1. Create a [JWT secret file](#step-1-create-a-jwt-secret-file) +1. Set up an [execution node](#step-2-set-up-an-execution-node); +1. Set up a [beacon node](#step-3-set-up-a-beacon-node-using-lighthouse); +1. [Check logs for sync status](#step-4-check-logs-for-sync-status); -Checkpoint sync is *optional*; however, we recommend it since it is substantially faster -than syncing from genesis while still providing the same functionality. -## Step 1: Set up an execution node -The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions -present in blocks. Two flags are used to configure this connection: +## Step 1: Create a JWT secret file +A JWT secret file is used to secure the communication between the execution client and the consensus client. 
In this step, we will create a JWT secret file which will be used in later steps. -- `--execution-endpoint`: the *URL* of the execution engine API. Often this will be - `http://localhost:8551`. -- `--execution-jwt`: the *path* to the file containing the JWT secret shared by Lighthouse and the - execution engine. This is a mandatory form of authentication that ensures that Lighthouse -has authority to control the execution engine. +```bash +sudo mkdir -p /secrets +openssl rand -hex 32 | tr -d "\n" | sudo tee /secrets/jwt.hex +``` -Each execution engine has its own flags for configuring the engine API and JWT. -Please consult the relevant page of your execution engine for the required flags: +## Step 2: Set up an execution node -- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) -- [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) -- [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) -- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) +The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions present in blocks. The execution engine connection must be *exclusive*, i.e. you must have one execution node +per beacon node. The reason for this is that the beacon node _controls_ the execution node. Select an execution client from the list below and run it: -The execution engine connection must be *exclusive*, i.e. you must have one execution node -per beacon node. The reason for this is that the beacon node _controls_ the execution node. 
-## Step 2: Choose a checkpoint sync provider +- [Nethermind](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) +- [Besu](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) +- [Erigon](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) +- [Geth](https://geth.ethereum.org/docs/getting-started/consensus-clients) -Lighthouse supports fast sync from a recent finalized checkpoint. -The checkpoint sync is done using a [public endpoint](#use-a-community-checkpoint-sync-endpoint) -provided by the Ethereum community. -In [step 3](#step-3-run-lighthouse), when running Lighthouse, -we will enable checkpoint sync by providing the URL to the `--checkpoint-sync-url` flag. +> Note: Each execution engine has its own flags for configuring the engine API and JWT secret to connect to a beacon node. Please consult the relevant page of your execution engine as above for the required flags. -### Use a community checkpoint sync endpoint -The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the URL. +Once the execution client is up, just let it continue running. The execution client will start syncing when it connects to a beacon node. Depending on the execution client and computer hardware specifications, syncing can take from a few hours to a few days. You can safely proceed to Step 3 to set up a beacon node while the execution client is still syncing. -For example, the URL for Sigma Prime's checkpoint sync server for mainnet is `https://mainnet.checkpoint.sigp.io`, -which we will use in [step 3](#step-3-run-lighthouse). 
+## Step 3: Set up a beacon node using Lighthouse -## Step 3: Run Lighthouse - -To run Lighthouse, we use the three flags from the steps above: -- `--execution-endpoint`; -- `--execution-jwt`; and -- `--checkpoint-sync-url`. - -Additionally, we run Lighthouse with the `--network` flag, which selects a network: - -- `lighthouse` (no flag): Mainnet. -- `lighthouse --network mainnet`: Mainnet. -- `lighthouse --network goerli`: Goerli (testnet). - -Using the correct `--network` flag is very important; using the wrong flag can -result in penalties, slashings or lost deposits. As a rule of thumb, *always* -provide a `--network` flag instead of relying on the default. - -For the testnets we support [Goerli](https://goerli.net/) (`--network goerli`), -[Sepolia](https://sepolia.dev/) (`--network sepolia`), and [Gnosis chain](https://www.gnosis.io/) (`--network gnosis`). - -Minor modifications depend on if you want to run your node while [staking](#staking) or [non-staking](#non-staking). -In the following, we will provide examples of what a Lighthouse setup could look like. +In this step, we will set up a beacon node. Use the following command to start a beacon node that connects to the execution node: ### Staking @@ -84,9 +50,30 @@ lighthouse bn \ --http ``` -A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag. -The default listen address is `127.0.0.1:5052`. -The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys. +> Note: If you download the binary file, you need to navigate to the directory of the binary file to run the above command. + +Notable flags: +- `--network` flag, which selects a network: + - `lighthouse` (no flag): Mainnet. + - `lighthouse --network mainnet`: Mainnet. + - `lighthouse --network goerli`: Goerli (testnet). + - `lighthouse --network sepolia`: Sepolia (testnet). 
+ - `lighthouse --network gnosis`: Gnosis chain + + > Note: Using the correct `--network` flag is very important; using the wrong flag can +result in penalties, slashings or lost deposits. As a rule of thumb, *always* +provide a `--network` flag instead of relying on the default. +- `--execution-endpoint`: the URL of the execution engine API. If the execution engine is running on the same computer with the default port, this will be + `http://localhost:8551`. +- `--execution-jwt`: the path to the JWT secret file shared by Lighthouse and the + execution engine. This is a mandatory form of authentication which ensures that Lighthouse has the authority to control the execution engine. +- `--checkpoint-sync-url`: Lighthouse supports fast sync from a recent finalized checkpoint. Checkpoint sync is *optional*; however, we **highly recommend** it since it is substantially faster than syncing from genesis while still providing the same functionality. The checkpoint sync is done using [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) provided by the Ethereum community. For example, in the above command, we use the URL for Sigma Prime's checkpoint sync server for mainnet `https://mainnet.checkpoint.sigp.io`. +- `--http`: to expose an HTTP server of the beacon chain. The default listening address is `http://localhost:5052`. The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys. + + + +If you intend to run the beacon node without running the validator client (e.g., for non-staking purposes such as supporting the network), you can modify the above command so that the beacon node is configured for non-staking purposes: + ### Non-staking @@ -99,17 +86,19 @@ lighthouse bn \ --disable-deposit-contract-sync ``` -Since we are not staking, we can use the `--disable-deposit-contract-sync` flag. 
+Since we are not staking, we can use the `--disable-deposit-contract-sync` flag to disable syncing of deposit logs from the execution node. + ---- Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly. -## Step 4: Check logs + + +## Step 4: Check logs for sync status Several logs help you identify if Lighthouse is running correctly. ### Logs - Checkpoint sync -Lighthouse will print a message to indicate that checkpoint sync is being used: +If you run Lighthouse with the flag `--checkpoint-sync-url`, Lighthouse will print a message to indicate that checkpoint sync is being used: ``` INFO Starting checkpoint sync remote_url: http://remote-bn:8000/, service: beacon @@ -122,16 +111,17 @@ loaded from the remote beacon node: INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon ``` -Once the checkpoint is loaded Lighthouse will sync forwards to the head of the chain. +Once the checkpoint is loaded, Lighthouse will sync forwards to the head of the chain. -If a validator client is connected to the node then it will be able to start completing its duties -as soon as forwards sync completes. +If a validator client is connected to the beacon node it will be able to start its duties as soon as forwards sync completes, which typically takes 1-2 minutes. + +> Note: If you have an existing Lighthouse database, you will need to delete the database by using the `--purge-db` flag or manually delete the database with `sudo rm -r /path_to_database/beacon`. If you do use a `--purge-db` flag, once checkpoint sync is complete, you can remove the flag upon a restart. 
> **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint > against a trusted source like another [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/), > a friend's node, or a block explorer. -#### Backfilling Blocks +### Backfilling Blocks Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks from the checkpoint back to genesis. @@ -156,16 +146,17 @@ as `verified` indicating that they have been processed successfully by the execu INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78 ``` +Once you see the above message - congratulations! This means that your node is synced and you have contributed to the decentralization and security of the Ethereum network. -## Step 5: Further readings +## Further readings Several other resources are the next logical step to explore after running your beacon node: -- Learn how to [become a validator](./mainnet-validator.md); +- If you intend to run a validator, proceed to [become a validator](./mainnet-validator.md); - Explore how to [manage your keys](./key-management.md); - Research on [validator management](./validator-management.md); - Dig into the [APIs](./api.md) that the beacon node and validator client provide; - Study even more about [checkpoint sync](./checkpoint-sync.md); or - Investigate what steps had to be taken in the past to execute a smooth [merge migration](./merge-migration.md). -Finally, if you a struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help! \ No newline at end of file +Finally, if you are struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help! 
diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md index a60c8e36dc..6e2ca65b41 100644 --- a/book/src/slashing-protection.md +++ b/book/src/slashing-protection.md @@ -21,8 +21,8 @@ and carefully to keep your validators safe. See the [Troubleshooting](#troublesh The database will be automatically created, and your validators registered with it when: -* Importing keys from another source (e.g. Launchpad, Teku, Prysm, `ethdo`). - See [the docs on importing keys](./validator-import-launchpad.md). +* Importing keys from another source (e.g. [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases), Lodestar, Nimbus, Prysm, Teku, [ethdo](https://github.com/wealdtech/ethdo)). + See [import validator keys](./mainnet-validator.md#step-3-import-validator-keys-to-lighthouse). * Creating keys using Lighthouse itself (`lighthouse account validator create`) * Creating keys via the [validator client API](./api-vc.md). @@ -45,7 +45,7 @@ Examples of circumstances where the slashing protection database is effective ar your client to be imported into Lighthouse's slashing protection database. See [Import and Export](#import-and-export). * Misplacing `slashing_protection.sqlite` during a datadir change or migration between machines. - By default Lighthouse will refuse to start if it finds validator keys that are not registered + By default, Lighthouse will refuse to start if it finds validator keys that are not registered in the slashing protection database. Examples where it is **ineffective** are: @@ -54,7 +54,7 @@ Examples where it is **ineffective** are: clients (e.g. Lighthouse and Prysm) running on the same machine, two Lighthouse instances using different datadirs, or two clients on completely different machines (e.g. one on a cloud server and one running locally). You are responsible for ensuring that your validator keys are never - running simultaneously – the slashing protection DB **cannot protect you in this case**. 
+ running simultaneously – the slashing protection database **cannot protect you in this case**. * Importing keys from another client without also importing voting history. * If you use `--init-slashing-protection` to recreate a missing slashing protection database. @@ -64,19 +64,22 @@ Lighthouse supports the slashing protection interchange format described in [EIP interchange file is a record of blocks and attestations signed by a set of validator keys – basically a portable slashing protection database! -With your validator client stopped, you can import a `.json` interchange file from another client +To import a slashing protection database to Lighthouse, you first need to export your existing client's database. Instructions to export the slashing protection database for other clients are listed below: +- [Lodestar](https://chainsafe.github.io/lodestar/reference/cli/#validator-slashing-protection-export) +- [Nimbus](https://nimbus.guide/migration.html#2-export-slashing-protection-history) +- [Prysm](https://docs.prylabs.network/docs/wallet/slashing-protection#exporting-your-validators-slashing-protection-history) +- [Teku](https://docs.teku.consensys.net/HowTo/Prevent-Slashing#export-a-slashing-protection-file) + + +Once you have the slashing protection database from your existing client, you can now import the database to Lighthouse. With your validator client stopped, you can import a `.json` interchange file from another client using this command: ```bash lighthouse account validator slashing-protection import ``` -Instructions for exporting your existing client's database are out of scope for this document, -please check the other client's documentation for instructions. - When importing an interchange file, you still need to import the validator keystores themselves -separately, using the instructions for [importing keystores into -Lighthouse](./validator-import-launchpad.md). 
+separately, using the instructions for [import validator keys](./mainnet-validator.md#step-3-import-validator-keys-to-lighthouse). --- @@ -93,7 +96,7 @@ up to date. ### How Import Works -Since version 1.6.0 Lighthouse will ignore any slashable data in the import data and will safely +Since version 1.6.0, Lighthouse will ignore any slashable data in the import data and will safely update the low watermarks for blocks and attestations. It will store only the maximum-slot block for each validator, and the maximum source/target attestation. This is faster than importing all data while also being more resilient to repeated imports & stale data. @@ -121,7 +124,7 @@ Oct 12 14:41:26.415 CRIT Failed to start validator client reason: Failed Ensure that `slashing_protection.sqlite` is in "/home/karlm/.lighthouse/mainnet/validators" folder ``` -Usually this indicates that during some manual intervention the slashing database has been +Usually this indicates that during some manual intervention, the slashing database has been misplaced. This error can also occur if you have upgraded from Lighthouse v0.2.x to v0.3.x without moving the slashing protection database. If you have imported your keys into a new node, you should never see this error (see [Initialization](#initialization)). @@ -137,7 +140,7 @@ the Lighthouse validator client with the `--init-slashing-protection` flag. This dangerous and should not be used lightly, and we **strongly recommend** you try finding your old slashing protection database before using it. If you do decide to use it, you should wait at least 1 epoch (~7 minutes) from when your validator client was last actively signing -messages. If you suspect your node experienced a clock drift issue you should wait +messages. If you suspect your node experienced a clock drift issue, you should wait longer. Remember that the inactivity penalty for being offline for even a day or so is approximately equal to the rewards earned in a day. 
You will get slashed if you use `--init-slashing-protection` incorrectly. diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index c1739aa937..f3ece85062 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -1,14 +1,12 @@ # Suggested Fee Recipient The _fee recipient_ is an Ethereum address nominated by a beacon chain validator to receive -tips from user transactions. If you run validators on a network that has already merged -or is due to merge soon then you should nominate a fee recipient for your validators. +tips from user transactions. Given that all mainnet and testnets have gone through [The Merge](https://ethereum.org/en/roadmap/merge/), if you run validators on a network, you are strongly recommended to nominate a fee recipient for your validators. Failing to nominate a fee recipient will result in losing the tips from transactions. ## Background During post-merge block production, the Beacon Node (BN) will provide a `suggested_fee_recipient` to -the execution node. This is a 20-byte Ethereum address which the EL might choose to set as the -coinbase and the recipient of other fees or rewards. +the execution node. This is a 20-byte Ethereum address which the execution node might choose to set as the recipient of other fees or rewards. There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees, it may use any address it chooses. It is assumed that an honest execution node *will* use the @@ -189,4 +187,4 @@ accumulates other staking rewards. The reason for this is that transaction fees validated by the execution node, and therefore need to be paid to an address that exists on the execution chain. Validators use BLS keys which do not correspond to Ethereum addresses, so they have no "presence" on the execution chain. Therefore, it's necessary for each validator to nominate -a separate fee recipient address. +a fee recipient address. 
diff --git a/book/src/system-requirements.md b/book/src/system-requirements.md deleted file mode 100644 index 0c51d07cce..0000000000 --- a/book/src/system-requirements.md +++ /dev/null @@ -1,23 +0,0 @@ -# System Requirements - -Lighthouse is able to run on most low to mid-range consumer hardware, but will perform best when -provided with ample system resources. The following system requirements are for running a beacon -node and a validator client with a modest number of validator keys (less than 100). - -## Minimum - -* Dual-core CPU, 2015 or newer -* 8 GB RAM -* 128 GB solid state storage -* 10 Mb/s download, 5 Mb/s upload broadband connection - -During smooth network conditions, Lighthouse's database will fit within 15 GB, but in case of a long -period of non-finality, it is **strongly recommended** that at least 128 GB is available. - -## Recommended - -* Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer -* 16 GB RAM -* 256 GB solid state storage -* 100 Mb/s download, 20 Mb/s upload broadband connection - diff --git a/book/src/testnet-validator.md b/book/src/testnet-validator.md deleted file mode 100644 index 98ba66c244..0000000000 --- a/book/src/testnet-validator.md +++ /dev/null @@ -1,23 +0,0 @@ -# Become a Testnet Validator - -[mainnet-validator]: ./mainnet-validator.md -[prater-launchpad]: https://prater.launchpad.ethereum.org/ - -Joining an Ethereum consensus testnet is a great way to get familiar with staking in Phase 0. All users should -experiment with a testnet prior to staking mainnet ETH. - -To join a testnet, you can follow the [Become an Ethereum consensus Mainnet Validator][mainnet-validator] -instructions but with a few differences: - -1. Use the appropriate Staking launchpad website: - - [Prater][prater-launchpad] -1. Instead of `--network mainnet`, use the appropriate network flag: - - `--network prater`: Prater. -1. Use a Goerli execution node instead of a mainnet one: - - For Geth, this means using `geth --goerli --http`. -1. 
Notice that Lighthouse will store its files in a different directory by default: - - `~/.lighthouse/prater`: Prater. - -> -> **Never use real ETH to join a testnet!** All of the testnets listed here use Goerli ETH which is -> basically worthless. This allows experimentation without real-world costs. diff --git a/book/src/ui-authentication.md b/book/src/ui-authentication.md new file mode 100644 index 0000000000..0572824d5c --- /dev/null +++ b/book/src/ui-authentication.md @@ -0,0 +1,33 @@ +# Authentication + +To enhance the security of your account, we offer the option to set a session password. This allows the user to avoid re-entering the api-token when performing critical mutating operations on the validator. Instead a user can simply enter their session password. In the absence of a session password, Siren will revert to the api-token specified in your configuration settings as the default security measure. + +> This does not protect your validators from unauthorized device access. + +![](imgs/ui-session-auth.png) + +Session passwords must contain at least: + +- 12 characters +- 1 lowercase letter +- 1 uppercase letter +- 1 number +- 1 special character + + +## Protected Actions + +Prior to executing any sensitive validator action, Siren will request authentication of the session password or api-token. + +![](imgs/ui-exit.png) + + +In the event of three consecutive failed attempts, Siren will initiate a security measure by locking all actions and prompting for configuration settings to be renewed to regain access to these features. + +![](imgs/ui-fail-auth.png) + +## Auto Connect + +In the event that auto-connect is enabled, refreshing the Siren application will result in a prompt to authenticate the session password or api-token. If three consecutive authentication attempts fail, Siren will activate a security measure by locking the session and prompting for configuration settings to be reset to regain access. 
+ +![](imgs/ui-autoconnect-auth.png) \ No newline at end of file diff --git a/book/src/validator-create.md b/book/src/validator-create.md deleted file mode 100644 index f13c449b9f..0000000000 --- a/book/src/validator-create.md +++ /dev/null @@ -1,90 +0,0 @@ -# Create a validator - -[launchpad]: https://launchpad.ethereum.org/ - -> -> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** - -Validators are fundamentally represented by a BLS keypair. In Lighthouse, we -use a [wallet](./wallet-create.md) to generate these keypairs. Once a wallet -exists, the `lighthouse account validator create` command is used to generate -the BLS keypair and all necessary information to submit a validator deposit and -have that validator operate in the `lighthouse validator_client`. - -## Usage - -To create a validator from a [wallet](./wallet-create.md), use the `lighthouse -account validator create` command: - -```bash -lighthouse account validator create --help - -Creates new validators from an existing EIP-2386 wallet using the EIP-2333 HD key derivation scheme. - -USAGE: - lighthouse account_manager validator create [FLAGS] [OPTIONS] - -FLAGS: - -h, --help Prints help information - --stdin-inputs If present, read all user inputs from stdin instead of tty. - --store-withdrawal-keystore If present, the withdrawal keystore will be stored alongside the voting keypair. - It is generally recommended to *not* store the withdrawal key and instead - generate them from the wallet seed when required. - -V, --version Prints version information - -OPTIONS: - --at-most - Observe the number of validators in --validator-dir, only creating enough to reach the given count. Never - deletes an existing validator. - --count - The number of validators to create, regardless of how many already exist - - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. 
Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - The verbosity level for emitting logs. [default: info] [possible values: info, debug, trace, warn, error, - crit] - --deposit-gwei - The GWEI value of the deposit amount. Defaults to the minimum amount required for an active validator - (MAX_EFFECTIVE_BALANCE) - --network - Name of the Eth2 chain Lighthouse will sync and follow. [default: mainnet] [possible values: prater, mainnet] - --secrets-dir - The path where the validator keystore passwords will be stored. Defaults to ~/.lighthouse/{network}/secrets - - -s, --spec - This flag is deprecated, it will be disallowed in a future release. This value is now derived from the - --network or --testnet-dir flags. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --wallet-name Use the wallet identified by this name - --wallet-password - A path to a file containing the password which will unlock the wallet. - - --wallets-dir - A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets -``` - -## Example - -The example assumes that the `wally` wallet was generated from the -[wallet](./wallet-create.md) example. - -```bash -lighthouse --network prater account validator create --wallet-name wally --wallet-password wally.pass --count 1 -``` - -This command will: - -- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/{network}/wallets`, updating it so that it generates a - new key next time. -- Create a new directory in `~/.lighthouse/{network}/validators` containing: - - An encrypted keystore containing the validators voting keypair. 
- - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH` - for most testnets and mainnet) which can be submitted to the deposit - contract for the Prater testnet. Other testnets can be set via the - `--network` CLI param. -- Store a password to the validators voting keypair in `~/.lighthouse/{network}/secrets`. diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md index d880cce0ae..6eaddcc7b0 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator-doppelganger.md @@ -16,8 +16,7 @@ achieves this by staying silent for 2-3 epochs after a validator is started so i other instances of that validator before starting to sign potentially slashable messages. > Note: Doppelganger Protection is not yet interoperable, so if it is configured on a Lighthouse -> validator client, the client must be connected to a Lighthouse beacon node. Because Infura -> uses Teku, Lighthouse's Doppelganger Protection cannot yet be used with Infura's Eth2 service. +> validator client, the client must be connected to a Lighthouse beacon node. ## Initial Considerations @@ -30,9 +29,9 @@ is no guarantee that your Beacon Node (BN) will see messages from it. **It is fe doppelganger protection to fail to detect another validator due to network faults or other common circumstances.** -DP should be considered a last-line-of-defence that *might* save a validator from being slashed due +DP should be considered as a last-line-of-defence that *might* save a validator from being slashed due to operator error (i.e. running two instances of the same validator). Users should -*never* rely upon DP and should practice the same caution with regards to duplicating validators as +*never* rely upon DP and should practice the same caution with regard to duplicating validators as if it did not exist. 
**Remember: even with doppelganger protection enabled, it is not safe to run two instances of the @@ -44,7 +43,7 @@ DP works by staying silent on the network for 2-3 epochs before starting to sign Staying silent and refusing to sign messages will cause the following: - 2-3 missed attestations, incurring penalties and missed rewards. -- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards (post-Altair upgrade only). +- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards. - Potentially missed rewards by missing a block proposal (if the validator is an elected block proposer, which is unlikely). @@ -105,7 +104,7 @@ there is no other instance of that validator running elsewhere!** The steps to solving a doppelganger vary depending on the case, but some places to check are: 1. Is there another validator process running on this host? - - Unix users can check `ps aux | grep lighthouse` + - Unix users can check by running the command `ps aux | grep lighthouse` - Windows users can check the Task Manager. 1. Has this validator recently been moved from another host? Check to ensure it's not running. 1. Has this validator been delegated to a staking service? diff --git a/book/src/validator-import-launchpad.md b/book/src/validator-import-launchpad.md deleted file mode 100644 index 9849b91b70..0000000000 --- a/book/src/validator-import-launchpad.md +++ /dev/null @@ -1,111 +0,0 @@ -# Importing from the Ethereum Staking Launch pad - -The [Staking Launchpad](https://github.com/ethereum/eth2.0-deposit) is a website -from the Ethereum Foundation which guides users how to use the -[`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) -command-line program to generate consensus validator keys. 
- -The keys that are generated from `eth2.0-deposit-cli` can be easily loaded into -a Lighthouse validator client (`lighthouse vc`). In fact, both of these -programs are designed to work with each other. - -This guide will show the user how to import their keys into Lighthouse so they -can perform their duties as a validator. The guide assumes the user has already -[installed Lighthouse](./installation.md). - -## Instructions - -Whilst following the steps on the website, users are instructed to download the -[`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) -repository. This `eth2-deposit-cli` script will generate the validator BLS keys -into a `validator_keys` directory. We assume that the user's -present-working-directory is the `eth2-deposit-cli` repository (this is where -you will be if you just ran the `./deposit.sh` script from the Staking Launch pad -website). If this is not the case, simply change the `--directory` to point to -the `validator_keys` directory. - -Now, assuming that the user is in the `eth2-deposit-cli` directory and they're -using the default (`~/.lighthouse/{network}/validators`) `validators` directory (specify a different one using -`--validators-dir` flag), they can follow these steps: - -### 1. Run the `lighthouse account validator import` command. - -Docker users should use the command from the [Docker](#docker) -section, all other users can use: - - -```bash -lighthouse --network mainnet account validator import --directory validator_keys -``` - -Note: The user must specify the consensus client network that they are importing the keys for using the `--network` flag. 
- - -After which they will be prompted for a password for each keystore discovered: - -``` -Keystore found at "validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json": - - - Public key: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 - - UUID: 8ea4cf99-8719-43c5-9eda-e97b8a4e074f - -If you enter a password it will be stored in validator_definitions.yml so that it is not required each time the validator client starts. - -Enter a password, or press enter to omit a password: -``` - -The user can choose whether or not they'd like to store the validator password -in the [`validator_definitions.yml`](./validator-management.md) file. If the -password is *not* stored here, the validator client (`lighthouse vc`) -application will ask for the password each time it starts. This might be nice -for some users from a security perspective (i.e., if it is a shared computer), -however it means that if the validator client restarts, the user will be liable -to off-line penalties until they can enter the password. If the user trusts the -computer that is running the validator client and they are seeking maximum -validator rewards, we recommend entering a password at this point. - -Once the process is done the user will see: - -``` -Successfully imported keystore. -Successfully updated validator_definitions.yml. - -Successfully imported 1 validators (0 skipped). - -WARNING: DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH ANOTHER CLIENT, OR YOU WILL GET SLASHED.. -``` - -The import process is complete! - -### 2. Run the `lighthouse vc` command. 
- -Now the keys are imported the user can start performing their validator duties -by running `lighthouse vc` and checking that their validator public key appears -as a `voting_pubkey` in one of the following logs: - -``` -INFO Enabled validator voting_pubkey: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 -``` - -Once this log appears (and there are no errors) the `lighthouse vc` application -will ensure that the validator starts performing its duties and being rewarded -by the protocol. There is no more input required from the user. - -## Docker - -The `import` command is a little more complex for Docker users, but the example -in this document can be substituted with: - -```bash -docker run -it \ - -v $HOME/.lighthouse:/root/.lighthouse \ - -v $(pwd)/validator_keys:/root/validator_keys \ - sigp/lighthouse \ - lighthouse --network MY_NETWORK account validator import --directory /root/validator_keys -``` - -Here we use two `-v` volumes to attach: - -- `~/.lighthouse` on the host to `/root/.lighthouse` in the Docker container. -- The `validator_keys` directory in the present working directory of the host - to the `/root/validator_keys` directory of the Docker container. diff --git a/book/src/validator-management.md b/book/src/validator-management.md index b7d4442de3..be34fef2c3 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -1,10 +1,10 @@ # Validator Management The `lighthouse vc` command starts a *validator client* instance which connects -to a beacon node performs the duties of a staked validator. +to a beacon node to perform the duties of a staked validator. This document provides information on how the validator client discovers the -validators it will act for and how it should obtain their cryptographic +validators it will act for and how it obtains their cryptographic signatures. 
Users that create validators using the `lighthouse account` tool in the @@ -49,7 +49,7 @@ Each permitted field of the file is listed below for reference: - `enabled`: A `true`/`false` indicating if the validator client should consider this validator "enabled". - `voting_public_key`: A validator public key. -- `type`: How the validator signs messages (currently restricted to `local_keystore`). +- `type`: How the validator signs messages (this can be `local_keystore` or `web3signer` (see [Web3Signer](./validator-web3signer.md))). - `voting_keystore_path`: The path to a EIP-2335 keystore. - `voting_keystore_password_path`: The path to the password for the EIP-2335 keystore. - `voting_keystore_password`: The password to the EIP-2335 keystore. @@ -59,7 +59,7 @@ Each permitted field of the file is listed below for reference: ## Populating the `validator_definitions.yml` file -When validator client starts and the `validator_definitions.yml` file doesn't +When a validator client starts and the `validator_definitions.yml` file doesn't exist, a new file will be created. If the `--disable-auto-discover` flag is provided, the new file will be empty and the validator client will not start any validators. If the `--disable-auto-discover` flag is **not** provided, an @@ -71,7 +71,7 @@ recap: ### Automatic validator discovery -When the `--disable-auto-discover` flag is **not** provided, the validator will search the +When the `--disable-auto-discover` flag is **not** provided, the validator client will search the `validator-dir` for validators and add any *new* validators to the `validator_definitions.yml` with `enabled: true`. @@ -89,7 +89,7 @@ name identical to the `voting_public_key` value. #### Discovery Example -Lets assume the following directory structure: +Let's assume the following directory structure: ``` ~/.lighthouse/{network}/validators @@ -158,7 +158,7 @@ start. 
If a validator client were to start using the [first example `validator_definitions.yml` file](#example) it would print the following log, -acknowledging there there are two validators and one is disabled: +acknowledging there are two validators and one is disabled: ``` INFO Initialized validators enabled: 1, disabled: 1 @@ -180,8 +180,8 @@ should not be opened by another process. 1. Proceed to act for that validator, creating blocks and attestations if/when required. If there is an error during any of these steps (e.g., a file is missing or -corrupt) the validator client will log an error and continue to attempt to +corrupt), the validator client will log an error and continue to attempt to process other validators. -When the validator client exits (or the validator is deactivated) it will +When the validator client exits (or the validator is deactivated), it will remove the `voting-keystore.json.lock` to indicate that the keystore is free for use again. diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 9074bc0273..893ec90bdd 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -38,7 +38,7 @@ minutes after start up. #### Example ``` -lighthouse bn --staking --validator-monitor-auto +lighthouse bn --http --validator-monitor-auto ``` ### Manual diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 5056040e4c..d90395c07f 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -1,7 +1,7 @@ -# Voluntary exits +# Voluntary Exits (Full Withdrawals) A validator may chose to voluntarily stop performing duties (proposing blocks and attesting to blocks) by submitting -a voluntary exit transaction to the beacon chain. +a voluntary exit message to the beacon chain. A validator can initiate a voluntary exit provided that the validator is currently active, has not been slashed and has been active for at least 256 epochs (~27 hours) since it has been activated. 
@@ -10,24 +10,15 @@ A validator can initiate a voluntary exit provided that the validator is current It takes at a minimum 5 epochs (32 minutes) for a validator to exit after initiating a voluntary exit. This number can be much higher depending on how many other validators are queued to exit. -## Withdrawal of exited funds - -Even though users can currently perform a voluntary exit, they **cannot withdraw their exited funds at this point in time**. -This implies that the staked funds are effectively **frozen** until withdrawals are enabled in a future hard fork (Capella). - -To understand the rollout strategy for Ethereum upgrades, please visit . - - - ## Initiating a voluntary exit In order to initiate an exit, users can use the `lighthouse account validator exit` command. -- The `--keystore` flag is used to specify the path to the EIP-2335 voting keystore for the validator. +- The `--keystore` flag is used to specify the path to the EIP-2335 voting keystore for the validator. The path should point directly to the validator key `.json` file, _not_ the folder containing the `.json` file. - The `--beacon-node` flag is used to specify a beacon chain HTTP endpoint that confirms to the [Beacon Node API](https://ethereum.github.io/beacon-APIs/) specifications. That beacon node will be used to validate and propagate the voluntary exit. The default value for this flag is `http://localhost:5052`. -- The `--network` flag is used to specify a particular Eth2 network (default is `mainnet`). +- The `--network` flag is used to specify the network (default is `mainnet`). - The `--password-file` flag is used to specify the path to the file containing the password for the voting keystore. If this flag is not provided, the user will be prompted to enter the password. @@ -39,13 +30,13 @@ The exit phrase is the following: -Below is an example for initiating a voluntary exit on the Prater testnet. +Below is an example for initiating a voluntary exit on the Goerli testnet. 
``` -$ lighthouse --network prater account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 +$ lighthouse --network goerli account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 Running account manager for Prater network -validator-dir path: ~/.lighthouse/prater/validators +validator-dir path: ~/.lighthouse/goerli/validators Enter the keystore password for validator in 0xabcd @@ -55,7 +46,7 @@ Publishing a voluntary exit for validator 0xabcd WARNING: WARNING: THIS IS AN IRREVERSIBLE OPERATION -WARNING: WITHDRAWING STAKED ETH WILL NOT BE POSSIBLE UNTIL ETH1/ETH2 MERGE. + PLEASE VISIT https://lighthouse-book.sigmaprime.io/voluntary-exit.html TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT. @@ -70,3 +61,47 @@ Please keep your validator running till exit epoch Exit epoch in approximately 1920 secs ``` +## Full withdrawal of staked funds + +After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023, if a user initiates a voluntary exit, they will receive the full staked funds to the withdrawal address, provided that the validator has withdrawal credentials of type `0x01`. For more information on how fund withdrawal works, please visit the [Ethereum.org](https://ethereum.org/en/staking/withdrawals/#how-do-withdrawals-work) website. + +## FAQ + +### 1. How to know if I have the withdrawal credentials type `0x01`? + +There are two types of withdrawal credentials, `0x00` and `0x01`. To check which type your validator has, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: + + - `withdrawals enabled` means your validator is of type `0x01`, and you will automatically receive the full withdrawal to the withdrawal address that you set.
+- `withdrawals not enabled` means your validator is of type `0x00`, and you will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). + + +### 2. What if my validator is of type `0x00` and I do not update my withdrawal credentials after I initiated a voluntary exit? + + Your staked funds will continue to be locked on the beacon chain. You can update your withdrawal credentials **anytime**, and there is no deadline for that. The catch is that as long as you do not update your withdrawal credentials, your staked funds will continue to be locked on the beacon chain. Only after you update the withdrawal credentials, will the staked funds be withdrawn to the withdrawal address. + +### 3. How many times can I update my withdrawal credentials? + + If your withdrawal credentials are of type `0x00`, you can only update them once to type `0x01`. It is therefore very important to ensure that the withdrawal address you set is an address under your control, preferably an address controlled by a hardware wallet. + + If your withdrawal credentials are of type `0x01`, it means you have set your withdrawal address previously, and you will not be able to change the withdrawal address. + +### 4. When will my BTEC request (update withdrawal credentials to type `0x01`) be processed? + + Your BTEC request will be included very quickly as soon as a new block is proposed. This should be the case most (if not all) of the time, given that the peak BTEC request time has now passed (right after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023 and lasted for ~2 days). + +### 5. When will I get my staked funds after voluntary exit if my validator is of type `0x01`?
+ + There are 3 waiting periods until you get the staked funds in your withdrawal address: + + - An exit queue: a varying time that takes at a minimum 5 epochs (32 minutes) if there is no queue; or if there are many validators exiting at the same time, it has to go through the exit queue. The exit queue can be from hours to weeks, depending on the number of validators in the exit queue. During this time your validator has to stay online to perform its duties to avoid penalties. + + - A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. + + - A varying time of "validator sweep" that can take up to 5 days (at the time of writing with ~560,000 validators on the mainnet). The "validator sweep" is the process of skimming through all validators by index number for eligible withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set. + + The total time taken is the summation of the above 3 waiting periods. After these waiting periods, you will receive the staked funds in your withdrawal address. + +The voluntary exit and full withdrawal process is summarized in the Figure below. + +![full](./imgs/full-withdrawal.png) + diff --git a/book/src/wallet-create.md b/book/src/wallet-create.md deleted file mode 100644 index 25cac8d34d..0000000000 --- a/book/src/wallet-create.md +++ /dev/null @@ -1,74 +0,0 @@ -# Create a wallet - -[launchpad]: https://launchpad.ethereum.org/ - -> -> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** - -A wallet allows for generating practically unlimited validators from an -easy-to-remember 24-word string (a mnemonic). As long as that mnemonic is -backed up, all validator keys can be trivially re-generated. - -The 24-word string is randomly generated during wallet creation and printed out -to the terminal. 
It's important to **make one or more backups of the mnemonic** -to ensure your ETH is not lost in the case of data loss. It is very important to -**keep your mnemonic private** as it represents the ultimate control of your -ETH. - -Whilst the wallet stores the mnemonic, it does not store it in plain-text: the -mnemonic is encrypted with a password. It is the responsibility of the user to -define a strong password. The password is only required for interacting with -the wallet, it is not required for recovering keys from a mnemonic. - -## Usage - -To create a wallet, use the `lighthouse account wallet` command: - -```bash -lighthouse account wallet create --help - -Creates a new HD (hierarchical-deterministic) EIP-2386 wallet. - -USAGE: - lighthouse account_manager wallet create [OPTIONS] --name --password-file - -FLAGS: - -h, --help Prints help information - -V, --version Prints version information - -OPTIONS: - -d, --datadir Data directory for lighthouse keys and databases. - --mnemonic-output-path - If present, the mnemonic will be saved to this file. DO NOT SHARE THE MNEMONIC. - - --name - The wallet will be created with this name. It is not allowed to create two wallets with the same name for - the same --base-dir. - --password-file - A path to a file containing the password which will unlock the wallet. If the file does not exist, a random - password will be generated and saved at that path. To avoid confusion, if the file does not already exist it - must include a '.pass' suffix. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --type - The type of wallet to create. Only HD (hierarchical-deterministic) wallets are supported presently.. 
- [default: hd] [possible values: hd] -``` - - -## Example - -Creates a new wallet named `wally` and saves it in `~/.lighthouse/prater/wallets` with a randomly generated password saved -to `./wallet.pass`: - -```bash -lighthouse --network prater account wallet create --name wally --password-file wally.pass -``` - -> Notes: -> -> - The password is not `wally.pass`, it is the _contents_ of the -> `wally.pass` file. -> - If `wally.pass` already exists the wallet password will be set to contents -> of that file. diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index b18d38ccd4..7eb37a9b94 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "4.0.1-rc.0" +version = "4.1.0" authors = ["Sigma Prime "] edition = "2021" @@ -10,7 +10,7 @@ clap = "2.33.3" clap_utils = { path = "../common/clap_utils" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } types = { path = "../consensus/types" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" slog = "2.5.2" tokio = "1.14.0" log = "0.4.11" diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 62eb8aa3d5..a882b7ce64 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -11,7 +11,7 @@ clap = "2.33.3" hex = "0.4.2" dirs = "3.0.1" eth2_network_config = { path = "../eth2_network_config" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" ethereum-types = "0.14.1" serde = "1.0.116" serde_json = "1.0.59" diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 7be0e8f3d2..aabc07fc52 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -14,6 +14,6 @@ hex = "0.4.2" [dependencies] types = { path = "../../consensus/types"} -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" ethabi = "16.0.0" diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index eca086d838..2c5e7060b2 100644 --- 
a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -13,15 +13,15 @@ types = { path = "../../consensus/types" } reqwest = { version = "0.11.0", features = ["json","stream"] } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } proto_array = { path = "../../consensus/proto_array", optional = true } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" eth2_keystore = { path = "../../crypto/eth2_keystore" } libsecp256k1 = "0.7.0" ring = "0.16.19" bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } sensitive_url = { path = "../../common/sensitive_url" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 4d74299fff..e03cc2e9b0 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -22,13 +22,14 @@ use lighthouse_network::PeerId; pub use reqwest; use reqwest::{IntoUrl, RequestBuilder, Response}; pub use reqwest::{StatusCode, Url}; -pub use sensitive_url::SensitiveUrl; +pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; use std::convert::TryFrom; use std::fmt; use std::iter::Iterator; use std::path::PathBuf; use std::time::Duration; +use store::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; pub const V1: EndpointVersion = EndpointVersion(1); pub const V2: EndpointVersion = EndpointVersion(2); @@ -338,7 +339,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_root( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -357,7 +358,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_fork( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; 
path.path_segments_mut() @@ -376,7 +377,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_finality_checkpoints( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -396,7 +397,8 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ids: Option<&[ValidatorId]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -426,7 +428,7 @@ impl BeaconNodeHttpClient { state_id: StateId, ids: Option<&[ValidatorId]>, statuses: Option<&[ValidatorStatus]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -466,7 +468,7 @@ impl BeaconNodeHttpClient { slot: Option, index: Option, epoch: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -499,7 +501,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result, Error> { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -522,7 +524,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -547,7 +549,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, validator_id: &ValidatorId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -568,7 +570,7 @@ impl BeaconNodeHttpClient { &self, slot: Option, parent_root: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -595,7 +597,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_headers_block_id( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -675,7 +677,10 @@ impl 
BeaconNodeHttpClient { pub async fn get_beacon_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> { + ) -> Result< + Option>>, + Error, + > { let path = self.get_beacon_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? { Some(res) => res, @@ -691,8 +696,10 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blinded_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> - { + ) -> Result< + Option>>, + Error, + > { let path = self.get_beacon_blinded_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? { Some(res) => res, @@ -760,7 +767,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_root( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -779,7 +786,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_attestations( &self, block_id: BlockId, - ) -> Result>>>, Error> { + ) -> Result>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1267,28 +1274,12 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let path = self.get_debug_beacon_states_path(state_id)?; self.get_opt(path).await } - /// `GET v1/debug/beacon/states/{state_id}` (LEGACY) - pub async fn get_debug_beacon_states_v1( - &self, - state_id: StateId, - ) -> Result>>, Error> { - let mut path = self.eth_path(V1)?; - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("debug") - .push("beacon") - .push("states") - .push(&state_id.to_string()); - - self.get_opt(path).await - } - /// `GET debug/beacon/states/{state_id}` /// `-H "accept: application/octet-stream"` pub async fn get_debug_beacon_states_ssz( @@ -1334,6 +1325,18 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET v1/debug/fork_choice` + pub async fn get_debug_fork_choice(&self) -> Result { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("debug") + .push("fork_choice"); + + self.get(path).await + } + /// `GET validator/duties/proposer/{epoch}` pub async fn get_validator_duties_proposer( &self, @@ -1649,7 +1652,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, indices: &[u64], - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index e50d9f4dc0..bb933dbe12 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -13,7 +13,7 @@ use crate::{ BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, GenericResponse, ValidatorId, }, - BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode, + BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, StateId, StatusCode, }; use proto_array::core::ProtoArray; use reqwest::IntoUrl; @@ -566,4 +566,73 @@ impl BeaconNodeHttpClient { self.post_with_response(path, &()).await } + + /// + /// Analysis endpoints. + /// + + /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot + pub async fn get_lighthouse_analysis_block_rewards( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("analysis") + .push("block_rewards"); + + path.query_pairs_mut() + .append_pair("start_slot", &start_slot.to_string()) + .append_pair("end_slot", &end_slot.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/block_packing?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_block_packing( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("block_packing_efficiency"); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/attestation_performance/{index}?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_attestation_performance( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + target: String, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("analysis") + .push("attestation_performance") + .push(&target); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } } diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs index 314ffb8512..bebd1c661b 100644 --- a/common/eth2/src/lighthouse/attestation_rewards.rs +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -6,32 +6,32 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct IdealAttestationRewards { // Validator's effective balance in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, // Ideal attester's reward for head vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub head: u64, // Ideal attester's reward for target vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub target: u64, // Ideal attester's reward for source vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub source: u64, } #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct TotalAttestationRewards { // one entry for every validator based on their attestations in the epoch - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // attester's reward for head vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub head: u64, // attester's reward for target vote in gwei - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub target: i64, // attester's reward for source vote in gwei - #[serde(with = 
"eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub source: i64, // TBD attester's inclusion_delay reward in gwei (phase0 only) // pub inclusion_delay: u64, diff --git a/common/eth2/src/lighthouse/standard_block_rewards.rs b/common/eth2/src/lighthouse/standard_block_rewards.rs index 502577500d..15fcdc6066 100644 --- a/common/eth2/src/lighthouse/standard_block_rewards.rs +++ b/common/eth2/src/lighthouse/standard_block_rewards.rs @@ -5,22 +5,22 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct StandardBlockReward { // proposer of the block, the proposer index who receives these rewards - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, // total block reward in gwei, // equal to attestations + sync_aggregate + proposer_slashings + attester_slashings - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub total: u64, // block reward component due to included attestations in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub attestations: u64, // block reward component due to included sync_aggregate in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub sync_aggregate: u64, // block reward component due to included proposer_slashings in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_slashings: u64, // block reward component due to included attester_slashings in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub attester_slashings: u64, } diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs b/common/eth2/src/lighthouse/sync_committee_rewards.rs index e215d8e3e0..66a721dc22 100644 --- a/common/eth2/src/lighthouse/sync_committee_rewards.rs 
+++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -5,9 +5,9 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct SyncCommitteeReward { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // sync committee reward in gwei for the validator - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub reward: i64, } diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index a2e3e3f6ff..b2c8e03cd6 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -57,7 +57,7 @@ pub fn parse_pubkey(secret: &str) -> Result, Error> { &secret[SECRET_PREFIX.len()..] }; - eth2_serde_utils::hex::decode(secret) + serde_utils::hex::decode(secret) .map_err(|e| Error::InvalidSecret(format!("invalid hex: {:?}", e))) .and_then(|bytes| { if bytes.len() != PK_LEN { @@ -174,7 +174,7 @@ impl ValidatorClientHttpClient { let message = Message::parse_slice(digest(&SHA256, &body).as_ref()).expect("sha256 is 32 bytes"); - eth2_serde_utils::hex::decode(&sig) + serde_utils::hex::decode(&sig) .ok() .and_then(|bytes| { let sig = Signature::parse_der(&bytes).ok()?; @@ -657,6 +657,30 @@ impl ValidatorClientHttpClient { let url = self.make_gas_limit_url(pubkey)?; self.delete_with_raw_response(url, &()).await } + + /// `POST /eth/v1/validator/{pubkey}/voluntary_exit` + pub async fn post_validator_voluntary_exit( + &self, + pubkey: &PublicKeyBytes, + epoch: Option, + ) -> Result { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("voluntary_exit"); + + if let Some(epoch) = epoch { + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + } + + self.post(path, &()).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response or a diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 077850b030..33e2f764ef 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -14,7 +14,7 @@ pub struct GetFeeRecipientResponse { #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct GetGasLimitResponse { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, } @@ -46,7 +46,7 @@ pub struct ImportKeystoresRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(transparent)] -pub struct KeystoreJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Keystore); +pub struct KeystoreJsonStr(#[serde(with = "serde_utils::json_str")] pub Keystore); impl std::ops::Deref for KeystoreJsonStr { type Target = Keystore; @@ -57,7 +57,7 @@ impl std::ops::Deref for KeystoreJsonStr { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(transparent)] -pub struct InterchangeJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Interchange); +pub struct InterchangeJsonStr(#[serde(with = "serde_utils::json_str")] pub Interchange); #[derive(Debug, Deserialize, Serialize)] pub struct ImportKeystoresResponse { @@ -104,7 +104,7 @@ pub struct DeleteKeystoresRequest { #[derive(Debug, Deserialize, Serialize)] pub struct DeleteKeystoresResponse { pub data: Vec>, - #[serde(with = "eth2_serde_utils::json_str")] + #[serde(with = "serde_utils::json_str")] pub slashing_protection: Interchange, } diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs 
index 2d9f01c292..5b2b3d889d 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -32,14 +32,14 @@ pub struct ValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_gwei: u64, } #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct CreateValidatorsMnemonicRequest { pub mnemonic: ZeroizeString, - #[serde(with = "eth2_serde_utils::quoted_u32")] + #[serde(with = "serde_utils::quoted_u32")] pub key_derivation_path_offset: u32, pub validators: Vec, } @@ -62,7 +62,7 @@ pub struct CreatedValidator { #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, pub eth1_deposit_tx_data: String, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_gwei: u64, } @@ -141,14 +141,19 @@ pub struct UpdateFeeRecipientRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct UpdateGasLimitRequest { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, } +#[derive(Deserialize)] +pub struct VoluntaryExitQuery { + pub epoch: Option, +} + #[derive(Deserialize, Serialize)] pub struct ExportKeystoresResponse { pub data: Vec, - #[serde(with = "eth2_serde_utils::json_str")] + #[serde(with = "serde_utils::json_str")] pub slashing_protection: Interchange, } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 175c7db786..f58dc8e2a4 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -82,10 +82,10 @@ impl std::fmt::Display for EndpointVersion { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct GenesisData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, pub genesis_validators_root: Hash256, 
- #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub genesis_fork_version: [u8; 4], } @@ -200,6 +200,14 @@ pub struct ExecutionOptimisticResponse { + pub execution_optimistic: Option, + pub finalized: Option, + pub data: T, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] pub struct GenericResponse { @@ -222,6 +230,18 @@ impl GenericResponse { data: self.data, } } + + pub fn add_execution_optimistic_finalized( + self, + execution_optimistic: bool, + finalized: bool, + ) -> ExecutionOptimisticFinalizedResponse { + ExecutionOptimisticFinalizedResponse { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + data: self.data, + } + } } #[derive(Debug, PartialEq, Clone, Serialize)] @@ -296,9 +316,9 @@ impl fmt::Display for ValidatorId { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub balance: u64, pub status: ValidatorStatus, pub validator: Validator, @@ -306,9 +326,9 @@ pub struct ValidatorData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorBalanceData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub balance: u64, } @@ -471,16 +491,16 @@ pub struct ValidatorsQuery { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CommitteeData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub 
validators: Vec, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncCommitteeByValidatorIndices { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub validators: Vec, pub validator_aggregates: Vec, } @@ -493,7 +513,7 @@ pub struct RandaoMix { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(transparent)] pub struct SyncSubcommittee { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub indices: Vec, } @@ -518,7 +538,7 @@ pub struct BlockHeaderData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct DepositContractData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub chain_id: u64, pub address: Address, } @@ -542,7 +562,7 @@ pub struct IdentityData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MetaData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub seq_number: u64, pub attnets: String, pub syncnets: String, @@ -629,27 +649,27 @@ pub struct ValidatorBalancesQuery { #[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] -pub struct ValidatorIndexData(#[serde(with = "eth2_serde_utils::quoted_u64_vec")] pub Vec); +pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec); /// Borrowed variant of `ValidatorIndexData`, for serializing/sending. 
#[derive(Clone, Copy, Serialize)] #[serde(transparent)] pub struct ValidatorIndexDataRef<'a>( - #[serde(serialize_with = "eth2_serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], + #[serde(serialize_with = "serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], ); #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AttesterData { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_index: CommitteeIndex, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_length: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_committee_index: u64, pub slot: Slot, } @@ -657,7 +677,7 @@ pub struct AttesterData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ProposerData { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub slot: Slot, } @@ -706,11 +726,11 @@ pub struct ValidatorAggregateAttestationQuery { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct BeaconCommitteeSubscription { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, pub slot: Slot, pub is_aggregator: bool, @@ -831,13 +851,13 @@ impl fmt::Display for PeerDirection { #[derive(Debug, 
Clone, PartialEq, Serialize, Deserialize)] pub struct PeerCount { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub connected: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub connecting: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub disconnected: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub disconnecting: u64, } @@ -872,7 +892,7 @@ pub struct SseHead { #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseChainReorg { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub depth: u64, pub old_head_block: Hash256, pub old_head_state: Hash256, @@ -905,7 +925,7 @@ pub struct SseLateHead { #[serde(untagged)] pub struct SsePayloadAttributes { #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub timestamp: u64, #[superstruct(getter(copy))] pub prev_randao: Hash256, @@ -918,10 +938,10 @@ pub struct SsePayloadAttributes { #[derive(PartialEq, Debug, Deserialize, Serialize, Clone)] pub struct SseExtendedPayloadAttributesGeneric { pub proposal_slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub parent_block_number: u64, pub parent_block_hash: ExecutionBlockHash, pub payload_attributes: T, @@ -1185,18 +1205,38 @@ fn parse_accept(accept: &str) -> Result, String> { #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub indices: Vec, } #[derive(PartialEq, Debug, 
Serialize, Deserialize)] pub struct LivenessResponseData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, pub epoch: Epoch, pub is_live: bool, } +#[derive(Debug, Serialize, Deserialize)] +pub struct ForkChoice { + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, + pub fork_choice_nodes: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct ForkChoiceNode { + pub slot: Slot, + pub block_root: Hash256, + pub parent_root: Option, + pub justified_epoch: Epoch, + pub finalized_epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64")] + pub weight: u64, + pub validity: Option, + pub execution_block_hash: Option, +} + #[cfg(test)] mod tests { use super::*; diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 5f577bedc3..7a376568eb 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] lazy_static = "1.4.0" num-bigint = "0.4.2" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" hex = "0.4.2" serde_yaml = "0.8.13" serde = "1.0.116" diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index eb26f563e0..7b5fa7a8e4 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -20,7 +20,7 @@ extern crate lazy_static; use bls::{Keypair, PublicKey, SecretKey}; -use eth2_hashing::hash; +use ethereum_hashing::hash; use num_bigint::BigUint; use serde_derive::{Deserialize, Serialize}; use std::convert::TryInto; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 95cea62d44..f8382c95d3 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -16,6 +16,6 @@ tempfile = "3.1.0" [dependencies] serde_yaml = "0.8.13" types = { path = "../../consensus/types"} 
-eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" eth2_config = { path = "../eth2_config"} discv5 = "0.2.2" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index f4e19e7962..d30f45ca29 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.0.1-rc.0-", - fallback = "Lighthouse/v4.0.1-rc.0" + prefix = "Lighthouse/v4.1.0-", + fallback = "Lighthouse/v4.1.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 183f5c9313..1c8813ca2f 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -104,12 +104,23 @@ pub trait SlotClock: Send + Sync + Sized + Clone { self.slot_duration() * 2 / INTERVALS_PER_SLOT as u32 } - /// Returns the `Duration` since the start of the current `Slot`. Useful in determining whether to apply proposer boosts. - fn seconds_from_current_slot_start(&self, seconds_per_slot: u64) -> Option { + /// Returns the `Duration` since the start of the current `Slot` at seconds precision. Useful in determining whether to apply proposer boosts. + fn seconds_from_current_slot_start(&self) -> Option { self.now_duration() .and_then(|now| now.checked_sub(self.genesis_duration())) .map(|duration_into_slot| { - Duration::from_secs(duration_into_slot.as_secs() % seconds_per_slot) + Duration::from_secs(duration_into_slot.as_secs() % self.slot_duration().as_secs()) + }) + } + + /// Returns the `Duration` since the start of the current `Slot` at milliseconds precision. 
+ fn millis_from_current_slot_start(&self) -> Option { + self.now_duration() + .and_then(|now| now.checked_sub(self.genesis_duration())) + .map(|duration_into_slot| { + Duration::from_millis( + (duration_into_slot.as_millis() % self.slot_duration().as_millis()) as u64, + ) }) } diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 06c1ca8f58..2dd041ff07 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -6,3 +6,6 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +lru_cache = { path = "../lru_cache" } +lazy_static = "1.4.0" +parking_lot = "0.12.0" diff --git a/common/unused_port/src/lib.rs b/common/unused_port/src/lib.rs index a5d0817211..386f08a739 100644 --- a/common/unused_port/src/lib.rs +++ b/common/unused_port/src/lib.rs @@ -1,4 +1,8 @@ -use std::net::{TcpListener, UdpSocket}; +use lazy_static::lazy_static; +use lru_cache::LRUTimeCache; +use parking_lot::Mutex; +use std::net::{SocketAddr, TcpListener, UdpSocket}; +use std::time::Duration; #[derive(Copy, Clone)] pub enum Transport { @@ -12,6 +16,13 @@ pub enum IpVersion { Ipv6, } +pub const CACHED_PORTS_TTL: Duration = Duration::from_secs(300); + +lazy_static! { + static ref FOUND_PORTS_CACHE: Mutex> = + Mutex::new(LRUTimeCache::new(CACHED_PORTS_TTL)); +} + /// A convenience wrapper over [`zero_port`]. 
pub fn unused_tcp4_port() -> Result { zero_port(Transport::Tcp, IpVersion::Ipv4) @@ -48,6 +59,20 @@ pub fn zero_port(transport: Transport, ipv: IpVersion) -> Result { IpVersion::Ipv6 => std::net::Ipv6Addr::LOCALHOST.into(), }; let socket_addr = std::net::SocketAddr::new(localhost, 0); + let mut unused_port: u16; + loop { + unused_port = find_unused_port(transport, socket_addr)?; + let mut cache_lock = FOUND_PORTS_CACHE.lock(); + if !cache_lock.contains(&unused_port) { + cache_lock.insert(unused_port); + break; + } + } + + Ok(unused_port) +} + +fn find_unused_port(transport: Transport, socket_addr: SocketAddr) -> Result { let local_addr = match transport { Transport::Tcp => { let listener = TcpListener::bind(socket_addr).map_err(|e| { diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 091140568a..8accddfcb9 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -16,7 +16,7 @@ filesystem = { path = "../filesystem" } types = { path = "../../consensus/types" } rand = "0.8.5" deposit_contract = { path = "../deposit_contract" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" hex = "0.4.2" derivative = "2.1.1" lockfile = { path = "../lockfile" } diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index 0e0ef0707e..c2856003bf 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -eth2_ssz_types = "0.2.2" -eth2_hashing = "0.3.0" -eth2_ssz_derive = "0.3.1" -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ssz_types = "0.5.0" +ethereum_hashing = "1.0.0-beta.2" +ethereum_ssz_derive = "0.5.0" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" smallvec = "1.6.1" [dev-dependencies] diff --git a/consensus/cached_tree_hash/src/cache.rs b/consensus/cached_tree_hash/src/cache.rs index edb60f3060..3b4878503e 100644 --- a/consensus/cached_tree_hash/src/cache.rs +++ 
b/consensus/cached_tree_hash/src/cache.rs @@ -1,7 +1,7 @@ use crate::cache_arena; use crate::SmallVec8; use crate::{Error, Hash256}; -use eth2_hashing::{hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use smallvec::smallvec; use ssz_derive::{Decode, Encode}; use tree_hash::BYTES_PER_CHUNK; diff --git a/consensus/cached_tree_hash/src/test.rs b/consensus/cached_tree_hash/src/test.rs index 244439ab30..69b49826bf 100644 --- a/consensus/cached_tree_hash/src/test.rs +++ b/consensus/cached_tree_hash/src/test.rs @@ -1,6 +1,6 @@ use crate::impls::hash256_iter; use crate::{CacheArena, CachedTreeHash, Error, Hash256, TreeHashCache}; -use eth2_hashing::ZERO_HASHES; +use ethereum_hashing::ZERO_HASHES; use quickcheck_macros::quickcheck; use ssz_types::{ typenum::{Unsigned, U16, U255, U256, U257}, diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index f0381e5ad9..3864d52d47 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -10,8 +10,8 @@ edition = "2021" types = { path = "../types" } state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index b9d2046761..e6c46e83e7 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,7 +1,7 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{ - Block as ProtoBlock, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, - ProtoArrayForkChoice, ReOrgThreshold, + Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, + ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; use slog::{crit, debug, warn, 
Logger}; use ssz_derive::{Decode, Encode}; @@ -533,6 +533,7 @@ where current_slot: Slot, canonical_head: Hash256, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result>> { // Ensure that fork choice has already been updated for the current slot. This prevents @@ -564,6 +565,7 @@ where canonical_head, self.fc_store.justified_balances(), re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, ) .map_err(ProposerHeadError::convert_inner_error) @@ -573,6 +575,7 @@ where &self, canonical_head: Hash256, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result>> { let current_slot = self.fc_store.get_current_slot(); @@ -582,6 +585,7 @@ where canonical_head, self.fc_store.justified_balances(), re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, ) .map_err(ProposerHeadError::convert_inner_error) diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 2c0dbf1a75..2b883f8646 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" lazy_static = "1.4.0" safe_arith = { path = "../safe_arith" } diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index 887deb1efd..dc3de71cef 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -1,4 +1,4 @@ -use eth2_hashing::{hash, hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash, hash32_concat, ZERO_HASHES}; use ethereum_types::H256; use lazy_static::lazy_static; use safe_arith::ArithError; diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 205ef8f521..81a535e34a 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -10,9 
+10,10 @@ path = "src/bin.rs" [dependencies] types = { path = "../types" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" safe_arith = { path = "../safe_arith" } +superstruct = "0.5.0" \ No newline at end of file diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index c55739da79..35cb4007b7 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -14,6 +14,8 @@ pub enum Error { InvalidBestDescendant(usize), InvalidParentDelta(usize), InvalidNodeDelta(usize), + MissingJustifiedCheckpoint, + MissingFinalizedCheckpoint, DeltaOverflow(usize), ProposerBoostOverflow(usize), ReOrgThresholdOverflow, @@ -50,6 +52,7 @@ pub enum Error { block_root: Hash256, parent_root: Hash256, }, + InvalidEpochOffset(u64), Arith(ArithError), } @@ -66,6 +69,6 @@ pub struct InvalidBestNodeInfo { pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, pub head_root: Hash256, - pub head_justified_checkpoint: Option, - pub head_finalized_checkpoint: Option, + pub head_justified_checkpoint: Checkpoint, + pub head_finalized_checkpoint: Checkpoint, } diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index ede5bb3948..aa26a84306 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -987,11 +987,11 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::AssertWeight { block_root: get_root(0), - weight: 33_000, + weight: 33_250, }); ops.push(Operation::AssertWeight { block_root: get_root(1), - weight: 33_000, + weight: 33_250, }); ops.push(Operation::AssertWeight { block_root: get_root(2), @@ -1000,7 +1000,7 @@ pub fn 
get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::AssertWeight { block_root: get_root(3), // This is a "magic number" generated from `calculate_committee_fraction`. - weight: 31_000, + weight: 31_250, }); // Invalidate the payload of 3. diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index e84139345a..780563954c 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -8,13 +8,13 @@ mod ssz_container; pub use crate::justified_balances::JustifiedBalances; pub use crate::proto_array::{calculate_committee_fraction, InvalidationOperation}; pub use crate::proto_array_fork_choice::{ - Block, DoNotReOrg, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, - ReOrgThreshold, + Block, DisallowedReOrgOffsets, DoNotReOrg, ExecutionStatus, ProposerHeadError, + ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; pub use error::Error; pub mod core { pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; pub use super::proto_array_fork_choice::VoteTracker; - pub use super::ssz_container::SszContainer; + pub use super::ssz_container::{SszContainer, SszContainerV16, SszContainerV17}; } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 2c19206cb7..88111b461d 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -5,6 +5,7 @@ use ssz::four_byte_option_impl; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; +use superstruct::superstruct; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -66,7 +67,13 @@ impl InvalidationOperation { } } -#[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)] +pub type ProtoNode = ProtoNodeV17; + +#[superstruct( + variants(V16, V17), + variant_attributes(derive(Clone, PartialEq, 
Debug, Encode, Decode, Serialize, Deserialize)), + no_enum +)] pub struct ProtoNode { /// The `slot` is not necessary for `ProtoArray`, it just exists so external components can /// easily query the block slot. This is useful for upstream fork choice logic. @@ -85,10 +92,16 @@ pub struct ProtoNode { pub root: Hash256, #[ssz(with = "four_byte_option_usize")] pub parent: Option, + #[superstruct(only(V16))] #[ssz(with = "four_byte_option_checkpoint")] pub justified_checkpoint: Option, + #[superstruct(only(V16))] #[ssz(with = "four_byte_option_checkpoint")] pub finalized_checkpoint: Option, + #[superstruct(only(V17))] + pub justified_checkpoint: Checkpoint, + #[superstruct(only(V17))] + pub finalized_checkpoint: Checkpoint, pub weight: u64, #[ssz(with = "four_byte_option_usize")] pub best_child: Option, @@ -103,6 +116,57 @@ pub struct ProtoNode { pub unrealized_finalized_checkpoint: Option, } +impl TryInto for ProtoNodeV16 { + type Error = Error; + + fn try_into(self) -> Result { + let result = ProtoNode { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: self + .justified_checkpoint + .ok_or(Error::MissingJustifiedCheckpoint)?, + finalized_checkpoint: self + .finalized_checkpoint + .ok_or(Error::MissingFinalizedCheckpoint)?, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + }; + Ok(result) + } +} + +impl Into for ProtoNode { + fn into(self) -> ProtoNodeV16 { + ProtoNodeV16 { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + 
next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: Some(self.justified_checkpoint), + finalized_checkpoint: Some(self.finalized_checkpoint), + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + } + } +} + #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] pub struct ProposerBoost { pub root: Hash256, @@ -320,8 +384,8 @@ impl ProtoArray { parent: block .parent_root .and_then(|parent| self.indices.get(&parent).copied()), - justified_checkpoint: Some(block.justified_checkpoint), - finalized_checkpoint: Some(block.finalized_checkpoint), + justified_checkpoint: block.justified_checkpoint, + finalized_checkpoint: block.finalized_checkpoint, weight: 0, best_child: None, best_descendant: None, @@ -883,14 +947,7 @@ impl ProtoArray { let genesis_epoch = Epoch::new(0); let current_epoch = current_slot.epoch(E::slots_per_epoch()); let node_epoch = node.slot.epoch(E::slots_per_epoch()); - let node_justified_checkpoint = - if let Some(justified_checkpoint) = node.justified_checkpoint { - justified_checkpoint - } else { - // The node does not have any information about the justified - // checkpoint. This indicates an inconsistent proto-array. - return false; - }; + let node_justified_checkpoint = node.justified_checkpoint; let voting_source = if current_epoch > node_epoch { // The block is from a prior epoch, the voting source will be pulled-up. @@ -998,9 +1055,13 @@ impl ProtoArray { // Run this check once, outside of the loop rather than inside the loop. // If the conditions don't match for this node then they're unlikely to // start matching for its ancestors. 
+ for checkpoint in &[node.finalized_checkpoint, node.justified_checkpoint] { + if checkpoint == &self.finalized_checkpoint { + return true; + } + } + for checkpoint in &[ - node.finalized_checkpoint, - node.justified_checkpoint, node.unrealized_finalized_checkpoint, node.unrealized_justified_checkpoint, ] { @@ -1055,13 +1116,9 @@ pub fn calculate_committee_fraction( justified_balances: &JustifiedBalances, proposer_score_boost: u64, ) -> Option { - let average_balance = justified_balances + let committee_weight = justified_balances .total_effective_balance - .checked_div(justified_balances.num_active_validators)?; - let committee_size = justified_balances - .num_active_validators .checked_div(E::slots_per_epoch())?; - let committee_weight = committee_size.checked_mul(average_balance)?; committee_weight .checked_mul(proposer_score_boost)? .checked_div(100) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index eae54e7342..fe831b3c35 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -10,7 +10,10 @@ use crate::{ use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::collections::{BTreeSet, HashMap}; +use std::{ + collections::{BTreeSet, HashMap}, + fmt, +}; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -125,6 +128,17 @@ impl ExecutionStatus { } } +impl fmt::Display for ExecutionStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ExecutionStatus::Valid(_) => write!(f, "valid"), + ExecutionStatus::Invalid(_) => write!(f, "invalid"), + ExecutionStatus::Optimistic(_) => write!(f, "optimistic"), + ExecutionStatus::Irrelevant(_) => write!(f, "irrelevant"), + } + } +} + /// A block that is to be applied to the fork choice. 
/// /// A simplified version of `types::BeaconBlock`. @@ -236,6 +250,9 @@ pub enum DoNotReOrg { ParentDistance, HeadDistance, ShufflingUnstable, + DisallowedOffset { + offset: u64, + }, JustificationAndFinalizationNotCompetitive, ChainNotFinalizing { epochs_since_finalization: u64, @@ -257,6 +274,9 @@ impl std::fmt::Display for DoNotReOrg { Self::ParentDistance => write!(f, "parent too far from head"), Self::HeadDistance => write!(f, "head too far from current slot"), Self::ShufflingUnstable => write!(f, "shuffling unstable at epoch boundary"), + Self::DisallowedOffset { offset } => { + write!(f, "re-orgs disabled at offset {offset}") + } Self::JustificationAndFinalizationNotCompetitive => { write!(f, "justification or finalization not competitive") } @@ -290,6 +310,31 @@ impl std::fmt::Display for DoNotReOrg { #[serde(transparent)] pub struct ReOrgThreshold(pub u64); +/// New-type for disallowed re-org slots. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct DisallowedReOrgOffsets { + // Vecs are faster than hashmaps for small numbers of items. 
+ offsets: Vec, +} + +impl Default for DisallowedReOrgOffsets { + fn default() -> Self { + DisallowedReOrgOffsets { offsets: vec![0] } + } +} + +impl DisallowedReOrgOffsets { + pub fn new(offsets: Vec) -> Result { + for &offset in &offsets { + if offset >= E::slots_per_epoch() { + return Err(Error::InvalidEpochOffset(offset)); + } + } + Ok(Self { offsets }) + } +} + #[derive(PartialEq)] pub struct ProtoArrayForkChoice { pub(crate) proto_array: ProtoArray, @@ -446,6 +491,7 @@ impl ProtoArrayForkChoice { canonical_head: Hash256, justified_balances: &JustifiedBalances, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result> { let info = self.get_proposer_head_info::( @@ -453,6 +499,7 @@ impl ProtoArrayForkChoice { canonical_head, justified_balances, re_org_threshold, + disallowed_offsets, max_epochs_since_finalization, )?; @@ -487,6 +534,7 @@ impl ProtoArrayForkChoice { canonical_head: Hash256, justified_balances: &JustifiedBalances, re_org_threshold: ReOrgThreshold, + disallowed_offsets: &DisallowedReOrgOffsets, max_epochs_since_finalization: Epoch, ) -> Result> { let mut nodes = self @@ -531,6 +579,12 @@ impl ProtoArrayForkChoice { return Err(DoNotReOrg::ShufflingUnstable.into()); } + // Check allowed slot offsets. + let offset = (re_org_block_slot % E::slots_per_epoch()).as_u64(); + if disallowed_offsets.offsets.contains(&offset) { + return Err(DoNotReOrg::DisallowedOffset { offset }.into()); + } + // Check FFG. let ffg_competitive = parent_node.unrealized_justified_checkpoint == head_node.unrealized_justified_checkpoint @@ -700,29 +754,20 @@ impl ProtoArrayForkChoice { .and_then(|i| self.proto_array.nodes.get(i)) .map(|parent| parent.root); - // If a node does not have a `finalized_checkpoint` or `justified_checkpoint` populated, - // it means it is not a descendant of the finalized checkpoint, so it is valid to return - // `None` here. 
- if let (Some(justified_checkpoint), Some(finalized_checkpoint)) = - (block.justified_checkpoint, block.finalized_checkpoint) - { - Some(Block { - slot: block.slot, - root: block.root, - parent_root, - state_root: block.state_root, - target_root: block.target_root, - current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), - next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), - justified_checkpoint, - finalized_checkpoint, - execution_status: block.execution_status, - unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, - }) - } else { - None - } + Some(Block { + slot: block.slot, + root: block.root, + parent_root, + state_root: block.state_root, + target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), + next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), + justified_checkpoint: block.justified_checkpoint, + finalized_checkpoint: block.finalized_checkpoint, + execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + }) } /// Returns the `block.execution_status` field, if the block is present. 
diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index ed1efaae1a..de7fa70d6a 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,6 +1,6 @@ use crate::proto_array::ProposerBoost; use crate::{ - proto_array::{ProtoArray, ProtoNode}, + proto_array::{ProtoArray, ProtoNodeV16, ProtoNodeV17}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, Error, JustifiedBalances, }; @@ -8,24 +8,71 @@ use ssz::{four_byte_option_impl, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryFrom; +use superstruct::superstruct; use types::{Checkpoint, Hash256}; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union // selector. four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); -#[derive(Encode, Decode)] +pub type SszContainer = SszContainerV17; + +#[superstruct( + variants(V16, V17), + variant_attributes(derive(Encode, Decode)), + no_enum +)] pub struct SszContainer { pub votes: Vec, pub balances: Vec, pub prune_threshold: usize, pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, - pub nodes: Vec, + #[superstruct(only(V16))] + pub nodes: Vec, + #[superstruct(only(V17))] + pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, pub previous_proposer_boost: ProposerBoost, } +impl TryInto for SszContainerV16 { + type Error = Error; + + fn try_into(self) -> Result { + let nodes: Result, Error> = + self.nodes.into_iter().map(TryInto::try_into).collect(); + + Ok(SszContainer { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes: nodes?, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + }) + } +} + +impl Into for SszContainer { + fn into(self) -> SszContainerV16 { + let 
nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV16 { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + } + } +} + impl From<&ProtoArrayForkChoice> for SszContainer { fn from(from: &ProtoArrayForkChoice) -> Self { let proto_array = &from.proto_array; diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml deleted file mode 100644 index d4ba02765f..0000000000 --- a/consensus/serde_utils/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "eth2_serde_utils" -version = "0.1.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "Serialization and deserialization utilities useful for JSON representations of Ethereum 2.0 types." -license = "Apache-2.0" - -[dependencies] -serde = { version = "1.0.116", features = ["derive"] } -serde_derive = "1.0.116" -serde_json = "1.0.58" -hex = "0.4.2" -ethereum-types = "0.14.1" diff --git a/consensus/serde_utils/src/fixed_bytes_hex.rs b/consensus/serde_utils/src/fixed_bytes_hex.rs deleted file mode 100644 index 4e9dc98aca..0000000000 --- a/consensus/serde_utils/src/fixed_bytes_hex.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! Formats `[u8; n]` as a 0x-prefixed hex string. -//! -//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`. - -use crate::hex::PrefixedHexVisitor; -use serde::de::Error; -use serde::{Deserializer, Serializer}; - -macro_rules! 
bytes_hex { - ($num_bytes: tt) => { - use super::*; - - const BYTES_LEN: usize = $num_bytes; - - pub fn serialize(bytes: &[u8; BYTES_LEN], serializer: S) -> Result - where - S: Serializer, - { - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> - where - D: Deserializer<'de>, - { - let decoded = deserializer.deserialize_str(PrefixedHexVisitor)?; - - if decoded.len() != BYTES_LEN { - return Err(D::Error::custom(format!( - "expected {} bytes for array, got {}", - BYTES_LEN, - decoded.len() - ))); - } - - let mut array = [0; BYTES_LEN]; - array.copy_from_slice(&decoded); - Ok(array) - } - }; -} - -pub mod bytes_4_hex { - bytes_hex!(4); -} - -pub mod bytes_8_hex { - bytes_hex!(8); -} diff --git a/consensus/serde_utils/src/hex.rs b/consensus/serde_utils/src/hex.rs deleted file mode 100644 index 9a2cd65c76..0000000000 --- a/consensus/serde_utils/src/hex.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Provides utilities for parsing 0x-prefixed hex strings. - -use serde::de::{self, Visitor}; -use std::fmt; - -/// Encode `data` as a 0x-prefixed hex string. -pub fn encode>(data: T) -> String { - let hex = hex::encode(data); - - let mut s = "0x".to_string(); - s.push_str(hex.as_str()); - s -} - -/// Decode `data` from a 0x-prefixed hex string. 
-pub fn decode(s: &str) -> Result, String> { - if let Some(stripped) = s.strip_prefix("0x") { - hex::decode(stripped).map_err(|e| format!("invalid hex: {:?}", e)) - } else { - Err("hex must have 0x prefix".to_string()) - } -} - -pub struct PrefixedHexVisitor; - -impl<'de> Visitor<'de> for PrefixedHexVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string with 0x prefix") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - decode(value).map_err(de::Error::custom) - } -} - -pub struct HexVisitor; - -impl<'de> Visitor<'de> for HexVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string (irrelevant of prefix)") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - hex::decode(value.trim_start_matches("0x")) - .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn encoding() { - let bytes = vec![0, 255]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x00ff"); - - let bytes = vec![]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x"); - - let bytes = vec![1, 2, 3]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x010203"); - } -} diff --git a/consensus/serde_utils/src/hex_vec.rs b/consensus/serde_utils/src/hex_vec.rs deleted file mode 100644 index f7f4833628..0000000000 --- a/consensus/serde_utils/src/hex_vec.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Formats `Vec` as a 0x-prefixed hex string. -//! -//! E.g., `vec![0, 1, 2, 3]` serializes as `"0x00010203"`. 
- -use crate::hex::PrefixedHexVisitor; -use serde::{Deserializer, Serializer}; - -pub fn serialize(bytes: &[u8], serializer: S) -> Result -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_str(PrefixedHexVisitor) -} diff --git a/consensus/serde_utils/src/json_str.rs b/consensus/serde_utils/src/json_str.rs deleted file mode 100644 index b9a1813915..0000000000 --- a/consensus/serde_utils/src/json_str.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! Serialize a datatype as a JSON-blob within a single string. -use serde::{ - de::{DeserializeOwned, Error as _}, - ser::Error as _, - Deserialize, Deserializer, Serialize, Serializer, -}; - -/// Serialize as a JSON object within a string. -pub fn serialize(value: &T, serializer: S) -> Result -where - S: Serializer, - T: Serialize, -{ - serializer.serialize_str(&serde_json::to_string(value).map_err(S::Error::custom)?) -} - -/// Deserialize a JSON object embedded in a string. 
-pub fn deserialize<'de, T, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, - T: DeserializeOwned, -{ - let json_str = String::deserialize(deserializer)?; - serde_json::from_str(&json_str).map_err(D::Error::custom) -} diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs deleted file mode 100644 index 5c5dafc665..0000000000 --- a/consensus/serde_utils/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -mod quoted_int; - -pub mod fixed_bytes_hex; -pub mod hex; -pub mod hex_vec; -pub mod json_str; -pub mod list_of_bytes_lists; -pub mod quoted_u64_vec; -pub mod u256_hex_be; -pub mod u32_hex; -pub mod u64_hex_be; -pub mod u8_hex; - -pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex}; -pub use quoted_int::{quoted_i64, quoted_u256, quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/list_of_bytes_lists.rs b/consensus/serde_utils/src/list_of_bytes_lists.rs deleted file mode 100644 index b93321aa06..0000000000 --- a/consensus/serde_utils/src/list_of_bytes_lists.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Formats `Vec` using quotes. -//! -//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. - -use crate::hex; -use serde::ser::SerializeSeq; -use serde::{de, Deserializer, Serializer}; - -pub struct ListOfBytesListVisitor; -impl<'a> serde::de::Visitor<'a> for ListOfBytesListVisitor { - type Value = Vec>; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of 0x-prefixed byte lists") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut vec = vec![]; - - while let Some(val) = seq.next_element::()? 
{ - vec.push(hex::decode(&val).map_err(de::Error::custom)?); - } - - Ok(vec) - } -} - -pub fn serialize(value: &[Vec], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for val in value { - seq.serialize_element(&hex::encode(val))?; - } - seq.end() -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result>, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_any(ListOfBytesListVisitor) -} diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs deleted file mode 100644 index 0cc35aa318..0000000000 --- a/consensus/serde_utils/src/quoted_int.rs +++ /dev/null @@ -1,247 +0,0 @@ -//! Formats some integer types using quotes. -//! -//! E.g., `1` serializes as `"1"`. -//! -//! Quotes can be optional during decoding. - -use ethereum_types::U256; -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; -use std::convert::TryFrom; -use std::marker::PhantomData; - -macro_rules! define_mod { - ($int: ty) => { - /// Serde support for deserializing quoted integers. - /// - /// Configurable so that quotes are either required or optional. 
- pub struct QuotedIntVisitor { - require_quotes: bool, - _phantom: PhantomData, - } - - impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - type Value = T; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - if self.require_quotes { - write!(formatter, "a quoted integer") - } else { - write!(formatter, "a quoted or unquoted integer") - } - } - - fn visit_str(self, s: &str) -> Result - where - E: serde::de::Error, - { - s.parse::<$int>() - .map(T::from) - .map_err(serde::de::Error::custom) - } - - fn visit_u64(self, v: u64) -> Result - where - E: serde::de::Error, - { - if self.require_quotes { - Err(serde::de::Error::custom( - "received unquoted integer when quotes are required", - )) - } else { - T::try_from(v).map_err(|_| serde::de::Error::custom("invalid integer")) - } - } - } - - /// Compositional wrapper type that allows quotes or no quotes. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] - #[serde(transparent)] - pub struct MaybeQuoted - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - #[serde(with = "self")] - pub value: T, - } - - /// Wrapper type for requiring quotes on a `$int`-like type. - /// - /// Unlike using `serde(with = "quoted_$int::require_quotes")` this is composable, and can be nested - /// inside types like `Option`, `Result` and `Vec`. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] - #[serde(transparent)] - pub struct Quoted - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - #[serde(with = "require_quotes")] - pub value: T, - } - - /// Serialize with quotes. - pub fn serialize(value: &T, serializer: S) -> Result - where - S: Serializer, - T: From<$int> + Into<$int> + Copy, - { - let v: $int = (*value).into(); - serializer.serialize_str(&format!("{}", v)) - } - - /// Deserialize with or without quotes. 
- pub fn deserialize<'de, D, T>(deserializer: D) -> Result - where - D: Deserializer<'de>, - T: From<$int> + Into<$int> + Copy + TryFrom, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: false, - _phantom: PhantomData, - }) - } - - /// Requires quotes when deserializing. - /// - /// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. - pub mod require_quotes { - pub use super::serialize; - use super::*; - - pub fn deserialize<'de, D, T>(deserializer: D) -> Result - where - D: Deserializer<'de>, - T: From<$int> + Into<$int> + Copy + TryFrom, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: true, - _phantom: PhantomData, - }) - } - } - - #[cfg(test)] - mod test { - use super::*; - - #[test] - fn require_quotes() { - let x = serde_json::from_str::>("\"8\"").unwrap(); - assert_eq!(x.value, 8); - serde_json::from_str::>("8").unwrap_err(); - } - } - }; -} - -pub mod quoted_u8 { - use super::*; - - define_mod!(u8); -} - -pub mod quoted_u32 { - use super::*; - - define_mod!(u32); -} - -pub mod quoted_u64 { - use super::*; - - define_mod!(u64); -} - -pub mod quoted_i64 { - use super::*; - - define_mod!(i64); -} - -pub mod quoted_u256 { - use super::*; - - struct U256Visitor; - - impl<'de> serde::de::Visitor<'de> for U256Visitor { - type Value = U256; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - formatter.write_str("a quoted U256 integer") - } - - fn visit_str(self, v: &str) -> Result - where - E: serde::de::Error, - { - U256::from_dec_str(v).map_err(serde::de::Error::custom) - } - } - - /// Serialize with quotes. - pub fn serialize(value: &U256, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&format!("{}", value)) - } - - /// Deserialize with quotes. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_str(U256Visitor) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct WrappedU256(#[serde(with = "quoted_u256")] U256); - - #[test] - fn u256_with_quotes() { - assert_eq!( - &serde_json::to_string(&WrappedU256(U256::one())).unwrap(), - "\"1\"" - ); - assert_eq!( - serde_json::from_str::("\"1\"").unwrap(), - WrappedU256(U256::one()) - ); - } - - #[test] - fn u256_without_quotes() { - serde_json::from_str::("1").unwrap_err(); - } - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct WrappedI64(#[serde(with = "quoted_i64")] i64); - - #[test] - fn negative_i64_with_quotes() { - assert_eq!( - serde_json::from_str::("\"-200\"").unwrap().0, - -200 - ); - assert_eq!( - serde_json::to_string(&WrappedI64(-12_500)).unwrap(), - "\"-12500\"" - ); - } - - // It would be OK if this worked, but we don't need it to (i64s should always be quoted). - #[test] - fn negative_i64_without_quotes() { - serde_json::from_str::("-200").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/quoted_u64_vec.rs b/consensus/serde_utils/src/quoted_u64_vec.rs deleted file mode 100644 index f124c98909..0000000000 --- a/consensus/serde_utils/src/quoted_u64_vec.rs +++ /dev/null @@ -1,97 +0,0 @@ -//! Formats `Vec` using quotes. -//! -//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. 
- -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize)] -#[serde(transparent)] -pub struct QuotedIntWrapper { - #[serde(with = "crate::quoted_u64")] - pub int: u64, -} - -pub struct QuotedIntVecVisitor; -impl<'a> serde::de::Visitor<'a> for QuotedIntVecVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut vec = vec![]; - - while let Some(val) = seq.next_element()? { - let val: QuotedIntWrapper = val; - vec.push(val.int); - } - - Ok(vec) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_any(QuotedIntVecVisitor) -} - -#[cfg(test)] -mod test { - use super::*; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::quoted_u64_vec")] - values: Vec, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn empty_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [] }"#).unwrap(); - 
assert!(obj.values.is_empty()); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u256_hex_be.rs b/consensus/serde_utils/src/u256_hex_be.rs deleted file mode 100644 index 8007e5792c..0000000000 --- a/consensus/serde_utils/src/u256_hex_be.rs +++ /dev/null @@ -1,144 +0,0 @@ -use ethereum_types::U256; - -use serde::de::Visitor; -use serde::{de, Deserializer, Serialize, Serializer}; -use std::fmt; -use std::str::FromStr; - -pub fn serialize(num: &U256, serializer: S) -> Result -where - S: Serializer, -{ - num.serialize(serializer) -} - -pub struct U256Visitor; - -impl<'de> Visitor<'de> for U256Visitor { - type Value = String; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a well formatted hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - let stripped = &value[2..]; - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {:?}", - stripped - ))) - } else if stripped == "0" { - Ok(value.to_string()) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else { - Ok(value.to_string()) - } - } -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_string(U256Visitor)?; - - U256::from_str(&decoded).map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e))) -} - -#[cfg(test)] -mod test { - use ethereum_types::U256; - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: U256, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { val: 0.into() }).unwrap(), - 
"\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1.into() }).unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 256.into() }).unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 65.into() }).unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1024.into() }).unwrap(), - "\"0x400\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: U256::max_value() - 1 - }) - .unwrap(), - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: U256::max_value() - }) - .unwrap(), - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { val: 0.into() }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { val: 65.into() }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { val: 1024.into() }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ) - .unwrap(), - Wrapper { - val: U256::max_value() - 1 - }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ) - .unwrap(), - Wrapper { - val: U256::max_value() - }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u32_hex.rs b/consensus/serde_utils/src/u32_hex.rs deleted file mode 100644 index c1ab3537b2..0000000000 --- a/consensus/serde_utils/src/u32_hex.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! Formats `u32` as a 0x-prefixed, little-endian hex string. -//! -//! E.g., `0` serializes as `"0x00000000"`. 
- -use crate::bytes_4_hex; -use serde::{Deserializer, Serializer}; - -pub fn serialize(num: &u32, serializer: S) -> Result -where - S: Serializer, -{ - let hex = format!("0x{}", hex::encode(num.to_le_bytes())); - serializer.serialize_str(&hex) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - bytes_4_hex::deserialize(deserializer).map(u32::from_le_bytes) -} diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs deleted file mode 100644 index e3364a2d2c..0000000000 --- a/consensus/serde_utils/src/u64_hex_be.rs +++ /dev/null @@ -1,134 +0,0 @@ -//! Formats `u64` as a 0x-prefixed, big-endian hex string. -//! -//! E.g., `0` serializes as `"0x0000000000000000"`. - -use serde::de::{self, Error, Visitor}; -use serde::{Deserializer, Serializer}; -use std::fmt; - -const BYTES_LEN: usize = 8; - -pub struct QuantityVisitor; -impl<'de> Visitor<'de> for QuantityVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - - let stripped = value.trim_start_matches("0x"); - - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {}", - stripped - ))) - } else if stripped == "0" { - Ok(vec![0]) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else if stripped.len() % 2 != 0 { - hex::decode(format!("0{}", stripped)) - .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } else { - hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } - } -} - -pub fn serialize(num: &u64, serializer: S) -> Result -where - S: Serializer, -{ - let raw = hex::encode(num.to_be_bytes()); - let trimmed = raw.trim_start_matches('0'); - - let hex = if 
trimmed.is_empty() { "0" } else { trimmed }; - - serializer.serialize_str(&format!("0x{}", &hex)) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_str(QuantityVisitor)?; - - // TODO: this is not strict about byte length like other methods. - if decoded.len() > BYTES_LEN { - return Err(D::Error::custom(format!( - "expected max {} bytes for array, got {}", - BYTES_LEN, - decoded.len() - ))); - } - - let mut array = [0; BYTES_LEN]; - array[BYTES_LEN - decoded.len()..].copy_from_slice(&decoded); - Ok(u64::from_be_bytes(array)) -} - -#[cfg(test)] -mod test { - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: u64, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { val: 0 }).unwrap(), - "\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1 }).unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 256 }).unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 65 }).unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1024 }).unwrap(), - "\"0x400\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { val: 0 }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { val: 65 }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { val: 1024 }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u8_hex.rs b/consensus/serde_utils/src/u8_hex.rs deleted file mode 100644 index 8083e1d120..0000000000 --- 
a/consensus/serde_utils/src/u8_hex.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! Formats `u8` as a 0x-prefixed hex string. -//! -//! E.g., `0` serializes as `"0x00"`. - -use crate::hex::PrefixedHexVisitor; -use serde::de::Error; -use serde::{Deserializer, Serializer}; - -pub fn serialize(byte: &u8, serializer: S) -> Result -where - S: Serializer, -{ - let hex = format!("0x{}", hex::encode([*byte])); - serializer.serialize_str(&hex) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - if bytes.len() != 1 { - return Err(D::Error::custom(format!( - "expected 1 byte for u8, got {}", - bytes.len() - ))); - } - Ok(bytes[0]) -} diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml deleted file mode 100644 index d39ad10875..0000000000 --- a/consensus/ssz/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "eth2_ssz" -version = "0.4.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "SimpleSerialize (SSZ) as used in Ethereum 2.0" -license = "Apache-2.0" - -[lib] -name = "ssz" - -[dev-dependencies] -eth2_ssz_derive = "0.3.1" - -[dependencies] -ethereum-types = "0.14.1" -smallvec = { version = "1.6.1", features = ["const_generics"] } -itertools = "0.10.3" - -[features] -arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/ssz/README.md b/consensus/ssz/README.md deleted file mode 100644 index 04603cda33..0000000000 --- a/consensus/ssz/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# simpleserialize (ssz) - -[](https://crates.io/crates/eth2_ssz) diff --git a/consensus/ssz/examples/large_list.rs b/consensus/ssz/examples/large_list.rs deleted file mode 100644 index a1b10ab7a3..0000000000 --- a/consensus/ssz/examples/large_list.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Encode and decode a list many times. -//! -//! Useful for `cargo flamegraph`. 
- -use ssz::{Decode, Encode}; - -fn main() { - let vec: Vec = vec![4242; 8196]; - - let output: Vec> = (0..40_000) - .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) - .collect(); - - println!("{}", output.len()); -} diff --git a/consensus/ssz/examples/large_list_of_structs.rs b/consensus/ssz/examples/large_list_of_structs.rs deleted file mode 100644 index 2aaaf9b8a5..0000000000 --- a/consensus/ssz/examples/large_list_of_structs.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Encode and decode a list many times. -//! -//! Useful for `cargo flamegraph`. - -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; - -#[derive(Clone, Copy, Encode, Decode)] -pub struct FixedLen { - a: u64, - b: u64, - c: u64, - d: u64, -} - -fn main() { - let fixed_len = FixedLen { - a: 42, - b: 42, - c: 42, - d: 42, - }; - - let vec: Vec = vec![fixed_len; 8196]; - - let output: Vec> = (0..40_000) - .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) - .collect(); - - println!("{}", output.len()); -} diff --git a/consensus/ssz/examples/struct_definition.rs b/consensus/ssz/examples/struct_definition.rs deleted file mode 100644 index 123da12c58..0000000000 --- a/consensus/ssz/examples/struct_definition.rs +++ /dev/null @@ -1,73 +0,0 @@ -use ssz::{Decode, DecodeError, Encode, SszDecoderBuilder, SszEncoder}; - -#[derive(Debug, PartialEq)] -pub struct Foo { - a: u16, - b: Vec, - c: u16, -} - -impl Encode for Foo { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() && as Encode>::is_ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - ::ssz_fixed_len() - + ssz::BYTES_PER_LENGTH_OFFSET - + ::ssz_fixed_len() - + self.b.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - let offset = ::ssz_fixed_len() - + as Encode>::ssz_fixed_len() - + ::ssz_fixed_len(); - - let mut encoder = SszEncoder::container(buf, offset); - - encoder.append(&self.a); - encoder.append(&self.b); - encoder.append(&self.c); - - encoder.finalize(); - } -} - -impl Decode for Foo { - fn 
is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() && as Decode>::is_ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let mut builder = SszDecoderBuilder::new(bytes); - - builder.register_type::()?; - builder.register_type::>()?; - builder.register_type::()?; - - let mut decoder = builder.build()?; - - Ok(Self { - a: decoder.decode_next()?, - b: decoder.decode_next()?, - c: decoder.decode_next()?, - }) - } -} - -fn main() { - let my_foo = Foo { - a: 42, - b: vec![0, 1, 2, 3], - c: 11, - }; - - let bytes = vec![42, 0, 8, 0, 0, 0, 11, 0, 0, 1, 2, 3]; - - assert_eq!(my_foo.as_ssz_bytes(), bytes); - - let decoded_foo = Foo::from_ssz_bytes(&bytes).unwrap(); - - assert_eq!(my_foo, decoded_foo); -} diff --git a/consensus/ssz/src/decode.rs b/consensus/ssz/src/decode.rs deleted file mode 100644 index 10b3573b16..0000000000 --- a/consensus/ssz/src/decode.rs +++ /dev/null @@ -1,374 +0,0 @@ -use super::*; -use smallvec::{smallvec, SmallVec}; -use std::cmp::Ordering; - -type SmallVec8 = SmallVec<[T; 8]>; - -pub mod impls; -pub mod try_from_iter; - -/// Returned when SSZ decoding fails. -#[derive(Debug, PartialEq, Clone)] -pub enum DecodeError { - /// The bytes supplied were too short to be decoded into the specified type. - InvalidByteLength { len: usize, expected: usize }, - /// The given bytes were too short to be read as a length prefix. - InvalidLengthPrefix { len: usize, expected: usize }, - /// A length offset pointed to a byte that was out-of-bounds (OOB). - /// - /// A bytes may be OOB for the following reasons: - /// - /// - It is `>= bytes.len()`. - /// - When decoding variable length items, the 1st offset points "backwards" into the fixed - /// length items (i.e., `length[0] < BYTES_PER_LENGTH_OFFSET`). - /// - When decoding variable-length items, the `n`'th offset was less than the `n-1`'th offset. 
- OutOfBoundsByte { i: usize }, - /// An offset points “backwards” into the fixed-bytes portion of the message, essentially - /// double-decoding bytes that will also be decoded as fixed-length. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#1-Offset-into-fixed-portion - OffsetIntoFixedPortion(usize), - /// The first offset does not point to the byte that follows the fixed byte portion, - /// essentially skipping a variable-length byte. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#2-Skip-first-variable-byte - OffsetSkipsVariableBytes(usize), - /// An offset points to bytes prior to the previous offset. Depending on how you look at it, - /// this either double-decodes bytes or makes the first offset a negative-length. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#3-Offsets-are-decreasing - OffsetsAreDecreasing(usize), - /// An offset references byte indices that do not exist in the source bytes. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#4-Offsets-are-out-of-bounds - OffsetOutOfBounds(usize), - /// A variable-length list does not have a fixed portion that is cleanly divisible by - /// `BYTES_PER_LENGTH_OFFSET`. - InvalidListFixedBytesLen(usize), - /// Some item has a `ssz_fixed_len` of zero. This is illegal. - ZeroLengthItem, - /// The given bytes were invalid for some application-level reason. - BytesInvalid(String), - /// The given union selector is out of bounds. - UnionSelectorInvalid(u8), -} - -/// Performs checks on the `offset` based upon the other parameters provided. -/// -/// ## Detail -/// -/// - `offset`: the offset bytes (e.g., result of `read_offset(..)`). -/// - `previous_offset`: unless this is the first offset in the SSZ object, the value of the -/// previously-read offset. Used to ensure offsets are not decreasing. -/// - `num_bytes`: the total number of bytes in the SSZ object. Used to ensure the offset is not -/// out of bounds. 
-/// - `num_fixed_bytes`: the number of fixed-bytes in the struct, if it is known. Used to ensure -/// that the first offset doesn't skip any variable bytes. -/// -/// ## References -/// -/// The checks here are derived from this document: -/// -/// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view -pub fn sanitize_offset( - offset: usize, - previous_offset: Option, - num_bytes: usize, - num_fixed_bytes: Option, -) -> Result { - if num_fixed_bytes.map_or(false, |fixed_bytes| offset < fixed_bytes) { - Err(DecodeError::OffsetIntoFixedPortion(offset)) - } else if previous_offset.is_none() - && num_fixed_bytes.map_or(false, |fixed_bytes| offset != fixed_bytes) - { - Err(DecodeError::OffsetSkipsVariableBytes(offset)) - } else if offset > num_bytes { - Err(DecodeError::OffsetOutOfBounds(offset)) - } else if previous_offset.map_or(false, |prev| prev > offset) { - Err(DecodeError::OffsetsAreDecreasing(offset)) - } else { - Ok(offset) - } -} - -/// Provides SSZ decoding (de-serialization) via the `from_ssz_bytes(&bytes)` method. -/// -/// See `examples/` for manual implementations or the crate root for implementations using -/// `#[derive(Decode)]`. -pub trait Decode: Sized { - /// Returns `true` if this object has a fixed-length. - /// - /// I.e., there are no variable length items in this object or any of it's contained objects. - fn is_ssz_fixed_len() -> bool; - - /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. - /// - /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length - /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which - /// represents their length. - fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - /// Attempts to decode `Self` from `bytes`, returning a `DecodeError` on failure. - /// - /// The supplied bytes must be the exact length required to decode `Self`, excess bytes will - /// result in an error. 
- fn from_ssz_bytes(bytes: &[u8]) -> Result; -} - -#[derive(Copy, Clone, Debug)] -pub struct Offset { - position: usize, - offset: usize, -} - -/// Builds an `SszDecoder`. -/// -/// The purpose of this struct is to split some SSZ bytes into individual slices. The builder is -/// then converted into a `SszDecoder` which decodes those values into object instances. -/// -/// See [`SszDecoder`](struct.SszDecoder.html) for usage examples. -pub struct SszDecoderBuilder<'a> { - bytes: &'a [u8], - items: SmallVec8<&'a [u8]>, - offsets: SmallVec8, - items_index: usize, -} - -impl<'a> SszDecoderBuilder<'a> { - /// Instantiate a new builder that should build a `SszDecoder` over the given `bytes` which - /// are assumed to be the SSZ encoding of some object. - pub fn new(bytes: &'a [u8]) -> Self { - Self { - bytes, - items: smallvec![], - offsets: smallvec![], - items_index: 0, - } - } - - /// Registers a variable-length object as the next item in `bytes`, without specifying the - /// actual type. - /// - /// ## Notes - /// - /// Use of this function is generally discouraged since it cannot detect if some type changes - /// from variable to fixed length. - /// - /// Use `Self::register_type` wherever possible. - pub fn register_anonymous_variable_length_item(&mut self) -> Result<(), DecodeError> { - struct Anonymous; - - impl Decode for Anonymous { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(_bytes: &[u8]) -> Result { - unreachable!("Anonymous should never be decoded") - } - } - - self.register_type::() - } - - /// Declares that some type `T` is the next item in `bytes`. - pub fn register_type(&mut self) -> Result<(), DecodeError> { - self.register_type_parameterized(T::is_ssz_fixed_len(), T::ssz_fixed_len()) - } - - /// Declares that a type with the given parameters is the next item in `bytes`. 
- pub fn register_type_parameterized( - &mut self, - is_ssz_fixed_len: bool, - ssz_fixed_len: usize, - ) -> Result<(), DecodeError> { - if is_ssz_fixed_len { - let start = self.items_index; - self.items_index += ssz_fixed_len; - - let slice = - self.bytes - .get(start..self.items_index) - .ok_or(DecodeError::InvalidByteLength { - len: self.bytes.len(), - expected: self.items_index, - })?; - - self.items.push(slice); - } else { - self.offsets.push(Offset { - position: self.items.len(), - offset: sanitize_offset( - read_offset(&self.bytes[self.items_index..])?, - self.offsets.last().map(|o| o.offset), - self.bytes.len(), - None, - )?, - }); - - // Push an empty slice into items; it will be replaced later. - self.items.push(&[]); - - self.items_index += BYTES_PER_LENGTH_OFFSET; - } - - Ok(()) - } - - fn finalize(&mut self) -> Result<(), DecodeError> { - if let Some(first_offset) = self.offsets.first().map(|o| o.offset) { - // Check to ensure the first offset points to the byte immediately following the - // fixed-length bytes. - match first_offset.cmp(&self.items_index) { - Ordering::Less => return Err(DecodeError::OffsetIntoFixedPortion(first_offset)), - Ordering::Greater => { - return Err(DecodeError::OffsetSkipsVariableBytes(first_offset)) - } - Ordering::Equal => (), - } - - // Iterate through each pair of offsets, grabbing the slice between each of the offsets. - for pair in self.offsets.windows(2) { - let a = pair[0]; - let b = pair[1]; - - self.items[a.position] = &self.bytes[a.offset..b.offset]; - } - - // Handle the last offset, pushing a slice from it's start through to the end of - // `self.bytes`. - if let Some(last) = self.offsets.last() { - self.items[last.position] = &self.bytes[last.offset..] - } - } else { - // If the container is fixed-length, ensure there are no excess bytes. 
- if self.items_index != self.bytes.len() { - return Err(DecodeError::InvalidByteLength { - len: self.bytes.len(), - expected: self.items_index, - }); - } - } - - Ok(()) - } - - /// Finalizes the builder, returning a `SszDecoder` that may be used to instantiate objects. - pub fn build(mut self) -> Result, DecodeError> { - self.finalize()?; - - Ok(SszDecoder { items: self.items }) - } -} - -/// Decodes some slices of SSZ into object instances. Should be instantiated using -/// [`SszDecoderBuilder`](struct.SszDecoderBuilder.html). -/// -/// ## Example -/// -/// ```rust -/// use ssz_derive::{Encode, Decode}; -/// use ssz::{Decode, Encode, SszDecoder, SszDecoderBuilder}; -/// -/// #[derive(PartialEq, Debug, Encode, Decode)] -/// struct Foo { -/// a: u64, -/// b: Vec, -/// } -/// -/// fn ssz_decoding_example() { -/// let foo = Foo { -/// a: 42, -/// b: vec![1, 3, 3, 7] -/// }; -/// -/// let bytes = foo.as_ssz_bytes(); -/// -/// let mut builder = SszDecoderBuilder::new(&bytes); -/// -/// builder.register_type::().unwrap(); -/// builder.register_type::>().unwrap(); -/// -/// let mut decoder = builder.build().unwrap(); -/// -/// let decoded_foo = Foo { -/// a: decoder.decode_next().unwrap(), -/// b: decoder.decode_next().unwrap(), -/// }; -/// -/// assert_eq!(foo, decoded_foo); -/// } -/// -/// ``` -pub struct SszDecoder<'a> { - items: SmallVec8<&'a [u8]>, -} - -impl<'a> SszDecoder<'a> { - /// Decodes the next item. - /// - /// # Panics - /// - /// Panics when attempting to decode more items than actually exist. - pub fn decode_next(&mut self) -> Result { - self.decode_next_with(|slice| T::from_ssz_bytes(slice)) - } - - /// Decodes the next item using the provided function. - pub fn decode_next_with(&mut self, f: F) -> Result - where - F: FnOnce(&'a [u8]) -> Result, - { - f(self.items.remove(0)) - } -} - -/// Takes `bytes`, assuming it is the encoding for a SSZ union, and returns the union-selector and -/// the body (trailing bytes). 
-/// -/// ## Errors -/// -/// Returns an error if: -/// -/// - `bytes` is empty. -/// - the union selector is not a valid value (i.e., larger than the maximum number of variants. -pub fn split_union_bytes(bytes: &[u8]) -> Result<(UnionSelector, &[u8]), DecodeError> { - let selector = bytes - .first() - .copied() - .ok_or(DecodeError::OutOfBoundsByte { i: 0 }) - .and_then(UnionSelector::new)?; - let body = bytes - .get(1..) - .ok_or(DecodeError::OutOfBoundsByte { i: 1 })?; - Ok((selector, body)) -} - -/// Reads a `BYTES_PER_LENGTH_OFFSET`-byte length from `bytes`, where `bytes.len() >= -/// BYTES_PER_LENGTH_OFFSET`. -pub fn read_offset(bytes: &[u8]) -> Result { - decode_offset(bytes.get(0..BYTES_PER_LENGTH_OFFSET).ok_or( - DecodeError::InvalidLengthPrefix { - len: bytes.len(), - expected: BYTES_PER_LENGTH_OFFSET, - }, - )?) -} - -/// Decode bytes as a little-endian usize, returning an `Err` if `bytes.len() != -/// BYTES_PER_LENGTH_OFFSET`. -fn decode_offset(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = BYTES_PER_LENGTH_OFFSET; - - if len != expected { - Err(DecodeError::InvalidLengthPrefix { len, expected }) - } else { - let mut array: [u8; BYTES_PER_LENGTH_OFFSET] = std::default::Default::default(); - array.clone_from_slice(bytes); - - Ok(u32::from_le_bytes(array) as usize) - } -} diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs deleted file mode 100644 index 3d36fb4379..0000000000 --- a/consensus/ssz/src/decode/impls.rs +++ /dev/null @@ -1,776 +0,0 @@ -use super::*; -use crate::decode::try_from_iter::{TryCollect, TryFromIter}; -use core::num::NonZeroUsize; -use ethereum_types::{H160, H256, U128, U256}; -use itertools::process_results; -use smallvec::SmallVec; -use std::collections::{BTreeMap, BTreeSet}; -use std::iter::{self, FromIterator}; -use std::sync::Arc; - -macro_rules! 
impl_decodable_for_uint { - ($type: ident, $bit_size: expr) => { - impl Decode for $type { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $bit_size / 8 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - let mut array: [u8; $bit_size / 8] = std::default::Default::default(); - array.clone_from_slice(bytes); - - Ok(Self::from_le_bytes(array)) - } - } - } - }; -} - -impl_decodable_for_uint!(u8, 8); -impl_decodable_for_uint!(u16, 16); -impl_decodable_for_uint!(u32, 32); -impl_decodable_for_uint!(u64, 64); - -#[cfg(target_pointer_width = "32")] -impl_decodable_for_uint!(usize, 32); - -#[cfg(target_pointer_width = "64")] -impl_decodable_for_uint!(usize, 64); - -macro_rules! impl_decode_for_tuples { - ($( - $Tuple:ident { - $(($idx:tt) -> $T:ident)+ - } - )+) => { - $( - impl<$($T: Decode),+> Decode for ($($T,)+) { - fn is_ssz_fixed_len() -> bool { - $( - <$T as Decode>::is_ssz_fixed_len() && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - $( - <$T as Decode>::ssz_fixed_len() + - )* - 0 - } else { - BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let mut builder = SszDecoderBuilder::new(bytes); - - $( - builder.register_type::<$T>()?; - )* - - let mut decoder = builder.build()?; - - Ok(($( - decoder.decode_next::<$T>()?, - )* - )) - } - } - )+ - } -} - -impl_decode_for_tuples! 
{ - Tuple2 { - (0) -> A - (1) -> B - } - Tuple3 { - (0) -> A - (1) -> B - (2) -> C - } - Tuple4 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - } - Tuple5 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - } - Tuple6 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - } - Tuple7 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - } - Tuple8 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - } - Tuple9 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - } - Tuple10 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - } - Tuple11 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - } - Tuple12 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - (11) -> L - } -} - -impl Decode for bool { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 1 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - match bytes[0] { - 0b0000_0000 => Ok(false), - 0b0000_0001 => Ok(true), - _ => Err(DecodeError::BytesInvalid(format!( - "Out-of-range for boolean: {}", - bytes[0] - ))), - } - } - } -} - -impl Decode for NonZeroUsize { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let x = usize::from_ssz_bytes(bytes)?; - - if x == 0 { - Err(DecodeError::BytesInvalid( - "NonZeroUsize cannot be zero.".to_string(), - )) - } else { - // `unwrap` is safe here as `NonZeroUsize::new()` succeeds if `x > 0` and this path - // never executes when `x == 0`. 
- Ok(NonZeroUsize::new(x).unwrap()) - } - } -} - -impl Decode for Option { - fn is_ssz_fixed_len() -> bool { - false - } - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let (selector, body) = split_union_bytes(bytes)?; - match selector.into() { - 0u8 => Ok(None), - 1u8 => ::from_ssz_bytes(body).map(Option::Some), - other => Err(DecodeError::UnionSelectorInvalid(other)), - } - } -} - -impl Decode for Arc { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - T::from_ssz_bytes(bytes).map(Arc::new) - } -} - -impl Decode for H160 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 20 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(Self::from_slice(bytes)) - } - } -} - -impl Decode for H256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(H256::from_slice(bytes)) - } - } -} - -impl Decode for U256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(U256::from_little_endian(bytes)) - } - } -} - -impl Decode for U128 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 16 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, 
expected }) - } else { - Ok(U128::from_little_endian(bytes)) - } - } -} - -macro_rules! impl_decodable_for_u8_array { - ($len: expr) => { - impl Decode for [u8; $len] { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $len - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - let mut array: [u8; $len] = [0; $len]; - array.copy_from_slice(bytes); - - Ok(array) - } - } - } - }; -} - -impl_decodable_for_u8_array!(4); -impl_decodable_for_u8_array!(32); -impl_decodable_for_u8_array!(48); - -macro_rules! impl_for_vec { - ($type: ty, $max_len: expr) => { - impl Decode for $type { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self::from_iter(iter::empty())) - } else if T::is_ssz_fixed_len() { - bytes - .chunks(T::ssz_fixed_len()) - .map(T::from_ssz_bytes) - .collect() - } else { - decode_list_of_variable_length_items(bytes, $max_len) - } - } - } - }; -} - -impl_for_vec!(Vec, None); -impl_for_vec!(SmallVec<[T; 1]>, None); -impl_for_vec!(SmallVec<[T; 2]>, None); -impl_for_vec!(SmallVec<[T; 3]>, None); -impl_for_vec!(SmallVec<[T; 4]>, None); -impl_for_vec!(SmallVec<[T; 5]>, None); -impl_for_vec!(SmallVec<[T; 6]>, None); -impl_for_vec!(SmallVec<[T; 7]>, None); -impl_for_vec!(SmallVec<[T; 8]>, None); - -impl Decode for BTreeMap -where - K: Decode + Ord, - V: Decode, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self::from_iter(iter::empty())) - } else if <(K, V)>::is_ssz_fixed_len() { - bytes - .chunks(<(K, V)>::ssz_fixed_len()) - .map(<(K, V)>::from_ssz_bytes) - .collect() - } else { - decode_list_of_variable_length_items(bytes, None) - } - } -} - -impl Decode for BTreeSet -where - T: Decode + Ord, -{ - fn is_ssz_fixed_len() -> bool { - 
false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self::from_iter(iter::empty())) - } else if T::is_ssz_fixed_len() { - bytes - .chunks(T::ssz_fixed_len()) - .map(T::from_ssz_bytes) - .collect() - } else { - decode_list_of_variable_length_items(bytes, None) - } - } -} - -/// Decodes `bytes` as if it were a list of variable-length items. -/// -/// The `ssz::SszDecoder` can also perform this functionality, however this function is -/// significantly faster as it is optimized to read same-typed items whilst `ssz::SszDecoder` -/// supports reading items of differing types. -pub fn decode_list_of_variable_length_items>( - bytes: &[u8], - max_len: Option, -) -> Result { - if bytes.is_empty() { - return Container::try_from_iter(iter::empty()).map_err(|e| { - DecodeError::BytesInvalid(format!("Error trying to collect empty list: {:?}", e)) - }); - } - - let first_offset = read_offset(bytes)?; - sanitize_offset(first_offset, None, bytes.len(), Some(first_offset))?; - - if first_offset % BYTES_PER_LENGTH_OFFSET != 0 || first_offset < BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::InvalidListFixedBytesLen(first_offset)); - } - - let num_items = first_offset / BYTES_PER_LENGTH_OFFSET; - - if max_len.map_or(false, |max| num_items > max) { - return Err(DecodeError::BytesInvalid(format!( - "Variable length list of {} items exceeds maximum of {:?}", - num_items, max_len - ))); - } - - let mut offset = first_offset; - process_results( - (1..=num_items).map(|i| { - let slice_option = if i == num_items { - bytes.get(offset..) - } else { - let start = offset; - - let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; - offset = - sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; - - bytes.get(start..offset) - }; - - let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?; - T::from_ssz_bytes(slice) - }), - |iter| iter.try_collect(), - )? 
- .map_err(|e| DecodeError::BytesInvalid(format!("Error collecting into container: {:?}", e))) -} - -#[cfg(test)] -mod tests { - use super::*; - - // Note: decoding of valid bytes is generally tested "indirectly" in the `/tests` dir, by - // encoding then decoding the element. - - #[test] - fn invalid_u8_array_4() { - assert_eq!( - <[u8; 4]>::from_ssz_bytes(&[0; 3]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 4 - }) - ); - - assert_eq!( - <[u8; 4]>::from_ssz_bytes(&[0; 5]), - Err(DecodeError::InvalidByteLength { - len: 5, - expected: 4 - }) - ); - } - - #[test] - fn invalid_bool() { - assert_eq!( - bool::from_ssz_bytes(&[0; 2]), - Err(DecodeError::InvalidByteLength { - len: 2, - expected: 1 - }) - ); - - assert_eq!( - bool::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 1 - }) - ); - - if let Err(DecodeError::BytesInvalid(_)) = bool::from_ssz_bytes(&[2]) { - // Success. - } else { - panic!("Did not return error on invalid bool val") - } - } - - #[test] - fn invalid_h256() { - assert_eq!( - H256::from_ssz_bytes(&[0; 33]), - Err(DecodeError::InvalidByteLength { - len: 33, - expected: 32 - }) - ); - - assert_eq!( - H256::from_ssz_bytes(&[0; 31]), - Err(DecodeError::InvalidByteLength { - len: 31, - expected: 32 - }) - ); - } - - #[test] - fn empty_list() { - let vec: Vec> = vec![]; - let bytes = vec.as_ssz_bytes(); - assert!(bytes.is_empty()); - assert_eq!(Vec::from_ssz_bytes(&bytes), Ok(vec),); - } - - #[test] - fn first_length_points_backwards() { - assert_eq!( - >>::from_ssz_bytes(&[0, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(0)) - ); - - assert_eq!( - >>::from_ssz_bytes(&[1, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(1)) - ); - - assert_eq!( - >>::from_ssz_bytes(&[2, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(2)) - ); - - assert_eq!( - >>::from_ssz_bytes(&[3, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(3)) - ); - } - - #[test] - fn lengths_are_decreasing() { - 
assert_eq!( - >>::from_ssz_bytes(&[12, 0, 0, 0, 14, 0, 0, 0, 12, 0, 0, 0, 1, 0, 1, 0]), - Err(DecodeError::OffsetsAreDecreasing(12)) - ); - } - - #[test] - fn awkward_fixed_length_portion() { - assert_eq!( - >>::from_ssz_bytes(&[10, 0, 0, 0, 10, 0, 0, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(10)) - ); - } - - #[test] - fn length_out_of_bounds() { - assert_eq!( - >>::from_ssz_bytes(&[5, 0, 0, 0]), - Err(DecodeError::OffsetOutOfBounds(5)) - ); - assert_eq!( - >>::from_ssz_bytes(&[8, 0, 0, 0, 9, 0, 0, 0]), - Err(DecodeError::OffsetOutOfBounds(9)) - ); - assert_eq!( - >>::from_ssz_bytes(&[8, 0, 0, 0, 16, 0, 0, 0]), - Err(DecodeError::OffsetOutOfBounds(16)) - ); - } - - #[test] - fn vec_of_vec_of_u16() { - assert_eq!( - >>::from_ssz_bytes(&[4, 0, 0, 0]), - Ok(vec![vec![]]) - ); - - assert_eq!( - >::from_ssz_bytes(&[0, 0, 1, 0, 2, 0, 3, 0]), - Ok(vec![0, 1, 2, 3]) - ); - assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); - assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); - assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); - - assert_eq!( - ::from_ssz_bytes(&[255]), - Err(DecodeError::InvalidByteLength { - len: 1, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[0, 1, 2]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 2 - }) - ); - } - - #[test] - fn vec_of_u16() { - assert_eq!(>::from_ssz_bytes(&[0, 0, 0, 0]), Ok(vec![0, 0])); - assert_eq!( - >::from_ssz_bytes(&[0, 0, 1, 0, 2, 0, 3, 0]), - Ok(vec![0, 1, 2, 3]) - ); - assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); - assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); - assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); - - assert_eq!( - ::from_ssz_bytes(&[255]), - Err(DecodeError::InvalidByteLength { - len: 1, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 2 - }) - ); - - assert_eq!( - 
::from_ssz_bytes(&[0, 1, 2]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 2 - }) - ); - } - - #[test] - fn u16() { - assert_eq!(::from_ssz_bytes(&[0, 0]), Ok(0)); - assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); - assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); - assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); - - assert_eq!( - ::from_ssz_bytes(&[255]), - Err(DecodeError::InvalidByteLength { - len: 1, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[0, 1, 2]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 2 - }) - ); - } - - #[test] - fn tuple() { - assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 0, 0, 0]), Ok((0, 0))); - assert_eq!(<(u16, u16)>::from_ssz_bytes(&[16, 0, 17, 0]), Ok((16, 17))); - assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 1, 2, 0]), Ok((256, 2))); - assert_eq!( - <(u16, u16)>::from_ssz_bytes(&[255, 255, 0, 0]), - Ok((65535, 0)) - ); - } -} diff --git a/consensus/ssz/src/decode/try_from_iter.rs b/consensus/ssz/src/decode/try_from_iter.rs deleted file mode 100644 index 1ff89a107f..0000000000 --- a/consensus/ssz/src/decode/try_from_iter.rs +++ /dev/null @@ -1,103 +0,0 @@ -use smallvec::SmallVec; -use std::collections::{BTreeMap, BTreeSet}; -use std::convert::Infallible; -use std::fmt::Debug; - -/// Partial variant of `std::iter::FromIterator`. -/// -/// This trait is implemented for types which can be constructed from an iterator of decoded SSZ -/// values, but which may refuse values once a length limit is reached. -pub trait TryFromIter: Sized { - type Error: Debug; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator; -} - -// It would be nice to be able to do a blanket impl, e.g. -// -// `impl TryFromIter for C where C: FromIterator` -// -// However this runs into trait coherence issues due to the type parameter `T` on `TryFromIter`. -// -// E.g. 
If we added an impl downstream for `List` then another crate downstream of that -// could legally add an impl of `FromIterator for List` which would create -// two conflicting implementations for `List`. Hence the `List` impl is disallowed -// by the compiler in the presence of the blanket impl. That's obviously annoying, so we opt to -// abandon the blanket impl in favour of impls for selected types. -impl TryFromIter for Vec { - type Error = Infallible; - - fn try_from_iter(values: I) -> Result - where - I: IntoIterator, - { - // Pre-allocate the expected size of the Vec, which is parsed from the SSZ input bytes as - // `num_items`. This length has already been checked to be less than or equal to the type's - // maximum length in `decode_list_of_variable_length_items`. - let iter = values.into_iter(); - let (_, opt_max_len) = iter.size_hint(); - let mut vec = Vec::with_capacity(opt_max_len.unwrap_or(0)); - vec.extend(iter); - Ok(vec) - } -} - -impl TryFromIter for SmallVec<[T; N]> { - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -impl TryFromIter<(K, V)> for BTreeMap -where - K: Ord, -{ - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -impl TryFromIter for BTreeSet -where - T: Ord, -{ - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -/// Partial variant of `collect`. 
-pub trait TryCollect: Iterator { - fn try_collect(self) -> Result - where - C: TryFromIter; -} - -impl TryCollect for I -where - I: Iterator, -{ - fn try_collect(self) -> Result - where - C: TryFromIter, - { - C::try_from_iter(self) - } -} diff --git a/consensus/ssz/src/encode.rs b/consensus/ssz/src/encode.rs deleted file mode 100644 index a46ef80e05..0000000000 --- a/consensus/ssz/src/encode.rs +++ /dev/null @@ -1,196 +0,0 @@ -use super::*; - -mod impls; - -/// Provides SSZ encoding (serialization) via the `as_ssz_bytes(&self)` method. -/// -/// See `examples/` for manual implementations or the crate root for implementations using -/// `#[derive(Encode)]`. -pub trait Encode { - /// Returns `true` if this object has a fixed-length. - /// - /// I.e., there are no variable length items in this object or any of it's contained objects. - fn is_ssz_fixed_len() -> bool; - - /// Append the encoding `self` to `buf`. - /// - /// Note, variable length objects need only to append their "variable length" portion, they do - /// not need to provide their offset. - fn ssz_append(&self, buf: &mut Vec); - - /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. - /// - /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length - /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which - /// represents their length. - fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - /// Returns the size (in bytes) when `self` is serialized. - /// - /// Returns the same value as `self.as_ssz_bytes().len()` but this method is significantly more - /// efficient. - fn ssz_bytes_len(&self) -> usize; - - /// Returns the full-form encoding of this object. - /// - /// The default implementation of this method should suffice for most cases. 
- fn as_ssz_bytes(&self) -> Vec { - let mut buf = vec![]; - - self.ssz_append(&mut buf); - - buf - } -} - -/// Allow for encoding an ordered series of distinct or indistinct objects as SSZ bytes. -/// -/// **You must call `finalize(..)` after the final `append(..)` call** to ensure the bytes are -/// written to `buf`. -/// -/// ## Example -/// -/// Use `SszEncoder` to produce identical output to `foo.as_ssz_bytes()`: -/// -/// ```rust -/// use ssz_derive::{Encode, Decode}; -/// use ssz::{Decode, Encode, SszEncoder}; -/// -/// #[derive(PartialEq, Debug, Encode, Decode)] -/// struct Foo { -/// a: u64, -/// b: Vec, -/// } -/// -/// fn ssz_encode_example() { -/// let foo = Foo { -/// a: 42, -/// b: vec![1, 3, 3, 7] -/// }; -/// -/// let mut buf: Vec = vec![]; -/// let offset = ::ssz_fixed_len() + as Encode>::ssz_fixed_len(); -/// -/// let mut encoder = SszEncoder::container(&mut buf, offset); -/// -/// encoder.append(&foo.a); -/// encoder.append(&foo.b); -/// -/// encoder.finalize(); -/// -/// assert_eq!(foo.as_ssz_bytes(), buf); -/// } -/// -/// ``` -pub struct SszEncoder<'a> { - offset: usize, - buf: &'a mut Vec, - variable_bytes: Vec, -} - -impl<'a> SszEncoder<'a> { - /// Instantiate a new encoder for encoding a SSZ container. - pub fn container(buf: &'a mut Vec, num_fixed_bytes: usize) -> Self { - buf.reserve(num_fixed_bytes); - - Self { - offset: num_fixed_bytes, - buf, - variable_bytes: vec![], - } - } - - /// Append some `item` to the SSZ bytes. - pub fn append(&mut self, item: &T) { - self.append_parameterized(T::is_ssz_fixed_len(), |buf| item.ssz_append(buf)) - } - - /// Uses `ssz_append` to append the encoding of some item to the SSZ bytes. 
- pub fn append_parameterized(&mut self, is_ssz_fixed_len: bool, ssz_append: F) - where - F: Fn(&mut Vec), - { - if is_ssz_fixed_len { - ssz_append(self.buf); - } else { - self.buf - .extend_from_slice(&encode_length(self.offset + self.variable_bytes.len())); - - ssz_append(&mut self.variable_bytes); - } - } - - /// Write the variable bytes to `self.bytes`. - /// - /// This method must be called after the final `append(..)` call when serializing - /// variable-length items. - pub fn finalize(&mut self) -> &mut Vec { - self.buf.append(&mut self.variable_bytes); - - self.buf - } -} - -/// Encode `len` as a little-endian byte array of `BYTES_PER_LENGTH_OFFSET` length. -/// -/// If `len` is larger than `2 ^ BYTES_PER_LENGTH_OFFSET`, a `debug_assert` is raised. -pub fn encode_length(len: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] { - // Note: it is possible for `len` to be larger than what can be encoded in - // `BYTES_PER_LENGTH_OFFSET` bytes, triggering this debug assertion. - // - // These are the alternatives to using a `debug_assert` here: - // - // 1. Use `assert`. - // 2. Push an error to the caller (e.g., `Option` or `Result`). - // 3. Ignore it completely. - // - // I have avoided (1) because it's basically a choice between "produce invalid SSZ" or "kill - // the entire program". I figure it may be possible for an attacker to trigger this assert and - // take the program down -- I think producing invalid SSZ is a better option than this. - // - // I have avoided (2) because this error will need to be propagated upstream, making encoding a - // function which may fail. I don't think this is ergonomic and the upsides don't outweigh the - // downsides. - // - // I figure a `debug_assertion` is better than (3) as it will give us a change to detect the - // error during testing. - // - // If you have a different opinion, feel free to start an issue and tag @paulhauner. 
- debug_assert!(len <= MAX_LENGTH_VALUE); - - let mut bytes = [0; BYTES_PER_LENGTH_OFFSET]; - bytes.copy_from_slice(&len.to_le_bytes()[0..BYTES_PER_LENGTH_OFFSET]); - bytes -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_encode_length() { - assert_eq!(encode_length(0), [0; 4]); - - assert_eq!(encode_length(1), [1, 0, 0, 0]); - - assert_eq!( - encode_length(MAX_LENGTH_VALUE), - [255; BYTES_PER_LENGTH_OFFSET] - ); - } - - #[test] - #[should_panic] - #[cfg(debug_assertions)] - fn test_encode_length_above_max_debug_panics() { - encode_length(MAX_LENGTH_VALUE + 1); - } - - #[test] - #[cfg(not(debug_assertions))] - fn test_encode_length_above_max_not_debug_does_not_panic() { - assert_eq!(&encode_length(MAX_LENGTH_VALUE + 1)[..], &[0; 4]); - } -} diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs deleted file mode 100644 index 8c609d9397..0000000000 --- a/consensus/ssz/src/encode/impls.rs +++ /dev/null @@ -1,633 +0,0 @@ -use super::*; -use core::num::NonZeroUsize; -use ethereum_types::{H160, H256, U128, U256}; -use smallvec::SmallVec; -use std::collections::{BTreeMap, BTreeSet}; -use std::sync::Arc; - -macro_rules! impl_encodable_for_uint { - ($type: ident, $bit_size: expr) => { - impl Encode for $type { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $bit_size / 8 - } - - fn ssz_bytes_len(&self) -> usize { - $bit_size / 8 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.to_le_bytes()); - } - } - }; -} - -impl_encodable_for_uint!(u8, 8); -impl_encodable_for_uint!(u16, 16); -impl_encodable_for_uint!(u32, 32); -impl_encodable_for_uint!(u64, 64); - -#[cfg(target_pointer_width = "32")] -impl_encodable_for_uint!(usize, 32); - -#[cfg(target_pointer_width = "64")] -impl_encodable_for_uint!(usize, 64); - -// Based on the `tuple_impls` macro from the standard library. -macro_rules! 
impl_encode_for_tuples { - ($( - $Tuple:ident { - $(($idx:tt) -> $T:ident)+ - } - )+) => { - $( - impl<$($T: Encode),+> Encode for ($($T,)+) { - fn is_ssz_fixed_len() -> bool { - $( - <$T as Encode>::is_ssz_fixed_len() && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - $( - <$T as Encode>::ssz_fixed_len() + - )* - 0 - } else { - BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() - } else { - let mut len = 0; - $( - len += if <$T as Encode>::is_ssz_fixed_len() { - <$T as Encode>::ssz_fixed_len() - } else { - BYTES_PER_LENGTH_OFFSET + - self.$idx.ssz_bytes_len() - }; - )* - len - } - } - - fn ssz_append(&self, buf: &mut Vec) { - let offset = $( - <$T as Encode>::ssz_fixed_len() + - )* - 0; - - let mut encoder = SszEncoder::container(buf, offset); - - $( - encoder.append(&self.$idx); - )* - - encoder.finalize(); - } - } - )+ - } -} - -impl_encode_for_tuples! { - Tuple2 { - (0) -> A - (1) -> B - } - Tuple3 { - (0) -> A - (1) -> B - (2) -> C - } - Tuple4 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - } - Tuple5 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - } - Tuple6 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - } - Tuple7 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - } - Tuple8 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - } - Tuple9 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - } - Tuple10 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - } - Tuple11 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - } - Tuple12 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - (11) -> L - } -} - -impl 
Encode for Option { - fn is_ssz_fixed_len() -> bool { - false - } - fn ssz_append(&self, buf: &mut Vec) { - match self { - Option::None => { - let union_selector: u8 = 0u8; - buf.push(union_selector); - } - Option::Some(ref inner) => { - let union_selector: u8 = 1u8; - buf.push(union_selector); - inner.ssz_append(buf); - } - } - } - fn ssz_bytes_len(&self) -> usize { - match self { - Option::None => 1usize, - Option::Some(ref inner) => inner - .ssz_bytes_len() - .checked_add(1) - .expect("encoded length must be less than usize::max_value"), - } - } -} - -impl Encode for Arc { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.as_ref().ssz_append(buf) - } - - fn ssz_bytes_len(&self) -> usize { - self.as_ref().ssz_bytes_len() - } -} - -// Encode transparently through references. -impl<'a, T: Encode> Encode for &'a T { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - T::ssz_append(self, buf) - } - - fn ssz_bytes_len(&self) -> usize { - T::ssz_bytes_len(self) - } -} - -/// Compute the encoded length of a vector-like sequence of `T`. -pub fn sequence_ssz_bytes_len(iter: I) -> usize -where - I: Iterator + ExactSizeIterator, - T: Encode, -{ - // Compute length before doing any iteration. - let length = iter.len(); - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() * length - } else { - let mut len = iter.map(|item| item.ssz_bytes_len()).sum(); - len += BYTES_PER_LENGTH_OFFSET * length; - len - } -} - -/// Encode a vector-like sequence of `T`. 
-pub fn sequence_ssz_append(iter: I, buf: &mut Vec) -where - I: Iterator + ExactSizeIterator, - T: Encode, -{ - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * iter.len()); - - for item in iter { - item.ssz_append(buf); - } - } else { - let mut encoder = SszEncoder::container(buf, iter.len() * BYTES_PER_LENGTH_OFFSET); - - for item in iter { - encoder.append(&item); - } - - encoder.finalize(); - } -} - -macro_rules! impl_for_vec { - ($type: ty) => { - impl Encode for $type { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } - } - }; -} - -impl_for_vec!(Vec); -impl_for_vec!(SmallVec<[T; 1]>); -impl_for_vec!(SmallVec<[T; 2]>); -impl_for_vec!(SmallVec<[T; 3]>); -impl_for_vec!(SmallVec<[T; 4]>); -impl_for_vec!(SmallVec<[T; 5]>); -impl_for_vec!(SmallVec<[T; 6]>); -impl_for_vec!(SmallVec<[T; 7]>); -impl_for_vec!(SmallVec<[T; 8]>); - -impl Encode for BTreeMap -where - K: Encode + Ord, - V: Encode, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } -} - -impl Encode for BTreeSet -where - T: Encode + Ord, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } -} - -impl Encode for bool { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 1 - } - - fn ssz_bytes_len(&self) -> usize { - 1 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&(*self as u8).to_le_bytes()); - } -} - -impl Encode for NonZeroUsize { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() - } - - fn 
ssz_bytes_len(&self) -> usize { - std::mem::size_of::() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.get().ssz_append(buf) - } -} - -impl Encode for H160 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 20 - } - - fn ssz_bytes_len(&self) -> usize { - 20 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(self.as_bytes()); - } -} - -impl Encode for H256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn ssz_bytes_len(&self) -> usize { - 32 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(self.as_bytes()); - } -} - -impl Encode for U256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn ssz_bytes_len(&self) -> usize { - 32 - } - - fn ssz_append(&self, buf: &mut Vec) { - let n = ::ssz_fixed_len(); - let s = buf.len(); - - buf.resize(s + n, 0); - self.to_little_endian(&mut buf[s..]); - } -} - -impl Encode for U128 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 16 - } - - fn ssz_bytes_len(&self) -> usize { - 16 - } - - fn ssz_append(&self, buf: &mut Vec) { - let n = ::ssz_fixed_len(); - let s = buf.len(); - - buf.resize(s + n, 0); - self.to_little_endian(&mut buf[s..]); - } -} - -macro_rules! 
impl_encodable_for_u8_array { - ($len: expr) => { - impl Encode for [u8; $len] { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $len - } - - fn ssz_bytes_len(&self) -> usize { - $len - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self[..]); - } - } - }; -} - -impl_encodable_for_u8_array!(4); -impl_encodable_for_u8_array!(32); -impl_encodable_for_u8_array!(48); - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn vec_of_u8() { - let vec: Vec = vec![]; - assert_eq!(vec.as_ssz_bytes(), vec![]); - - let vec: Vec = vec![1]; - assert_eq!(vec.as_ssz_bytes(), vec![1]); - - let vec: Vec = vec![0, 1, 2, 3]; - assert_eq!(vec.as_ssz_bytes(), vec![0, 1, 2, 3]); - } - - #[test] - fn vec_of_vec_of_u8() { - let vec: Vec> = vec![]; - assert_eq!(vec.as_ssz_bytes(), vec![]); - - let vec: Vec> = vec![vec![]]; - assert_eq!(vec.as_ssz_bytes(), vec![4, 0, 0, 0]); - - let vec: Vec> = vec![vec![], vec![]]; - assert_eq!(vec.as_ssz_bytes(), vec![8, 0, 0, 0, 8, 0, 0, 0]); - - let vec: Vec> = vec![vec![0, 1, 2], vec![11, 22, 33]]; - assert_eq!( - vec.as_ssz_bytes(), - vec![8, 0, 0, 0, 11, 0, 0, 0, 0, 1, 2, 11, 22, 33] - ); - } - - #[test] - fn ssz_encode_u8() { - assert_eq!(0_u8.as_ssz_bytes(), vec![0]); - assert_eq!(1_u8.as_ssz_bytes(), vec![1]); - assert_eq!(100_u8.as_ssz_bytes(), vec![100]); - assert_eq!(255_u8.as_ssz_bytes(), vec![255]); - } - - #[test] - fn ssz_encode_u16() { - assert_eq!(1_u16.as_ssz_bytes(), vec![1, 0]); - assert_eq!(100_u16.as_ssz_bytes(), vec![100, 0]); - assert_eq!((1_u16 << 8).as_ssz_bytes(), vec![0, 1]); - assert_eq!(65535_u16.as_ssz_bytes(), vec![255, 255]); - } - - #[test] - fn ssz_encode_u32() { - assert_eq!(1_u32.as_ssz_bytes(), vec![1, 0, 0, 0]); - assert_eq!(100_u32.as_ssz_bytes(), vec![100, 0, 0, 0]); - assert_eq!((1_u32 << 16).as_ssz_bytes(), vec![0, 0, 1, 0]); - assert_eq!((1_u32 << 24).as_ssz_bytes(), vec![0, 0, 0, 1]); - assert_eq!((!0_u32).as_ssz_bytes(), vec![255, 255, 255, 255]); - } - 
- #[test] - fn ssz_encode_u64() { - assert_eq!(1_u64.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - (!0_u64).as_ssz_bytes(), - vec![255, 255, 255, 255, 255, 255, 255, 255] - ); - } - - #[test] - fn ssz_encode_usize() { - assert_eq!(1_usize.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - (!0_usize).as_ssz_bytes(), - vec![255, 255, 255, 255, 255, 255, 255, 255] - ); - } - - #[test] - fn ssz_encode_option_u8() { - let opt: Option = None; - assert_eq!(opt.as_ssz_bytes(), vec![0]); - let opt: Option = Some(2); - assert_eq!(opt.as_ssz_bytes(), vec![1, 2]); - } - - #[test] - fn ssz_encode_bool() { - assert_eq!(true.as_ssz_bytes(), vec![1]); - assert_eq!(false.as_ssz_bytes(), vec![0]); - } - - #[test] - fn ssz_encode_h256() { - assert_eq!(H256::from(&[0; 32]).as_ssz_bytes(), vec![0; 32]); - assert_eq!(H256::from(&[1; 32]).as_ssz_bytes(), vec![1; 32]); - - let bytes = vec![ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]; - - assert_eq!(H256::from_slice(&bytes).as_ssz_bytes(), bytes); - } - - #[test] - fn ssz_encode_u8_array_4() { - assert_eq!([0, 0, 0, 0].as_ssz_bytes(), vec![0; 4]); - assert_eq!([1, 0, 0, 0].as_ssz_bytes(), vec![1, 0, 0, 0]); - assert_eq!([1, 2, 3, 4].as_ssz_bytes(), vec![1, 2, 3, 4]); - } - - #[test] - fn tuple() { - assert_eq!((10u8, 11u8).as_ssz_bytes(), vec![10, 11]); - assert_eq!((10u32, 11u8).as_ssz_bytes(), vec![10, 0, 0, 0, 11]); - assert_eq!((10u8, 11u8, 12u8).as_ssz_bytes(), vec![10, 11, 12]); - } -} diff --git a/consensus/ssz/src/legacy.rs b/consensus/ssz/src/legacy.rs deleted file mode 100644 index 4953db057d..0000000000 --- a/consensus/ssz/src/legacy.rs +++ /dev/null @@ -1,265 +0,0 @@ -//! Provides a "legacy" version of SSZ encoding for `Option where T: Encode + Decode`. -//! -//! The SSZ specification changed in 2021 to use a 1-byte union selector, instead of a 4-byte one -//! which was used in the Lighthouse database. -//! -//! 
Users can use the `four_byte_option_impl` macro to define a module that can be used with the -//! `#[ssz(with = "module")]`. -//! -//! ## Example -//! -//! ```rust -//! use ssz_derive::{Encode, Decode}; -//! use ssz::four_byte_option_impl; -//! -//! four_byte_option_impl!(impl_for_u64, u64); -//! -//! #[derive(Encode, Decode)] -//! struct Foo { -//! #[ssz(with = "impl_for_u64")] -//! a: Option, -//! } -//! ``` - -use crate::*; - -#[macro_export] -macro_rules! four_byte_option_impl { - ($mod_name: ident, $type: ty) => { - #[allow(dead_code)] - mod $mod_name { - use super::*; - - pub mod encode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn ssz_bytes_len(opt: &Option<$type>) -> usize { - if let Some(some) = opt { - let len = if <$type as Encode>::is_ssz_fixed_len() { - <$type as Encode>::ssz_fixed_len() - } else { - <$type as Encode>::ssz_bytes_len(some) - }; - len + BYTES_PER_LENGTH_OFFSET - } else { - BYTES_PER_LENGTH_OFFSET - } - } - - pub fn ssz_append(opt: &Option<$type>, buf: &mut Vec) { - match opt { - None => buf.extend_from_slice(&legacy::encode_four_byte_union_selector(0)), - Some(t) => { - buf.extend_from_slice(&legacy::encode_four_byte_union_selector(1)); - t.ssz_append(buf); - } - } - } - - pub fn as_ssz_bytes(opt: &Option<$type>) -> Vec { - let mut buf = vec![]; - - ssz_append(opt, &mut buf); - - buf - } - } - - pub mod decode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn from_ssz_bytes(bytes: &[u8]) -> Result, DecodeError> { - if bytes.len() < BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::InvalidByteLength { - len: bytes.len(), - expected: BYTES_PER_LENGTH_OFFSET, - }); - } - - let (index_bytes, value_bytes) = bytes.split_at(BYTES_PER_LENGTH_OFFSET); - - let index 
= legacy::read_four_byte_union_selector(index_bytes)?; - if index == 0 { - Ok(None) - } else if index == 1 { - Ok(Some(<$type as ssz::Decode>::from_ssz_bytes(value_bytes)?)) - } else { - Err(DecodeError::BytesInvalid(format!( - "{} is not a valid union index for Option", - index - ))) - } - } - } - } - }; -} - -pub fn encode_four_byte_union_selector(selector: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] { - encode_length(selector) -} - -pub fn read_four_byte_union_selector(bytes: &[u8]) -> Result { - read_offset(bytes) -} - -#[cfg(test)] -mod test { - use super::*; - use crate as ssz; - use ssz_derive::{Decode, Encode}; - - type VecU16 = Vec; - - four_byte_option_impl!(impl_u16, u16); - four_byte_option_impl!(impl_vec_u16, VecU16); - - #[test] - fn ssz_encode_option_u16() { - let item = Some(65535_u16); - let bytes = vec![1, 0, 0, 0, 255, 255]; - assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - - let item = None; - let bytes = vec![0, 0, 0, 0]; - assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), None); - } - - #[test] - fn ssz_encode_option_vec_u16() { - let item = Some(vec![0_u16, 1]); - let bytes = vec![1, 0, 0, 0, 0, 0, 1, 0]; - assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - - let item = None; - let bytes = vec![0, 0, 0, 0]; - assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - } - - fn round_trip(items: Vec) { - for item in items { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct TwoVariableLenOptions { - a: u16, - #[ssz(with = "impl_u16")] - b: Option, - #[ssz(with = "impl_vec_u16")] 
- c: Option>, - #[ssz(with = "impl_vec_u16")] - d: Option>, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn two_variable_len_options_encoding() { - let s = TwoVariableLenOptions { - a: 42, - b: None, - c: Some(vec![0]), - d: None, - }; - - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 - // | option | offset | offset | option = vec![ - TwoVariableLenOptions { - a: 42, - b: Some(12), - c: Some(vec![0]), - d: Some(vec![1]), - }, - TwoVariableLenOptions { - a: 42, - b: Some(12), - c: Some(vec![0]), - d: None, - }, - TwoVariableLenOptions { - a: 42, - b: None, - c: Some(vec![0]), - d: None, - }, - TwoVariableLenOptions { - a: 42, - b: None, - c: None, - d: None, - }, - ]; - - round_trip(vec); - } - - #[test] - fn tuple_u8_u16() { - let vec: Vec<(u8, u16)> = vec![ - (0, 0), - (0, 1), - (1, 0), - (u8::max_value(), u16::max_value()), - (0, u16::max_value()), - (u8::max_value(), 0), - (42, 12301), - ]; - - round_trip(vec); - } - - #[test] - fn tuple_vec_vec() { - let vec: Vec<(u64, Vec, Vec>)> = vec![ - (0, vec![], vec![vec![]]), - (99, vec![101], vec![vec![], vec![]]), - ( - 42, - vec![12, 13, 14], - vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]], - ), - ]; - - round_trip(vec); - } -} diff --git a/consensus/ssz/src/lib.rs b/consensus/ssz/src/lib.rs deleted file mode 100644 index e71157a3ee..0000000000 --- a/consensus/ssz/src/lib.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! Provides encoding (serialization) and decoding (deserialization) in the SimpleSerialize (SSZ) -//! format designed for use in Ethereum 2.0. -//! -//! Adheres to the Ethereum 2.0 [SSZ -//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md) -//! at v0.12.1. -//! -//! ## Example -//! -//! ```rust -//! use ssz_derive::{Encode, Decode}; -//! use ssz::{Decode, Encode}; -//! -//! #[derive(PartialEq, Debug, Encode, Decode)] -//! struct Foo { -//! a: u64, -//! b: Vec, -//! } -//! -//! fn ssz_encode_decode_example() { -//! 
let foo = Foo { -//! a: 42, -//! b: vec![1, 3, 3, 7] -//! }; -//! -//! let ssz_bytes: Vec = foo.as_ssz_bytes(); -//! -//! let decoded_foo = Foo::from_ssz_bytes(&ssz_bytes).unwrap(); -//! -//! assert_eq!(foo, decoded_foo); -//! } -//! -//! ``` -//! -//! See `examples/` for manual implementations of the `Encode` and `Decode` traits. - -mod decode; -mod encode; -pub mod legacy; -mod union_selector; - -pub use decode::{ - impls::decode_list_of_variable_length_items, read_offset, split_union_bytes, - try_from_iter::TryFromIter, Decode, DecodeError, SszDecoder, SszDecoderBuilder, -}; -pub use encode::{encode_length, Encode, SszEncoder}; -pub use union_selector::UnionSelector; - -/// The number of bytes used to represent an offset. -pub const BYTES_PER_LENGTH_OFFSET: usize = 4; -/// The maximum value that can be represented using `BYTES_PER_LENGTH_OFFSET`. -#[cfg(target_pointer_width = "32")] -pub const MAX_LENGTH_VALUE: usize = (std::u32::MAX >> (8 * (4 - BYTES_PER_LENGTH_OFFSET))) as usize; -#[cfg(target_pointer_width = "64")] -pub const MAX_LENGTH_VALUE: usize = (std::u64::MAX >> (8 * (8 - BYTES_PER_LENGTH_OFFSET))) as usize; - -/// The number of bytes used to indicate the variant of a union. -pub const BYTES_PER_UNION_SELECTOR: usize = 1; -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -pub const MAX_UNION_SELECTOR: u8 = 127; - -/// Convenience function to SSZ encode an object supporting ssz::Encode. -/// -/// Equivalent to `val.as_ssz_bytes()`. 
-pub fn ssz_encode(val: &T) -> Vec -where - T: Encode, -{ - val.as_ssz_bytes() -} diff --git a/consensus/ssz/src/union_selector.rs b/consensus/ssz/src/union_selector.rs deleted file mode 100644 index 18bab094aa..0000000000 --- a/consensus/ssz/src/union_selector.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::*; - -/// Provides the one-byte "selector" from the SSZ union specification: -/// -/// https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.3/ssz/simple-serialize.md#union -#[derive(Copy, Clone)] -pub struct UnionSelector(u8); - -impl From for u8 { - fn from(union_selector: UnionSelector) -> u8 { - union_selector.0 - } -} - -impl PartialEq for UnionSelector { - fn eq(&self, other: &u8) -> bool { - self.0 == *other - } -} - -impl UnionSelector { - /// Instantiate `self`, returning an error if `selector > MAX_UNION_SELECTOR`. - pub fn new(selector: u8) -> Result { - Some(selector) - .filter(|_| selector <= MAX_UNION_SELECTOR) - .map(Self) - .ok_or(DecodeError::UnionSelectorInvalid(selector)) - } -} diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs deleted file mode 100644 index f52d2c5cdf..0000000000 --- a/consensus/ssz/tests/tests.rs +++ /dev/null @@ -1,390 +0,0 @@ -use ethereum_types::H256; -use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::{Decode, Encode}; - -mod round_trip { - use super::*; - use std::collections::BTreeMap; - use std::iter::FromIterator; - - fn round_trip(items: Vec) { - for item in items { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - } - - #[test] - fn bool() { - let items: Vec = vec![true, false]; - - round_trip(items); - } - - #[test] - fn option_u16() { - let items: Vec> = vec![None, Some(2u16)]; - - round_trip(items); - } - - #[test] - fn u8_array_4() { - let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]]; - - round_trip(items); - } - - #[test] - fn h256() { - let 
items: Vec = vec![H256::zero(), H256::from([1; 32]), H256::random()]; - - round_trip(items); - } - - #[test] - fn vec_of_h256() { - let items: Vec> = vec![ - vec![], - vec![H256::zero(), H256::from([1; 32]), H256::random()], - ]; - - round_trip(items); - } - - #[test] - fn option_vec_h256() { - let items: Vec>> = vec![ - None, - Some(vec![]), - Some(vec![H256::zero(), H256::from([1; 32]), H256::random()]), - ]; - - round_trip(items); - } - - #[test] - fn vec_u16() { - let items: Vec> = vec![ - vec![], - vec![255], - vec![0, 1, 2], - vec![100; 64], - vec![255, 0, 255], - ]; - - round_trip(items); - } - - #[test] - fn vec_of_vec_u16() { - let items: Vec>> = vec![ - vec![], - vec![vec![]], - vec![vec![1, 2, 3]], - vec![vec![], vec![]], - vec![vec![], vec![1, 2, 3]], - vec![vec![1, 2, 3], vec![1, 2, 3]], - vec![vec![1, 2, 3], vec![], vec![1, 2, 3]], - vec![vec![], vec![], vec![1, 2, 3]], - vec![vec![], vec![1], vec![1, 2, 3]], - vec![vec![], vec![1], vec![1, 2, 3]], - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct FixedLen { - a: u16, - b: u64, - c: u32, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn fixed_len_struct_encoding() { - let items: Vec = vec![ - FixedLen { a: 0, b: 0, c: 0 }, - FixedLen { a: 1, b: 1, c: 1 }, - FixedLen { a: 1, b: 0, c: 1 }, - ]; - - let expected_encodings = vec![ - // | u16--| u64----------------------------| u32----------| - vec![00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], - vec![01, 00, 01, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], - vec![01, 00, 00, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], - ]; - - for i in 0..items.len() { - assert_eq!( - items[i].as_ssz_bytes(), - expected_encodings[i], - "Failed on {}", - i - ); - } - } - - #[test] - fn fixed_len_excess_bytes() { - let fixed = FixedLen { a: 1, b: 2, c: 3 }; - - let mut bytes = fixed.as_ssz_bytes(); - bytes.append(&mut vec![0]); - - assert_eq!( - FixedLen::from_ssz_bytes(&bytes), - 
Err(DecodeError::InvalidByteLength { - len: 15, - expected: 14, - }) - ); - } - - #[test] - fn vec_of_fixed_len_struct() { - let items: Vec = vec![ - FixedLen { a: 0, b: 0, c: 0 }, - FixedLen { a: 1, b: 1, c: 1 }, - FixedLen { a: 1, b: 0, c: 1 }, - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct VariableLen { - a: u16, - b: Vec, - c: u32, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn offset_into_fixed_bytes() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | u32 | variable - 01, 00, 09, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ]; - - assert_eq!( - VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetIntoFixedPortion(9)) - ); - } - - #[test] - fn variable_len_excess_bytes() { - let variable = VariableLen { - a: 1, - b: vec![2], - c: 3, - }; - - let mut bytes = variable.as_ssz_bytes(); - bytes.append(&mut vec![0]); - - // The error message triggered is not so helpful, it's caught by a side-effect. Just - // checking there is _some_ error is fine. 
- assert!(VariableLen::from_ssz_bytes(&bytes).is_err()); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn first_offset_skips_byte() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | u32 | variable - 01, 00, 11, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ]; - - assert_eq!( - VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetSkipsVariableBytes(11)) - ); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn variable_len_struct_encoding() { - let items: Vec = vec![ - VariableLen { - a: 0, - b: vec![], - c: 0, - }, - VariableLen { - a: 1, - b: vec![0], - c: 1, - }, - VariableLen { - a: 1, - b: vec![0, 1, 2], - c: 1, - }, - ]; - - let expected_encodings = vec![ - // 00..................................09 - // | u16--| vec offset-----| u32------------| vec payload --------| - vec![00, 00, 10, 00, 00, 00, 00, 00, 00, 00], - vec![01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00], - vec![ - 01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ], - ]; - - for i in 0..items.len() { - assert_eq!( - items[i].as_ssz_bytes(), - expected_encodings[i], - "Failed on {}", - i - ); - } - } - - #[test] - fn vec_of_variable_len_struct() { - let items: Vec = vec![ - VariableLen { - a: 0, - b: vec![], - c: 0, - }, - VariableLen { - a: 255, - b: vec![0, 1, 2, 3], - c: 99, - }, - VariableLen { - a: 255, - b: vec![0], - c: 99, - }, - VariableLen { - a: 50, - b: vec![0], - c: 0, - }, - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct ThreeVariableLen { - a: u16, - b: Vec, - c: Vec, - d: Vec, - } - - #[test] - fn three_variable_len() { - let vec: Vec = vec![ThreeVariableLen { - a: 42, - b: vec![0], - c: vec![1], - d: vec![2], - }]; - - round_trip(vec); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn offsets_decreasing() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | offset | offset | variable - 01, 00, 14, 00, 00, 00, 15, 00, 
00, 00, 14, 00, 00, 00, 00, 00, - ]; - - assert_eq!( - ThreeVariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetsAreDecreasing(14)) - ); - } - - #[test] - fn tuple_u8_u16() { - let vec: Vec<(u8, u16)> = vec![ - (0, 0), - (0, 1), - (1, 0), - (u8::max_value(), u16::max_value()), - (0, u16::max_value()), - (u8::max_value(), 0), - (42, 12301), - ]; - - round_trip(vec); - } - - #[test] - fn tuple_vec_vec() { - let vec: Vec<(u64, Vec, Vec>)> = vec![ - (0, vec![], vec![vec![]]), - (99, vec![101], vec![vec![], vec![]]), - ( - 42, - vec![12, 13, 14], - vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]], - ), - ]; - - round_trip(vec); - } - - #[test] - fn btree_map_fixed() { - let data = vec![ - BTreeMap::new(), - BTreeMap::from_iter(vec![(0u8, 0u16), (1, 2), (2, 4), (4, 6)]), - ]; - round_trip(data); - } - - #[test] - fn btree_map_variable_value() { - let data = vec![ - BTreeMap::new(), - BTreeMap::from_iter(vec![ - ( - 0u64, - ThreeVariableLen { - a: 1, - b: vec![3, 5, 7], - c: vec![], - d: vec![0, 0], - }, - ), - ( - 1, - ThreeVariableLen { - a: 99, - b: vec![1], - c: vec![2, 3, 4, 5, 6, 7, 8, 9, 10], - d: vec![4, 5, 6, 7, 8], - }, - ), - ( - 2, - ThreeVariableLen { - a: 0, - b: vec![], - c: vec![], - d: vec![], - }, - ), - ]), - ]; - round_trip(data); - } -} diff --git a/consensus/ssz_derive/Cargo.toml b/consensus/ssz_derive/Cargo.toml deleted file mode 100644 index d3b2865a61..0000000000 --- a/consensus/ssz_derive/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "eth2_ssz_derive" -version = "0.3.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "Procedural derive macros to accompany the eth2_ssz crate." 
-license = "Apache-2.0" - -[lib] -name = "ssz_derive" -proc-macro = true - -[dependencies] -syn = "1.0.42" -proc-macro2 = "1.0.23" -quote = "1.0.7" -darling = "0.13.0" - -[dev-dependencies] -eth2_ssz = "0.4.1" diff --git a/consensus/ssz_derive/src/lib.rs b/consensus/ssz_derive/src/lib.rs deleted file mode 100644 index 53752ba44b..0000000000 --- a/consensus/ssz_derive/src/lib.rs +++ /dev/null @@ -1,981 +0,0 @@ -//! Provides procedural derive macros for the `Encode` and `Decode` traits of the `eth2_ssz` crate. -//! -//! ## Attributes -//! -//! The following struct/enum attributes are available: -//! -//! - `#[ssz(enum_behaviour = "union")]`: encodes and decodes an `enum` with a one-byte variant selector. -//! - `#[ssz(enum_behaviour = "transparent")]`: allows encoding an `enum` by serializing only the -//! value whilst ignoring outermost the `enum`. -//! - `#[ssz(struct_behaviour = "container")]`: encodes and decodes the `struct` as an SSZ -//! "container". -//! - `#[ssz(struct_behaviour = "transparent")]`: encodes and decodes a `struct` with exactly one -//! non-skipped field as if the outermost `struct` does not exist. -//! -//! The following field attributes are available: -//! -//! - `#[ssz(with = "module")]`: uses the methods in `module` to implement `ssz::Encode` and -//! `ssz::Decode`. This is useful when it's not possible to create an `impl` for that type -//! (e.g. the type is defined in another crate). -//! - `#[ssz(skip_serializing)]`: this field will not be included in the serialized SSZ vector. -//! - `#[ssz(skip_deserializing)]`: this field will not be expected in the serialized -//! SSZ vector and it will be initialized from a `Default` implementation. -//! -//! ## Examples -//! -//! ### Structs -//! -//! ```rust -//! use ssz::{Encode, Decode}; -//! use ssz_derive::{Encode, Decode}; -//! -//! /// Represented as an SSZ "list" wrapped in an SSZ "container". -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! 
#[ssz(struct_behaviour = "container")] // "container" is the default behaviour -//! struct TypicalStruct { -//! foo: Vec -//! } -//! -//! assert_eq!( -//! TypicalStruct { foo: vec![42] }.as_ssz_bytes(), -//! vec![4, 0, 0, 0, 42] -//! ); -//! -//! assert_eq!( -//! TypicalStruct::from_ssz_bytes(&[4, 0, 0, 0, 42]).unwrap(), -//! TypicalStruct { foo: vec![42] }, -//! ); -//! -//! /// Represented as an SSZ "list" *without* an SSZ "container". -//! #[derive(Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct WrapperStruct { -//! foo: Vec -//! } -//! -//! assert_eq!( -//! WrapperStruct { foo: vec![42] }.as_ssz_bytes(), -//! vec![42] -//! ); -//! -//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct WrapperStructSkippedField { -//! foo: Vec, -//! #[ssz(skip_serializing, skip_deserializing)] -//! bar: u8, -//! } -//! -//! assert_eq!( -//! WrapperStructSkippedField { foo: vec![42], bar: 99 }.as_ssz_bytes(), -//! vec![42] -//! ); -//! assert_eq!( -//! WrapperStructSkippedField::from_ssz_bytes(&[42]).unwrap(), -//! WrapperStructSkippedField { foo: vec![42], bar: 0 } -//! ); -//! -//! /// Represented as an SSZ "list" *without* an SSZ "container". -//! #[derive(Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct NewType(Vec); -//! -//! assert_eq!( -//! NewType(vec![42]).as_ssz_bytes(), -//! vec![42] -//! ); -//! -//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct NewTypeSkippedField(Vec, #[ssz(skip_serializing, skip_deserializing)] u8); -//! -//! assert_eq!( -//! NewTypeSkippedField(vec![42], 99).as_ssz_bytes(), -//! vec![42] -//! ); -//! assert_eq!( -//! NewTypeSkippedField::from_ssz_bytes(&[42]).unwrap(), -//! 
NewTypeSkippedField(vec![42], 0) -//! ); -//! ``` -//! -//! ### Enums -//! -//! ```rust -//! use ssz::{Encode, Decode}; -//! use ssz_derive::{Encode, Decode}; -//! -//! /// Represented as an SSZ "union". -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(enum_behaviour = "union")] -//! enum UnionEnum { -//! Foo(u8), -//! Bar(Vec), -//! } -//! -//! assert_eq!( -//! UnionEnum::Foo(42).as_ssz_bytes(), -//! vec![0, 42] -//! ); -//! assert_eq!( -//! UnionEnum::from_ssz_bytes(&[1, 42, 42]).unwrap(), -//! UnionEnum::Bar(vec![42, 42]), -//! ); -//! -//! /// Represented as only the value in the enum variant. -//! #[derive(Debug, PartialEq, Encode)] -//! #[ssz(enum_behaviour = "transparent")] -//! enum TransparentEnum { -//! Foo(u8), -//! Bar(Vec), -//! } -//! -//! assert_eq!( -//! TransparentEnum::Foo(42).as_ssz_bytes(), -//! vec![42] -//! ); -//! assert_eq!( -//! TransparentEnum::Bar(vec![42, 42]).as_ssz_bytes(), -//! vec![42, 42] -//! ); -//! ``` - -use darling::{FromDeriveInput, FromMeta}; -use proc_macro::TokenStream; -use quote::quote; -use std::convert::TryInto; -use syn::{parse_macro_input, DataEnum, DataStruct, DeriveInput, Ident}; - -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -const MAX_UNION_SELECTOR: u8 = 127; - -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute with \ - a \"transparent\" or \"union\" value, e.g., #[ssz(enum_behaviour = \"transparent\")]"; - -#[derive(Debug, FromDeriveInput)] -#[darling(attributes(ssz))] -struct StructOpts { - #[darling(default)] - enum_behaviour: Option, - #[darling(default)] - struct_behaviour: Option, -} - -/// Field-level configuration. 
-#[derive(Debug, Default, FromMeta)] -struct FieldOpts { - #[darling(default)] - with: Option, - #[darling(default)] - skip_serializing: bool, - #[darling(default)] - skip_deserializing: bool, -} - -enum Procedure<'a> { - Struct { - data: &'a syn::DataStruct, - behaviour: StructBehaviour, - }, - Enum { - data: &'a syn::DataEnum, - behaviour: EnumBehaviour, - }, -} - -enum StructBehaviour { - Container, - Transparent, -} - -enum EnumBehaviour { - Union, - Transparent, -} - -impl<'a> Procedure<'a> { - fn read(item: &'a DeriveInput) -> Self { - let opts = StructOpts::from_derive_input(item).unwrap(); - - match &item.data { - syn::Data::Struct(data) => { - if opts.enum_behaviour.is_some() { - panic!("cannot use \"enum_behaviour\" for a struct"); - } - - match opts.struct_behaviour.as_deref() { - Some("container") | None => Procedure::Struct { - data, - behaviour: StructBehaviour::Container, - }, - Some("transparent") => Procedure::Struct { - data, - behaviour: StructBehaviour::Transparent, - }, - Some(other) => panic!( - "{} is not a valid struct behaviour, use \"container\" or \"transparent\"", - other - ), - } - } - syn::Data::Enum(data) => { - if opts.struct_behaviour.is_some() { - panic!("cannot use \"struct_behaviour\" for an enum"); - } - - match opts.enum_behaviour.as_deref() { - Some("union") => Procedure::Enum { - data, - behaviour: EnumBehaviour::Union, - }, - Some("transparent") => Procedure::Enum { - data, - behaviour: EnumBehaviour::Transparent, - }, - Some(other) => panic!( - "{} is not a valid enum behaviour, use \"container\" or \"transparent\"", - other - ), - None => panic!("{}", NO_ENUM_BEHAVIOUR_ERROR), - } - } - _ => panic!("ssz_derive only supports structs and enums"), - } - } -} - -fn parse_ssz_fields( - struct_data: &syn::DataStruct, -) -> Vec<(&syn::Type, Option<&syn::Ident>, FieldOpts)> { - struct_data - .fields - .iter() - .map(|field| { - let ty = &field.ty; - let ident = field.ident.as_ref(); - - let field_opts_candidates = field - .attrs - 
.iter() - .filter(|attr| attr.path.get_ident().map_or(false, |ident| *ident == "ssz")) - .collect::>(); - - if field_opts_candidates.len() > 1 { - panic!("more than one field-level \"ssz\" attribute provided") - } - - let field_opts = field_opts_candidates - .first() - .map(|attr| { - let meta = attr.parse_meta().unwrap(); - FieldOpts::from_meta(&meta).unwrap() - }) - .unwrap_or_default(); - - (ty, ident, field_opts) - }) - .collect() -} - -/// Implements `ssz::Encode` for some `struct` or `enum`. -#[proc_macro_derive(Encode, attributes(ssz))] -pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - let procedure = Procedure::read(&item); - - match procedure { - Procedure::Struct { data, behaviour } => match behaviour { - StructBehaviour::Transparent => ssz_encode_derive_struct_transparent(&item, data), - StructBehaviour::Container => ssz_encode_derive_struct(&item, data), - }, - Procedure::Enum { data, behaviour } => match behaviour { - EnumBehaviour::Transparent => ssz_encode_derive_enum_transparent(&item, data), - EnumBehaviour::Union => ssz_encode_derive_enum_union(&item, data), - }, - } -} - -/// Derive `ssz::Encode` for a struct. -/// -/// Fields are encoded in the order they are defined. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_serializing)]`: the field will not be serialized. 
-fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let field_is_ssz_fixed_len = &mut vec![]; - let field_fixed_len = &mut vec![]; - let field_ssz_bytes_len = &mut vec![]; - let field_encoder_append = &mut vec![]; - - for (ty, ident, field_opts) in parse_ssz_fields(struct_data) { - if field_opts.skip_serializing { - continue; - } - - let ident = match ident { - Some(ref ident) => ident, - _ => panic!( - "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." - ), - }; - - if let Some(module) = field_opts.with { - let module = quote! { #module::encode }; - field_is_ssz_fixed_len.push(quote! { #module::is_ssz_fixed_len() }); - field_fixed_len.push(quote! { #module::ssz_fixed_len() }); - field_ssz_bytes_len.push(quote! { #module::ssz_bytes_len(&self.#ident) }); - field_encoder_append.push(quote! { - encoder.append_parameterized( - #module::is_ssz_fixed_len(), - |buf| #module::ssz_append(&self.#ident, buf) - ) - }); - } else { - field_is_ssz_fixed_len.push(quote! { <#ty as ssz::Encode>::is_ssz_fixed_len() }); - field_fixed_len.push(quote! { <#ty as ssz::Encode>::ssz_fixed_len() }); - field_ssz_bytes_len.push(quote! { self.#ident.ssz_bytes_len() }); - field_encoder_append.push(quote! { encoder.append(&self.#ident) }); - } - } - - let output = quote! 
{ - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - #( - #field_is_ssz_fixed_len && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - let mut len: usize = 0; - #( - len = len - .checked_add(#field_fixed_len) - .expect("encode ssz_fixed_len length overflow"); - )* - len - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() - } else { - let mut len: usize = 0; - #( - if #field_is_ssz_fixed_len { - len = len - .checked_add(#field_fixed_len) - .expect("encode ssz_bytes_len length overflow"); - } else { - len = len - .checked_add(ssz::BYTES_PER_LENGTH_OFFSET) - .expect("encode ssz_bytes_len length overflow for offset"); - len = len - .checked_add(#field_ssz_bytes_len) - .expect("encode ssz_bytes_len length overflow for bytes"); - } - )* - - len - } - } - - fn ssz_append(&self, buf: &mut Vec) { - let mut offset: usize = 0; - #( - offset = offset - .checked_add(#field_fixed_len) - .expect("encode ssz_append offset overflow"); - )* - - let mut encoder = ssz::SszEncoder::container(buf, offset); - - #( - #field_encoder_append; - )* - - encoder.finalize(); - } - } - }; - output.into() -} - -/// Derive `ssz::Encode` "transparently" for a struct which has exactly one non-skipped field. -/// -/// The single field is encoded directly, making the outermost `struct` transparent. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_serializing)]`: the field will not be serialized. 
-fn ssz_encode_derive_struct_transparent( - derive_input: &DeriveInput, - struct_data: &DataStruct, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - let ssz_fields = parse_ssz_fields(struct_data); - let num_fields = ssz_fields - .iter() - .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) - .count(); - - if num_fields != 1 { - panic!( - "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", - num_fields - ); - } - - let (ty, ident, _field_opts) = ssz_fields - .iter() - .find(|(_, _, field_opts)| !field_opts.skip_deserializing) - .expect("\"transparent\" struct must have at least one non-skipped field"); - - let output = if let Some(field_name) = ident { - quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - <#ty as ssz::Encode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <#ty as ssz::Encode>::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.#field_name.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.#field_name.ssz_append(buf) - } - } - } - } else { - quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - <#ty as ssz::Encode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <#ty as ssz::Encode>::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.0.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.0.ssz_append(buf) - } - } - } - }; - - output.into() -} - -/// Derive `ssz::Encode` for an enum in the "transparent" method. -/// -/// The "transparent" method is distinct from the "union" method specified in the SSZ specification. -/// When using "transparent", the enum will be ignored and the contained field will be serialized as -/// if the enum does not exist. 
Since an union variant "selector" is not serialized, it is not -/// possible to reliably decode an enum that is serialized transparently. -/// -/// ## Limitations -/// -/// Only supports: -/// - Enums with a single field per variant, where -/// - All fields are variably sized from an SSZ-perspective (not fixed size). -/// -/// ## Panics -/// -/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run -/// time* if the variable-size requirement isn't met. -fn ssz_encode_derive_enum_transparent( - derive_input: &DeriveInput, - enum_data: &DataEnum, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (patterns, assert_exprs): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("ssz::Encode can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! { - #name::#variant_name(ref inner) - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - let type_assert = quote! { - !<#ty as ssz::Encode>::is_ssz_fixed_len() - }; - (pattern, type_assert) - }) - .unzip(); - - let output = quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - assert!( - #( - #assert_exprs && - )* true, - "not all enum variants are variably-sized" - ); - false - } - - fn ssz_bytes_len(&self) -> usize { - match self { - #( - #patterns => inner.ssz_bytes_len(), - )* - } - } - - fn ssz_append(&self, buf: &mut Vec) { - match self { - #( - #patterns => inner.ssz_append(buf), - )* - } - } - } - }; - output.into() -} - -/// Derive `ssz::Encode` for an `enum` following the "union" SSZ spec. -/// -/// The union selector will be determined based upon the order in which the enum variants are -/// defined. 
E.g., the top-most variant in the enum will have a selector of `0`, the variant -/// beneath it will have a selector of `1` and so on. -/// -/// # Limitations -/// -/// Only supports enums where each variant has a single field. -fn ssz_encode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let patterns: Vec<_> = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("ssz::Encode can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! { - #name::#variant_name(ref inner) - }; - pattern - }) - .collect(); - - let union_selectors = compute_union_selectors(patterns.len()); - - let output = quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - match self { - #( - #patterns => inner - .ssz_bytes_len() - .checked_add(1) - .expect("encoded length must be less than usize::max_value"), - )* - } - } - - fn ssz_append(&self, buf: &mut Vec) { - match self { - #( - #patterns => { - let union_selector: u8 = #union_selectors; - debug_assert!(union_selector <= ssz::MAX_UNION_SELECTOR); - buf.push(union_selector); - inner.ssz_append(buf) - }, - )* - } - } - } - }; - output.into() -} - -/// Derive `ssz::Decode` for a struct or enum. 
-#[proc_macro_derive(Decode, attributes(ssz))] -pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - let procedure = Procedure::read(&item); - - match procedure { - Procedure::Struct { data, behaviour } => match behaviour { - StructBehaviour::Transparent => ssz_decode_derive_struct_transparent(&item, data), - StructBehaviour::Container => ssz_decode_derive_struct(&item, data), - }, - Procedure::Enum { data, behaviour } => match behaviour { - EnumBehaviour::Union => ssz_decode_derive_enum_union(&item, data), - EnumBehaviour::Transparent => panic!( - "Decode cannot be derived for enum_behaviour \"{}\", only \"{}\" is valid.", - ENUM_TRANSPARENT, ENUM_UNION - ), - }, - } -} - -/// Implements `ssz::Decode` for some `struct`. -/// -/// Fields are decoded in the order they are defined. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_deserializing)]`: during de-serialization the field will be instantiated from a -/// `Default` implementation. The decoder will assume that the field was not serialized at all -/// (e.g., if it has been serialized, an error will be raised instead of `Default` overriding it). -fn ssz_decode_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - - let mut register_types = vec![]; - let mut field_names = vec![]; - let mut fixed_decodes = vec![]; - let mut decodes = vec![]; - let mut is_fixed_lens = vec![]; - let mut fixed_lens = vec![]; - - for (ty, ident, field_opts) in parse_ssz_fields(struct_data) { - let ident = match ident { - Some(ref ident) => ident, - _ => panic!( - "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." - ), - }; - - field_names.push(quote! { - #ident - }); - - // Field should not be deserialized; use a `Default` impl to instantiate. - if field_opts.skip_deserializing { - decodes.push(quote! 
{ - let #ident = <_>::default(); - }); - - fixed_decodes.push(quote! { - let #ident = <_>::default(); - }); - - continue; - } - - let is_ssz_fixed_len; - let ssz_fixed_len; - let from_ssz_bytes; - if let Some(module) = field_opts.with { - let module = quote! { #module::decode }; - - is_ssz_fixed_len = quote! { #module::is_ssz_fixed_len() }; - ssz_fixed_len = quote! { #module::ssz_fixed_len() }; - from_ssz_bytes = quote! { #module::from_ssz_bytes(slice) }; - - register_types.push(quote! { - builder.register_type_parameterized(#is_ssz_fixed_len, #ssz_fixed_len)?; - }); - decodes.push(quote! { - let #ident = decoder.decode_next_with(|slice| #module::from_ssz_bytes(slice))?; - }); - } else { - is_ssz_fixed_len = quote! { <#ty as ssz::Decode>::is_ssz_fixed_len() }; - ssz_fixed_len = quote! { <#ty as ssz::Decode>::ssz_fixed_len() }; - from_ssz_bytes = quote! { <#ty as ssz::Decode>::from_ssz_bytes(slice) }; - - register_types.push(quote! { - builder.register_type::<#ty>()?; - }); - decodes.push(quote! { - let #ident = decoder.decode_next()?; - }); - } - - fixed_decodes.push(quote! { - let #ident = { - start = end; - end = end - .checked_add(#ssz_fixed_len) - .ok_or_else(|| ssz::DecodeError::OutOfBoundsByte { - i: usize::max_value() - })?; - let slice = bytes.get(start..end) - .ok_or_else(|| ssz::DecodeError::InvalidByteLength { - len: bytes.len(), - expected: end - })?; - #from_ssz_bytes? - }; - }); - is_fixed_lens.push(is_ssz_fixed_len); - fixed_lens.push(ssz_fixed_len); - } - - let output = quote! 
{ - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - #( - #is_fixed_lens && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - let mut len: usize = 0; - #( - len = len - .checked_add(#fixed_lens) - .expect("decode ssz_fixed_len overflow"); - )* - len - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { - if ::is_ssz_fixed_len() { - if bytes.len() != ::ssz_fixed_len() { - return Err(ssz::DecodeError::InvalidByteLength { - len: bytes.len(), - expected: ::ssz_fixed_len(), - }); - } - - let mut start: usize = 0; - let mut end = start; - - #( - #fixed_decodes - )* - - Ok(Self { - #( - #field_names, - )* - }) - } else { - let mut builder = ssz::SszDecoderBuilder::new(bytes); - - #( - #register_types - )* - - let mut decoder = builder.build()?; - - #( - #decodes - )* - - - Ok(Self { - #( - #field_names, - )* - }) - } - } - } - }; - output.into() -} - -/// Implements `ssz::Decode` "transparently" for a `struct` with exactly one non-skipped field. -/// -/// The bytes will be decoded as if they are the inner field, without the outermost struct. The -/// outermost struct will then be applied artificially. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_deserializing)]`: during de-serialization the field will be instantiated from a -/// `Default` implementation. The decoder will assume that the field was not serialized at all -/// (e.g., if it has been serialized, an error will be raised instead of `Default` overriding it). 
-fn ssz_decode_derive_struct_transparent( - item: &DeriveInput, - struct_data: &DataStruct, -) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - let ssz_fields = parse_ssz_fields(struct_data); - let num_fields = ssz_fields - .iter() - .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) - .count(); - - if num_fields != 1 { - panic!( - "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", - num_fields - ); - } - - let mut fields = vec![]; - let mut wrapped_type = None; - - for (i, (ty, ident, field_opts)) in ssz_fields.into_iter().enumerate() { - if let Some(name) = ident { - if field_opts.skip_deserializing { - fields.push(quote! { - #name: <_>::default(), - }); - } else { - fields.push(quote! { - #name: <_>::from_ssz_bytes(bytes)?, - }); - wrapped_type = Some(ty); - } - } else { - let index = syn::Index::from(i); - if field_opts.skip_deserializing { - fields.push(quote! { - #index:<_>::default(), - }); - } else { - fields.push(quote! { - #index:<_>::from_ssz_bytes(bytes)?, - }); - wrapped_type = Some(ty); - } - } - } - - let ty = wrapped_type.unwrap(); - - let output = quote! { - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - <#ty as ssz::Decode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <#ty as ssz::Decode>::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { - Ok(Self { - #( - #fields - )* - - }) - } - } - }; - output.into() -} - -/// Derive `ssz::Decode` for an `enum` following the "union" SSZ spec. 
-fn ssz_decode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (constructors, var_types): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("ssz::Encode can only be derived for enums with 1 field per variant"); - } - - let constructor = quote! { - #name::#variant_name - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - (constructor, ty) - }) - .unzip(); - - let union_selectors = compute_union_selectors(constructors.len()); - - let output = quote! { - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - // Sanity check to ensure the definition here does not drift from the one defined in - // `ssz`. 
- debug_assert_eq!(#MAX_UNION_SELECTOR, ssz::MAX_UNION_SELECTOR); - - let (selector, body) = ssz::split_union_bytes(bytes)?; - - match selector.into() { - #( - #union_selectors => { - <#var_types as ssz::Decode>::from_ssz_bytes(body).map(#constructors) - }, - )* - other => Err(ssz::DecodeError::UnionSelectorInvalid(other)) - } - } - } - }; - output.into() -} - -fn compute_union_selectors(num_variants: usize) -> Vec { - let union_selectors = (0..num_variants) - .map(|i| { - i.try_into() - .expect("union selector exceeds u8::max_value, union has too many variants") - }) - .collect::>(); - - let highest_selector = union_selectors - .last() - .copied() - .expect("0-variant union is not permitted"); - - assert!( - highest_selector <= MAX_UNION_SELECTOR, - "union selector {} exceeds limit of {}, enum has too many variants", - highest_selector, - MAX_UNION_SELECTOR - ); - - union_selectors -} diff --git a/consensus/ssz_derive/tests/tests.rs b/consensus/ssz_derive/tests/tests.rs deleted file mode 100644 index 2eeb3a48db..0000000000 --- a/consensus/ssz_derive/tests/tests.rs +++ /dev/null @@ -1,215 +0,0 @@ -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::fmt::Debug; -use std::marker::PhantomData; - -fn assert_encode(item: &T, bytes: &[u8]) { - assert_eq!(item.as_ssz_bytes(), bytes); -} - -fn assert_encode_decode(item: &T, bytes: &[u8]) { - assert_encode(item, bytes); - assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "union")] -enum TwoFixedUnion { - U8(u8), - U16(u16), -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct TwoFixedUnionStruct { - a: TwoFixedUnion, -} - -#[test] -fn two_fixed_union() { - let eight = TwoFixedUnion::U8(1); - let sixteen = TwoFixedUnion::U16(1); - - assert_encode_decode(&eight, &[0, 1]); - assert_encode_decode(&sixteen, &[1, 1, 0]); - - assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]); - 
assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct VariableA { - a: u8, - b: Vec, -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct VariableB { - a: Vec, - b: u8, -} - -#[derive(PartialEq, Debug, Encode)] -#[ssz(enum_behaviour = "transparent")] -enum TwoVariableTrans { - A(VariableA), - B(VariableB), -} - -#[derive(PartialEq, Debug, Encode)] -struct TwoVariableTransStruct { - a: TwoVariableTrans, -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "union")] -enum TwoVariableUnion { - A(VariableA), - B(VariableB), -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct TwoVariableUnionStruct { - a: TwoVariableUnion, -} - -#[test] -fn two_variable_trans() { - let trans_a = TwoVariableTrans::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let trans_b = TwoVariableTrans::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]); - assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]); - - assert_encode( - &TwoVariableTransStruct { a: trans_a }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode( - &TwoVariableTransStruct { a: trans_b }, - &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2], - ); -} - -#[test] -fn two_variable_union() { - let union_a = TwoVariableUnion::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let union_b = TwoVariableUnion::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]); - assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]); - - assert_encode_decode( - &TwoVariableUnionStruct { a: union_a }, - &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode_decode( - &TwoVariableUnionStruct { a: union_b }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 3, 1, 2], - ); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "union")] -enum TwoVecUnion { - A(Vec), - B(Vec), -} - -#[test] -fn two_vec_union() { - 
assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]); - assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]); - - assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]); - assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]); - - assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 1]); - assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStruct { - inner: Vec, -} - -impl TransparentStruct { - fn new(inner: u8) -> Self { - Self { inner: vec![inner] } - } -} - -#[test] -fn transparent_struct() { - assert_encode_decode(&TransparentStruct::new(42), &vec![42_u8].as_ssz_bytes()); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructSkippedField { - inner: Vec, - #[ssz(skip_serializing, skip_deserializing)] - skipped: PhantomData, -} - -impl TransparentStructSkippedField { - fn new(inner: u8) -> Self { - Self { - inner: vec![inner], - skipped: PhantomData, - } - } -} - -#[test] -fn transparent_struct_skipped_field() { - assert_encode_decode( - &TransparentStructSkippedField::new(42), - &vec![42_u8].as_ssz_bytes(), - ); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructNewType(Vec); - -#[test] -fn transparent_struct_newtype() { - assert_encode_decode( - &TransparentStructNewType(vec![42_u8]), - &vec![42_u8].as_ssz_bytes(), - ); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructNewTypeSkippedField( - Vec, - #[ssz(skip_serializing, skip_deserializing)] PhantomData, -); - -impl TransparentStructNewTypeSkippedField { - fn new(inner: Vec) -> Self { - Self(inner, PhantomData) - } -} - -#[test] -fn transparent_struct_newtype_skipped_field() { - assert_encode_decode( - &TransparentStructNewTypeSkippedField::new(vec![42_u8]), - &vec![42_u8].as_ssz_bytes(), - 
); -} diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml deleted file mode 100644 index 2baa8994fb..0000000000 --- a/consensus/ssz_types/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "eth2_ssz_types" -version = "0.2.2" -authors = ["Paul Hauner "] -edition = "2021" -description = "Provides types with unique properties required for SSZ serialization and Merklization." -license = "Apache-2.0" - -[lib] -name = "ssz_types" - -[dependencies] -tree_hash = "0.4.1" -serde = "1.0.116" -serde_derive = "1.0.116" -eth2_serde_utils = "0.1.1" -eth2_ssz = "0.4.1" -typenum = "1.12.0" -arbitrary = { version = "1.0", features = ["derive"], optional = true } -derivative = "2.1.1" -smallvec = "1.8.0" - -[dev-dependencies] -serde_json = "1.0.58" -tree_hash_derive = "0.4.0" diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs deleted file mode 100644 index b7bde22578..0000000000 --- a/consensus/ssz_types/src/bitfield.rs +++ /dev/null @@ -1,1332 +0,0 @@ -use crate::tree_hash::bitfield_bytes_tree_hash_root; -use crate::Error; -use core::marker::PhantomData; -use derivative::Derivative; -use eth2_serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; -use serde::de::{Deserialize, Deserializer}; -use serde::ser::{Serialize, Serializer}; -use smallvec::{smallvec, SmallVec, ToSmallVec}; -use ssz::{Decode, Encode}; -use tree_hash::Hash256; -use typenum::Unsigned; - -/// Maximum number of bytes to store on the stack in a bitfield's `SmallVec`. -/// -/// The default of 32 bytes is enough to take us through to ~500K validators, as the byte length of -/// attestation bitfields is roughly `N // 32 slots // 64 committes // 8 bits`. -pub const SMALLVEC_LEN: usize = 32; - -/// A marker trait applied to `Variable` and `Fixed` that defines the behaviour of a `Bitfield`. -pub trait BitfieldBehaviour: Clone {} - -/// A marker struct used to declare SSZ `Variable` behaviour on a `Bitfield`. 
-/// -/// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Variable { - _phantom: PhantomData, -} - -/// A marker struct used to declare SSZ `Fixed` behaviour on a `Bitfield`. -/// -/// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Fixed { - _phantom: PhantomData, -} - -impl BitfieldBehaviour for Variable {} -impl BitfieldBehaviour for Fixed {} - -/// A heap-allocated, ordered, variable-length collection of `bool` values, limited to `N` bits. -pub type BitList = Bitfield>; - -/// A heap-allocated, ordered, fixed-length collection of `bool` values, with `N` bits. -/// -/// See [Bitfield](struct.Bitfield.html) documentation. -pub type BitVector = Bitfield>; - -/// A heap-allocated, ordered, fixed-length, collection of `bool` values. Use of -/// [`BitList`](type.BitList.html) or [`BitVector`](type.BitVector.html) type aliases is preferred -/// over direct use of this struct. -/// -/// The `T` type parameter is used to define length behaviour with the `Variable` or `Fixed` marker -/// structs. -/// -/// The length of the Bitfield is set at instantiation (i.e., runtime, not compile time). However, -/// use with a `Variable` sets a type-level (i.e., compile-time) maximum length and `Fixed` -/// provides a type-level fixed length. -/// -/// ## Example -/// -/// The example uses the following crate-level type aliases: -/// -/// - `BitList` is an alias for `Bitfield>` -/// - `BitVector` is an alias for `Bitfield>` -/// -/// ``` -/// use ssz_types::{BitVector, BitList, typenum}; -/// -/// // `BitList` has a type-level maximum length. The length of the list is specified at runtime -/// // and it must be less than or equal to `N`. After instantiation, `BitList` cannot grow or -/// // shrink. -/// type BitList8 = BitList; -/// -/// // Creating a `BitList` with a larger-than-`N` capacity returns `None`. 
-/// assert!(BitList8::with_capacity(9).is_err()); -/// -/// let mut bitlist = BitList8::with_capacity(4).unwrap(); // `BitList` permits a capacity of less than the maximum. -/// assert!(bitlist.set(3, true).is_ok()); // Setting inside the instantiation capacity is permitted. -/// assert!(bitlist.set(5, true).is_err()); // Setting outside that capacity is not. -/// -/// // `BitVector` has a type-level fixed length. Unlike `BitList`, it cannot be instantiated with a custom length -/// // or grow/shrink. -/// type BitVector8 = BitVector; -/// -/// let mut bitvector = BitVector8::new(); -/// assert_eq!(bitvector.len(), 8); // `BitVector` length is fixed at the type-level. -/// assert!(bitvector.set(7, true).is_ok()); // Setting inside the capacity is permitted. -/// assert!(bitvector.set(9, true).is_err()); // Setting outside the capacity is not. -/// -/// ``` -/// -/// ## Note -/// -/// The internal representation of the bitfield is the same as that required by SSZ. The lowest -/// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest -/// bit-index. E.g., `smallvec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. -#[derive(Clone, Debug, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = ""))] -pub struct Bitfield { - bytes: SmallVec<[u8; SMALLVEC_LEN]>, - len: usize, - _phantom: PhantomData, -} - -impl Bitfield> { - /// Instantiate with capacity for `num_bits` boolean values. The length cannot be grown or - /// shrunk after instantiation. - /// - /// All bits are initialized to `false`. - /// - /// Returns `None` if `num_bits > N`. - pub fn with_capacity(num_bits: usize) -> Result { - if num_bits <= N::to_usize() { - Ok(Self { - bytes: smallvec![0; bytes_for_bit_len(num_bits)], - len: num_bits, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: Self::max_len(), - len: Self::max_len(), - }) - } - } - - /// Equal to `N` regardless of the value supplied to `with_capacity`. 
- pub fn max_len() -> usize { - N::to_usize() - } - - /// Consumes `self`, returning a serialized representation. - /// - /// The output is faithful to the SSZ encoding of `self`, such that a leading `true` bit is - /// used to indicate the length of the bitfield. - /// - /// ## Example - /// ``` - /// use ssz_types::{BitList, typenum}; - /// use smallvec::SmallVec; - /// - /// type BitList8 = BitList; - /// - /// let b = BitList8::with_capacity(4).unwrap(); - /// - /// assert_eq!(b.into_bytes(), SmallVec::from_buf([0b0001_0000])); - /// ``` - pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - let len = self.len(); - let mut bytes = self.bytes; - - bytes.resize(bytes_for_bit_len(len + 1), 0); - - let mut bitfield: Bitfield> = Bitfield::from_raw_bytes(bytes, len + 1) - .unwrap_or_else(|_| { - unreachable!( - "Bitfield with {} bytes must have enough capacity for {} bits.", - bytes_for_bit_len(len + 1), - len + 1 - ) - }); - bitfield - .set(len, true) - .expect("len must be in bounds for bitfield."); - - bitfield.bytes - } - - /// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()` - /// produces (SSZ). - /// - /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { - let bytes_len = bytes.len(); - let mut initial_bitfield: Bitfield> = { - let num_bits = bytes.len() * 8; - Bitfield::from_raw_bytes(bytes, num_bits)? - }; - - let len = initial_bitfield - .highest_set_bit() - .ok_or(Error::MissingLengthInformation)?; - - // The length bit should be in the last byte, or else it means we have too many bytes. 
- if len / 8 + 1 != bytes_len { - return Err(Error::InvalidByteCount { - given: bytes_len, - expected: len / 8 + 1, - }); - } - - if len <= Self::max_len() { - initial_bitfield - .set(len, false) - .expect("Bit has been confirmed to exist"); - - let mut bytes = initial_bitfield.into_raw_bytes(); - - bytes.truncate(bytes_for_bit_len(len)); - - Self::from_raw_bytes(bytes, len) - } else { - Err(Error::OutOfBounds { - i: Self::max_len(), - len: Self::max_len(), - }) - } - } - - /// Compute the intersection of two BitLists of potentially different lengths. - /// - /// Return a new BitList with length equal to the shorter of the two inputs. - pub fn intersection(&self, other: &Self) -> Self { - let min_len = std::cmp::min(self.len(), other.len()); - let mut result = Self::with_capacity(min_len).expect("min len always less than N"); - // Bitwise-and the bytes together, starting from the left of each vector. This takes care - // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't - // contain any set bits beyond its length. - for i in 0..result.bytes.len() { - result.bytes[i] = self.bytes[i] & other.bytes[i]; - } - result - } - - /// Compute the union of two BitLists of potentially different lengths. - /// - /// Return a new BitList with length equal to the longer of the two inputs. - pub fn union(&self, other: &Self) -> Self { - let max_len = std::cmp::max(self.len(), other.len()); - let mut result = Self::with_capacity(max_len).expect("max len always less than N"); - for i in 0..result.bytes.len() { - result.bytes[i] = - self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); - } - result - } -} - -impl Bitfield> { - /// Instantiate a new `Bitfield` with a fixed-length of `N` bits. - /// - /// All bits are initialized to `false`. 
- pub fn new() -> Self { - Self { - bytes: smallvec![0; bytes_for_bit_len(Self::capacity())], - len: Self::capacity(), - _phantom: PhantomData, - } - } - - /// Returns `N`, the number of bits in `Self`. - pub fn capacity() -> usize { - N::to_usize() - } - - /// Consumes `self`, returning a serialized representation. - /// - /// The output is faithful to the SSZ encoding of `self`. - /// - /// ## Example - /// ``` - /// use ssz_types::{BitVector, typenum}; - /// use smallvec::SmallVec; - /// - /// type BitVector4 = BitVector; - /// - /// assert_eq!(BitVector4::new().into_bytes(), SmallVec::from_buf([0b0000_0000])); - /// ``` - pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - self.into_raw_bytes() - } - - /// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()` - /// produces (SSZ). - /// - /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { - Self::from_raw_bytes(bytes, Self::capacity()) - } - - /// Compute the intersection of two fixed-length `Bitfield`s. - /// - /// Return a new fixed-length `Bitfield`. - pub fn intersection(&self, other: &Self) -> Self { - let mut result = Self::new(); - // Bitwise-and the bytes together, starting from the left of each vector. This takes care - // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't - // contain any set bits beyond its length. - for i in 0..result.bytes.len() { - result.bytes[i] = self.bytes[i] & other.bytes[i]; - } - result - } - - /// Compute the union of two fixed-length `Bitfield`s. - /// - /// Return a new fixed-length `Bitfield`. 
- pub fn union(&self, other: &Self) -> Self { - let mut result = Self::new(); - for i in 0..result.bytes.len() { - result.bytes[i] = - self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); - } - result - } -} - -impl Default for Bitfield> { - fn default() -> Self { - Self::new() - } -} - -impl Bitfield { - /// Sets the `i`'th bit to `value`. - /// - /// Returns `None` if `i` is out-of-bounds of `self`. - pub fn set(&mut self, i: usize, value: bool) -> Result<(), Error> { - let len = self.len; - - if i < len { - let byte = self - .bytes - .get_mut(i / 8) - .ok_or(Error::OutOfBounds { i, len })?; - - if value { - *byte |= 1 << (i % 8) - } else { - *byte &= !(1 << (i % 8)) - } - - Ok(()) - } else { - Err(Error::OutOfBounds { i, len: self.len }) - } - } - - /// Returns the value of the `i`'th bit. - /// - /// Returns `Error` if `i` is out-of-bounds of `self`. - pub fn get(&self, i: usize) -> Result { - if i < self.len { - let byte = self - .bytes - .get(i / 8) - .ok_or(Error::OutOfBounds { i, len: self.len })?; - - Ok(*byte & 1 << (i % 8) > 0) - } else { - Err(Error::OutOfBounds { i, len: self.len }) - } - } - - /// Returns the number of bits stored in `self`. - pub fn len(&self) -> usize { - self.len - } - - /// Returns `true` if `self.len() == 0`. - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Returns the underlying bytes representation of the bitfield. - pub fn into_raw_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - self.bytes - } - - /// Returns a view into the underlying bytes representation of the bitfield. - pub fn as_slice(&self) -> &[u8] { - &self.bytes - } - - /// Instantiates from the given `bytes`, which are the same format as output from - /// `self.into_raw_bytes()`. - /// - /// Returns `None` if: - /// - /// - `bytes` is not the minimal required bytes to represent a bitfield of `bit_len` bits. 
- /// - `bit_len` is not a multiple of 8 and `bytes` contains set bits that are higher than, or - /// equal to `bit_len`. - fn from_raw_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>, bit_len: usize) -> Result { - if bit_len == 0 { - if bytes.len() == 1 && bytes[0] == 0 { - // A bitfield with `bit_len` 0 can only be represented by a single zero byte. - Ok(Self { - bytes, - len: 0, - _phantom: PhantomData, - }) - } else { - Err(Error::ExcessBits) - } - } else if bytes.len() != bytes_for_bit_len(bit_len) { - // The number of bytes must be the minimum required to represent `bit_len`. - Err(Error::InvalidByteCount { - given: bytes.len(), - expected: bytes_for_bit_len(bit_len), - }) - } else { - // Ensure there are no bits higher than `bit_len` that are set to true. - let (mask, _) = u8::max_value().overflowing_shr(8 - (bit_len as u32 % 8)); - - if (bytes.last().expect("Guarded against empty bytes") & !mask) == 0 { - Ok(Self { - bytes, - len: bit_len, - _phantom: PhantomData, - }) - } else { - Err(Error::ExcessBits) - } - } - } - - /// Returns the `Some(i)` where `i` is the highest index with a set bit. Returns `None` if - /// there are no set bits. - pub fn highest_set_bit(&self) -> Option { - self.bytes - .iter() - .enumerate() - .rev() - .find(|(_, byte)| **byte > 0) - .map(|(i, byte)| i * 8 + 7 - byte.leading_zeros() as usize) - } - - /// Returns an iterator across bitfield `bool` values, starting at the lowest index. - pub fn iter(&self) -> BitIter<'_, T> { - BitIter { - bitfield: self, - i: 0, - } - } - - /// Returns true if no bits are set. - pub fn is_zero(&self) -> bool { - self.bytes.iter().all(|byte| *byte == 0) - } - - /// Returns the number of bits that are set to `true`. - pub fn num_set_bits(&self) -> usize { - self.bytes - .iter() - .map(|byte| byte.count_ones() as usize) - .sum() - } - - /// Compute the difference of this Bitfield and another of potentially different length. 
- pub fn difference(&self, other: &Self) -> Self { - let mut result = self.clone(); - result.difference_inplace(other); - result - } - - /// Compute the difference of this Bitfield and another of potentially different length. - pub fn difference_inplace(&mut self, other: &Self) { - let min_byte_len = std::cmp::min(self.bytes.len(), other.bytes.len()); - - for i in 0..min_byte_len { - self.bytes[i] &= !other.bytes[i]; - } - } - - /// Shift the bits to higher indices, filling the lower indices with zeroes. - /// - /// The amount to shift by, `n`, must be less than or equal to `self.len()`. - pub fn shift_up(&mut self, n: usize) -> Result<(), Error> { - if n <= self.len() { - // Shift the bits up (starting from the high indices to avoid overwriting) - for i in (n..self.len()).rev() { - self.set(i, self.get(i - n)?)?; - } - // Zero the low bits - for i in 0..n { - self.set(i, false).unwrap(); - } - Ok(()) - } else { - Err(Error::OutOfBounds { - i: n, - len: self.len(), - }) - } - } -} - -/// Returns the minimum required bytes to represent a given number of bits. -/// -/// `bit_len == 0` requires a single byte. -fn bytes_for_bit_len(bit_len: usize) -> usize { - std::cmp::max(1, (bit_len + 7) / 8) -} - -/// An iterator over the bits in a `Bitfield`. -pub struct BitIter<'a, T> { - bitfield: &'a Bitfield, - i: usize, -} - -impl<'a, T: BitfieldBehaviour> Iterator for BitIter<'a, T> { - type Item = bool; - - fn next(&mut self) -> Option { - let res = self.bitfield.get(self.i).ok()?; - self.i += 1; - Some(res) - } -} - -impl Encode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - // We could likely do better than turning this into bytes and reading the length, however - // it is kept this way for simplicity. 
- self.clone().into_bytes().len() - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.clone().into_bytes()) - } -} - -impl Decode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_smallvec()).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!("BitList failed to decode: {:?}", e)) - }) - } -} - -impl Encode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_bytes_len(&self) -> usize { - self.as_slice().len() - } - - fn ssz_fixed_len() -> usize { - bytes_for_bit_len(N::to_usize()) - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.clone().into_bytes()) - } -} - -impl Decode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - bytes_for_bit_len(N::to_usize()) - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_smallvec()).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!("BitVector failed to decode: {:?}", e)) - }) - } -} - -impl Serialize for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&hex_encode(self.as_ssz_bytes())) - } -} - -impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::from_ssz_bytes(&bytes) - .map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e))) - } -} - -impl Serialize for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. 
- fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&hex_encode(self.as_ssz_bytes())) - } -} - -impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::from_ssz_bytes(&bytes) - .map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e))) - } -} - -impl tree_hash::TreeHash for Bitfield> { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - // Note: we use `as_slice` because it does _not_ have the length-delimiting bit set (or - // present). 
- let root = bitfield_bytes_tree_hash_root::(self.as_slice()); - tree_hash::mix_in_length(&root, self.len()) - } -} - -impl tree_hash::TreeHash for Bitfield> { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - bitfield_bytes_tree_hash_root::(self.as_slice()) - } -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for Bitfield> { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - let size = N::to_usize(); - let mut vec = smallvec![0u8; size]; - u.fill_buffer(&mut vec)?; - Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for Bitfield> { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - let max_size = N::to_usize(); - let rand = usize::arbitrary(u)?; - let size = std::cmp::min(rand, max_size); - let mut vec = smallvec![0u8; size]; - u.fill_buffer(&mut vec)?; - Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod bitvector { - use super::*; - use crate::BitVector; - - pub type BitVector0 = BitVector; - pub type BitVector1 = BitVector; - pub type BitVector4 = BitVector; - pub type BitVector8 = BitVector; - pub type BitVector16 = BitVector; - pub type BitVector64 = BitVector; - - #[test] - fn ssz_encode() { - assert_eq!(BitVector0::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector1::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector4::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector8::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!( - BitVector16::new().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0000] - ); - - let mut b = 
BitVector8::new(); - for i in 0..8 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![255]); - - let mut b = BitVector4::new(); - for i in 0..4 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111]); - } - - #[test] - fn ssz_decode() { - assert!(BitVector0::from_ssz_bytes(&[0b0000_0000]).is_ok()); - assert!(BitVector0::from_ssz_bytes(&[0b0000_0001]).is_err()); - assert!(BitVector0::from_ssz_bytes(&[0b0000_0010]).is_err()); - - assert!(BitVector1::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0010]).is_err()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0100]).is_err()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err()); - - assert!(BitVector8::from_ssz_bytes(&[0b0000_0000]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[1, 0b0000_0000]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0000, 1]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0001]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0010]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0100]).is_err()); - - assert!(BitVector16::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitVector16::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_ok()); - assert!(BitVector16::from_ssz_bytes(&[1, 0b0000_0000, 0b0000_0000]).is_err()); - } - - #[test] - fn intersection() { - let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); - - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&c), c); - assert_eq!(b.intersection(&c), c); - assert_eq!(a.intersection(&a), a); - 
assert_eq!(b.intersection(&b), b); - assert_eq!(c.intersection(&c), c); - } - - #[test] - fn intersection_diff_length() { - let a = BitVector16::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitVector16::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); - let c = BitVector16::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); - - assert_eq!(a.len(), 16); - assert_eq!(b.len(), 16); - assert_eq!(c.len(), 16); - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - } - - #[test] - fn union() { - let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); - - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&a), a); - assert_eq!(b.union(&b), b); - assert_eq!(c.union(&c), c); - } - - #[test] - fn union_diff_length() { - let a = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitVector16::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); - - assert_eq!(a.len(), c.len()); - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - } - - #[test] - fn ssz_round_trip() { - assert_round_trip(BitVector0::new()); - - let mut b = BitVector1::new(); - b.set(0, true).unwrap(); - assert_round_trip(b); - - let mut b = BitVector8::new(); - for j in 0..8 { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitVector8::new(); - for j in 0..8 { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - - let mut b = BitVector16::new(); - for j in 0..16 { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitVector16::new(); - for j in 0..16 { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - - fn assert_round_trip(t: 
T) { - assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); - } - - #[test] - fn ssz_bytes_len() { - for i in 0..64 { - let mut bitfield = BitVector64::new(); - for j in 0..i { - bitfield.set(j, true).expect("should set bit in bounds"); - } - let bytes = bitfield.as_ssz_bytes(); - assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); - } - } - - #[test] - fn excess_bits_nimbus() { - let bad = vec![0b0001_1111]; - - assert!(BitVector4::from_ssz_bytes(&bad).is_err()); - } - - // Ensure that stack size of a BitVector is manageable. - #[test] - fn size_of() { - assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); - } -} - -#[cfg(test)] -#[allow(clippy::cognitive_complexity)] -mod bitlist { - use super::*; - use crate::BitList; - - pub type BitList0 = BitList; - pub type BitList1 = BitList; - pub type BitList8 = BitList; - pub type BitList16 = BitList; - pub type BitList1024 = BitList; - - #[test] - fn ssz_encode() { - assert_eq!( - BitList0::with_capacity(0).unwrap().as_ssz_bytes(), - vec![0b0000_0001], - ); - - assert_eq!( - BitList1::with_capacity(0).unwrap().as_ssz_bytes(), - vec![0b0000_0001], - ); - - assert_eq!( - BitList1::with_capacity(1).unwrap().as_ssz_bytes(), - vec![0b0000_0010], - ); - - assert_eq!( - BitList8::with_capacity(8).unwrap().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0001], - ); - - assert_eq!( - BitList8::with_capacity(7).unwrap().as_ssz_bytes(), - vec![0b1000_0000] - ); - - let mut b = BitList8::with_capacity(8).unwrap(); - for i in 0..8 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![255, 0b0000_0001]); - - let mut b = BitList8::with_capacity(8).unwrap(); - for i in 0..4 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111, 0b0000_0001]); - - assert_eq!( - BitList16::with_capacity(16).unwrap().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0000, 0b0000_0001] - ); - } - - #[test] - fn ssz_decode() { - assert!(BitList0::from_ssz_bytes(&[]).is_err()); - 
assert!(BitList1::from_ssz_bytes(&[]).is_err()); - assert!(BitList8::from_ssz_bytes(&[]).is_err()); - assert!(BitList16::from_ssz_bytes(&[]).is_err()); - - assert!(BitList0::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitList16::from_ssz_bytes(&[0b0000_0000]).is_err()); - - assert!(BitList0::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList0::from_ssz_bytes(&[0b0000_0010]).is_err()); - - assert!(BitList1::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0100]).is_err()); - - assert!(BitList8::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0001]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0010]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0100]).is_err()); - } - - #[test] - fn ssz_decode_extra_bytes() { - assert!(BitList0::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList16::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0, 0, 0]).is_err()); - } - - #[test] - fn ssz_round_trip() { - assert_round_trip(BitList0::with_capacity(0).unwrap()); - - for i in 0..2 { - assert_round_trip(BitList1::with_capacity(i).unwrap()); - } - for i in 0..9 { - assert_round_trip(BitList8::with_capacity(i).unwrap()); - } - for i in 0..17 { - assert_round_trip(BitList16::with_capacity(i).unwrap()); - } - - let mut b = 
BitList1::with_capacity(1).unwrap(); - b.set(0, true).unwrap(); - assert_round_trip(b); - - for i in 0..8 { - let mut b = BitList8::with_capacity(i).unwrap(); - for j in 0..i { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitList8::with_capacity(i).unwrap(); - for j in 0..i { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - - for i in 0..16 { - let mut b = BitList16::with_capacity(i).unwrap(); - for j in 0..i { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitList16::with_capacity(i).unwrap(); - for j in 0..i { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - } - - fn assert_round_trip(t: T) { - assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); - } - - #[test] - fn from_raw_bytes() { - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000], 0).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 1).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 2).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 3).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 4).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 5).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 6).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 7).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 8).is_ok()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 9).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 10).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 11).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 12).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 13).is_ok()); - 
assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 14).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 15).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 16).is_ok()); - - for i in 0..8 { - assert!(BitList1024::from_raw_bytes(smallvec![], i).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], i).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1111_1110], i).is_err()); - } - - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 1).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 2).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 3).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 4).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 5).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 6).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 7).is_err()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 8).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 9).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 10).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 11).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 12).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 13).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 14).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 15).is_err()); - } - - fn test_set_unset(num_bits: usize) { - let mut bitfield = 
BitList1024::with_capacity(num_bits).unwrap(); - - for i in 0..=num_bits { - if i < num_bits { - // Starts as false - assert_eq!(bitfield.get(i), Ok(false)); - // Can be set true. - assert!(bitfield.set(i, true).is_ok()); - assert_eq!(bitfield.get(i), Ok(true)); - // Can be set false - assert!(bitfield.set(i, false).is_ok()); - assert_eq!(bitfield.get(i), Ok(false)); - } else { - assert!(bitfield.get(i).is_err()); - assert!(bitfield.set(i, true).is_err()); - assert!(bitfield.get(i).is_err()); - } - } - } - - fn test_bytes_round_trip(num_bits: usize) { - for i in 0..num_bits { - let mut bitfield = BitList1024::with_capacity(num_bits).unwrap(); - bitfield.set(i, true).unwrap(); - - let bytes = bitfield.clone().into_raw_bytes(); - assert_eq!(bitfield, Bitfield::from_raw_bytes(bytes, num_bits).unwrap()); - } - } - - #[test] - fn set_unset() { - for i in 0..8 * 5 { - test_set_unset(i) - } - } - - #[test] - fn bytes_round_trip() { - for i in 0..8 * 5 { - test_bytes_round_trip(i) - } - } - - /// Type-specialised `smallvec` macro for testing. - macro_rules! 
bytevec { - ($($x : expr),* $(,)*) => { - { - let __smallvec: SmallVec<[u8; SMALLVEC_LEN]> = smallvec!($($x),*); - __smallvec - } - }; - } - - #[test] - fn into_raw_bytes() { - let mut bitfield = BitList1024::with_capacity(9).unwrap(); - bitfield.set(0, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0001, 0b0000_0000] - ); - bitfield.set(1, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0011, 0b0000_0000] - ); - bitfield.set(2, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0111, 0b0000_0000] - ); - bitfield.set(3, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_1111, 0b0000_0000] - ); - bitfield.set(4, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0001_1111, 0b0000_0000] - ); - bitfield.set(5, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0011_1111, 0b0000_0000] - ); - bitfield.set(6, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0111_1111, 0b0000_0000] - ); - bitfield.set(7, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b1111_1111, 0b0000_0000] - ); - bitfield.set(8, true).unwrap(); - assert_eq!( - bitfield.into_raw_bytes(), - bytevec![0b1111_1111, 0b0000_0001] - ); - } - - #[test] - fn highest_set_bit() { - assert_eq!( - BitList1024::with_capacity(16).unwrap().highest_set_bit(), - None - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0001, 0b0000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(0) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0010, 0b0000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(1) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_1000], 8) - .unwrap() - .highest_set_bit(), - Some(3) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1000_0000], 16) - .unwrap() - 
.highest_set_bit(), - Some(15) - ); - } - - #[test] - fn intersection() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); - - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&c), c); - assert_eq!(b.intersection(&c), c); - assert_eq!(a.intersection(&a), a); - assert_eq!(b.intersection(&b), b); - assert_eq!(c.intersection(&c), c); - } - - #[test] - fn intersection_diff_length() { - let a = BitList1024::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitList1024::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); - let c = BitList1024::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); - let d = BitList1024::from_bytes(smallvec![0b0010_1110, 0b1111_1111, 0b1111_1111]).unwrap(); - - assert_eq!(a.len(), 13); - assert_eq!(b.len(), 8); - assert_eq!(c.len(), 8); - assert_eq!(d.len(), 23); - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&d), a); - assert_eq!(d.intersection(&a), a); - } - - #[test] - fn union() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); - - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&a), a); - assert_eq!(b.union(&b), b); - assert_eq!(c.union(&c), c); - } - - #[test] - fn union_diff_length() { - let a = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitList1024::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); - let d = BitList1024::from_bytes(smallvec![0b0010_1011, 0b1011_1110, 
0b1000_1101]).unwrap(); - - assert_eq!(a.len(), c.len()); - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&d), d); - assert_eq!(d.union(&a), d); - } - - #[test] - fn difference() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0000], 16).unwrap(); - let b_a = BitList1024::from_raw_bytes(smallvec![0b0011, 0b1000], 16).unwrap(); - - assert_eq!(a.difference(&b), a_b); - assert_eq!(b.difference(&a), b_a); - assert!(a.difference(&a).is_zero()); - } - - #[test] - fn difference_diff_length() { - let a = BitList1024::from_raw_bytes(smallvec![0b0110, 0b1100, 0b0011], 24).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0100, 0b0011], 24).unwrap(); - let b_a = BitList1024::from_raw_bytes(smallvec![0b1001, 0b0001], 16).unwrap(); - - assert_eq!(a.difference(&b), a_b); - assert_eq!(b.difference(&a), b_a); - } - - #[test] - fn shift_up() { - let mut a = BitList1024::from_raw_bytes(smallvec![0b1100_1111, 0b1101_0110], 16).unwrap(); - let mut b = BitList1024::from_raw_bytes(smallvec![0b1001_1110, 0b1010_1101], 16).unwrap(); - - a.shift_up(1).unwrap(); - assert_eq!(a, b); - a.shift_up(15).unwrap(); - assert!(a.is_zero()); - - b.shift_up(16).unwrap(); - assert!(b.is_zero()); - assert!(b.shift_up(17).is_err()); - } - - #[test] - fn num_set_bits() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - - assert_eq!(a.num_set_bits(), 3); - assert_eq!(b.num_set_bits(), 5); - } - - #[test] - fn iter() { - let mut bitfield = BitList1024::with_capacity(9).unwrap(); - bitfield.set(2, true).unwrap(); - bitfield.set(8, true).unwrap(); - - assert_eq!( - bitfield.iter().collect::>(), - 
vec![false, false, true, false, false, false, false, false, true] - ); - } - - #[test] - fn ssz_bytes_len() { - for i in 1..64 { - let mut bitfield = BitList1024::with_capacity(i).unwrap(); - for j in 0..i { - bitfield.set(j, true).expect("should set bit in bounds"); - } - let bytes = bitfield.as_ssz_bytes(); - assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); - } - } - - // Ensure that the stack size of a BitList is manageable. - #[test] - fn size_of() { - assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); - } -} diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs deleted file mode 100644 index 9625f27f3a..0000000000 --- a/consensus/ssz_types/src/fixed_vector.rs +++ /dev/null @@ -1,446 +0,0 @@ -use crate::tree_hash::vec_tree_hash_root; -use crate::Error; -use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::Hash256; -use typenum::Unsigned; - -pub use typenum; - -/// Emulates a SSZ `Vector` (distinct from a Rust `Vec`). -/// -/// An ordered, heap-allocated, fixed-length, homogeneous collection of `T`, with `N` values. -/// -/// This struct is backed by a Rust `Vec` but constrained such that it must be instantiated with a -/// fixed number of elements and you may not add or remove elements, only modify. -/// -/// The length of this struct is fixed at the type-level using -/// [typenum](https://crates.io/crates/typenum). -/// -/// ## Note -/// -/// Whilst it is possible with this library, SSZ declares that a `FixedVector` with a length of `0` -/// is illegal. -/// -/// ## Example -/// -/// ``` -/// use ssz_types::{FixedVector, typenum}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `FixedVector` from a `Vec` that has the expected length. 
-/// let exact: FixedVector<_, typenum::U4> = FixedVector::from(base.clone()); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `FixedVector` from a `Vec` that is too long and the `Vec` is truncated. -/// let short: FixedVector<_, typenum::U3> = FixedVector::from(base.clone()); -/// assert_eq!(&short[..], &[1, 2, 3]); -/// -/// // Create a `FixedVector` from a `Vec` that is too short and the missing values are created -/// // using `std::default::Default`. -/// let long: FixedVector<_, typenum::U5> = FixedVector::from(base); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 0]); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: std::hash::Hash"))] -#[serde(transparent)] -pub struct FixedVector { - vec: Vec, - _phantom: PhantomData, -} - -impl FixedVector { - /// Returns `Ok` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `Err`. - pub fn new(vec: Vec) -> Result { - if vec.len() == Self::capacity() { - Ok(Self { - vec, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: Self::capacity(), - }) - } - } - - /// Create a new vector filled with clones of `elem`. - pub fn from_elem(elem: T) -> Self - where - T: Clone, - { - Self { - vec: vec![elem; N::to_usize()], - _phantom: PhantomData, - } - } - - /// Identical to `self.capacity`, returns the type-level constant length. - /// - /// Exists for compatibility with `Vec`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if the type-level constant length of `self` is zero. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level constant length. 
- pub fn capacity() -> usize { - N::to_usize() - } -} - -impl From> for FixedVector { - fn from(mut vec: Vec) -> Self { - vec.resize_with(Self::capacity(), Default::default); - - Self { - vec, - _phantom: PhantomData, - } - } -} - -impl From> for Vec { - fn from(vector: FixedVector) -> Vec { - vector.vec - } -} - -impl Default for FixedVector { - fn default() -> Self { - Self { - vec: (0..N::to_usize()).map(|_| T::default()).collect(), - _phantom: PhantomData, - } - } -} - -impl> Index for FixedVector { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for FixedVector { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for FixedVector { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -// This implementation is required to use `get_mut` to access elements. -// -// It's safe because none of the methods on mutable slices allow changing the length -// of the backing vec. -impl DerefMut for FixedVector { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - -impl tree_hash::TreeHash for FixedVector -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - vec_tree_hash_root::(&self.vec) - } -} - -impl ssz::Encode for FixedVector -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * self.len()); - - for item in &self.vec { - item.ssz_append(buf); - } - } else { - let mut encoder = - ssz::SszEncoder::container(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET); - - for item in &self.vec { - encoder.append(item); - } - - encoder.finalize(); - } - } -} - -impl ssz::Decode for FixedVector -where - T: ssz::Decode, -{ - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let fixed_len = N::to_usize(); - - if bytes.is_empty() { - Err(ssz::DecodeError::InvalidByteLength { - len: 0, - expected: 1, - }) - } else if T::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(T::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items != fixed_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "FixedVector of {} items has {} items", - num_items, fixed_len - ))); - } - - bytes - 
.chunks(T::ssz_fixed_len()) - .map(|chunk| T::from_ssz_bytes(chunk)) - .collect::, _>>() - .and_then(|vec| { - Self::new(vec).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!( - "Wrong number of FixedVector elements: {:?}", - e - )) - }) - }) - } else { - let vec = ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len))?; - Self::new(vec).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!( - "Wrong number of FixedVector elements: {:?}", - e - )) - }) - } - } -} - -#[cfg(feature = "arbitrary")] -impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrary<'a> - for FixedVector -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let size = N::to_usize(); - let mut vec: Vec = Vec::with_capacity(size); - for _ in 0..size { - vec.push(::arbitrary(u)?); - } - Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use tree_hash::{merkle_root, TreeHash}; - use tree_hash_derive::TreeHash; - use typenum::*; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 4]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: FixedVector = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((fixed[..]).len(), 8192); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]); - - let vec = 
vec![]; - let fixed: FixedVector = FixedVector::from(vec); - assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: FixedVector = FixedVector::from(vec); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn ssz_encode() { - let vec: FixedVector = vec![0; 2].into(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn ssz_round_trip(item: T) { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - - #[test] - fn ssz_round_trip_u16_len_8() { - ssz_round_trip::>(vec![42; 8].into()); - ssz_round_trip::>(vec![0; 8].into()); - } - - #[test] - fn tree_hash_u8() { - let fixed: FixedVector = FixedVector::from(vec![]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![0; 1]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![0; 8]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![42; 16]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[42; 16], 0)); - - let source: Vec = (0..16).collect(); - let fixed: FixedVector = FixedVector::from(source.clone()); - assert_eq!(fixed.tree_hash_root(), merkle_root(&source, 0)); - } - - #[derive(Clone, Copy, TreeHash, Default)] - struct A { - a: u32, - b: u32, - } - - fn repeat(input: &[u8], n: usize) -> Vec { - let mut output = vec![]; - - for _ in 0..n { - output.append(&mut input.to_vec()); - } - - output - } - - #[test] - fn tree_hash_composite() { - let a = A { a: 0, b: 1 }; - - let fixed: FixedVector = FixedVector::from(vec![]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 32], 0)); - - let fixed: FixedVector = FixedVector::from(vec![a]); 
- assert_eq!( - fixed.tree_hash_root(), - merkle_root(a.tree_hash_root().as_bytes(), 0) - ); - - let fixed: FixedVector = FixedVector::from(vec![a; 8]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(&repeat(a.tree_hash_root().as_bytes(), 8), 0) - ); - - let fixed: FixedVector = FixedVector::from(vec![a; 13]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(&repeat(a.tree_hash_root().as_bytes(), 13), 0) - ); - - let fixed: FixedVector = FixedVector::from(vec![a; 16]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(&repeat(a.tree_hash_root().as_bytes(), 16), 0) - ); - } -} diff --git a/consensus/ssz_types/src/lib.rs b/consensus/ssz_types/src/lib.rs deleted file mode 100644 index 3e181da8cb..0000000000 --- a/consensus/ssz_types/src/lib.rs +++ /dev/null @@ -1,72 +0,0 @@ -//! Provides types with unique properties required for SSZ serialization and Merklization: -//! -//! - `FixedVector`: A heap-allocated list with a size that is fixed at compile time. -//! - `VariableList`: A heap-allocated list that cannot grow past a type-level maximum length. -//! - `BitList`: A heap-allocated bitfield that with a type-level _maximum_ length. -//! - `BitVector`: A heap-allocated bitfield that with a type-level _fixed__ length. -//! -//! These structs are required as SSZ serialization and Merklization rely upon type-level lengths -//! for padding and verification. -//! -//! Adheres to the Ethereum 2.0 [SSZ -//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md) -//! at v0.12.1. -//! -//! ## Example -//! ``` -//! use ssz_types::*; -//! -//! pub struct Example { -//! bit_vector: BitVector, -//! bit_list: BitList, -//! variable_list: VariableList, -//! fixed_vector: FixedVector, -//! } -//! -//! let mut example = Example { -//! bit_vector: Bitfield::new(), -//! bit_list: Bitfield::with_capacity(4).unwrap(), -//! variable_list: <_>::from(vec![0, 1]), -//! fixed_vector: <_>::from(vec![2, 3]), -//! }; -//! -//! 
assert_eq!(example.bit_vector.len(), 8); -//! assert_eq!(example.bit_list.len(), 4); -//! assert_eq!(&example.variable_list[..], &[0, 1]); -//! assert_eq!(&example.fixed_vector[..], &[2, 3, 0, 0, 0, 0, 0, 0]); -//! -//! ``` - -#[macro_use] -mod bitfield; -mod fixed_vector; -pub mod serde_utils; -mod tree_hash; -mod variable_list; - -pub use bitfield::{BitList, BitVector, Bitfield}; -pub use fixed_vector::FixedVector; -pub use typenum; -pub use variable_list::VariableList; - -pub mod length { - pub use crate::bitfield::{Fixed, Variable}; -} - -/// Returned when an item encounters an error. -#[derive(PartialEq, Debug, Clone)] -pub enum Error { - OutOfBounds { - i: usize, - len: usize, - }, - /// A `BitList` does not have a set bit, therefore it's length is unknowable. - MissingLengthInformation, - /// A `BitList` has excess bits set to true. - ExcessBits, - /// A `BitList` has an invalid number of bytes for a given bit length. - InvalidByteCount { - given: usize, - expected: usize, - }, -} diff --git a/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs deleted file mode 100644 index 86077891bc..0000000000 --- a/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::FixedVector; -use eth2_serde_utils::hex::{self, PrefixedHexVisitor}; -use serde::{Deserializer, Serializer}; -use typenum::Unsigned; - -pub fn serialize(bytes: &FixedVector, serializer: S) -> Result -where - S: Serializer, - U: Unsigned, -{ - serializer.serialize_str(&hex::encode(&bytes[..])) -} - -pub fn deserialize<'de, D, U>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - U: Unsigned, -{ - let vec = deserializer.deserialize_string(PrefixedHexVisitor)?; - FixedVector::new(vec) - .map_err(|e| serde::de::Error::custom(format!("invalid fixed vector: {:?}", e))) -} diff --git a/consensus/ssz_types/src/serde_utils/hex_var_list.rs b/consensus/ssz_types/src/serde_utils/hex_var_list.rs deleted 
file mode 100644 index e3a3a14e06..0000000000 --- a/consensus/ssz_types/src/serde_utils/hex_var_list.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Serialize `VariableList` as 0x-prefixed hex string. -use crate::VariableList; -use eth2_serde_utils::hex::{self, PrefixedHexVisitor}; -use serde::{Deserializer, Serializer}; -use typenum::Unsigned; - -pub fn serialize(bytes: &VariableList, serializer: S) -> Result -where - S: Serializer, - N: Unsigned, -{ - serializer.serialize_str(&hex::encode(&**bytes)) -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - VariableList::new(bytes) - .map_err(|e| serde::de::Error::custom(format!("invalid variable list: {:?}", e))) -} diff --git a/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs b/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs deleted file mode 100644 index e2fd8ddf32..0000000000 --- a/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Serialize `VaraibleList, N>` as list of 0x-prefixed hex string. 
-use crate::VariableList; -use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -#[derive(Deserialize)] -#[serde(transparent)] -pub struct WrappedListOwned( - #[serde(with = "crate::serde_utils::hex_var_list")] VariableList, -); - -#[derive(Serialize)] -#[serde(transparent)] -pub struct WrappedListRef<'a, N: Unsigned>( - #[serde(with = "crate::serde_utils::hex_var_list")] &'a VariableList, -); - -pub fn serialize( - list: &VariableList, N>, - serializer: S, -) -> Result -where - S: Serializer, - M: Unsigned, - N: Unsigned, -{ - let mut seq = serializer.serialize_seq(Some(list.len()))?; - for bytes in list { - seq.serialize_element(&WrappedListRef(bytes))?; - } - seq.end() -} - -#[derive(Default)] -pub struct Visitor { - _phantom_m: PhantomData, - _phantom_n: PhantomData, -} - -impl<'a, M, N> serde::de::Visitor<'a> for Visitor -where - M: Unsigned, - N: Unsigned, -{ - type Value = VariableList, N>; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of 0x-prefixed hex bytes") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut list: VariableList, N> = <_>::default(); - - while let Some(val) = seq.next_element::>()? 
{ - list.push(val.0).map_err(|e| { - serde::de::Error::custom(format!("failed to push value to list: {:?}.", e)) - })?; - } - - Ok(list) - } -} - -pub fn deserialize<'de, D, M, N>( - deserializer: D, -) -> Result, N>, D::Error> -where - D: Deserializer<'de>, - M: Unsigned, - N: Unsigned, -{ - deserializer.deserialize_seq(Visitor::default()) -} diff --git a/consensus/ssz_types/src/serde_utils/mod.rs b/consensus/ssz_types/src/serde_utils/mod.rs deleted file mode 100644 index cd6d49cc85..0000000000 --- a/consensus/ssz_types/src/serde_utils/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod hex_fixed_vec; -pub mod hex_var_list; -pub mod list_of_hex_var_list; -pub mod quoted_u64_fixed_vec; -pub mod quoted_u64_var_list; diff --git a/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs deleted file mode 100644 index 0eb265adc3..0000000000 --- a/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs +++ /dev/null @@ -1,113 +0,0 @@ -//! Formats `FixedVector` using quotes. -//! -//! E.g., `FixedVector::from(vec![0, 1, 2])` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. If `N` does not equal the length deserialization will fail. 
- -use crate::serde_utils::quoted_u64_var_list::deserialize_max; -use crate::FixedVector; -use eth2_serde_utils::quoted_u64_vec::QuotedIntWrapper; -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -pub struct QuotedIntFixedVecVisitor { - _phantom: PhantomData, -} - -impl<'a, N> serde::de::Visitor<'a> for QuotedIntFixedVecVisitor -where - N: Unsigned, -{ - type Value = FixedVector; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let vec = deserialize_max(seq, N::to_usize())?; - let fix: FixedVector = FixedVector::new(vec) - .map_err(|e| serde::de::Error::custom(format!("FixedVector: {:?}", e)))?; - Ok(fix) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - deserializer.deserialize_any(QuotedIntFixedVecVisitor { - _phantom: PhantomData, - }) -} - -#[cfg(test)] -mod test { - use super::*; - use serde_derive::{Deserialize, Serialize}; - use typenum::U4; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::serde_utils::quoted_u64_fixed_vec")] - values: FixedVector, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - let expected: FixedVector = 
FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn empty_list_err() { - serde_json::from_str::(r#"{ "values": [] }"#).unwrap_err(); - } - - #[test] - fn short_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2] }"#).unwrap_err(); - } - - #[test] - fn long_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2, 3, 4, 5] }"#).unwrap_err(); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs b/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs deleted file mode 100644 index 9e176b6359..0000000000 --- a/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs +++ /dev/null @@ -1,139 +0,0 @@ -//! Formats `VariableList` using quotes. -//! -//! E.g., `VariableList::from(vec![0, 1, 2])` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. If the length of the `Vec` is greater than `N`, deserialization fails. 
- -use crate::VariableList; -use eth2_serde_utils::quoted_u64_vec::QuotedIntWrapper; -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -pub struct QuotedIntVarListVisitor { - _phantom: PhantomData, -} - -impl<'a, N> serde::de::Visitor<'a> for QuotedIntVarListVisitor -where - N: Unsigned, -{ - type Value = VariableList; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let vec = deserialize_max(seq, N::to_usize())?; - let list: VariableList = VariableList::new(vec) - .map_err(|e| serde::de::Error::custom(format!("VariableList: {:?}", e)))?; - Ok(list) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - deserializer.deserialize_any(QuotedIntVarListVisitor { - _phantom: PhantomData, - }) -} - -/// Returns a `Vec` of no more than `max_items` length. -pub(crate) fn deserialize_max<'a, A>(mut seq: A, max_items: usize) -> Result, A::Error> -where - A: serde::de::SeqAccess<'a>, -{ - let mut vec = vec![]; - let mut counter = 0; - - while let Some(val) = seq.next_element()? { - let val: QuotedIntWrapper = val; - counter += 1; - if counter > max_items { - return Err(serde::de::Error::custom(format!( - "Deserialization failed. 
Length cannot be greater than {}.", - max_items - ))); - } - - vec.push(val.int); - } - - Ok(vec) -} - -#[cfg(test)] -mod test { - use super::*; - use serde_derive::{Deserialize, Serialize}; - use typenum::U4; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::serde_utils::quoted_u64_var_list")] - values: VariableList, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn empty_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [] }"#).unwrap(); - assert!(obj.values.is_empty()); - } - - #[test] - fn short_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2]); - assert_eq!(obj.values, expected); - } - - #[test] - fn long_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2, 3, 4, 5] }"#).unwrap_err(); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/ssz_types/src/tree_hash.rs b/consensus/ssz_types/src/tree_hash.rs deleted file mode 100644 index e08c1d62fb..0000000000 --- a/consensus/ssz_types/src/tree_hash.rs +++ /dev/null @@ -1,58 +0,0 @@ -use tree_hash::{Hash256, MerkleHasher, TreeHash, TreeHashType, BYTES_PER_CHUNK}; -use typenum::Unsigned; - 
-/// A helper function providing common functionality between the `TreeHash` implementations for -/// `FixedVector` and `VariableList`. -pub fn vec_tree_hash_root(vec: &[T]) -> Hash256 -where - T: TreeHash, - N: Unsigned, -{ - match T::tree_hash_type() { - TreeHashType::Basic => { - let mut hasher = MerkleHasher::with_leaves( - (N::to_usize() + T::tree_hash_packing_factor() - 1) / T::tree_hash_packing_factor(), - ); - - for item in vec { - hasher - .write(&item.tree_hash_packed_encoding()) - .expect("ssz_types variable vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types variable vec should not have a remaining buffer") - } - TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { - let mut hasher = MerkleHasher::with_leaves(N::to_usize()); - - for item in vec { - hasher - .write(item.tree_hash_root().as_bytes()) - .expect("ssz_types vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types vec should not have a remaining buffer") - } - } -} - -/// A helper function providing common functionality for finding the Merkle root of some bytes that -/// represent a bitfield. 
-pub fn bitfield_bytes_tree_hash_root(bytes: &[u8]) -> Hash256 { - let byte_size = (N::to_usize() + 7) / 8; - let leaf_count = (byte_size + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK; - - let mut hasher = MerkleHasher::with_leaves(leaf_count); - - hasher - .write(bytes) - .expect("bitfield should not exceed tree hash leaf limit"); - - hasher - .finish() - .expect("bitfield tree hash buffer should not exceed leaf limit") -} diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs deleted file mode 100644 index 3361f75090..0000000000 --- a/consensus/ssz_types/src/variable_list.rs +++ /dev/null @@ -1,477 +0,0 @@ -use crate::tree_hash::vec_tree_hash_root; -use crate::Error; -use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::Hash256; -use typenum::Unsigned; - -pub use typenum; - -/// Emulates a SSZ `List`. -/// -/// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than -/// `N` values. -/// -/// This struct is backed by a Rust `Vec` but constrained such that it must be instantiated with a -/// fixed number of elements and you may not add or remove elements, only modify. -/// -/// The length of this struct is fixed at the type-level using -/// [typenum](https://crates.io/crates/typenum). -/// -/// ## Example -/// -/// ``` -/// use ssz_types::{VariableList, typenum}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `VariableList` from a `Vec` that has the expected length. -/// let exact: VariableList<_, typenum::U4> = VariableList::from(base.clone()); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `VariableList` from a `Vec` that is too long and the `Vec` is truncated. 
-/// let short: VariableList<_, typenum::U3> = VariableList::from(base.clone()); -/// assert_eq!(&short[..], &[1, 2, 3]); -/// -/// // Create a `VariableList` from a `Vec` that is shorter than the maximum. -/// let mut long: VariableList<_, typenum::U5> = VariableList::from(base); -/// assert_eq!(&long[..], &[1, 2, 3, 4]); -/// -/// // Push a value to if it does not exceed the maximum -/// long.push(5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 5]); -/// -/// // Push a value to if it _does_ exceed the maximum. -/// assert!(long.push(6).is_err()); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] -#[serde(transparent)] -pub struct VariableList { - vec: Vec, - _phantom: PhantomData, -} - -impl VariableList { - /// Returns `Some` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `None`. - pub fn new(vec: Vec) -> Result { - if vec.len() <= N::to_usize() { - Ok(Self { - vec, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: Self::max_len(), - }) - } - } - - /// Create an empty list. - pub fn empty() -> Self { - Self { - vec: vec![], - _phantom: PhantomData, - } - } - - /// Returns the number of values presently in `self`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if `self` does not contain any values. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level maximum length. - pub fn max_len() -> usize { - N::to_usize() - } - - /// Appends `value` to the back of `self`. - /// - /// Returns `Err(())` when appending `value` would exceed the maximum length. 
- pub fn push(&mut self, value: T) -> Result<(), Error> { - if self.vec.len() < Self::max_len() { - self.vec.push(value); - Ok(()) - } else { - Err(Error::OutOfBounds { - i: self.vec.len() + 1, - len: Self::max_len(), - }) - } - } -} - -impl From> for VariableList { - fn from(mut vec: Vec) -> Self { - vec.truncate(N::to_usize()); - - Self { - vec, - _phantom: PhantomData, - } - } -} - -impl From> for Vec { - fn from(list: VariableList) -> Vec { - list.vec - } -} - -impl Default for VariableList { - fn default() -> Self { - Self { - vec: Vec::default(), - _phantom: PhantomData, - } - } -} - -impl> Index for VariableList { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for VariableList { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for VariableList { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -impl DerefMut for VariableList { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - -impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for VariableList { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl tree_hash::TreeHash for VariableList -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let root = vec_tree_hash_root::(&self.vec); - - tree_hash::mix_in_length(&root, self.len()) - } -} - -impl ssz::Encode for VariableList -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.vec.ssz_append(buf) - } -} - -impl ssz::Decode for VariableList -where - T: ssz::Decode, - N: Unsigned, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let max_len = N::to_usize(); - - if bytes.is_empty() { - Ok(vec![].into()) - } else if T::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(T::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items > max_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "VariableList of {} items exceeds maximum of {}", - num_items, max_len - ))); - } - - bytes - .chunks(T::ssz_fixed_len()) - .try_fold(Vec::with_capacity(num_items), |mut vec, chunk| { - vec.push(T::from_ssz_bytes(chunk)?); - Ok(vec) - }) - .map(Into::into) - } else { - 
ssz::decode_list_of_variable_length_items(bytes, Some(max_len)) - .map(|vec: Vec<_>| vec.into()) - } - } -} - -#[cfg(feature = "arbitrary")] -impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrary<'a> - for VariableList -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let max_size = N::to_usize(); - let rand = usize::arbitrary(u)?; - let size = std::cmp::min(rand, max_size); - let mut vec: Vec = Vec::with_capacity(size); - for _ in 0..size { - vec.push(::arbitrary(u)?); - } - Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use tree_hash::{merkle_root, TreeHash}; - use tree_hash_derive::TreeHash; - use typenum::*; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_ok()); - - let vec = vec![42; 4]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: VariableList = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((fixed[..]).len(), 2); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: VariableList = VariableList::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: VariableList = VariableList::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42][..]); - - let vec = vec![]; - let fixed: VariableList = VariableList::from(vec); - assert_eq!(&fixed[..], &[] as &[u64]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: VariableList = VariableList::from(vec); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - 
assert_eq!(fixed.get(4), None); - } - - #[test] - fn encode() { - let vec: VariableList = vec![0; 2].into(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn round_trip(item: T) { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - - #[test] - fn u16_len_8() { - round_trip::>(vec![42; 8].into()); - round_trip::>(vec![0; 8].into()); - } - - fn root_with_length(bytes: &[u8], len: usize) -> Hash256 { - let root = merkle_root(bytes, 0); - tree_hash::mix_in_length(&root, len) - } - - #[test] - fn tree_hash_u8() { - let fixed: VariableList = VariableList::from(vec![]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&[0; 8], 0)); - - for i in 0..=1 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=8 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=13 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=16 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - let source: Vec = (0..16).collect(); - let fixed: VariableList = VariableList::from(source.clone()); - assert_eq!(fixed.tree_hash_root(), root_with_length(&source, 16)); - } - - #[derive(Clone, Copy, TreeHash, Default)] - struct A { - a: u32, - b: u32, - } - - fn repeat(input: &[u8], n: usize) -> Vec { - let mut output = vec![]; - - for _ in 0..n { - output.append(&mut input.to_vec()); - } - - output - } - - fn padded_root_with_length(bytes: &[u8], len: usize, min_nodes: usize) -> Hash256 { - let root = merkle_root(bytes, min_nodes); - 
tree_hash::mix_in_length(&root, len) - } - - #[test] - fn tree_hash_composite() { - let a = A { a: 0, b: 1 }; - - let fixed: VariableList = VariableList::from(vec![]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&[0; 32], 0, 0), - ); - - for i in 0..=1 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 1), - "U1 {}", - i - ); - } - - for i in 0..=8 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 8), - "U8 {}", - i - ); - } - - for i in 0..=13 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 13), - "U13 {}", - i - ); - } - - for i in 0..=16 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 16), - "U16 {}", - i - ); - } - } -} diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index ccb41830be..c16742782c 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -13,15 +13,15 @@ tokio = { version = "1.14.0", features = ["rt-multi-thread"] } bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" itertools = "0.10.0" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +ssz_types = "0.5.0" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" types = { path = "../types", default-features = false } rayon = "1.4.1" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" int_to_bytes = { path = "../int_to_bytes" } smallvec = "1.6.1" 
arbitrary = { version = "1.0", features = ["derive"], optional = true } @@ -39,7 +39,7 @@ arbitrary-fuzz = [ "types/arbitrary-fuzz", "bls/arbitrary", "merkle_proof/arbitrary", - "eth2_ssz/arbitrary", - "eth2_ssz_types/arbitrary", + "ethereum_ssz/arbitrary", + "ssz_types/arbitrary", "tree_hash/arbitrary", ] diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index cc7bd17c50..ed5e642941 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -29,7 +29,7 @@ pub struct BlockReplayer< > { state: BeaconState, spec: &'a ChainSpec, - state_root_strategy: StateRootStrategy, + state_processing_strategy: StateProcessingStrategy, block_sig_strategy: BlockSignatureStrategy, verify_block_root: Option, pre_block_hook: Option>, @@ -60,13 +60,13 @@ impl From for BlockReplayError { } } -/// Defines how state roots should be computed during block replay. -#[derive(PartialEq)] -pub enum StateRootStrategy { +/// Defines how state roots should be computed and whether to perform all state transitions during block replay. +#[derive(PartialEq, Clone, Copy)] +pub enum StateProcessingStrategy { /// Perform all transitions faithfully to the specification. Accurate, - /// Don't compute state roots, eventually computing an invalid beacon state that can only be - /// used for obtaining shuffling. + /// Don't compute state roots and process withdrawals, eventually computing an invalid beacon + /// state that can only be used for obtaining shuffling. Inconsistent, } @@ -87,7 +87,7 @@ where Self { state, spec, - state_root_strategy: StateRootStrategy::Accurate, + state_processing_strategy: StateProcessingStrategy::Accurate, block_sig_strategy: BlockSignatureStrategy::VerifyBulk, verify_block_root: Some(VerifyBlockRoot::True), pre_block_hook: None, @@ -100,12 +100,15 @@ where } } - /// Set the replayer's state root strategy different from the default. 
- pub fn state_root_strategy(mut self, state_root_strategy: StateRootStrategy) -> Self { - if state_root_strategy == StateRootStrategy::Inconsistent { + /// Set the replayer's state processing strategy different from the default. + pub fn state_processing_strategy( + mut self, + state_processing_strategy: StateProcessingStrategy, + ) -> Self { + if state_processing_strategy == StateProcessingStrategy::Inconsistent { self.verify_block_root = None; } - self.state_root_strategy = state_root_strategy; + self.state_processing_strategy = state_processing_strategy; self } @@ -182,7 +185,7 @@ where i: usize, ) -> Result, Error> { // If we don't care about state roots then return immediately. - if self.state_root_strategy == StateRootStrategy::Inconsistent { + if self.state_processing_strategy == StateProcessingStrategy::Inconsistent { return Ok(Some(Hash256::zero())); } @@ -249,7 +252,7 @@ where // If no explicit policy is set, verify only the first 1 or 2 block roots if using // accurate state roots. Inaccurate state roots require block root verification to // be off. 
- if i <= 1 && self.state_root_strategy == StateRootStrategy::Accurate { + if i <= 1 && self.state_processing_strategy == StateProcessingStrategy::Accurate { VerifyBlockRoot::True } else { VerifyBlockRoot::False @@ -263,6 +266,7 @@ where &mut self.state, block, self.block_sig_strategy, + self.state_processing_strategy, verify_block_root, &mut ctxt, self.spec, diff --git a/consensus/state_processing/src/common/deposit_data_tree.rs b/consensus/state_processing/src/common/deposit_data_tree.rs index aaad96fbd5..2e86556b0f 100644 --- a/consensus/state_processing/src/common/deposit_data_tree.rs +++ b/consensus/state_processing/src/common/deposit_data_tree.rs @@ -1,4 +1,4 @@ -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes32; use merkle_proof::{MerkleTree, MerkleTreeError}; use safe_arith::SafeArith; diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index e4f36bedd8..7340206a34 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -27,7 +27,7 @@ pub mod state_advance; pub mod upgrade; pub mod verify_operation; -pub use block_replayer::{BlockReplayError, BlockReplayer, StateRootStrategy}; +pub use block_replayer::{BlockReplayError, BlockReplayer, StateProcessingStrategy}; pub use consensus_context::{ConsensusContext, ContextError}; pub use genesis::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, @@ -41,4 +41,4 @@ pub use per_epoch_processing::{ errors::EpochProcessingError, process_epoch as per_epoch_processing, }; pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError}; -pub use verify_operation::{SigVerifiedOp, VerifyOperation}; +pub use verify_operation::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index c564b98d66..124fdf6500 100644 --- 
a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -39,6 +39,7 @@ mod verify_exit; mod verify_proposer_slashing; use crate::common::decrease_balance; +use crate::StateProcessingStrategy; #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -96,6 +97,7 @@ pub fn per_block_processing>( state: &mut BeaconState, signed_block: &SignedBeaconBlock, block_signature_strategy: BlockSignatureStrategy, + state_processing_strategy: StateProcessingStrategy, verify_block_root: VerifyBlockRoot, ctxt: &mut ConsensusContext, spec: &ChainSpec, @@ -160,7 +162,9 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - process_withdrawals::(state, payload, spec)?; + if state_processing_strategy == StateProcessingStrategy::Accurate { + process_withdrawals::(state, payload, spec)?; + } process_execution_payload::(state, payload, spec)?; } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 7d04cad90b..4bee596615 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -282,7 +282,8 @@ pub fn process_exits( // Verify and apply each exit in series. We iterate in series because higher-index exits may // become invalid due to the application of lower-index ones. 
for (i, exit) in voluntary_exits.iter().enumerate() { - verify_exit(state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?; + verify_exit(state, None, exit, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; initiate_validator_exit(state, exit.message.validator_index as usize, spec)?; } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index b7d28832db..ddb9ca6ad5 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1,11 +1,11 @@ #![cfg(all(test, not(feature = "fake_crypto")))] -use crate::per_block_processing; use crate::per_block_processing::errors::{ AttestationInvalid, AttesterSlashingInvalid, BlockOperationError, BlockProcessingError, DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; +use crate::{per_block_processing, StateProcessingStrategy}; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, @@ -72,6 +72,7 @@ async fn valid_block_ok() { &mut state, &block, BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -97,6 +98,7 @@ async fn invalid_block_header_state_slot() { &mut state, &SignedBeaconBlock::from_block(block, signature), BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -129,6 +131,7 @@ async fn invalid_parent_block_root() { &mut state, &SignedBeaconBlock::from_block(block, signature), BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -162,6 +165,7 @@ async fn invalid_block_signature() { &mut state, &SignedBeaconBlock::from_block(block, Signature::empty()), 
BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -195,6 +199,7 @@ async fn invalid_randao_reveal_signature() { &mut state, &signed_block, BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -978,8 +983,14 @@ async fn fork_spanning_exit() { let head = harness.chain.canonical_head.cached_head(); let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() < spec.altair_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect("phase0 exit verifies against phase0 state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect("phase0 exit verifies against phase0 state"); /* * Ensure the exit verifies after Altair. @@ -992,8 +1003,14 @@ async fn fork_spanning_exit() { let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() >= spec.altair_fork_epoch.unwrap()); assert!(head_state.current_epoch() < spec.bellatrix_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect("phase0 exit verifies against altair state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect("phase0 exit verifies against altair state"); /* * Ensure the exit no longer verifies after Bellatrix. 
@@ -1009,6 +1026,12 @@ async fn fork_spanning_exit() { let head = harness.chain.canonical_head.cached_head(); let head_state = &head.snapshot.beacon_state; assert!(head_state.current_epoch() >= spec.bellatrix_fork_epoch.unwrap()); - verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) - .expect_err("phase0 exit does not verify against bellatrix state"); + verify_exit( + head_state, + None, + &signed_exit, + VerifySignatures::True, + &spec, + ) + .expect_err("phase0 exit does not verify against bellatrix state"); } diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index bb26799250..731a82aa95 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -1,7 +1,7 @@ use super::errors::{BlockOperationError, BlsExecutionChangeInvalid as Invalid}; use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; use crate::VerifySignatures; -use eth2_hashing::hash; +use ethereum_hashing::hash; use types::*; type Result = std::result::Result>; diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index f17e5fcd23..9e9282912d 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -20,10 +20,12 @@ fn error(reason: ExitInvalid) -> BlockOperationError { /// Spec v0.12.1 pub fn verify_exit( state: &BeaconState, + current_epoch: Option, signed_exit: &SignedVoluntaryExit, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<()> { + let current_epoch = current_epoch.unwrap_or(state.current_epoch()); let exit = &signed_exit.message; let validator = state @@ -33,7 +35,7 @@ 
pub fn verify_exit( // Verify the validator is active. verify!( - validator.is_active_at(state.current_epoch()), + validator.is_active_at(current_epoch), ExitInvalid::NotActive(exit.validator_index) ); @@ -45,9 +47,9 @@ pub fn verify_exit( // Exits must specify an epoch when they become valid; they are not valid before then. verify!( - state.current_epoch() >= exit.epoch, + current_epoch >= exit.epoch, ExitInvalid::FutureEpoch { - state: state.current_epoch(), + state: current_epoch, exit: exit.epoch } ); @@ -57,9 +59,9 @@ pub fn verify_exit( .activation_epoch .safe_add(spec.shard_committee_period)?; verify!( - state.current_epoch() >= earliest_exit_epoch, + current_epoch >= earliest_exit_epoch, ExitInvalid::TooYoungToExit { - current_epoch: state.current_epoch(), + current_epoch, earliest_exit_epoch, } ); diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 50ac2ff3de..864844080f 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -134,7 +134,7 @@ impl VerifyOperation for SignedVoluntaryExit { state: &BeaconState, spec: &ChainSpec, ) -> Result, Self::Error> { - verify_exit(state, &self, VerifySignatures::True, spec)?; + verify_exit(state, None, &self, VerifySignatures::True, spec)?; Ok(SigVerifiedOp::new(self, state)) } @@ -205,3 +205,35 @@ impl VerifyOperation for SignedBlsToExecutionChange { smallvec![] } } + +/// Trait for operations that can be verified and transformed into a +/// `SigVerifiedOp`. +/// +/// The `At` suffix indicates that we can specify a particular epoch at which to +/// verify the operation. 
+pub trait VerifyOperationAt: VerifyOperation + Sized { + fn validate_at( + self, + state: &BeaconState, + validate_at_epoch: Epoch, + spec: &ChainSpec, + ) -> Result, Self::Error>; +} + +impl VerifyOperationAt for SignedVoluntaryExit { + fn validate_at( + self, + state: &BeaconState, + validate_at_epoch: Epoch, + spec: &ChainSpec, + ) -> Result, Self::Error> { + verify_exit( + state, + Some(validate_at_epoch), + &self, + VerifySignatures::True, + spec, + )?; + Ok(SigVerifiedOp::new(self, state)) + } +} diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index 9a7d58b77d..303e5cfba1 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -12,7 +12,7 @@ harness = false criterion = "0.3.3" [dependencies] -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" ethereum-types = "0.14.1" [features] diff --git a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs index f43edfe864..e71f3ca18e 100644 --- a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs +++ b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs @@ -1,5 +1,5 @@ use crate::Hash256; -use eth2_hashing::{Context, Sha256Context}; +use ethereum_hashing::{Context, Sha256Context}; use std::cmp::max; /// Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. 
diff --git a/consensus/swap_or_not_shuffle/src/shuffle_list.rs b/consensus/swap_or_not_shuffle/src/shuffle_list.rs index edc6dd6377..2b9a256554 100644 --- a/consensus/swap_or_not_shuffle/src/shuffle_list.rs +++ b/consensus/swap_or_not_shuffle/src/shuffle_list.rs @@ -1,5 +1,5 @@ use crate::Hash256; -use eth2_hashing::hash_fixed; +use ethereum_hashing::hash_fixed; use std::mem; const SEED_SIZE: usize = 32; diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml deleted file mode 100644 index b2630d4bf6..0000000000 --- a/consensus/tree_hash/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "tree_hash" -version = "0.4.1" -authors = ["Paul Hauner "] -edition = "2021" -license = "Apache-2.0" -description = "Efficient Merkle-hashing as used in Ethereum 2.0" - -[dev-dependencies] -rand = "0.8.5" -tree_hash_derive = "0.4.0" -types = { path = "../types" } -beacon_chain = { path = "../../beacon_node/beacon_chain" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" - -[dependencies] -ethereum-types = "0.14.1" -eth2_hashing = "0.3.0" -smallvec = "1.6.1" - -[features] -arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/tree_hash/examples/flamegraph_beacon_state.rs b/consensus/tree_hash/examples/flamegraph_beacon_state.rs deleted file mode 100644 index e5b505bb91..0000000000 --- a/consensus/tree_hash/examples/flamegraph_beacon_state.rs +++ /dev/null @@ -1,50 +0,0 @@ -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use types::{BeaconState, EthSpec, MainnetEthSpec}; - -const TREE_HASH_LOOPS: usize = 1_000; -const VALIDATOR_COUNT: usize = 1_000; - -fn get_harness() -> BeaconChainHarness> { - let harness = BeaconChainHarness::builder(T::default()) - .default_spec() - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); - - harness.advance_slot(); - - harness -} - -fn build_state() -> BeaconState { - let state = get_harness::().chain.head_beacon_state_cloned(); - - 
assert_eq!(state.as_base().unwrap().validators.len(), VALIDATOR_COUNT); - assert_eq!(state.as_base().unwrap().balances.len(), VALIDATOR_COUNT); - assert!(state - .as_base() - .unwrap() - .previous_epoch_attestations - .is_empty()); - assert!(state - .as_base() - .unwrap() - .current_epoch_attestations - .is_empty()); - assert!(state.as_base().unwrap().eth1_data_votes.is_empty()); - assert!(state.as_base().unwrap().historical_roots.is_empty()); - - state -} - -fn main() { - let state = build_state::(); - - // This vec is an attempt to ensure the compiler doesn't optimize-out the hashing. - let mut vec = Vec::with_capacity(TREE_HASH_LOOPS); - - for _ in 0..TREE_HASH_LOOPS { - let root = state.canonical_root(); - vec.push(root[0]); - } -} diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs deleted file mode 100644 index 899356f833..0000000000 --- a/consensus/tree_hash/src/impls.rs +++ /dev/null @@ -1,222 +0,0 @@ -use super::*; -use ethereum_types::{H160, H256, U128, U256}; - -fn int_to_hash256(int: u64) -> Hash256 { - let mut bytes = [0; HASHSIZE]; - bytes[0..8].copy_from_slice(&int.to_le_bytes()); - Hash256::from_slice(&bytes) -} - -macro_rules! impl_for_bitsize { - ($type: ident, $bit_size: expr) => { - impl TreeHash for $type { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - PackedEncoding::from_slice(&self.to_le_bytes()) - } - - fn tree_hash_packing_factor() -> usize { - HASHSIZE / ($bit_size / 8) - } - - #[allow(clippy::cast_lossless)] // Lint does not apply to all uses of this macro. 
- fn tree_hash_root(&self) -> Hash256 { - int_to_hash256(*self as u64) - } - } - }; -} - -impl_for_bitsize!(u8, 8); -impl_for_bitsize!(u16, 16); -impl_for_bitsize!(u32, 32); -impl_for_bitsize!(u64, 64); -impl_for_bitsize!(usize, 64); - -impl TreeHash for bool { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - (*self as u8).tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - u8::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> Hash256 { - int_to_hash256(*self as u64) - } -} - -/// Only valid for byte types less than 32 bytes. -macro_rules! impl_for_lt_32byte_u8_array { - ($len: expr) => { - impl TreeHash for [u8; $len] { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("bytesN should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("bytesN should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - result[0..$len].copy_from_slice(&self[..]); - Hash256::from_slice(&result) - } - } - }; -} - -impl_for_lt_32byte_u8_array!(4); -impl_for_lt_32byte_u8_array!(32); - -impl TreeHash for [u8; 48] { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let values_per_chunk = BYTES_PER_CHUNK; - let minimum_chunk_count = (48 + values_per_chunk - 1) / values_per_chunk; - merkle_root(self, minimum_chunk_count) - } -} - -impl TreeHash for U128 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 16]; - self.to_little_endian(&mut result); - 
PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 2 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; HASHSIZE]; - self.to_little_endian(&mut result[0..16]); - Hash256::from_slice(&result) - } -} - -impl TreeHash for U256 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 32]; - self.to_little_endian(&mut result); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - self.to_little_endian(&mut result[..]); - Hash256::from_slice(&result) - } -} - -impl TreeHash for H160 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 32]; - result[0..20].copy_from_slice(self.as_bytes()); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - result[0..20].copy_from_slice(self.as_bytes()); - Hash256::from_slice(&result) - } -} - -impl TreeHash for H256 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - PackedEncoding::from_slice(self.as_bytes()) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - *self - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn bool() { - let mut true_bytes: Vec = vec![1]; - true_bytes.append(&mut vec![0; 31]); - - let false_bytes: Vec = vec![0; 32]; - - assert_eq!(true.tree_hash_root().as_bytes(), true_bytes.as_slice()); - assert_eq!(false.tree_hash_root().as_bytes(), false_bytes.as_slice()); - } - - #[test] - fn int_to_bytes() { - assert_eq!(int_to_hash256(0).as_bytes(), &[0; 32]); - assert_eq!( - int_to_hash256(1).as_bytes(), - &[ - 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0 - ] - ); - assert_eq!( - int_to_hash256(u64::max_value()).as_bytes(), - &[ - 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - ] - ); - } -} diff --git a/consensus/tree_hash/src/lib.rs b/consensus/tree_hash/src/lib.rs deleted file mode 100644 index ec40de9160..0000000000 --- a/consensus/tree_hash/src/lib.rs +++ /dev/null @@ -1,208 +0,0 @@ -pub mod impls; -mod merkle_hasher; -mod merkleize_padded; -mod merkleize_standard; - -pub use merkle_hasher::{Error, MerkleHasher}; -pub use merkleize_padded::merkleize_padded; -pub use merkleize_standard::merkleize_standard; - -use eth2_hashing::{hash_fixed, ZERO_HASHES, ZERO_HASHES_MAX_INDEX}; -use smallvec::SmallVec; - -pub const BYTES_PER_CHUNK: usize = 32; -pub const HASHSIZE: usize = 32; -pub const MERKLE_HASH_CHUNK: usize = 2 * BYTES_PER_CHUNK; -pub const MAX_UNION_SELECTOR: u8 = 127; -pub const SMALLVEC_SIZE: usize = 32; - -pub type Hash256 = ethereum_types::H256; -pub type PackedEncoding = SmallVec<[u8; SMALLVEC_SIZE]>; - -/// Convenience method for `MerkleHasher` which also provides some fast-paths for small trees. -/// -/// `minimum_leaf_count` will only be used if it is greater than or equal to the minimum number of leaves that can be created from `bytes`. -pub fn merkle_root(bytes: &[u8], minimum_leaf_count: usize) -> Hash256 { - let leaves = std::cmp::max( - (bytes.len() + (HASHSIZE - 1)) / HASHSIZE, - minimum_leaf_count, - ); - - if leaves == 0 { - // If there are no bytes then the hash is always zero. - Hash256::zero() - } else if leaves == 1 { - // If there is only one leaf, the hash is always those leaf bytes padded out to 32-bytes. 
- let mut hash = [0; HASHSIZE]; - hash[0..bytes.len()].copy_from_slice(bytes); - Hash256::from_slice(&hash) - } else if leaves == 2 { - // If there are only two leaves (this is common with BLS pubkeys), we can avoid some - // overhead with `MerkleHasher` and just do a simple 3-node tree here. - let mut leaves = [0; HASHSIZE * 2]; - leaves[0..bytes.len()].copy_from_slice(bytes); - - Hash256::from_slice(&hash_fixed(&leaves)) - } else { - // If there are 3 or more leaves, use `MerkleHasher`. - let mut hasher = MerkleHasher::with_leaves(leaves); - hasher - .write(bytes) - .expect("the number of leaves is adequate for the number of bytes"); - hasher - .finish() - .expect("the number of leaves is adequate for the number of bytes") - } -} - -/// Returns the node created by hashing `root` and `length`. -/// -/// Used in `TreeHash` for inserting the length of a list above it's root. -pub fn mix_in_length(root: &Hash256, length: usize) -> Hash256 { - let usize_len = std::mem::size_of::(); - - let mut length_bytes = [0; BYTES_PER_CHUNK]; - length_bytes[0..usize_len].copy_from_slice(&length.to_le_bytes()); - - Hash256::from_slice(ð2_hashing::hash32_concat(root.as_bytes(), &length_bytes)[..]) -} - -/// Returns `Some(root)` created by hashing `root` and `selector`, if `selector <= -/// MAX_UNION_SELECTOR`. Otherwise, returns `None`. -/// -/// Used in `TreeHash` for the "union" type. -/// -/// ## Specification -/// -/// ```ignore,text -/// mix_in_selector: Given a Merkle root root and a type selector selector ("uint256" little-endian -/// serialization) return hash(root + selector). 
-/// ``` -/// -/// https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.3/ssz/simple-serialize.md#union -pub fn mix_in_selector(root: &Hash256, selector: u8) -> Option { - if selector > MAX_UNION_SELECTOR { - return None; - } - - let mut chunk = [0; BYTES_PER_CHUNK]; - chunk[0] = selector; - - let root = eth2_hashing::hash32_concat(root.as_bytes(), &chunk); - Some(Hash256::from_slice(&root)) -} - -/// Returns a cached padding node for a given height. -fn get_zero_hash(height: usize) -> &'static [u8] { - if height <= ZERO_HASHES_MAX_INDEX { - &ZERO_HASHES[height] - } else { - panic!("Tree exceeds MAX_TREE_DEPTH of {}", ZERO_HASHES_MAX_INDEX) - } -} - -#[derive(Debug, PartialEq, Clone)] -pub enum TreeHashType { - Basic, - Vector, - List, - Container, -} - -pub trait TreeHash { - fn tree_hash_type() -> TreeHashType; - - fn tree_hash_packed_encoding(&self) -> PackedEncoding; - - fn tree_hash_packing_factor() -> usize; - - fn tree_hash_root(&self) -> Hash256; -} - -/// Punch through references. -impl<'a, T> TreeHash for &'a T -where - T: TreeHash, -{ - fn tree_hash_type() -> TreeHashType { - T::tree_hash_type() - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - T::tree_hash_packed_encoding(*self) - } - - fn tree_hash_packing_factor() -> usize { - T::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> Hash256 { - T::tree_hash_root(*self) - } -} - -#[macro_export] -macro_rules! tree_hash_ssz_encoding_as_vector { - ($type: ident) => { - impl tree_hash::TreeHash for $type { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Vec { - tree_hash::merkle_root(&ssz::ssz_encode(self)) - } - } - }; -} - -#[macro_export] -macro_rules! 
tree_hash_ssz_encoding_as_list { - ($type: ident) => { - impl tree_hash::TreeHash for $type { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Vec { - ssz::ssz_encode(self).tree_hash_root() - } - } - }; -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn mix_length() { - let hash = { - let mut preimage = vec![42; BYTES_PER_CHUNK]; - preimage.append(&mut vec![42]); - preimage.append(&mut vec![0; BYTES_PER_CHUNK - 1]); - eth2_hashing::hash(&preimage) - }; - - assert_eq!( - mix_in_length(&Hash256::from_slice(&[42; BYTES_PER_CHUNK]), 42).as_bytes(), - &hash[..] - ); - } -} diff --git a/consensus/tree_hash/src/merkle_hasher.rs b/consensus/tree_hash/src/merkle_hasher.rs deleted file mode 100644 index 2acaf1c3b8..0000000000 --- a/consensus/tree_hash/src/merkle_hasher.rs +++ /dev/null @@ -1,573 +0,0 @@ -use crate::{get_zero_hash, Hash256, HASHSIZE}; -use eth2_hashing::{Context, Sha256Context, HASH_LEN}; -use smallvec::{smallvec, SmallVec}; -use std::mem; - -type SmallVec8 = SmallVec<[T; 8]>; - -#[derive(Clone, Debug, PartialEq)] -pub enum Error { - /// The maximum number of leaves defined by the initialization `depth` has been exceed. - MaximumLeavesExceeded { max_leaves: usize }, -} - -/// Helper struct to store either a hash digest or a slice. -/// -/// Should be used as a left or right value for some node. -enum Preimage<'a> { - Digest([u8; HASH_LEN]), - Slice(&'a [u8]), -} - -impl<'a> Preimage<'a> { - /// Returns a 32-byte slice. - fn as_bytes(&self) -> &[u8] { - match self { - Preimage::Digest(digest) => digest.as_ref(), - Preimage::Slice(slice) => slice, - } - } -} - -/// A node that has had a left child supplied, but not a right child. -struct HalfNode { - /// The hasher context. 
- context: Context, - /// The tree id of the node. The root node has in id of `1` and ids increase moving down the - /// tree from left to right. - id: usize, -} - -impl HalfNode { - /// Create a new half-node from the given `left` value. - fn new(id: usize, left: Preimage) -> Self { - let mut context = Context::new(); - context.update(left.as_bytes()); - - Self { context, id } - } - - /// Complete the half-node by providing a `right` value. Returns a digest of the left and right - /// nodes. - fn finish(mut self, right: Preimage) -> [u8; HASH_LEN] { - self.context.update(right.as_bytes()); - self.context.finalize() - } -} - -/// Provides a Merkle-root hasher that allows for streaming bytes (i.e., providing any-length byte -/// slices without need to separate into leaves). Efficiently handles cases where not all leaves -/// have been provided by assuming all non-provided leaves are `[0; 32]` and pre-computing the -/// zero-value hashes at all depths of the tree. -/// -/// This algorithm aims to allocate as little memory as possible and it does this by "folding" up -/// the tree as each leaf is provided. Consider this step-by-step functional diagram of hashing a -/// tree with depth three: -/// -/// ## Functional Diagram -/// -/// Nodes that are `-` have not been defined and do not occupy memory. Nodes that are `L` are -/// leaves that are provided but are not stored. Nodes that have integers (`1`, `2`) are stored in -/// our struct. Finally, nodes that are `X` were stored, but are now removed. 
-/// -/// ### Start -/// -/// ```ignore -/// - -/// / \ -/// - - -/// / \ / \ -/// - - - - -/// ``` -/// -/// ### Provide first leaf -/// -/// ```ignore -/// - -/// / \ -/// 2 - -/// / \ / \ -/// L - - - -/// ``` -/// -/// ### Provide second leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X - -/// / \ / \ -/// L L - - -/// ``` -/// -/// ### Provide third leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X 3 -/// / \ / \ -/// L L L - -/// ``` -/// -/// ### Provide fourth and final leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X X -/// / \ / \ -/// L L L L -/// ``` -/// -pub struct MerkleHasher { - /// Stores the nodes that are half-complete and awaiting a right node. - /// - /// A smallvec of size 8 means we can hash a tree with 256 leaves without allocating on the - /// heap. Each half-node is 232 bytes, so this smallvec may store 1856 bytes on the stack. - half_nodes: SmallVec8, - /// The depth of the tree that will be produced. - /// - /// Depth is counted top-down (i.e., the root node is at depth 0). A tree with 1 leaf has a - /// depth of 1, a tree with 4 leaves has a depth of 3. - depth: usize, - /// The next leaf that we are expecting to process. - next_leaf: usize, - /// A buffer of bytes that are waiting to be written to a leaf. - buffer: SmallVec<[u8; 32]>, - /// Set to Some(root) when the root of the tree is known. - root: Option, -} - -/// Returns the parent of node with id `i`. -fn get_parent(i: usize) -> usize { - i / 2 -} - -/// Gets the depth of a node with an id of `i`. -/// -/// It is a logic error to provide `i == 0`. -/// -/// E.g., if `i` is 1, depth is 0. If `i` is is 1, depth is 1. -fn get_depth(i: usize) -> usize { - let total_bits = mem::size_of::() * 8; - total_bits - i.leading_zeros() as usize - 1 -} - -impl MerkleHasher { - /// Instantiate a hasher for a tree with a given number of leaves. - /// - /// `num_leaves` will be rounded to the next power of two. 
E.g., if `num_leaves == 6`, then the - /// tree will _actually_ be able to accomodate 8 leaves and the resulting hasher is exactly the - /// same as one that was instantiated with `Self::with_leaves(8)`. - /// - /// ## Notes - /// - /// If `num_leaves == 0`, a tree of depth 1 will be created. If no leaves are provided it will - /// return a root of `[0; 32]`. - pub fn with_leaves(num_leaves: usize) -> Self { - let depth = get_depth(num_leaves.next_power_of_two()) + 1; - Self::with_depth(depth) - } - - /// Instantiates a new, empty hasher for a tree with `depth` layers which will have capacity - /// for `1 << (depth - 1)` leaf nodes. - /// - /// It is not possible to grow the depth of the tree after instantiation. - /// - /// ## Panics - /// - /// Panics if `depth == 0`. - fn with_depth(depth: usize) -> Self { - assert!(depth > 0, "merkle tree cannot have a depth of zero"); - - Self { - half_nodes: SmallVec::with_capacity(depth - 1), - depth, - next_leaf: 1 << (depth - 1), - buffer: SmallVec::with_capacity(32), - root: None, - } - } - - /// Write some bytes to the hasher. - /// - /// ## Errors - /// - /// Returns an error if the given bytes would create a leaf that would exceed the maximum - /// permissible number of leaves defined by the initialization `depth`. E.g., a tree of `depth - /// == 2` can only accept 2 leaves. A tree of `depth == 14` can only accept 8,192 leaves. 
- pub fn write(&mut self, bytes: &[u8]) -> Result<(), Error> { - let mut ptr = 0; - while ptr <= bytes.len() { - let slice = &bytes[ptr..std::cmp::min(bytes.len(), ptr + HASHSIZE)]; - - if self.buffer.is_empty() && slice.len() == HASHSIZE { - self.process_leaf(slice)?; - ptr += HASHSIZE - } else if self.buffer.len() + slice.len() < HASHSIZE { - self.buffer.extend_from_slice(slice); - ptr += HASHSIZE - } else { - let buf_len = self.buffer.len(); - let required = HASHSIZE - buf_len; - - let mut leaf = [0; HASHSIZE]; - leaf[..buf_len].copy_from_slice(&self.buffer); - leaf[buf_len..].copy_from_slice(&slice[0..required]); - - self.process_leaf(&leaf)?; - self.buffer = smallvec![]; - - ptr += required - } - } - - Ok(()) - } - - /// Process the next leaf in the tree. - /// - /// ## Errors - /// - /// Returns an error if the given leaf would exceed the maximum permissible number of leaves - /// defined by the initialization `depth`. E.g., a tree of `depth == 2` can only accept 2 - /// leaves. A tree of `depth == 14` can only accept 8,192 leaves. - fn process_leaf(&mut self, leaf: &[u8]) -> Result<(), Error> { - assert_eq!(leaf.len(), HASHSIZE, "a leaf must be 32 bytes"); - - let max_leaves = 1 << (self.depth + 1); - - if self.next_leaf > max_leaves { - return Err(Error::MaximumLeavesExceeded { max_leaves }); - } else if self.next_leaf == 1 { - // A tree of depth one has a root that is equal to the first given leaf. - self.root = Some(Hash256::from_slice(leaf)) - } else if self.next_leaf % 2 == 0 { - self.process_left_node(self.next_leaf, Preimage::Slice(leaf)) - } else { - self.process_right_node(self.next_leaf, Preimage::Slice(leaf)) - } - - self.next_leaf += 1; - - Ok(()) - } - - /// Returns the root of the Merkle tree. - /// - /// If not all leaves have been provided, the tree will be efficiently completed under the - /// assumption that all not-yet-provided leaves are equal to `[0; 32]`. 
- /// - /// ## Errors - /// - /// Returns an error if the bytes remaining in the buffer would create a leaf that would exceed - /// the maximum permissible number of leaves defined by the initialization `depth`. - pub fn finish(mut self) -> Result { - if !self.buffer.is_empty() { - let mut leaf = [0; HASHSIZE]; - leaf[..self.buffer.len()].copy_from_slice(&self.buffer); - self.process_leaf(&leaf)? - } - - // If the tree is incomplete, we must complete it by providing zero-hashes. - loop { - if let Some(root) = self.root { - break Ok(root); - } else if let Some(node) = self.half_nodes.last() { - let right_child = node.id * 2 + 1; - self.process_right_node(right_child, self.zero_hash(right_child)); - } else if self.next_leaf == 1 { - // The next_leaf can only be 1 if the tree has a depth of one. If have been no - // leaves supplied, assume a root of zero. - break Ok(Hash256::zero()); - } else { - // The only scenario where there are (a) no half nodes and (b) a tree of depth - // two or more is where no leaves have been supplied at all. - // - // Once we supply this first zero-hash leaf then all future operations will be - // triggered via the `process_right_node` branch. - self.process_left_node(self.next_leaf, self.zero_hash(self.next_leaf)) - } - } - } - - /// Process a node that will become the left-hand node of some parent. The supplied `id` is - /// that of the node (not the parent). The `preimage` is the value of the node (i.e., if this - /// is a leaf node it will be the value of that leaf). - /// - /// In this scenario, the only option is to push a new half-node. - fn process_left_node(&mut self, id: usize, preimage: Preimage) { - self.half_nodes - .push(HalfNode::new(get_parent(id), preimage)) - } - - /// Process a node that will become the right-hand node of some parent. The supplied `id` is - /// that of the node (not the parent). The `preimage` is the value of the node (i.e., if this - /// is a leaf node it will be the value of that leaf). 
- /// - /// This operation will always complete one node, then it will attempt to crawl up the tree and - /// collapse all other completed nodes. For example, consider a tree of depth 3 (see diagram - /// below). When providing the node with id `7`, the node with id `3` will be completed which - /// will also provide the right-node for the `1` node. This function will complete both of - /// those nodes and ultimately find the root of the tree. - /// - /// ```ignore - /// 1 <-- completed - /// / \ - /// 2 3 <-- completed - /// / \ / \ - /// 4 5 6 7 <-- supplied right node - /// ``` - fn process_right_node(&mut self, id: usize, mut preimage: Preimage) { - let mut parent = get_parent(id); - - loop { - match self.half_nodes.last() { - Some(node) if node.id == parent => { - preimage = Preimage::Digest( - self.half_nodes - .pop() - .expect("if .last() is Some then .pop() must succeed") - .finish(preimage), - ); - if parent == 1 { - self.root = Some(Hash256::from_slice(preimage.as_bytes())); - break; - } else { - parent = get_parent(parent); - } - } - _ => { - self.half_nodes.push(HalfNode::new(parent, preimage)); - break; - } - } - } - } - - /// Returns a "zero hash" from a pre-computed set for the given node. - /// - /// Note: this node is not always zero, instead it is the result of hashing up a tree where the - /// leaves are all zeros. E.g., in a tree of depth 2, the `zero_hash` of a node at depth 1 - /// will be `[0; 32]`. However, the `zero_hash` for a node at depth 0 will be - /// `hash(concat([0; 32], [0; 32])))`. - fn zero_hash(&self, id: usize) -> Preimage<'static> { - Preimage::Slice(get_zero_hash(self.depth - (get_depth(id) + 1))) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::merkleize_padded; - - /// This test is just to ensure that the stack size of the `Context` remains the same. We choose - /// our smallvec size based upon this, so it's good to know if it suddenly changes in size. 
- #[test] - fn context_size() { - assert_eq!( - mem::size_of::(), - 224, - "Halfnode size should be as expected" - ); - } - - fn compare_with_reference(leaves: &[Hash256], depth: usize) { - let reference_bytes = leaves - .iter() - .flat_map(|hash| hash.as_bytes()) - .copied() - .collect::>(); - - let reference_root = merkleize_padded(&reference_bytes, 1 << (depth - 1)); - - let merklizer_root_32_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for leaf in leaves.iter() { - m.write(leaf.as_bytes()).expect("should process leaf"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_32_bytes, - "32 bytes should match reference root" - ); - - let merklizer_root_individual_3_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for bytes in reference_bytes.chunks(3) { - m.write(bytes).expect("should process byte"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_individual_3_bytes, - "3 bytes should match reference root" - ); - - let merklizer_root_individual_single_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for byte in reference_bytes.iter() { - m.write(&[*byte]).expect("should process byte"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_individual_single_bytes, - "single bytes should match reference root" - ); - } - - /// A simple wrapper to compare MerkleHasher to the reference function by just giving a number - /// of leaves and a depth. - fn compare_reference_with_len(leaves: u64, depth: usize) { - let leaves = (0..leaves) - .map(Hash256::from_low_u64_be) - .collect::>(); - compare_with_reference(&leaves, depth) - } - - /// Compares the `MerkleHasher::with_depth` and `MerkleHasher::with_leaves` generate consistent - /// results. 
- fn compare_new_with_leaf_count(num_leaves: u64, depth: usize) { - let leaves = (0..num_leaves) - .map(Hash256::from_low_u64_be) - .collect::>(); - - let from_depth = { - let mut m = MerkleHasher::with_depth(depth); - for leaf in leaves.iter() { - m.write(leaf.as_bytes()).expect("should process leaf"); - } - m.finish() - }; - - let from_num_leaves = { - let mut m = MerkleHasher::with_leaves(num_leaves as usize); - for leaf in leaves.iter() { - m.process_leaf(leaf.as_bytes()) - .expect("should process leaf"); - } - m.finish() - }; - - assert_eq!( - from_depth, from_num_leaves, - "hash generated by depth should match that from num leaves" - ); - } - - #[test] - fn with_leaves() { - compare_new_with_leaf_count(1, 1); - compare_new_with_leaf_count(2, 2); - compare_new_with_leaf_count(3, 3); - compare_new_with_leaf_count(4, 3); - compare_new_with_leaf_count(5, 4); - compare_new_with_leaf_count(6, 4); - compare_new_with_leaf_count(7, 4); - compare_new_with_leaf_count(8, 4); - compare_new_with_leaf_count(9, 5); - compare_new_with_leaf_count(10, 5); - compare_new_with_leaf_count(11, 5); - compare_new_with_leaf_count(12, 5); - compare_new_with_leaf_count(13, 5); - compare_new_with_leaf_count(14, 5); - compare_new_with_leaf_count(15, 5); - } - - #[test] - fn depth() { - assert_eq!(get_depth(1), 0); - assert_eq!(get_depth(2), 1); - assert_eq!(get_depth(3), 1); - assert_eq!(get_depth(4), 2); - assert_eq!(get_depth(5), 2); - assert_eq!(get_depth(6), 2); - assert_eq!(get_depth(7), 2); - assert_eq!(get_depth(8), 3); - } - - #[test] - fn with_0_leaves() { - let hasher = MerkleHasher::with_leaves(0); - assert_eq!(hasher.finish().unwrap(), Hash256::zero()); - } - - #[test] - #[should_panic] - fn too_many_leaves() { - compare_reference_with_len(2, 1); - } - - #[test] - fn full_trees() { - compare_reference_with_len(1, 1); - compare_reference_with_len(2, 2); - compare_reference_with_len(4, 3); - compare_reference_with_len(8, 4); - compare_reference_with_len(16, 5); - 
compare_reference_with_len(32, 6); - compare_reference_with_len(64, 7); - compare_reference_with_len(128, 8); - compare_reference_with_len(256, 9); - compare_reference_with_len(256, 9); - compare_reference_with_len(8192, 14); - } - - #[test] - fn incomplete_trees() { - compare_reference_with_len(0, 1); - - compare_reference_with_len(0, 2); - compare_reference_with_len(1, 2); - - for i in 0..=4 { - compare_reference_with_len(i, 3); - } - - for i in 0..=7 { - compare_reference_with_len(i, 4); - } - - for i in 0..=15 { - compare_reference_with_len(i, 5); - } - - for i in 0..=32 { - compare_reference_with_len(i, 6); - } - - for i in 0..=64 { - compare_reference_with_len(i, 7); - } - - compare_reference_with_len(0, 14); - compare_reference_with_len(13, 14); - compare_reference_with_len(8191, 14); - } - - #[test] - fn remaining_buffer() { - let a = { - let mut m = MerkleHasher::with_leaves(2); - m.write(&[1]).expect("should write"); - m.finish().expect("should finish") - }; - - let b = { - let mut m = MerkleHasher::with_leaves(2); - let mut leaf = vec![1]; - leaf.extend_from_slice(&[0; 31]); - m.write(&leaf).expect("should write"); - m.write(&[0; 32]).expect("should write"); - m.finish().expect("should finish") - }; - - assert_eq!(a, b, "should complete buffer"); - } -} diff --git a/consensus/tree_hash/src/merkleize_padded.rs b/consensus/tree_hash/src/merkleize_padded.rs deleted file mode 100644 index f7dce39949..0000000000 --- a/consensus/tree_hash/src/merkleize_padded.rs +++ /dev/null @@ -1,330 +0,0 @@ -use super::{get_zero_hash, Hash256, BYTES_PER_CHUNK}; -use eth2_hashing::{hash32_concat, hash_fixed}; - -/// Merkleize `bytes` and return the root, optionally padding the tree out to `min_leaves` number of -/// leaves. -/// -/// **Note**: This function is generally worse than using the `crate::merkle_root` which uses -/// `MerkleHasher`. We only keep this function around for reference testing. 
-/// -/// First all nodes are extracted from `bytes` and then a padding node is added until the number of -/// leaf chunks is greater than or equal to `min_leaves`. Callers may set `min_leaves` to `0` if no -/// adding additional chunks should be added to the given `bytes`. -/// -/// If `bytes.len() <= BYTES_PER_CHUNK`, no hashing is done and `bytes` is returned, potentially -/// padded out to `BYTES_PER_CHUNK` length with `0`. -/// -/// ## CPU Performance -/// -/// A cache of `MAX_TREE_DEPTH` hashes are stored to avoid re-computing the hashes of padding nodes -/// (or their parents). Therefore, adding padding nodes only incurs one more hash per additional -/// height of the tree. -/// -/// ## Memory Performance -/// -/// This algorithm has two interesting memory usage properties: -/// -/// 1. The maximum memory footprint is roughly `O(V / 2)` memory, where `V` is the number of leaf -/// chunks with values (i.e., leaves that are not padding). The means adding padding nodes to -/// the tree does not increase the memory footprint. -/// 2. At each height of the tree half of the memory is freed until only a single chunk is stored. -/// 3. The input `bytes` are not copied into another list before processing. -/// -/// _Note: there are some minor memory overheads, including a handful of usizes and a list of -/// `MAX_TREE_DEPTH` hashes as `lazy_static` constants._ -pub fn merkleize_padded(bytes: &[u8], min_leaves: usize) -> Hash256 { - // If the bytes are just one chunk or less, pad to one chunk and return without hashing. - if bytes.len() <= BYTES_PER_CHUNK && min_leaves <= 1 { - let mut o = bytes.to_vec(); - o.resize(BYTES_PER_CHUNK, 0); - return Hash256::from_slice(&o); - } - - assert!( - bytes.len() > BYTES_PER_CHUNK || min_leaves > 1, - "Merkle hashing only needs to happen if there is more than one chunk" - ); - - // The number of leaves that can be made directly from `bytes`. 
- let leaves_with_values = (bytes.len() + (BYTES_PER_CHUNK - 1)) / BYTES_PER_CHUNK; - - // The number of parents that have at least one non-padding leaf. - // - // Since there is more than one node in this tree (see prior assertion), there should always be - // one or more initial parent nodes. - let initial_parents_with_values = std::cmp::max(1, next_even_number(leaves_with_values) / 2); - - // The number of leaves in the full tree (including padding nodes). - let num_leaves = std::cmp::max(leaves_with_values, min_leaves).next_power_of_two(); - - // The number of levels in the tree. - // - // A tree with a single node has `height == 1`. - let height = num_leaves.trailing_zeros() as usize + 1; - - assert!(height >= 2, "The tree should have two or more heights"); - - // A buffer/scratch-space used for storing each round of hashes at each height. - // - // This buffer is kept as small as possible; it will shrink so it never stores a padding node. - let mut chunks = ChunkStore::with_capacity(initial_parents_with_values); - - // Create a parent in the `chunks` buffer for every two chunks in `bytes`. - // - // I.e., do the first round of hashing, hashing from the `bytes` slice and filling the `chunks` - // struct. - for i in 0..initial_parents_with_values { - let start = i * BYTES_PER_CHUNK * 2; - - // Hash two chunks, creating a parent chunk. - let hash = match bytes.get(start..start + BYTES_PER_CHUNK * 2) { - // All bytes are available, hash as usual. - Some(slice) => hash_fixed(slice), - // Unable to get all the bytes, get a small slice and pad it out. - None => { - let mut preimage = bytes - .get(start..) - .expect("`i` can only be larger than zero if there are bytes to read") - .to_vec(); - preimage.resize(BYTES_PER_CHUNK * 2, 0); - hash_fixed(&preimage) - } - }; - - assert_eq!( - hash.len(), - BYTES_PER_CHUNK, - "Hashes should be exactly one chunk" - ); - - // Store the parent node. 
- chunks - .set(i, &hash) - .expect("Buffer should always have capacity for parent nodes") - } - - // Iterate through all heights above the leaf nodes and either (a) hash two children or, (b) - // hash a left child and a right padding node. - // - // Skip the 0'th height because the leaves have already been processed. Skip the highest-height - // in the tree as it is the root does not require hashing. - // - // The padding nodes for each height are cached via `lazy static` to simulate non-adjacent - // padding nodes (i.e., avoid doing unnecessary hashing). - for height in 1..height - 1 { - let child_nodes = chunks.len(); - let parent_nodes = next_even_number(child_nodes) / 2; - - // For each pair of nodes stored in `chunks`: - // - // - If two nodes are available, hash them to form a parent. - // - If one node is available, hash it and a cached padding node to form a parent. - for i in 0..parent_nodes { - let (left, right) = match (chunks.get(i * 2), chunks.get(i * 2 + 1)) { - (Ok(left), Ok(right)) => (left, right), - (Ok(left), Err(_)) => (left, get_zero_hash(height)), - // Deriving `parent_nodes` from `chunks.len()` has ensured that we never encounter the - // scenario where we expect two nodes but there are none. - (Err(_), Err(_)) => unreachable!("Parent must have one child"), - // `chunks` is a contiguous array so it is impossible for an index to be missing - // when a higher index is present. - (Err(_), Ok(_)) => unreachable!("Parent must have a left child"), - }; - - assert!( - left.len() == right.len() && right.len() == BYTES_PER_CHUNK, - "Both children should be `BYTES_PER_CHUNK` bytes." - ); - - let hash = hash32_concat(left, right); - - // Store a parent node. - chunks - .set(i, &hash) - .expect("Buf is adequate size for parent"); - } - - // Shrink the buffer so it neatly fits the number of new nodes created in this round. - // - // The number of `parent_nodes` is either decreasing or stable. It never increases. 
- chunks.truncate(parent_nodes); - } - - // There should be a single chunk left in the buffer and it is the Merkle root. - let root = chunks.into_vec(); - - assert_eq!(root.len(), BYTES_PER_CHUNK, "Only one chunk should remain"); - - Hash256::from_slice(&root) -} - -/// A helper struct for storing words of `BYTES_PER_CHUNK` size in a flat byte array. -#[derive(Debug)] -struct ChunkStore(Vec); - -impl ChunkStore { - /// Creates a new instance with `chunks` padding nodes. - fn with_capacity(chunks: usize) -> Self { - Self(vec![0; chunks * BYTES_PER_CHUNK]) - } - - /// Set the `i`th chunk to `value`. - /// - /// Returns `Err` if `value.len() != BYTES_PER_CHUNK` or `i` is out-of-bounds. - fn set(&mut self, i: usize, value: &[u8]) -> Result<(), ()> { - if i < self.len() && value.len() == BYTES_PER_CHUNK { - let slice = &mut self.0[i * BYTES_PER_CHUNK..i * BYTES_PER_CHUNK + BYTES_PER_CHUNK]; - slice.copy_from_slice(value); - Ok(()) - } else { - Err(()) - } - } - - /// Gets the `i`th chunk. - /// - /// Returns `Err` if `i` is out-of-bounds. - fn get(&self, i: usize) -> Result<&[u8], ()> { - if i < self.len() { - Ok(&self.0[i * BYTES_PER_CHUNK..i * BYTES_PER_CHUNK + BYTES_PER_CHUNK]) - } else { - Err(()) - } - } - - /// Returns the number of chunks presently stored in `self`. - fn len(&self) -> usize { - self.0.len() / BYTES_PER_CHUNK - } - - /// Truncates 'self' to `num_chunks` chunks. - /// - /// Functionally identical to `Vec::truncate`. - fn truncate(&mut self, num_chunks: usize) { - self.0.truncate(num_chunks * BYTES_PER_CHUNK) - } - - /// Consumes `self`, returning the underlying byte array. - fn into_vec(self) -> Vec { - self.0 - } -} - -/// Returns the next even number following `n`. If `n` is even, `n` is returned. -fn next_even_number(n: usize) -> usize { - n + n % 2 -} - -#[cfg(test)] -mod test { - use super::*; - use crate::ZERO_HASHES_MAX_INDEX; - - pub fn reference_root(bytes: &[u8]) -> Hash256 { - crate::merkleize_standard(bytes) - } - - macro_rules! 
common_tests { - ($get_bytes: ident) => { - #[test] - fn zero_value_0_nodes() { - test_against_reference(&$get_bytes(0 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_1_nodes() { - test_against_reference(&$get_bytes(1 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_2_nodes() { - test_against_reference(&$get_bytes(2 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_3_nodes() { - test_against_reference(&$get_bytes(3 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_4_nodes() { - test_against_reference(&$get_bytes(4 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_8_nodes() { - test_against_reference(&$get_bytes(8 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_9_nodes() { - test_against_reference(&$get_bytes(9 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_8_nodes_varying_min_length() { - for i in 0..64 { - test_against_reference(&$get_bytes(8 * BYTES_PER_CHUNK), i); - } - } - - #[test] - fn zero_value_range_of_nodes() { - for i in 0..32 * BYTES_PER_CHUNK { - test_against_reference(&$get_bytes(i), 0); - } - } - - #[test] - fn max_tree_depth_min_nodes() { - let input = vec![0; 10 * BYTES_PER_CHUNK]; - let min_nodes = 2usize.pow(ZERO_HASHES_MAX_INDEX as u32); - assert_eq!( - merkleize_padded(&input, min_nodes).as_bytes(), - get_zero_hash(ZERO_HASHES_MAX_INDEX) - ); - } - }; - } - - mod zero_value { - use super::*; - - fn zero_bytes(bytes: usize) -> Vec { - vec![0; bytes] - } - - common_tests!(zero_bytes); - } - - mod random_value { - use super::*; - use rand::RngCore; - - fn random_bytes(bytes: usize) -> Vec { - let mut bytes = Vec::with_capacity(bytes); - rand::thread_rng().fill_bytes(&mut bytes); - bytes - } - - common_tests!(random_bytes); - } - - fn test_against_reference(input: &[u8], min_nodes: usize) { - let mut reference_input = input.to_vec(); - reference_input.resize( - std::cmp::max( - reference_input.len(), - min_nodes.next_power_of_two() * BYTES_PER_CHUNK, - ), - 0, - ); - - assert_eq!( - 
reference_root(&reference_input), - merkleize_padded(input, min_nodes), - "input.len(): {:?}", - input.len() - ); - } -} diff --git a/consensus/tree_hash/src/merkleize_standard.rs b/consensus/tree_hash/src/merkleize_standard.rs deleted file mode 100644 index 6dd046991e..0000000000 --- a/consensus/tree_hash/src/merkleize_standard.rs +++ /dev/null @@ -1,81 +0,0 @@ -use super::*; -use eth2_hashing::hash; - -/// Merkleizes bytes and returns the root, using a simple algorithm that does not optimize to avoid -/// processing or storing padding bytes. -/// -/// **Note**: This function is generally worse than using the `crate::merkle_root` which uses -/// `MerkleHasher`. We only keep this function around for reference testing. -/// -/// The input `bytes` will be padded to ensure that the number of leaves is a power-of-two. -/// -/// ## CPU Performance -/// -/// Will hash all nodes in the tree, even if they are padding and pre-determined. -/// -/// ## Memory Performance -/// -/// - Duplicates the input `bytes`. -/// - Stores all internal nodes, even if they are padding. -/// - Does not free up unused memory during operation. -pub fn merkleize_standard(bytes: &[u8]) -> Hash256 { - // If the bytes are just one chunk (or less than one chunk) just return them. 
- if bytes.len() <= HASHSIZE { - let mut o = bytes.to_vec(); - o.resize(HASHSIZE, 0); - return Hash256::from_slice(&o[0..HASHSIZE]); - } - - let leaves = num_sanitized_leaves(bytes.len()); - let nodes = num_nodes(leaves); - let internal_nodes = nodes - leaves; - - let num_bytes = std::cmp::max(internal_nodes, 1) * HASHSIZE + bytes.len(); - - let mut o: Vec = vec![0; internal_nodes * HASHSIZE]; - - o.append(&mut bytes.to_vec()); - - assert_eq!(o.len(), num_bytes); - - let empty_chunk_hash = hash(&[0; MERKLE_HASH_CHUNK]); - - let mut i = nodes * HASHSIZE; - let mut j = internal_nodes * HASHSIZE; - - while i >= MERKLE_HASH_CHUNK { - i -= MERKLE_HASH_CHUNK; - - j -= HASHSIZE; - let hash = match o.get(i..i + MERKLE_HASH_CHUNK) { - // All bytes are available, hash as usual. - Some(slice) => hash(slice), - // Unable to get all the bytes. - None => { - match o.get(i..) { - // Able to get some of the bytes, pad them out. - Some(slice) => { - let mut bytes = slice.to_vec(); - bytes.resize(MERKLE_HASH_CHUNK, 0); - hash(&bytes) - } - // Unable to get any bytes, use the empty-chunk hash. 
- None => empty_chunk_hash.clone(), - } - } - }; - - o[j..j + HASHSIZE].copy_from_slice(&hash); - } - - Hash256::from_slice(&o[0..HASHSIZE]) -} - -fn num_sanitized_leaves(num_bytes: usize) -> usize { - let leaves = (num_bytes + HASHSIZE - 1) / HASHSIZE; - leaves.next_power_of_two() -} - -fn num_nodes(num_leaves: usize) -> usize { - 2 * num_leaves - 1 -} diff --git a/consensus/tree_hash/tests/tests.rs b/consensus/tree_hash/tests/tests.rs deleted file mode 100644 index 8b2a4b21be..0000000000 --- a/consensus/tree_hash/tests/tests.rs +++ /dev/null @@ -1,128 +0,0 @@ -use ssz_derive::Encode; -use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, BYTES_PER_CHUNK}; -use tree_hash_derive::TreeHash; - -#[derive(Encode)] -struct HashVec { - vec: Vec, -} - -impl From> for HashVec { - fn from(vec: Vec) -> Self { - Self { vec } - } -} - -impl tree_hash::TreeHash for HashVec { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let mut hasher = - MerkleHasher::with_leaves((self.vec.len() + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK); - - for item in &self.vec { - hasher.write(&item.tree_hash_packed_encoding()).unwrap() - } - - let root = hasher.finish().unwrap(); - - tree_hash::mix_in_length(&root, self.vec.len()) - } -} - -fn mix_in_selector(a: Hash256, selector: u8) -> Hash256 { - let mut b = [0; 32]; - b[0] = selector; - - Hash256::from_slice(ð2_hashing::hash32_concat(a.as_bytes(), &b)) -} - -fn u8_hash_concat(v1: u8, v2: u8) -> Hash256 { - let mut a = [0; 32]; - let mut b = [0; 32]; - - a[0] = v1; - b[0] = v2; - - Hash256::from_slice(ð2_hashing::hash32_concat(&a, &b)) -} - -fn u8_hash(x: u8) -> Hash256 { - let mut a = [0; 32]; - a[0] = x; - Hash256::from_slice(&a) -} - 
-#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "transparent")] -enum FixedTrans { - A(u8), - B(u8), -} - -#[test] -fn fixed_trans() { - assert_eq!(FixedTrans::A(2).tree_hash_root(), u8_hash(2)); - assert_eq!(FixedTrans::B(2).tree_hash_root(), u8_hash(2)); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "union")] -enum FixedUnion { - A(u8), - B(u8), -} - -#[test] -fn fixed_union() { - assert_eq!(FixedUnion::A(2).tree_hash_root(), u8_hash_concat(2, 0)); - assert_eq!(FixedUnion::B(2).tree_hash_root(), u8_hash_concat(2, 1)); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "transparent")] -enum VariableTrans { - A(HashVec), - B(HashVec), -} - -#[test] -fn variable_trans() { - assert_eq!( - VariableTrans::A(HashVec::from(vec![2])).tree_hash_root(), - u8_hash_concat(2, 1) - ); - assert_eq!( - VariableTrans::B(HashVec::from(vec![2])).tree_hash_root(), - u8_hash_concat(2, 1) - ); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "union")] -enum VariableUnion { - A(HashVec), - B(HashVec), -} - -#[test] -fn variable_union() { - assert_eq!( - VariableUnion::A(HashVec::from(vec![2])).tree_hash_root(), - mix_in_selector(u8_hash_concat(2, 1), 0) - ); - assert_eq!( - VariableUnion::B(HashVec::from(vec![2])).tree_hash_root(), - mix_in_selector(u8_hash_concat(2, 1), 1) - ); -} diff --git a/consensus/tree_hash_derive/Cargo.toml b/consensus/tree_hash_derive/Cargo.toml deleted file mode 100644 index 5f3396eb16..0000000000 --- a/consensus/tree_hash_derive/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "tree_hash_derive" -version = "0.4.0" -authors = ["Paul Hauner "] -edition = "2021" -description = "Procedural derive macros to accompany the tree_hash crate." 
-license = "Apache-2.0" - -[lib] -proc-macro = true - -[dependencies] -syn = "1.0.42" -quote = "1.0.7" -darling = "0.13.0" diff --git a/consensus/tree_hash_derive/src/lib.rs b/consensus/tree_hash_derive/src/lib.rs deleted file mode 100644 index 85ece80fb5..0000000000 --- a/consensus/tree_hash_derive/src/lib.rs +++ /dev/null @@ -1,336 +0,0 @@ -use darling::FromDeriveInput; -use proc_macro::TokenStream; -use quote::quote; -use std::convert::TryInto; -use syn::{parse_macro_input, Attribute, DataEnum, DataStruct, DeriveInput, Meta}; - -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -const MAX_UNION_SELECTOR: u8 = 127; - -#[derive(Debug, FromDeriveInput)] -#[darling(attributes(tree_hash))] -struct StructOpts { - #[darling(default)] - enum_behaviour: Option, -} - -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const ENUM_VARIANTS: &[&str] = &[ENUM_TRANSPARENT, ENUM_UNION]; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute, \ - e.g., #[tree_hash(enum_behaviour = \"transparent\")]"; - -enum EnumBehaviour { - Transparent, - Union, -} - -impl EnumBehaviour { - pub fn new(s: Option) -> Option { - s.map(|s| match s.as_ref() { - ENUM_TRANSPARENT => EnumBehaviour::Transparent, - ENUM_UNION => EnumBehaviour::Union, - other => panic!( - "{} is an invalid enum_behaviour, use either {:?}", - other, ENUM_VARIANTS - ), - }) - } -} - -/// Return a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields -/// that should not be hashed. -/// -/// # Panics -/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. 
-fn get_hashable_fields(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> { - get_hashable_fields_and_their_caches(struct_data) - .into_iter() - .map(|(ident, _, _)| ident) - .collect() -} - -/// Return a Vec of the hashable fields of a struct, and each field's type and optional cache field. -fn get_hashable_fields_and_their_caches( - struct_data: &syn::DataStruct, -) -> Vec<(&syn::Ident, syn::Type, Option)> { - struct_data - .fields - .iter() - .filter_map(|f| { - if should_skip_hashing(f) { - None - } else { - let ident = f - .ident - .as_ref() - .expect("tree_hash_derive only supports named struct fields"); - let opt_cache_field = get_cache_field_for(f); - Some((ident, f.ty.clone(), opt_cache_field)) - } - }) - .collect() -} - -/// Parse the cached_tree_hash attribute for a field. -/// -/// Extract the cache field name from `#[cached_tree_hash(cache_field_name)]` -/// -/// Return `Some(cache_field_name)` if the field has a cached tree hash attribute, -/// or `None` otherwise. -fn get_cache_field_for(field: &syn::Field) -> Option { - use syn::{MetaList, NestedMeta}; - - let parsed_attrs = cached_tree_hash_attr_metas(&field.attrs); - if let [Meta::List(MetaList { nested, .. })] = &parsed_attrs[..] { - nested.iter().find_map(|x| match x { - NestedMeta::Meta(Meta::Path(path)) => path.get_ident().cloned(), - _ => None, - }) - } else { - None - } -} - -/// Process the `cached_tree_hash` attributes from a list of attributes into structured `Meta`s. -fn cached_tree_hash_attr_metas(attrs: &[Attribute]) -> Vec { - attrs - .iter() - .filter(|attr| attr.path.is_ident("cached_tree_hash")) - .flat_map(|attr| attr.parse_meta()) - .collect() -} - -/// Returns true if some field has an attribute declaring it should not be hashed. 
-/// -/// The field attribute is: `#[tree_hash(skip_hashing)]` -fn should_skip_hashing(field: &syn::Field) -> bool { - field.attrs.iter().any(|attr| { - attr.path.is_ident("tree_hash") - && attr.tokens.to_string().replace(' ', "") == "(skip_hashing)" - }) -} - -/// Implements `tree_hash::TreeHash` for some `struct`. -/// -/// Fields are hashed in the order they are defined. -#[proc_macro_derive(TreeHash, attributes(tree_hash))] -pub fn tree_hash_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); - - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - tree_hash_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { - EnumBehaviour::Transparent => tree_hash_derive_enum_transparent(&item, s), - EnumBehaviour::Union => tree_hash_derive_enum_union(&item, s), - }, - _ => panic!("tree_hash_derive only supports structs and enums."), - } -} - -fn tree_hash_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - - let idents = get_hashable_fields(struct_data); - let num_leaves = idents.len(); - - let output = quote! 
{ - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - let mut hasher = tree_hash::MerkleHasher::with_leaves(#num_leaves); - - #( - hasher.write(self.#idents.tree_hash_root().as_bytes()) - .expect("tree hash derive should not apply too many leaves"); - )* - - hasher.finish().expect("tree hash derive should not have a remaining buffer") - } - } - }; - output.into() -} - -/// Derive `TreeHash` for an enum in the "transparent" method. -/// -/// The "transparent" method is distinct from the "union" method specified in the SSZ specification. -/// When using "transparent", the enum will be ignored and the contained field will be hashed as if -/// the enum does not exist. -/// -///## Limitations -/// -/// Only supports: -/// - Enums with a single field per variant, where -/// - All fields are "container" types. -/// -/// ## Panics -/// -/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run -/// time* if the container type requirement isn't met. -fn tree_hash_derive_enum_transparent( - derive_input: &DeriveInput, - enum_data: &DataEnum, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (patterns, type_exprs): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("TreeHash can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! 
{ - #name::#variant_name(ref inner) - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - let type_expr = quote! { - <#ty as tree_hash::TreeHash>::tree_hash_type() - }; - (pattern, type_expr) - }) - .unzip(); - - let output = quote! { - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - #( - assert_eq!( - #type_exprs, - tree_hash::TreeHashType::Container, - "all variants must be of container type" - ); - )* - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Enum should never be packed") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Enum should never be packed") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - match self { - #( - #patterns => inner.tree_hash_root(), - )* - } - } - } - }; - output.into() -} - -/// Derive `TreeHash` for an `enum` following the "union" SSZ spec. -/// -/// The union selector will be determined based upon the order in which the enum variants are -/// defined. E.g., the top-most variant in the enum will have a selector of `0`, the variant -/// beneath it will have a selector of `1` and so on. -/// -/// # Limitations -/// -/// Only supports enums where each variant has a single field. -fn tree_hash_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let patterns: Vec<_> = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("TreeHash can only be derived for enums with 1 field per variant"); - } - - quote! { - #name::#variant_name(ref inner) - } - }) - .collect(); - - let union_selectors = compute_union_selectors(patterns.len()); - - let output = quote! 
{ - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Enum should never be packed") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Enum should never be packed") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - match self { - #( - #patterns => { - let root = inner.tree_hash_root(); - let selector = #union_selectors; - tree_hash::mix_in_selector(&root, selector) - .expect("derive macro should prevent out-of-bounds selectors") - }, - )* - } - } - } - }; - output.into() -} - -fn compute_union_selectors(num_variants: usize) -> Vec { - let union_selectors = (0..num_variants) - .map(|i| { - i.try_into() - .expect("union selector exceeds u8::max_value, union has too many variants") - }) - .collect::>(); - - let highest_selector = union_selectors - .last() - .copied() - .expect("0-variant union is not permitted"); - - assert!( - highest_selector <= MAX_UNION_SELECTOR, - "union selector {} exceeds limit of {}, enum has too many variants", - highest_selector, - MAX_UNION_SELECTOR - ); - - union_selectors -} diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 46b88af66f..91ad3089f1 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -15,7 +15,7 @@ compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum-types = { version = "0.14.1", features = ["arbitrary"] } -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" hex = "0.4.2" int_to_bytes = { path = "../int_to_bytes" } log = "0.4.11" @@ -25,13 +25,13 @@ safe_arith = { path = "../safe_arith" } serde = {version = "1.0.116" , features = ["rc"] } serde_derive = "1.0.116" slog = "2.5.2" -eth2_ssz = 
{ version = "0.4.1", features = ["arbitrary"] } -eth2_ssz_derive = "0.3.1" -eth2_ssz_types = { version = "0.2.2", features = ["arbitrary"] } +ethereum_ssz = { version = "0.5.0", features = ["arbitrary"] } +ethereum_ssz_derive = "0.5.0" +ssz_types = { version = "0.5.0", features = ["arbitrary"] } swap_or_not_shuffle = { path = "../swap_or_not_shuffle", features = ["arbitrary"] } test_random_derive = { path = "../../common/test_random_derive" } -tree_hash = { version = "0.4.1", features = ["arbitrary"] } -tree_hash_derive = "0.4.0" +tree_hash = { version = "0.5.0", features = ["arbitrary"] } +tree_hash_derive = "0.5.0" rand_xorshift = "0.3.0" cached_tree_hash = { path = "../cached_tree_hash" } serde_yaml = "0.8.13" @@ -41,7 +41,7 @@ rusqlite = { version = "0.28.0", features = ["bundled"], optional = true } # The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by # `AbstractExecPayload` arbitrary = { version = "1.0", features = ["derive"] } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" regex = "1.5.5" lazy_static = "1.4.0" parking_lot = "0.12.0" diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 39a0a28c0c..20d66cd447 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; #[arbitrary(bound = "T: EthSpec")] pub struct AggregateAndProof { /// The index of the validator that created the attestation. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate attestation. 
pub aggregate: Attestation, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index c6a661c85d..286502b449 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; )] pub struct AttestationData { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, // LMD GHOST vote diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index 87a9c932a4..93a4c147b6 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -12,6 +12,6 @@ pub struct AttestationDuty { /// The total number of attesters in the committee. pub committee_len: usize, /// The committee count at `attestation_slot`. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 4bf9e641c0..1b40fe76d4 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -58,7 +58,7 @@ pub struct BeaconBlock = FullPayload #[superstruct(getter(copy))] pub slot: Slot, #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, #[superstruct(getter(copy))] pub parent_root: Hash256, diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index c6d6678f31..f2ef0a3dcc 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct BeaconBlockHeader { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, pub 
state_root: Hash256, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 921dafbbc6..4a9da36404 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -5,7 +5,7 @@ use crate::*; use compare_fields::CompareFields; use compare_fields_derive::CompareFields; use derivative::Derivative; -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; @@ -210,7 +210,7 @@ where { // Versioning #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, #[superstruct(getter(copy))] pub genesis_validators_root: Hash256, @@ -232,7 +232,7 @@ where pub eth1_data: Eth1Data, pub eth1_data_votes: VariableList, #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, // Registry @@ -296,10 +296,10 @@ where // Capella #[superstruct(only(Capella), partial_getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub next_withdrawal_index: u64, #[superstruct(only(Capella), partial_getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. 
#[superstruct(only(Capella))] diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index efc6573d2b..d1d63e3c80 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -403,7 +403,7 @@ impl ValidatorsListTreeHashCache { validators.len(), ), list_arena, - values: ParallelValidatorTreeHash::new::(validators), + values: ParallelValidatorTreeHash::new(validators), } } @@ -468,7 +468,7 @@ impl ParallelValidatorTreeHash { /// /// Allocates the necessary memory to store all of the cached Merkle trees but does perform any /// hashing. - fn new(validators: &[Validator]) -> Self { + fn new(validators: &[Validator]) -> Self { let num_arenas = std::cmp::max( 1, (validators.len() + VALIDATORS_PER_ARENA - 1) / VALIDATORS_PER_ARENA, diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index b279515bd1..3ed9ee9255 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct BlsToExecutionChange { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub from_bls_pubkey: PublicKeyBytes, pub to_execution_address: Address, diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index e922e81c70..8723c2afed 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -16,7 +16,7 @@ use tree_hash_derive::TreeHash; pub struct BuilderBid> { #[serde_as(as = "BlindedPayloadAsHeader")] pub header: Payload, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, #[serde(skip)] @@ -50,7 +50,7 @@ impl> ForkVersionDeserialize #[derive(Deserialize)] struct Helper { 
header: serde_json::Value, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] value: Uint256, pubkey: PublicKeyBytes, } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index dc89ab902f..163b07dcd1 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,9 +1,9 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::*; -use eth2_serde_utils::quoted_u64::MaybeQuoted; use int_to_bytes::int_to_bytes4; use serde::{Deserializer, Serialize, Serializer}; use serde_derive::Deserialize; +use serde_utils::quoted_u64::MaybeQuoted; use std::fs::File; use std::path::Path; use tree_hash::TreeHash; @@ -895,33 +895,33 @@ pub struct Config { pub preset_base: String, #[serde(default = "default_terminal_total_difficulty")] - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] pub terminal_total_difficulty: Uint256, #[serde(default = "default_terminal_block_hash")] pub terminal_block_hash: ExecutionBlockHash, #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, #[serde(default = "default_safe_slots_to_import_optimistically")] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub safe_slots_to_import_optimistically: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] min_genesis_time: u64, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] genesis_fork_version: [u8; 4], - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] genesis_delay: u64, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = 
"serde_utils::bytes_4_hex")] altair_fork_version: [u8; 4], #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] pub altair_fork_epoch: Option>, #[serde(default = "default_bellatrix_fork_version")] - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] bellatrix_fork_version: [u8; 4], #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] @@ -929,41 +929,41 @@ pub struct Config { pub bellatrix_fork_epoch: Option>, #[serde(default = "default_capella_fork_version")] - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] capella_fork_version: [u8; 4], #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] pub capella_fork_epoch: Option>, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] seconds_per_eth1_block: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] min_validator_withdrawability_delay: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] shard_committee_period: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] eth1_follow_distance: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] inactivity_score_bias: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] inactivity_score_recovery_rate: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] ejection_balance: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] min_per_epoch_churn_limit: u64, - #[serde(with 
= "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] churn_limit_quotient: u64, #[serde(skip_serializing_if = "Option::is_none")] proposer_score_boost: Option>, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] deposit_chain_id: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] deposit_network_id: u64, deposit_contract_address: Address, } diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 167b0857c5..7e757f89b1 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash; #[arbitrary(bound = "T: EthSpec")] pub struct ContributionAndProof { /// The index of the validator that created the sync contribution. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate contribution. 
pub contribution: SyncCommitteeContribution, diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index 1969311671..d75643f659 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; pub struct DepositData { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, pub signature: SignatureBytes, } diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index 63073401c2..1096cfaa28 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash; pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, } diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index 21bbab81ff..aea4677f26 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -1,5 +1,5 @@ use crate::*; -use eth2_hashing::{hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use int_to_bytes::int_to_bytes32; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 3556e31a9f..409383c904 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -24,9 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct EnrForkId { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub fork_digest: [u8; 4], - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = 
"serde_utils::bytes_4_hex")] pub next_fork_version: [u8; 4], pub next_fork_epoch: Epoch, } diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index 6b2396e112..d8f476b99b 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct Eth1Data { pub deposit_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_count: u64, pub block_hash: Hash256, } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 18da0d161f..77ef6407e8 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -60,21 +60,21 @@ pub struct ExecutionPayload { pub logs_bloom: FixedVector, #[superstruct(getter(copy))] pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub block_number: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(getter(copy))] diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index d193a6cd8e..1fb29db9d3 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs 
@@ -53,21 +53,21 @@ pub struct ExecutionPayloadHeader { pub logs_bloom: FixedVector, #[superstruct(getter(copy))] pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub block_number: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(getter(copy))] diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index de332f0cad..4650881f72 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -24,9 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Fork { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub previous_version: [u8; 4], - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub epoch: Epoch, } diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index cc79039315..bf9c48cd7e 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct ForkData { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub genesis_validators_root: Hash256, } diff --git 
a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index 07ff40b27e..2d97dc1219 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ b/consensus/types/src/fork_versioned_response.rs @@ -6,14 +6,15 @@ use std::sync::Arc; // Deserialize is only implemented for types that implement ForkVersionDeserialize #[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ExecutionOptimisticForkVersionedResponse { +pub struct ExecutionOptimisticFinalizedForkVersionedResponse { #[serde(skip_serializing_if = "Option::is_none")] pub version: Option, pub execution_optimistic: Option, + pub finalized: Option, pub data: T, } -impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticForkVersionedResponse +impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticFinalizedForkVersionedResponse where F: ForkVersionDeserialize, { @@ -25,6 +26,7 @@ where struct Helper { version: Option, execution_optimistic: Option, + finalized: Option, data: serde_json::Value, } @@ -34,9 +36,10 @@ where None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, }; - Ok(ExecutionOptimisticForkVersionedResponse { + Ok(ExecutionOptimisticFinalizedForkVersionedResponse { version: helper.version, execution_optimistic: helper.execution_optimistic, + finalized: helper.finalized, data, }) } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 6288cdbe80..bd4abe37d8 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -27,7 +27,7 @@ impl Graffiti { impl fmt::Display for Graffiti { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) + write!(f, "{}", serde_utils::hex::encode(self.0)) } } @@ -96,7 +96,7 @@ pub mod serde_graffiti { where S: Serializer, { - serializer.serialize_str(ð2_serde_utils::hex::encode(bytes)) + serializer.serialize_str(&serde_utils::hex::encode(bytes)) } pub fn deserialize<'de, 
D>(deserializer: D) -> Result<[u8; GRAFFITI_BYTES_LEN], D::Error> @@ -105,7 +105,7 @@ pub mod serde_graffiti { { let s: String = Deserialize::deserialize(deserializer)?; - let bytes = eth2_serde_utils::hex::decode(&s).map_err(D::Error::custom)?; + let bytes = serde_utils::hex::decode(&s).map_err(D::Error::custom)?; if bytes.len() != GRAFFITI_BYTES_LEN { return Err(D::Error::custom(format!( diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 16ffb1ad8f..c59cbef307 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -72,9 +72,9 @@ impl Hash for IndexedAttestation { mod quoted_variable_list_u64 { use super::*; use crate::Unsigned; - use eth2_serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; use serde::ser::SerializeSeq; use serde::{Deserializer, Serializer}; + use serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; pub fn serialize(value: &VariableList, serializer: S) -> Result where diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index e5463858f0..b52d9a0823 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -144,9 +144,7 @@ pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::fork_versioned_response::{ - ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse, -}; +pub use crate::fork_versioned_response::{ForkVersionDeserialize, ForkVersionedResponse}; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index bd98f8da07..4f170a60be 100644 --- a/consensus/types/src/participation_flags.rs +++ 
b/consensus/types/src/participation_flags.rs @@ -9,7 +9,7 @@ use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; #[serde(transparent)] #[derive(arbitrary::Arbitrary)] pub struct ParticipationFlags { - #[serde(with = "eth2_serde_utils::quoted_u8")] + #[serde(with = "serde_utils::quoted_u8")] bits: u8, } diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 1b9903ebbe..88db0ec4d3 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -25,9 +25,9 @@ use tree_hash_derive::TreeHash; pub struct PendingAttestation { pub aggregation_bits: BitList, pub data: AttestationData, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inclusion_delay: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, } diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 20c78f0515..e65dd8f60d 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -12,71 +12,71 @@ use serde_derive::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct BasePreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_committees_per_slot: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub target_committee_size: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_validators_per_committee: u64, - #[serde(with = "eth2_serde_utils::quoted_u8")] + #[serde(with = "serde_utils::quoted_u8")] pub shuffle_round_count: u8, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = 
"serde_utils::quoted_u64")] pub hysteresis_downward_multiplier: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_upward_multiplier: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub safe_slots_to_update_justified: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_deposit_amount: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub effective_balance_increment: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_attestation_inclusion_delay: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub slots_per_epoch: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_seed_lookahead: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_seed_lookahead: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_eth1_voting_period: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub slots_per_historical_root: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_epochs_to_inactivity_penalty: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_historical_vector: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_slashings_vector: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] 
pub historical_roots_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_registry_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub base_reward_factor: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub whistleblower_reward_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_reward_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_proposer_slashings: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_attester_slashings: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_attestations: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_deposits: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_voluntary_exits: u64, } @@ -123,17 +123,17 @@ impl BasePreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct AltairPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub 
min_slashing_penalty_quotient_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub sync_committee_size: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_sync_committee_period: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_sync_committee_participants: u64, } @@ -153,19 +153,19 @@ impl AltairPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct BellatrixPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_bytes_per_transaction: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_transactions_per_payload: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub bytes_per_logs_bloom: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_extra_data_bytes: u64, } @@ -187,11 +187,11 @@ impl BellatrixPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct CapellaPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub 
max_bls_to_execution_changes: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_withdrawals_per_payload: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_validators_per_withdrawals_sweep: u64, } diff --git a/consensus/types/src/proposer_preparation_data.rs b/consensus/types/src/proposer_preparation_data.rs index 6179828a95..2828b0d4d5 100644 --- a/consensus/types/src/proposer_preparation_data.rs +++ b/consensus/types/src/proposer_preparation_data.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProposerPreparationData { /// The validators index. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, /// The fee-recipient address. pub fee_recipient: Address, diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs index f8bc8ba69f..2a404b3b96 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/selection_proof.rs @@ -1,7 +1,7 @@ use crate::{ ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, }; -use eth2_hashing::hash; +use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use ssz::Encode; use std::cmp; diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 2716367c7e..e9f1e192b4 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -38,7 +38,7 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssi Deserialize, )] #[serde(transparent)] -pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct Slot(#[serde(with = "serde_utils::quoted_u64")] u64); #[derive( arbitrary::Arbitrary, @@ -54,7 +54,7 @@ pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); 
Deserialize, )] #[serde(transparent)] -pub struct Epoch(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct Epoch(#[serde(with = "serde_utils::quoted_u64")] u64); impl_common!(Slot); impl_common!(Epoch); diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index fd06eb78a1..b885f89f7d 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -20,7 +20,7 @@ lazy_static! { #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] -pub struct SubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); pub fn subnet_id_to_string(i: u64) -> &'static str { if i < MAX_SUBNET_ID as u64 { @@ -85,7 +85,7 @@ impl SubnetId { let subscription_event_idx = epoch.as_u64() / spec.epochs_per_subnet_subscription; let permutation_seed = - eth2_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); + ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); let num_subnets = 1 << spec.attestation_subnet_prefix_bits(); diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index 9e72438be2..b101068123 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; )] pub struct SyncAggregatorSelectionData { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub subcommittee_index: u64, } diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index ef8b52becf..425f8f116d 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -32,7 +32,7 @@ pub enum Error { pub struct 
SyncCommitteeContribution { pub slot: Slot, pub beacon_block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub subcommittee_index: u64, pub aggregation_bits: BitVector, pub signature: AggregateSignature, diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index 5c2fb08374..d0301cdf63 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash; pub struct SyncCommitteeMessage { pub slot: Slot, pub beacon_block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // Signature by the validator over `beacon_block_root`. pub signature: Signature, diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee_subscription.rs index 7f5ed063f6..8e040279d7 100644 --- a/consensus/types/src/sync_committee_subscription.rs +++ b/consensus/types/src/sync_committee_subscription.rs @@ -7,10 +7,10 @@ use ssz_derive::{Decode, Encode}; #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] pub struct SyncCommitteeSubscription { /// The validators index. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, /// The sync committee indices. - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub sync_committee_indices: Vec, /// Epoch until which this subscription is required. 
pub until_epoch: Epoch, diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_duty.rs index bdb0784596..e3ffe62bfd 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_duty.rs @@ -7,9 +7,9 @@ use std::collections::HashSet; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncDuty { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub validator_sync_committee_indices: Vec, } diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs index 570abace1e..7cae3946c6 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_selection_proof.rs @@ -5,7 +5,7 @@ use crate::{ ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, SyncAggregatorSelectionData, }; -use eth2_hashing::hash; +use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use ssz::Encode; use ssz_types::typenum::Unsigned; diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 11bcf26894..5af756ae01 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -21,7 +21,7 @@ lazy_static! 
{ #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] -pub struct SyncSubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct SyncSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); pub fn sync_subnet_id_to_string(i: u64) -> &'static str { if i < SYNC_COMMITTEE_SUBNET_COUNT { diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 43b892cdf3..6860397fb5 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash; pub struct Validator { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, pub slashed: bool, pub activation_eligibility_epoch: Epoch, diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs index 5a3450df08..de7f26cc63 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator_registration_data.rs @@ -13,9 +13,9 @@ pub struct SignedValidatorRegistrationData { #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, TreeHash)] pub struct ValidatorRegistrationData { pub fee_recipient: Address, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub timestamp: u64, pub pubkey: PublicKeyBytes, } diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 20c84986c2..02686fef9a 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. 
pub epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, } diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 5221ff63f0..eed7c7e277 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -20,12 +20,12 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Withdrawal { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub address: Address, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, } diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index c3331824d9..a610f257cd 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -5,15 +5,15 @@ authors = ["Paul Hauner "] edition = "2021" [dependencies] -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.4.2", optional = true } rand = "0.7.3" serde = "1.0.116" serde_derive = "1.0.116" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" hex = "0.4.2" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" ethereum-types = "0.14.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } zeroize = { version = "1.4.2", features = ["zeroize_derive"] } diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index a61529af25..e6e53253f6 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -4,9 +4,9 @@ use crate::{ generic_signature::{GenericSignature, TSignature}, Error, Hash256, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, }; -use eth2_serde_utils::hex::encode as hex_encode; 
use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index 847d039c62..462e4cb2cb 100644 --- a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -1,8 +1,8 @@ use crate::generic_public_key_bytes::GenericPublicKeyBytes; use crate::Error; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index c2f318ab65..59b0ffc43f 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -2,9 +2,9 @@ use crate::{ generic_public_key::{GenericPublicKey, TPublicKey}, Error, PUBLIC_KEY_BYTES_LEN, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 01e5ed1d48..05e0a222bd 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -2,9 +2,9 @@ use crate::{ generic_public_key::{GenericPublicKey, TPublicKey}, Error, Hash256, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/crypto/bls/src/generic_signature_bytes.rs 
b/crypto/bls/src/generic_signature_bytes.rs index aa33c90d0c..8f9f2a4d88 100644 --- a/crypto/bls/src/generic_signature_bytes.rs +++ b/crypto/bls/src/generic_signature_bytes.rs @@ -3,9 +3,9 @@ use crate::{ generic_signature::{GenericSignature, TSignature}, Error, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; diff --git a/crypto/bls/src/get_withdrawal_credentials.rs b/crypto/bls/src/get_withdrawal_credentials.rs index 98106434f1..d5e6470504 100644 --- a/crypto/bls/src/get_withdrawal_credentials.rs +++ b/crypto/bls/src/get_withdrawal_credentials.rs @@ -1,5 +1,5 @@ use crate::PublicKey; -use eth2_hashing::hash; +use ethereum_hashing::hash; use ssz::Encode; /// Returns the withdrawal credentials for a given public key. diff --git a/crypto/eth2_hashing/.cargo/config b/crypto/eth2_hashing/.cargo/config deleted file mode 100644 index 4ec2f3b862..0000000000 --- a/crypto/eth2_hashing/.cargo/config +++ /dev/null @@ -1,2 +0,0 @@ -[target.wasm32-unknown-unknown] -runner = 'wasm-bindgen-test-runner' diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml deleted file mode 100644 index db296c70fe..0000000000 --- a/crypto/eth2_hashing/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "eth2_hashing" -version = "0.3.0" -authors = ["Paul Hauner "] -edition = "2021" -license = "Apache-2.0" -description = "Hashing primitives used in Ethereum 2.0" - -[dependencies] -lazy_static = { version = "1.4.0", optional = true } -cpufeatures = { version = "0.2.5", optional = true } -ring = "0.16.19" -sha2 = "0.10" - -[dev-dependencies] -rustc-hex = "2.1.0" - -[target.'cfg(target_arch = "wasm32")'.dev-dependencies] -wasm-bindgen-test = "0.3.18" - -[features] -default = ["zero_hash_cache", "detect-cpufeatures"] 
-zero_hash_cache = ["lazy_static"] -detect-cpufeatures = ["cpufeatures"] diff --git a/crypto/eth2_hashing/src/lib.rs b/crypto/eth2_hashing/src/lib.rs deleted file mode 100644 index 36a3d14139..0000000000 --- a/crypto/eth2_hashing/src/lib.rs +++ /dev/null @@ -1,251 +0,0 @@ -//! Optimized SHA256 for use in Ethereum 2.0. -//! -//! The initial purpose of this crate was to provide an abstraction over the hash function used in -//! Ethereum 2.0. The hash function changed during the specification process, so defining it once in -//! this crate made it easy to replace. -//! -//! Now this crate serves primarily as a wrapper over two SHA256 crates: `sha2` and `ring` – -//! which it switches between at runtime based on the availability of SHA intrinsics. - -pub use self::DynamicContext as Context; -use sha2::Digest; - -#[cfg(feature = "zero_hash_cache")] -use lazy_static::lazy_static; - -/// Length of a SHA256 hash in bytes. -pub const HASH_LEN: usize = 32; - -/// Returns the digest of `input` using the best available implementation. -pub fn hash(input: &[u8]) -> Vec { - DynamicImpl::best().hash(input) -} - -/// Hash function returning a fixed-size array (to save on allocations). -/// -/// Uses the best available implementation based on CPU features. -pub fn hash_fixed(input: &[u8]) -> [u8; HASH_LEN] { - DynamicImpl::best().hash_fixed(input) -} - -/// Compute the hash of two slices concatenated. -pub fn hash32_concat(h1: &[u8], h2: &[u8]) -> [u8; 32] { - let mut ctxt = DynamicContext::new(); - ctxt.update(h1); - ctxt.update(h2); - ctxt.finalize() -} - -/// Context trait for abstracting over implementation contexts. -pub trait Sha256Context { - fn new() -> Self; - - fn update(&mut self, bytes: &[u8]); - - fn finalize(self) -> [u8; HASH_LEN]; -} - -/// Top-level trait implemented by both `sha2` and `ring` implementations. 
-pub trait Sha256 { - type Context: Sha256Context; - - fn hash(&self, input: &[u8]) -> Vec; - - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN]; -} - -/// Implementation of SHA256 using the `sha2` crate (fastest on CPUs with SHA extensions). -struct Sha2CrateImpl; - -impl Sha256Context for sha2::Sha256 { - fn new() -> Self { - sha2::Digest::new() - } - - fn update(&mut self, bytes: &[u8]) { - sha2::Digest::update(self, bytes) - } - - fn finalize(self) -> [u8; HASH_LEN] { - sha2::Digest::finalize(self).into() - } -} - -impl Sha256 for Sha2CrateImpl { - type Context = sha2::Sha256; - - fn hash(&self, input: &[u8]) -> Vec { - Self::Context::digest(input).into_iter().collect() - } - - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] { - Self::Context::digest(input).into() - } -} - -/// Implementation of SHA256 using the `ring` crate (fastest on CPUs without SHA extensions). -pub struct RingImpl; - -impl Sha256Context for ring::digest::Context { - fn new() -> Self { - Self::new(&ring::digest::SHA256) - } - - fn update(&mut self, bytes: &[u8]) { - self.update(bytes) - } - - fn finalize(self) -> [u8; HASH_LEN] { - let mut output = [0; HASH_LEN]; - output.copy_from_slice(self.finish().as_ref()); - output - } -} - -impl Sha256 for RingImpl { - type Context = ring::digest::Context; - - fn hash(&self, input: &[u8]) -> Vec { - ring::digest::digest(&ring::digest::SHA256, input) - .as_ref() - .into() - } - - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] { - let mut ctxt = Self::Context::new(&ring::digest::SHA256); - ctxt.update(input); - ctxt.finalize() - } -} - -/// Default dynamic implementation that switches between available implementations. -pub enum DynamicImpl { - Sha2, - Ring, -} - -// Runtime latch for detecting the availability of SHA extensions on x86_64. -// -// Inspired by the runtime switch within the `sha2` crate itself. 
-#[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] -cpufeatures::new!(x86_sha_extensions, "sha", "sse2", "ssse3", "sse4.1"); - -#[inline(always)] -pub fn have_sha_extensions() -> bool { - #[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] - return x86_sha_extensions::get(); - - #[cfg(not(all(feature = "detect-cpufeatures", target_arch = "x86_64")))] - return false; -} - -impl DynamicImpl { - /// Choose the best available implementation based on the currently executing CPU. - #[inline(always)] - pub fn best() -> Self { - if have_sha_extensions() { - Self::Sha2 - } else { - Self::Ring - } - } -} - -impl Sha256 for DynamicImpl { - type Context = DynamicContext; - - #[inline(always)] - fn hash(&self, input: &[u8]) -> Vec { - match self { - Self::Sha2 => Sha2CrateImpl.hash(input), - Self::Ring => RingImpl.hash(input), - } - } - - #[inline(always)] - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] { - match self { - Self::Sha2 => Sha2CrateImpl.hash_fixed(input), - Self::Ring => RingImpl.hash_fixed(input), - } - } -} - -/// Context encapsulating all implemenation contexts. -/// -/// This enum ends up being 8 bytes larger than the largest inner context. -pub enum DynamicContext { - Sha2(sha2::Sha256), - Ring(ring::digest::Context), -} - -impl Sha256Context for DynamicContext { - fn new() -> Self { - match DynamicImpl::best() { - DynamicImpl::Sha2 => Self::Sha2(Sha256Context::new()), - DynamicImpl::Ring => Self::Ring(Sha256Context::new()), - } - } - - fn update(&mut self, bytes: &[u8]) { - match self { - Self::Sha2(ctxt) => Sha256Context::update(ctxt, bytes), - Self::Ring(ctxt) => Sha256Context::update(ctxt, bytes), - } - } - - fn finalize(self) -> [u8; HASH_LEN] { - match self { - Self::Sha2(ctxt) => Sha256Context::finalize(ctxt), - Self::Ring(ctxt) => Sha256Context::finalize(ctxt), - } - } -} - -/// The max index that can be used with `ZERO_HASHES`. 
-#[cfg(feature = "zero_hash_cache")] -pub const ZERO_HASHES_MAX_INDEX: usize = 48; - -#[cfg(feature = "zero_hash_cache")] -lazy_static! { - /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. - pub static ref ZERO_HASHES: Vec> = { - let mut hashes = vec![vec![0; 32]; ZERO_HASHES_MAX_INDEX + 1]; - - for i in 0..ZERO_HASHES_MAX_INDEX { - hashes[i + 1] = hash32_concat(&hashes[i], &hashes[i])[..].to_vec(); - } - - hashes - }; -} - -#[cfg(test)] -mod tests { - use super::*; - use rustc_hex::FromHex; - - #[cfg(target_arch = "wasm32")] - use wasm_bindgen_test::*; - - #[cfg_attr(not(target_arch = "wasm32"), test)] - #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] - fn test_hashing() { - let input: Vec = b"hello world".as_ref().into(); - - let output = hash(input.as_ref()); - let expected_hex = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"; - let expected: Vec = expected_hex.from_hex().unwrap(); - assert_eq!(expected, output); - } - - #[cfg(feature = "zero_hash_cache")] - mod zero_hash { - use super::*; - - #[test] - fn zero_hash_zero() { - assert_eq!(ZERO_HASHES[0], vec![0; 32]); - } - } -} diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 84b66c37d2..9e7f2fdb08 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.0.1-rc.0" +version = "4.1.0" authors = ["Paul Hauner "] edition = "2021" @@ -21,12 +21,12 @@ env_logger = "0.9.0" types = { path = "../consensus/types" } state_processing = { path = "../consensus/state_processing" } int_to_bytes = { path = "../consensus/int_to_bytes" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" environment = { path = "../lighthouse/environment" } eth2_network_config = { path = "../common/eth2_network_config" } genesis = { path = "../beacon_node/genesis" } deposit_contract = { path = "../common/deposit_contract" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" clap_utils = { 
path = "../common/clap_utils" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] } diff --git a/lcli/Dockerfile b/lcli/Dockerfile index feda81d030..98f33f2153 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.65.0-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG PORTABLE diff --git a/lcli/src/check_deposit_data.rs b/lcli/src/check_deposit_data.rs index 56f18f9988..47c2c7addf 100644 --- a/lcli/src/check_deposit_data.rs +++ b/lcli/src/check_deposit_data.rs @@ -2,9 +2,8 @@ use clap::ArgMatches; use clap_utils::{parse_required, parse_ssz_required}; use deposit_contract::{decode_eth1_tx_data, DEPOSIT_DATA_LEN}; use tree_hash::TreeHash; -use types::EthSpec; -pub fn run(matches: &ArgMatches) -> Result<(), String> { +pub fn run(matches: &ArgMatches) -> Result<(), String> { let rlp_bytes = parse_ssz_required::>(matches, "deposit-data")?; let amount = parse_required(matches, "deposit-amount")?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index cdf9cfa677..eeb098f04d 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -847,7 +847,7 @@ fn run( } ("new-testnet", Some(matches)) => new_testnet::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run new_testnet command: {}", e)), - ("check-deposit-data", Some(matches)) => check_deposit_data::run::(matches) + ("check-deposit-data", Some(matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), ("generate-bootnode-enr", Some(matches)) => 
generate_bootnode_enr::run::(matches) .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 44a1772ccd..cf971c69f0 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -74,7 +74,7 @@ use eth2::{ use ssz::Encode; use state_processing::{ block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, - BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs::File; @@ -381,6 +381,7 @@ fn do_transition( &mut pre_state, &block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index e7a6e7c2ba..48f47c6d48 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "lighthouse" -version = "4.0.1-rc.0" +version = "4.1.0" authors = ["Sigma Prime "] edition = "2021" autotests = false -rust-version = "1.66" +rust-version = "1.68.2" [features] default = ["slasher-mdbx"] @@ -33,7 +33,7 @@ slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = { version = "2.1.1", features = ["json"] } types = { "path" = "../consensus/types" } bls = { path = "../crypto/bls" } -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" clap = "2.33.3" env_logger = "0.9.0" environment = { path = "./environment" } diff --git a/lighthouse/build.rs b/lighthouse/build.rs new file mode 100644 index 0000000000..3d8a25ec8c --- /dev/null +++ b/lighthouse/build.rs @@ -0,0 +1,2 @@ +// This is a stub for determining the build profile, see `build_profile_name`. 
+fn main() {} diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 6f53b9cc19..c8b963a9bd 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -6,8 +6,8 @@ use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, get_eth2_network_config}; use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR}; use env_logger::{Builder, Env}; use environment::{EnvironmentBuilder, LoggerConfig}; -use eth2_hashing::have_sha_extensions; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; +use ethereum_hashing::have_sha_extensions; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; use slog::{crit, info, warn}; @@ -37,6 +37,17 @@ fn allocator_name() -> &'static str { } } +fn build_profile_name() -> String { + // Nice hack from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime + // The profile name is always the 3rd last part of the path (with 1 based indexing). + // e.g. /code/core/target/cli/build/my-build-info-9f91ba6f99d7a061/out + std::env!("OUT_DIR") + .split(std::path::MAIN_SEPARATOR) + .nth_back(3) + .unwrap_or_else(|| "unknown") + .to_string() +} + fn main() { // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. if std::env::var("RUST_BACKTRACE").is_err() { @@ -58,11 +69,13 @@ fn main() { BLS library: {}\n\ SHA256 hardware acceleration: {}\n\ Allocator: {}\n\ + Profile: {}\n\ Specs: mainnet (true), minimal ({}), gnosis ({})", VERSION.replace("Lighthouse/", ""), bls_library_name(), have_sha_extensions(), allocator_name(), + build_profile_name(), cfg!(feature = "spec-minimal"), cfg!(feature = "gnosis"), ).as_str() @@ -152,7 +165,8 @@ fn main() { .help( "If present, log files will be generated as world-readable meaning they can be read by \ any user on the machine. 
Note that logs can often contain sensitive information \ - about your validator and so this flag should be used with caution.") + about your validator and so this flag should be used with caution. For Windows users, \ + the log file permissions will be inherited from the parent folder.") .global(true), ) .arg( diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 078bca95ef..7e647c904d 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2,6 +2,7 @@ use beacon_node::ClientConfig as Config; use crate::exec::{CommandLineTestExec, CompletedTest}; use beacon_node::beacon_chain::chain_config::{ + DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; use eth1::Eth1Endpoint; @@ -344,6 +345,23 @@ fn trusted_peers_flag() { }); } +#[test] +fn genesis_backfill_flag() { + CommandLineTest::new() + .flag("genesis-backfill", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); +} + +/// The genesis backfill flag should be enabled if historic states flag is set. 
+#[test] +fn genesis_backfill_with_historic_flag() { + CommandLineTest::new() + .flag("reconstruct-historic-states", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); +} + #[test] fn always_prefer_builder_payload_flag() { CommandLineTest::new() @@ -715,6 +733,40 @@ fn builder_fallback_flags() { ); } +#[test] +fn builder_user_agent() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + None, + None, + |config| { + assert_eq!( + config.execution_layer.as_ref().unwrap().builder_user_agent, + None + ); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-user-agent"), + Some("anon"), + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_user_agent + .as_ref() + .unwrap(), + "anon" + ); + }, + ); +} + fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { use sensitive_url::SensitiveUrl; @@ -1045,6 +1097,13 @@ fn disable_discovery_flag() { .with_config(|config| assert!(config.network.disable_discovery)); } #[test] +fn disable_peer_scoring_flag() { + CommandLineTest::new() + .flag("disable-peer-scoring", None) + .run_with_zero_port() + .with_config(|config| assert!(config.network.disable_peer_scoring)); +} +#[test] fn disable_upnp_flag() { CommandLineTest::new() .flag("disable-upnp", None) @@ -1052,6 +1111,19 @@ fn disable_upnp_flag() { .with_config(|config| assert!(!config.network.upnp_enabled)); } #[test] +fn disable_backfill_rate_limiting_flag() { + CommandLineTest::new() + .flag("disable-backfill-rate-limiting", None) + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.enable_backfill_rate_limiting)); +} +#[test] +fn default_backfill_rate_limiting_flag() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.chain.enable_backfill_rate_limiting)); +} +#[test] fn default_boot_nodes() { let mainnet = vec![ // 
Lighthouse Team (Sigma Prime) @@ -1614,6 +1686,25 @@ fn block_cache_size_flag() { .with_config(|config| assert_eq!(config.store.block_cache_size, 4_usize)); } #[test] +fn historic_state_cache_size_flag() { + CommandLineTest::new() + .flag("historic-state-cache-size", Some("4")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.historic_state_cache_size, 4_usize)); +} +#[test] +fn historic_state_cache_size_default() { + use beacon_node::beacon_chain::store::config::DEFAULT_HISTORIC_STATE_CACHE_SIZE; + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.store.historic_state_cache_size, + DEFAULT_HISTORIC_STATE_CACHE_SIZE + ); + }); +} +#[test] fn auto_compact_db_flag() { CommandLineTest::new() .flag("auto-compact-db", Some("false")) @@ -1868,6 +1959,10 @@ fn enable_proposer_re_orgs_default() { config.chain.re_org_max_epochs_since_finalization, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, ); + assert_eq!( + config.chain.re_org_cutoff(12), + Duration::from_secs(12) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR + ); }); } @@ -1900,6 +1995,49 @@ fn proposer_re_org_max_epochs_since_finalization() { }); } +#[test] +fn proposer_re_org_cutoff() { + CommandLineTest::new() + .flag("proposer-reorg-cutoff", Some("500")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.chain.re_org_cutoff(12), Duration::from_millis(500)) + }); +} + +#[test] +fn proposer_re_org_disallowed_offsets_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_disallowed_offsets, + DisallowedReOrgOffsets::new::(vec![0]).unwrap() + ) + }); +} + +#[test] +fn proposer_re_org_disallowed_offsets_override() { + CommandLineTest::new() + .flag("--proposer-reorg-disallowed-offsets", Some("1,2,3")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_disallowed_offsets, + DisallowedReOrgOffsets::new::(vec![1, 2, 3]).unwrap() + ) + }); +} + 
+#[test] +#[should_panic] +fn proposer_re_org_disallowed_offsets_invalid() { + CommandLineTest::new() + .flag("--proposer-reorg-disallowed-offsets", Some("32,33,34")) + .run_with_zero_port(); +} + #[test] fn monitoring_endpoint() { CommandLineTest::new() diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index c9fb387681..c4050ac934 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -20,7 +20,7 @@ Modify `vars.env` as desired. Start a local eth1 ganache server plus boot node along with `BN_COUNT` number of beacon nodes and `VC_COUNT` validator clients. -The `start_local_testnet.sh` script takes three options `-v VC_COUNT`, `-d DEBUG_LEVEL` and `-h` for help. +The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. The options may be in any order or absent in which case they take the default value specified. - VC_COUNT: the number of validator clients to create, default: `BN_COUNT` - DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info` diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index dcc0a5382a..e3aba5c3ad 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -28,7 +28,7 @@ while getopts "v:d:ph" flag; do echo "Options:" echo " -v: VC_COUNT default: $VC_COUNT" echo " -d: DEBUG_LEVEL default: info" - echo " -p: enable private tx proposals" + echo " -p: enable builder proposals" echo " -h: this help" exit ;; diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index c5ce8793ad..7f2ac456b5 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -12,8 +12,8 @@ lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] [dependencies] bincode = "1.3.1" byteorder = "1.3.4" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" flate2 = { version = "1.0.14", 
features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } @@ -26,8 +26,8 @@ serde = "1.0" serde_derive = "1.0" slog = "2.5.2" sloggers = { version = "2.1.1", features = ["json"] } -tree_hash = "0.4.1" -tree_hash_derive = "0.4.0" +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" types = { path = "../consensus/types" } strum = { version = "0.24.1", features = ["derive"] } diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index bae1807329..ddc49e13cd 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,4 +1,4 @@ -FROM rust:1.66.1-bullseye AS builder +FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 79664a2622..11283052f0 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -22,10 +22,10 @@ serde = "1.0.116" serde_derive = "1.0.116" serde_repr = "0.1.6" serde_yaml = "0.8.13" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -tree_hash = "0.4.1" -tree_hash_derive = "0.4.0" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" cached_tree_hash = { path = "../../consensus/cached_tree_hash" } state_processing = { path = "../../consensus/state_processing" } swap_or_not_shuffle = { path = "../../consensus/swap_or_not_shuffle" } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 7c3154a328..4f5d998301 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -425,7 +425,7 @@ impl Tester { .harness .chain .slot_clock - .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .seconds_from_current_slot_start() .unwrap(); let result = self diff 
--git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index 8a75789724..e51fed1907 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, - ConsensusContext, VerifyBlockRoot, + ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use types::{BeaconState, EthSpec, ForkName, RelativeEpoch, SignedBeaconBlock}; @@ -96,6 +96,7 @@ impl Case for SanityBlocks { &mut indiv_state, signed_block, BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, @@ -106,6 +107,7 @@ impl Case for SanityBlocks { &mut bulk_state, signed_block, BlockSignatureStrategy::VerifyBulk, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 314e51d530..bb4efdb6de 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -4,7 +4,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, - ConsensusContext, VerifyBlockRoot, + ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use std::str::FromStr; use types::{BeaconState, Epoch, ForkName, SignedBeaconBlock}; @@ -101,6 +101,7 @@ impl Case for TransitionTest { &mut state, block, BlockSignatureStrategy::VerifyBulk, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs 
index ff333332ba..726019a848 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -626,9 +626,10 @@ async fn check_payload_reconstruction( ee: &ExecutionPair, payload: &ExecutionPayload, ) { + // check via legacy eth_getBlockByHash let reconstructed = ee .execution_layer - .get_payload_by_block_hash(payload.block_hash(), payload.fork_name()) + .get_payload_by_hash_legacy(payload.block_hash(), payload.fork_name()) .await .unwrap() .unwrap(); diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index f1196502fb..9668ee8cb4 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -24,6 +24,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("4") .help("Number of beacon nodes")) + .arg(Arg::with_name("proposer-nodes") + .short("n") + .long("nodes") + .takes_value(true) + .default_value("2") + .help("Number of proposer-only beacon nodes")) .arg(Arg::with_name("validators_per_node") .short("v") .long("validators_per_node") @@ -57,6 +63,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("4") .help("Number of beacon nodes")) + .arg(Arg::with_name("proposer-nodes") + .short("n") + .long("nodes") + .takes_value(true) + .default_value("2") + .help("Number of proposer-only beacon nodes")) .arg(Arg::with_name("validators_per_node") .short("v") .long("validators_per_node") diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 43e8a5cf4d..1699c0e9ee 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -27,6 +27,8 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); + let proposer_nodes = value_t!(matches, "proposer-nodes", usize).unwrap_or(0); + println!("PROPOSER-NODES: {}", proposer_nodes); 
let validators_per_node = value_t!(matches, "validators_per_node", usize) .expect("missing validators_per_node default"); let speed_up_factor = @@ -35,7 +37,8 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let post_merge_sim = matches.is_present("post-merge"); println!("Beacon Chain Simulator:"); - println!(" nodes:{}", node_count); + println!(" nodes:{}, proposer_nodes: {}", node_count, proposer_nodes); + println!(" validators_per_node:{}", validators_per_node); println!(" post merge simulation:{}", post_merge_sim); println!(" continue_after_checks:{}", continue_after_checks); @@ -147,7 +150,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.sync_eth1_chain = true; beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64; beacon_config.eth1.chain_id = Eth1Id::from(chain_id); - beacon_config.network.target_peers = node_count - 1; + beacon_config.network.target_peers = node_count + proposer_nodes - 1; beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); @@ -173,7 +176,17 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * One by one, add beacon nodes to the network. */ for _ in 0..node_count - 1 { - network.add_beacon_node(beacon_config.clone()).await?; + network + .add_beacon_node(beacon_config.clone(), false) + .await?; + } + + /* + * One by one, add proposer nodes to the network. + */ + for _ in 0..proposer_nodes - 1 { + println!("Adding a proposer node"); + network.add_beacon_node(beacon_config.clone(), true).await?; } /* @@ -310,7 +323,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { */ println!( "Simulation complete. 
Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), + network.beacon_node_count() + network.proposer_node_count(), network.validator_client_count() ); diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 3e481df885..e35870d126 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -25,6 +25,7 @@ pub const TERMINAL_BLOCK: u64 = 64; pub struct Inner { pub context: RuntimeContext, pub beacon_nodes: RwLock>>, + pub proposer_nodes: RwLock>>, pub validator_clients: RwLock>>, pub execution_nodes: RwLock>>, } @@ -97,6 +98,7 @@ impl LocalNetwork { inner: Arc::new(Inner { context, beacon_nodes: RwLock::new(vec![beacon_node]), + proposer_nodes: RwLock::new(vec![]), execution_nodes: RwLock::new(execution_node), validator_clients: RwLock::new(vec![]), }), @@ -111,6 +113,14 @@ impl LocalNetwork { self.beacon_nodes.read().len() } + /// Returns the number of proposer nodes in the network. + /// + /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected + /// (e.g., another Lighthouse process on the same machine.) + pub fn proposer_node_count(&self) -> usize { + self.proposer_nodes.read().len() + } + /// Returns the number of validator clients in the network. /// /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected @@ -120,7 +130,11 @@ impl LocalNetwork { } /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. 
- pub async fn add_beacon_node(&self, mut beacon_config: ClientConfig) -> Result<(), String> { + pub async fn add_beacon_node( + &self, + mut beacon_config: ClientConfig, + is_proposer: bool, + ) -> Result<(), String> { let self_1 = self.clone(); let count = self.beacon_node_count() as u16; println!("Adding beacon node.."); @@ -135,6 +149,7 @@ impl LocalNetwork { .enr() .expect("bootnode must have a network"), ); + let count = (self.beacon_node_count() + self.proposer_node_count()) as u16; beacon_config.network.set_ipv4_listening_address( std::net::Ipv4Addr::UNSPECIFIED, BOOTNODE_PORT + count, @@ -143,6 +158,7 @@ impl LocalNetwork { beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT + count); beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT + count); beacon_config.network.discv5_config.table_filter = |_| true; + beacon_config.network.proposer_only = is_proposer; } if let Some(el_config) = &mut beacon_config.execution_layer { let config = MockExecutionConfig { @@ -173,7 +189,11 @@ impl LocalNetwork { beacon_config, ) .await?; - self_1.beacon_nodes.write().push(beacon_node); + if is_proposer { + self_1.proposer_nodes.write().push(beacon_node); + } else { + self_1.beacon_nodes.write().push(beacon_node); + } Ok(()) } @@ -200,6 +220,16 @@ impl LocalNetwork { .http_api_listen_addr() .expect("Must have http started") }; + // If there is a proposer node for the same index, we will use that for proposing + let proposer_socket_addr = { + let read_lock = self.proposer_nodes.read(); + read_lock.get(beacon_node).map(|proposer_node| { + proposer_node + .client + .http_api_listen_addr() + .expect("Must have http started") + }) + }; let beacon_node = SensitiveUrl::parse( format!("http://{}:{}", socket_addr.ip(), socket_addr.port()).as_str(), @@ -210,6 +240,21 @@ impl LocalNetwork { } else { vec![beacon_node] }; + + // If we have a proposer node established, use it. 
+ if let Some(proposer_socket_addr) = proposer_socket_addr { + let url = SensitiveUrl::parse( + format!( + "http://{}:{}", + proposer_socket_addr.ip(), + proposer_socket_addr.port() + ) + .as_str(), + ) + .unwrap(); + validator_config.proposer_nodes = vec![url]; + } + let validator_client = LocalValidatorClient::production_with_insecure_keypairs( context, validator_config, @@ -223,9 +268,11 @@ impl LocalNetwork { /// For all beacon nodes in `Self`, return a HTTP client to access each nodes HTTP API. pub fn remote_nodes(&self) -> Result, String> { let beacon_nodes = self.beacon_nodes.read(); + let proposer_nodes = self.proposer_nodes.read(); beacon_nodes .iter() + .chain(proposer_nodes.iter()) .map(|beacon_node| beacon_node.remote_node()) .collect() } diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index f1f6dc4426..b7598f9fa7 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -100,7 +100,9 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { */ for _ in 0..node_count - 1 { - network.add_beacon_node(beacon_config.clone()).await?; + network + .add_beacon_node(beacon_config.clone(), false) + .await?; } /* @@ -151,7 +153,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { */ println!( "Simulation complete. 
Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), + network.beacon_node_count() + network.proposer_node_count(), network.validator_client_count() ); diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index c437457c20..5eaed809df 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -228,7 +228,7 @@ pub async fn verify_one_node_sync( ) .await; // Add a beacon node - network.add_beacon_node(beacon_config).await?; + network.add_beacon_node(beacon_config, false).await?; // Check every `epoch_duration` if nodes are synced // limited to at most `sync_timeout` epochs let mut interval = tokio::time::interval(epoch_duration); @@ -265,8 +265,10 @@ pub async fn verify_two_nodes_sync( ) .await; // Add beacon nodes - network.add_beacon_node(beacon_config.clone()).await?; - network.add_beacon_node(beacon_config).await?; + network + .add_beacon_node(beacon_config.clone(), false) + .await?; + network.add_beacon_node(beacon_config, false).await?; // Check every `epoch_duration` if nodes are synced // limited to at most `sync_timeout` epochs let mut interval = tokio::time::interval(epoch_duration); @@ -305,8 +307,10 @@ pub async fn verify_in_between_sync( ) .await; // Add two beacon nodes - network.add_beacon_node(beacon_config.clone()).await?; - network.add_beacon_node(beacon_config).await?; + network + .add_beacon_node(beacon_config.clone(), false) + .await?; + network.add_beacon_node(beacon_config, false).await?; // Delay before adding additional syncing nodes. 
epoch_delay( Epoch::new(sync_timeout - 5), @@ -315,7 +319,7 @@ pub async fn verify_in_between_sync( ) .await; // Add a beacon node - network.add_beacon_node(config1.clone()).await?; + network.add_beacon_node(config1.clone(), false).await?; // Check every `epoch_duration` if nodes are synced // limited to at most `sync_timeout` epochs let mut interval = tokio::time::interval(epoch_duration); diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 6da9f2f4a6..a25b3c31c1 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] state_processing = { path = "../../consensus/state_processing" } types = { path = "../../consensus/types" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" beacon_chain = { path = "../../beacon_node/beacon_chain" } lazy_static = "1.4.0" tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index d581eba965..7e7fd23e0d 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -2,7 +2,7 @@ use super::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use state_processing::{ per_block_processing, per_block_processing::errors::ExitInvalid, BlockProcessingError, - BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use types::{BeaconBlock, BeaconState, Epoch, EthSpec, SignedBeaconBlock}; @@ -69,6 +69,7 @@ impl ExitTest { state, block, BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &E::default_spec(), diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 8ce5830062..c0fbf66723 100644 --- 
a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -3,8 +3,6 @@ name = "web3signer_tests" version = "0.1.0" edition = "2021" -build = "build.rs" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -27,9 +25,7 @@ serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" eth2_network_config = { path = "../../common/eth2_network_config" } - -[build-dependencies] -tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } -reqwest = { version = "0.11.0", features = ["json","stream"] } serde_json = "1.0.58" zip = "0.5.13" +lazy_static = "1.4.0" +parking_lot = "0.12.0" \ No newline at end of file diff --git a/testing/web3signer_tests/build.rs b/testing/web3signer_tests/src/get_web3signer.rs similarity index 88% rename from testing/web3signer_tests/build.rs rename to testing/web3signer_tests/src/get_web3signer.rs index a55c39376a..800feb204a 100644 --- a/testing/web3signer_tests/build.rs +++ b/testing/web3signer_tests/src/get_web3signer.rs @@ -15,17 +15,6 @@ use zip::ZipArchive; /// Use `Some("21.8.1")` to download a specific version. const FIXED_VERSION_STRING: Option<&str> = None; -#[tokio::main] -async fn main() { - let out_dir = env::var("OUT_DIR").unwrap(); - - // Read a Github API token from the environment. This is intended to prevent rate-limits on CI. - // We use a name that is unlikely to accidentally collide with anything the user has configured. - let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN"); - - download_binary(out_dir.into(), github_token.as_deref().unwrap_or("")).await; -} - pub async fn download_binary(dest_dir: PathBuf, github_token: &str) { let version_file = dest_dir.join("version"); diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 16bffd04f9..dd17ae23b1 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -9,16 +9,21 @@ //! 
- Lighthouse can issue valid requests to Web3Signer. //! - The signatures generated by Web3Signer are identical to those which Lighthouse generates. //! -//! There is a build script in this crate which obtains the latest version of Web3Signer and makes -//! it available via the `OUT_DIR`. +//! There is a `download_binary` function in the `get_web3signer` module which obtains the latest version of Web3Signer and makes +//! it available via the `TEMP_DIR`. +#![cfg(all(test, unix, not(debug_assertions)))] + +mod get_web3signer; -#[cfg(all(test, unix, not(debug_assertions)))] mod tests { + use crate::get_web3signer::download_binary; use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, }; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; + use lazy_static::lazy_static; + use parking_lot::Mutex; use reqwest::Client; use serde::Serialize; use slot_clock::{SlotClock, TestingSlotClock}; @@ -31,7 +36,8 @@ mod tests { use std::sync::Arc; use std::time::{Duration, Instant}; use task_executor::TaskExecutor; - use tempfile::TempDir; + use tempfile::{tempdir, TempDir}; + use tokio::sync::OnceCell; use tokio::time::sleep; use types::*; use url::Url; @@ -51,6 +57,13 @@ mod tests { /// debugging. const SUPPRESS_WEB3SIGNER_LOGS: bool = true; + lazy_static! { + static ref TEMP_DIR: Arc> = Arc::new(Mutex::new( + tempdir().expect("Failed to create temporary directory") + )); + static ref GET_WEB3SIGNER_BIN: OnceCell<()> = OnceCell::new(); + } + type E = MainnetEthSpec; /// This marker trait is implemented for objects that we wish to compare to ensure Web3Signer @@ -99,7 +112,10 @@ mod tests { /// The location of the Web3Signer binary generated by the build script. 
fn web3signer_binary() -> PathBuf { - PathBuf::from(env::var("OUT_DIR").unwrap()) + TEMP_DIR + .lock() + .path() + .to_path_buf() .join("web3signer") .join("bin") .join("web3signer") @@ -143,6 +159,19 @@ mod tests { impl Web3SignerRig { pub async fn new(network: &str, listen_address: &str, listen_port: u16) -> Self { + GET_WEB3SIGNER_BIN + .get_or_init(|| async { + // Read a Github API token from the environment. This is intended to prevent rate-limits on CI. + // We use a name that is unlikely to accidentally collide with anything the user has configured. + let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN"); + download_binary( + TEMP_DIR.lock().path().to_path_buf(), + github_token.as_deref().unwrap_or(""), + ) + .await; + }) + .await; + let keystore_dir = TempDir::new().unwrap(); let keypair = testing_keypair(); let keystore = diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index b29d97d60d..4095a20470 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -12,7 +12,7 @@ path = "src/lib.rs" tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } [dependencies] -tree_hash = "0.4.1" +tree_hash = "0.5.0" clap = "2.33.3" slashing_protection = { path = "./slashing_protection" } slot_clock = { path = "../common/slot_clock" } @@ -45,7 +45,7 @@ lighthouse_version = { path = "../common/lighthouse_version" } warp_utils = { path = "../common/warp_utils" } warp = "0.3.2" hyper = "0.14.4" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" libsecp256k1 = "0.7.0" ring = "0.16.19" rand = { version = "0.8.5", features = ["small_rng"] } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 631e54dc4e..278dc22d0d 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -18,7 +18,7 @@ r2d2_sqlite = "0.21.0" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.58" 
-eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" filesystem = { path = "../../common/filesystem" } arbitrary = { version = "1.0", features = ["derive"], optional = true } diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs index 3793766b6a..99d37c38b9 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -9,7 +9,7 @@ use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeMetadata { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub interchange_format_version: u64, pub genesis_validators_root: Hash256, } @@ -27,7 +27,7 @@ pub struct InterchangeData { #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedBlock { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub slot: Slot, #[serde(skip_serializing_if = "Option::is_none")] pub signing_root: Option, @@ -37,9 +37,9 @@ pub struct SignedBlock { #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedAttestation { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub source_epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub target_epoch: Epoch, #[serde(skip_serializing_if = "Option::is_none")] pub signing_root: Option, diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 3b37492377..61a5a094cd 100644 --- a/validator_client/src/block_service.rs +++ 
b/validator_client/src/block_service.rs @@ -7,8 +7,11 @@ use crate::{ }; use crate::{http_metrics::metrics, validator_store::ValidatorStore}; use environment::RuntimeContext; +use eth2::BeaconNodeHttpClient; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; +use std::fmt::Debug; +use std::future::Future; use std::ops::Deref; use std::sync::Arc; use std::time::Duration; @@ -45,6 +48,7 @@ pub struct BlockServiceBuilder { validator_store: Option>>, slot_clock: Option>, beacon_nodes: Option>>, + proposer_nodes: Option>>, context: Option>, graffiti: Option, graffiti_file: Option, @@ -57,6 +61,7 @@ impl BlockServiceBuilder { validator_store: None, slot_clock: None, beacon_nodes: None, + proposer_nodes: None, context: None, graffiti: None, graffiti_file: None, @@ -79,6 +84,11 @@ impl BlockServiceBuilder { self } + pub fn proposer_nodes(mut self, proposer_nodes: Arc>) -> Self { + self.proposer_nodes = Some(proposer_nodes); + self + } + pub fn runtime_context(mut self, context: RuntimeContext) -> Self { self.context = Some(context); self @@ -114,6 +124,7 @@ impl BlockServiceBuilder { context: self .context .ok_or("Cannot build BlockService without runtime_context")?, + proposer_nodes: self.proposer_nodes, graffiti: self.graffiti, graffiti_file: self.graffiti_file, block_delay: self.block_delay, @@ -122,11 +133,81 @@ impl BlockServiceBuilder { } } +// Combines a set of non-block-proposing `beacon_nodes` and only-block-proposing +// `proposer_nodes`. +pub struct ProposerFallback { + beacon_nodes: Arc>, + proposer_nodes: Option>>, +} + +impl ProposerFallback { + // Try `func` on `self.proposer_nodes` first. If that doesn't work, try `self.beacon_nodes`. 
+ pub async fn first_success_try_proposers_first<'a, F, O, Err, R>( + &'a self, + require_synced: RequireSynced, + offline_on_failure: OfflineOnFailure, + func: F, + ) -> Result> + where + F: Fn(&'a BeaconNodeHttpClient) -> R + Clone, + R: Future>, + Err: Debug, + { + // If there are proposer nodes, try calling `func` on them and return early if they are successful. + if let Some(proposer_nodes) = &self.proposer_nodes { + if let Ok(result) = proposer_nodes + .first_success(require_synced, offline_on_failure, func.clone()) + .await + { + return Ok(result); + } + } + + // If the proposer nodes failed, try on the non-proposer nodes. + self.beacon_nodes + .first_success(require_synced, offline_on_failure, func) + .await + } + + // Try `func` on `self.beacon_nodes` first. If that doesn't work, try `self.proposer_nodes`. + pub async fn first_success_try_proposers_last<'a, F, O, Err, R>( + &'a self, + require_synced: RequireSynced, + offline_on_failure: OfflineOnFailure, + func: F, + ) -> Result> + where + F: Fn(&'a BeaconNodeHttpClient) -> R + Clone, + R: Future>, + Err: Debug, + { + // Try running `func` on the non-proposer beacon nodes. + let beacon_nodes_result = self + .beacon_nodes + .first_success(require_synced, offline_on_failure, func.clone()) + .await; + + match (beacon_nodes_result, &self.proposer_nodes) { + // The non-proposer node call succeed, return the result. + (Ok(success), _) => Ok(success), + // The non-proposer node call failed, but we don't have any proposer nodes. Return an error. + (Err(e), None) => Err(e), + // The non-proposer node call failed, try the same call on the proposer nodes. + (Err(_), Some(proposer_nodes)) => { + proposer_nodes + .first_success(require_synced, offline_on_failure, func) + .await + } + } + } +} + /// Helper to minimise `Arc` usage. 
pub struct Inner { validator_store: Arc>, slot_clock: Arc, beacon_nodes: Arc>, + proposer_nodes: Option>>, context: RuntimeContext, graffiti: Option, graffiti_file: Option, @@ -334,16 +415,23 @@ impl BlockService { let self_ref = &self; let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; + let proposer_fallback = ProposerFallback { + beacon_nodes: self.beacon_nodes.clone(), + proposer_nodes: self.proposer_nodes.clone(), + }; info!( log, "Requesting unsigned block"; "slot" => slot.as_u64(), ); + // Request block from first responsive beacon node. - let block = self - .beacon_nodes - .first_success( + // + // Try the proposer nodes last, since it's likely that they don't have a + // great view of attestations on the network. + let block = proposer_fallback + .first_success_try_proposers_last( RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { @@ -424,8 +512,12 @@ impl BlockService { ); // Publish block with first available beacon node. - self.beacon_nodes - .first_success( + // + // Try the proposer nodes first, since we've likely gone to efforts to + // protect them from DoS attacks and they're most likely to successfully + // publish a block. + proposer_fallback + .first_success_try_proposers_first( RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async { diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 171bc67af7..5abc211d83 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -26,6 +26,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .takes_value(true), ) + .arg( + Arg::with_name("proposer-nodes") + .long("proposer-nodes") + .value_name("NETWORK_ADDRESSES") + .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ + These specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes." 
+ ) + .takes_value(true), + ) .arg( Arg::with_name("disable-run-on-all") .long("disable-run-on-all") @@ -118,7 +127,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("CERTIFICATE-FILES") .takes_value(true) .help("Comma-separated paths to custom TLS certificates to use when connecting \ - to a beacon node. These certificates must be in PEM format and are used \ + to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ in addition to the OS trust store. Commas must only be used as a \ delimiter, and must not be part of the certificate path.") ) diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index d121bdecbb..1330b26bde 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -29,6 +29,8 @@ pub struct Config { /// /// Should be similar to `["http://localhost:8080"]` pub beacon_nodes: Vec, + /// An optional beacon node used for block proposals only. + pub proposer_nodes: Vec, /// If true, the validator client will still poll for duties and produce blocks even if the /// beacon node is not synced at startup. pub allow_unsynced_beacon_node: bool, @@ -95,6 +97,7 @@ impl Default for Config { validator_dir, secrets_dir, beacon_nodes, + proposer_nodes: Vec::new(), allow_unsynced_beacon_node: false, disable_auto_discover: false, init_slashing_protection: false, @@ -186,6 +189,14 @@ impl Config { .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?]; } + if let Some(proposer_nodes) = parse_optional::(cli_args, "proposer_nodes")? 
{ + config.proposer_nodes = proposer_nodes + .split(',') + .map(SensitiveUrl::parse) + .collect::>() + .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?; + } + if cli_args.is_present("delete-lockfiles") { warn!( log, diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index c335c67ab1..3cab6e7821 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -16,12 +16,15 @@ use crate::{ validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}, }; use environment::RuntimeContext; -use eth2::types::{AttesterData, BeaconCommitteeSubscription, ProposerData, StateId, ValidatorId}; +use eth2::types::{ + AttesterData, BeaconCommitteeSubscription, DutiesResponse, ProposerData, StateId, ValidatorId, +}; use futures::{stream, StreamExt}; use parking_lot::RwLock; use safe_arith::ArithError; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; +use std::cmp::min; use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; use std::sync::Arc; use std::time::Duration; @@ -54,6 +57,11 @@ const SELECTION_PROOF_SCHEDULE_DENOM: u32 = 2; /// flag in the cli to enable collection of per validator metrics. const VALIDATOR_METRICS_MIN_COUNT: usize = 64; +/// The number of validators to request duty information for in the initial request. +/// The initial request is used to determine if further requests are required, so that it +/// reduces the amount of data that needs to be transferred. 
+const INITIAL_DUTIES_QUERY_SIZE: usize = 1; + #[derive(Debug)] pub enum Error { UnableToReadSlotClock, @@ -531,7 +539,6 @@ async fn poll_beacon_attesters( current_epoch, &local_indices, &local_pubkeys, - current_slot, ) .await { @@ -544,6 +551,8 @@ async fn poll_beacon_attesters( ) } + update_per_validator_duty_metrics::(duties_service, current_epoch, current_slot); + drop(current_epoch_timer); let next_epoch_timer = metrics::start_timer_vec( &metrics::DUTIES_SERVICE_TIMES, @@ -551,14 +560,9 @@ async fn poll_beacon_attesters( ); // Download the duties and update the duties for the next epoch. - if let Err(e) = poll_beacon_attesters_for_epoch( - duties_service, - next_epoch, - &local_indices, - &local_pubkeys, - current_slot, - ) - .await + if let Err(e) = + poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys) + .await { error!( log, @@ -569,6 +573,8 @@ async fn poll_beacon_attesters( ) } + update_per_validator_duty_metrics::(duties_service, next_epoch, current_slot); + drop(next_epoch_timer); let subscriptions_timer = metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::SUBSCRIPTIONS]); @@ -655,7 +661,6 @@ async fn poll_beacon_attesters_for_epoch( epoch: Epoch, local_indices: &[u64], local_pubkeys: &HashSet, - current_slot: Slot, ) -> Result<(), Error> { let log = duties_service.context.log(); @@ -674,84 +679,69 @@ async fn poll_beacon_attesters_for_epoch( &[metrics::UPDATE_ATTESTERS_FETCH], ); - let response = duties_service - .beacon_nodes - .first_success( - duties_service.require_synced, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::ATTESTER_DUTIES_HTTP_POST], - ); - beacon_node - .post_validator_duties_attester(epoch, local_indices) - .await - }, - ) - .await - .map_err(|e| Error::FailedToDownloadAttesters(e.to_string()))?; + // Request duties for all uninitialized validators. 
If there isn't any, we will just request for + // `INITIAL_DUTIES_QUERY_SIZE` validators. We use the `dependent_root` in the response to + // determine whether validator duties need to be updated. This is to ensure that we don't + // request for extra data unless necessary in order to save on network bandwidth. + let uninitialized_validators = + get_uninitialized_validators(duties_service, &epoch, local_pubkeys); + let indices_to_request = if !uninitialized_validators.is_empty() { + uninitialized_validators.as_slice() + } else { + &local_indices[0..min(INITIAL_DUTIES_QUERY_SIZE, local_indices.len())] + }; + + let response = + post_validator_duties_attester(duties_service, epoch, indices_to_request).await?; + let dependent_root = response.dependent_root; + + // Find any validators which have conflicting (epoch, dependent_root) values or missing duties for the epoch. + let validators_to_update: Vec<_> = { + // Avoid holding the read-lock for any longer than required. + let attesters = duties_service.attesters.read(); + local_pubkeys + .iter() + .filter(|pubkey| { + attesters.get(pubkey).map_or(true, |duties| { + duties + .get(&epoch) + .map_or(true, |(prior, _)| *prior != dependent_root) + }) + }) + .collect::>() + }; + + if validators_to_update.is_empty() { + // No validators have conflicting (epoch, dependent_root) values or missing duties for the epoch. + return Ok(()); + } + + // Filter out validators which have already been requested. + let initial_duties = &response.data; + let indices_to_request = validators_to_update + .iter() + .filter(|&&&pubkey| !initial_duties.iter().any(|duty| duty.pubkey == pubkey)) + .filter_map(|pubkey| duties_service.validator_store.validator_index(pubkey)) + .collect::>(); + + let new_duties = if !indices_to_request.is_empty() { + post_validator_duties_attester(duties_service, epoch, indices_to_request.as_slice()) + .await? 
+ .data + .into_iter() + .chain(response.data) + .collect::>() + } else { + response.data + }; drop(fetch_timer); + let _store_timer = metrics::start_timer_vec( &metrics::DUTIES_SERVICE_TIMES, &[metrics::UPDATE_ATTESTERS_STORE], ); - let dependent_root = response.dependent_root; - - // Filter any duties that are not relevant or already known. - let new_duties = { - // Avoid holding the read-lock for any longer than required. - let attesters = duties_service.attesters.read(); - response - .data - .into_iter() - .filter(|duty| { - if duties_service.per_validator_metrics() { - let validator_index = duty.validator_index; - let duty_slot = duty.slot; - if let Some(existing_slot_gauge) = - get_int_gauge(&ATTESTATION_DUTY, &[&validator_index.to_string()]) - { - let existing_slot = Slot::new(existing_slot_gauge.get() as u64); - let existing_epoch = existing_slot.epoch(E::slots_per_epoch()); - - // First condition ensures that we switch to the next epoch duty slot - // once the current epoch duty slot passes. - // Second condition is to ensure that next epoch duties don't override - // current epoch duties. - if existing_slot < current_slot - || (duty_slot.epoch(E::slots_per_epoch()) <= existing_epoch - && duty_slot > current_slot - && duty_slot != existing_slot) - { - existing_slot_gauge.set(duty_slot.as_u64() as i64); - } - } else { - set_int_gauge( - &ATTESTATION_DUTY, - &[&validator_index.to_string()], - duty_slot.as_u64() as i64, - ); - } - } - - local_pubkeys.contains(&duty.pubkey) && { - // Only update the duties if either is true: - // - // - There were no known duties for this epoch. - // - The dependent root has changed, signalling a re-org. 
- attesters.get(&duty.pubkey).map_or(true, |duties| { - duties - .get(&epoch) - .map_or(true, |(prior, _)| *prior != dependent_root) - }) - } - }) - .collect::>() - }; - debug!( log, "Downloaded attester duties"; @@ -799,6 +789,89 @@ async fn poll_beacon_attesters_for_epoch( Ok(()) } +/// Get a filtered list of local validators for which we don't already know their duties for that epoch +fn get_uninitialized_validators( + duties_service: &Arc>, + epoch: &Epoch, + local_pubkeys: &HashSet, +) -> Vec { + let attesters = duties_service.attesters.read(); + local_pubkeys + .iter() + .filter(|pubkey| { + attesters + .get(pubkey) + .map_or(true, |duties| !duties.contains_key(epoch)) + }) + .filter_map(|pubkey| duties_service.validator_store.validator_index(pubkey)) + .collect::>() +} + +fn update_per_validator_duty_metrics( + duties_service: &Arc>, + epoch: Epoch, + current_slot: Slot, +) { + if duties_service.per_validator_metrics() { + let attesters = duties_service.attesters.read(); + attesters.values().for_each(|attester_duties_by_epoch| { + if let Some((_, duty_and_proof)) = attester_duties_by_epoch.get(&epoch) { + let duty = &duty_and_proof.duty; + let validator_index = duty.validator_index; + let duty_slot = duty.slot; + if let Some(existing_slot_gauge) = + get_int_gauge(&ATTESTATION_DUTY, &[&validator_index.to_string()]) + { + let existing_slot = Slot::new(existing_slot_gauge.get() as u64); + let existing_epoch = existing_slot.epoch(E::slots_per_epoch()); + + // First condition ensures that we switch to the next epoch duty slot + // once the current epoch duty slot passes. + // Second condition is to ensure that next epoch duties don't override + // current epoch duties. 
+ if existing_slot < current_slot + || (duty_slot.epoch(E::slots_per_epoch()) <= existing_epoch + && duty_slot > current_slot + && duty_slot != existing_slot) + { + existing_slot_gauge.set(duty_slot.as_u64() as i64); + } + } else { + set_int_gauge( + &ATTESTATION_DUTY, + &[&validator_index.to_string()], + duty_slot.as_u64() as i64, + ); + } + } + }); + } +} + +async fn post_validator_duties_attester( + duties_service: &Arc>, + epoch: Epoch, + validator_indices: &[u64], +) -> Result>, Error> { + duties_service + .beacon_nodes + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::ATTESTER_DUTIES_HTTP_POST], + ); + beacon_node + .post_validator_duties_attester(epoch, validator_indices) + .await + }, + ) + .await + .map_err(|e| Error::FailedToDownloadAttesters(e.to_string())) +} + /// Compute the attestation selection proofs for the `duties` and add them to the `attesters` map. /// /// Duties are computed in batches each slot. 
If a re-org is detected then the process will diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs index b42cd11edd..e688792ddc 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/src/http_api/api_secret.rs @@ -60,7 +60,7 @@ impl ApiSecret { // Create and write the secret key to file with appropriate permissions create_with_600_perms( &sk_path, - eth2_serde_utils::hex::encode(sk.serialize()).as_bytes(), + serde_utils::hex::encode(sk.serialize()).as_bytes(), ) .map_err(|e| { format!( @@ -75,7 +75,7 @@ impl ApiSecret { format!( "{}{}", PK_PREFIX, - eth2_serde_utils::hex::encode(&pk.serialize_compressed()[..]) + serde_utils::hex::encode(&pk.serialize_compressed()[..]) ) .as_bytes(), ) @@ -90,7 +90,7 @@ impl ApiSecret { let sk = fs::read(&sk_path) .map_err(|e| format!("cannot read {}: {}", SK_FILENAME, e)) .and_then(|bytes| { - eth2_serde_utils::hex::decode(&String::from_utf8_lossy(&bytes)) + serde_utils::hex::decode(&String::from_utf8_lossy(&bytes)) .map_err(|_| format!("{} should be 0x-prefixed hex", PK_FILENAME)) }) .and_then(|bytes| { @@ -114,7 +114,7 @@ impl ApiSecret { let hex = String::from_utf8(bytes).map_err(|_| format!("{} is not utf8", SK_FILENAME))?; if let Some(stripped) = hex.strip_prefix(PK_PREFIX) { - eth2_serde_utils::hex::decode(stripped) + serde_utils::hex::decode(stripped) .map_err(|_| format!("{} should be 0x-prefixed hex", SK_FILENAME)) } else { Err(format!("unable to parse {}", SK_FILENAME)) @@ -153,7 +153,7 @@ impl ApiSecret { /// Returns the public key of `self` as a 0x-prefixed hex string. fn pubkey_string(&self) -> String { - eth2_serde_utils::hex::encode(&self.pk.serialize_compressed()[..]) + serde_utils::hex::encode(&self.pk.serialize_compressed()[..]) } /// Returns the API token. 
@@ -205,7 +205,7 @@ impl ApiSecret { let message = Message::parse_slice(digest(&SHA256, input).as_ref()).expect("sha256 is 32 bytes"); let (signature, _) = libsecp256k1::sign(&message, &sk); - eth2_serde_utils::hex::encode(signature.serialize_der().as_ref()) + serde_utils::hex::encode(signature.serialize_der().as_ref()) } } } diff --git a/validator_client/src/http_api/create_signed_voluntary_exit.rs b/validator_client/src/http_api/create_signed_voluntary_exit.rs new file mode 100644 index 0000000000..b777d15806 --- /dev/null +++ b/validator_client/src/http_api/create_signed_voluntary_exit.rs @@ -0,0 +1,69 @@ +use crate::validator_store::ValidatorStore; +use bls::{PublicKey, PublicKeyBytes}; +use slog::{info, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use types::{Epoch, EthSpec, SignedVoluntaryExit, VoluntaryExit}; + +pub async fn create_signed_voluntary_exit( + pubkey: PublicKey, + maybe_epoch: Option, + validator_store: Arc>, + slot_clock: T, + log: Logger, +) -> Result { + let epoch = match maybe_epoch { + Some(epoch) => epoch, + None => get_current_epoch::(slot_clock).ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to determine current epoch".to_string()) + })?, + }; + + let pubkey_bytes = PublicKeyBytes::from(pubkey); + if !validator_store.has_validator(&pubkey_bytes) { + return Err(warp_utils::reject::custom_not_found(format!( + "{} is disabled or not managed by this validator client", + pubkey_bytes.as_hex_string() + ))); + } + + let validator_index = validator_store + .validator_index(&pubkey_bytes) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "The validator index for {} is not known. 
The validator client \ + may still be initializing or the validator has not yet had a \ + deposit processed.", + pubkey_bytes.as_hex_string() + )) + })?; + + let voluntary_exit = VoluntaryExit { + epoch, + validator_index, + }; + + info!( + log, + "Signing voluntary exit"; + "validator" => pubkey_bytes.as_hex_string(), + "epoch" => epoch + ); + + let signed_voluntary_exit = validator_store + .sign_voluntary_exit(pubkey_bytes, voluntary_exit) + .await + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Failed to sign voluntary exit: {:?}", + e + )) + })?; + + Ok(signed_voluntary_exit) +} + +/// Calculates the current epoch from the genesis time and current time. +fn get_current_epoch(slot_clock: T) -> Option { + slot_clock.now().map(|s| s.epoch(E::slots_per_epoch())) +} diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index 61f18c04ef..edc0db5523 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -173,7 +173,7 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, gas_limit: request.gas_limit, builder_proposals: request.builder_proposals, voting_pubkey, - eth1_deposit_tx_data: eth2_serde_utils::hex::encode(ð1_deposit_data.rlp), + eth1_deposit_tx_data: serde_utils::hex::encode(ð1_deposit_data.rlp), deposit_gwei: request.deposit_gwei, }); } diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 7402bff9c8..3fb1dd286d 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1,4 +1,5 @@ mod api_secret; +mod create_signed_voluntary_exit; mod create_validator; mod keystores; mod remotekeys; @@ -6,6 +7,7 @@ mod tests; pub mod test_utils; +use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit; use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ mnemonic_from_phrase, @@ 
-76,6 +78,7 @@ pub struct Context { pub spec: ChainSpec, pub config: Config, pub log: Logger, + pub slot_clock: T, pub _phantom: PhantomData, } @@ -211,6 +214,9 @@ pub fn serve( let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); + let inner_slot_clock = ctx.slot_clock.clone(); + let slot_clock_filter = warp::any().map(move || inner_slot_clock.clone()); + let inner_spec = Arc::new(ctx.spec.clone()); let spec_filter = warp::any().map(move || inner_spec.clone()); @@ -972,6 +978,46 @@ pub fn serve( ) .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // POST /eth/v1/validator/{pubkey}/voluntary_exit + let post_validators_voluntary_exits = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("voluntary_exit")) + .and(warp::query::()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(slot_clock_filter) + .and(log_filter.clone()) + .and(signer.clone()) + .and(task_executor_filter.clone()) + .and_then( + |pubkey: PublicKey, + query: api_types::VoluntaryExitQuery, + validator_store: Arc>, + slot_clock: T, + log, + signer, + task_executor: TaskExecutor| { + blocking_signed_json_task(signer, move || { + if let Some(handle) = task_executor.handle() { + let signed_voluntary_exit = + handle.block_on(create_signed_voluntary_exit( + pubkey, + query.epoch, + validator_store, + slot_clock, + log, + ))?; + Ok(signed_voluntary_exit) + } else { + Err(warp_utils::reject::custom_server_error( + "Lighthouse shutting down".into(), + )) + } + }) + }, + ); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -1084,6 +1130,7 @@ pub fn serve( .or(post_validators_keystore) .or(post_validators_mnemonic) .or(post_validators_web3signer) + .or(post_validators_voluntary_exits) .or(post_fee_recipient) .or(post_gas_limit) .or(post_std_keystores) diff --git a/validator_client/src/http_api/test_utils.rs 
b/validator_client/src/http_api/test_utils.rs index 5f9969a7ae..23a37bbc97 100644 --- a/validator_client/src/http_api/test_utils.rs +++ b/validator_client/src/http_api/test_utils.rs @@ -105,7 +105,7 @@ impl ApiTester { Hash256::repeat_byte(42), spec, Some(Arc::new(DoppelgangerService::new(log.clone()))), - slot_clock, + slot_clock.clone(), &config, test_runtime.task_executor.clone(), log.clone(), @@ -135,6 +135,7 @@ impl ApiTester { store_passwords_in_secrets_dir: false, }, log, + slot_clock, _phantom: PhantomData, }); let ctx = context; @@ -363,7 +364,7 @@ impl ApiTester { let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); - let deposit_bytes = eth2_serde_utils::hex::decode(&item.eth1_deposit_tx_data).unwrap(); + let deposit_bytes = serde_utils::hex::decode(&item.eth1_deposit_tx_data).unwrap(); let (deposit_data, _) = decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index d453d7038a..1c593b1a4e 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -45,6 +45,7 @@ struct ApiTester { initialized_validators: Arc>, validator_store: Arc>, url: SensitiveUrl, + slot_clock: TestingSlotClock, _server_shutdown: oneshot::Sender<()>, _validator_dir: TempDir, _runtime_shutdown: exit_future::Signal, @@ -90,8 +91,12 @@ impl ApiTester { let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); - let slot_clock = - TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let genesis_time: u64 = 0; + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(genesis_time), + Duration::from_secs(1), + ); let (runtime_shutdown, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); @@ -101,9 
+106,9 @@ impl ApiTester { initialized_validators, slashing_protection, Hash256::repeat_byte(42), - spec, + spec.clone(), Some(Arc::new(DoppelgangerService::new(log.clone()))), - slot_clock, + slot_clock.clone(), &config, executor.clone(), log.clone(), @@ -129,7 +134,8 @@ impl ApiTester { listen_port: 0, allow_origin: None, }, - log, + log: log.clone(), + slot_clock: slot_clock.clone(), _phantom: PhantomData, }); let ctx = context.clone(); @@ -156,6 +162,7 @@ impl ApiTester { initialized_validators, validator_store, url, + slot_clock, _server_shutdown: shutdown_tx, _validator_dir: validator_dir, _runtime_shutdown: runtime_shutdown, @@ -358,7 +365,7 @@ impl ApiTester { let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); let deposit_bytes = - eth2_serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); + serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); let (deposit_data, _) = decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) @@ -494,6 +501,33 @@ impl ApiTester { self } + pub async fn test_sign_voluntary_exits(self, index: usize, maybe_epoch: Option) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + // manually setting validator index in `ValidatorStore` + self.initialized_validators + .write() + .set_index(&validator.voting_pubkey, 0); + + let expected_exit_epoch = maybe_epoch.unwrap_or_else(|| self.get_current_epoch()); + + let resp = self + .client + .post_validator_voluntary_exit(&validator.voting_pubkey, maybe_epoch) + .await; + + assert!(resp.is_ok()); + assert_eq!(resp.unwrap().message.epoch, expected_exit_epoch); + + self + } + + fn get_current_epoch(&self) -> Epoch { + self.slot_clock + .now() + .map(|s| s.epoch(E::slots_per_epoch())) + .unwrap() + } + pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; 
@@ -778,6 +812,29 @@ fn hd_validator_creation() { }); } +#[test] +fn validator_exit() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .test_sign_voluntary_exits(0, None) + .await + .test_sign_voluntary_exits(0, Some(Epoch::new(256))) + .await; + }); +} + #[test] fn validator_enabling() { let runtime = build_runtime(); diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index b4e400c3e7..8a52a4d35e 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -88,6 +88,11 @@ lazy_static::lazy_static! { "Total count of attempted SyncSelectionProof signings", &["status"] ); + pub static ref SIGNED_VOLUNTARY_EXITS_TOTAL: Result = try_create_int_counter_vec( + "vc_signed_voluntary_exits_total", + "Total count of VoluntaryExit signings", + &["status"] + ); pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result = try_create_int_counter_vec( "builder_validator_registrations_total", "Total count of ValidatorRegistrationData signings", diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 37dfcc2106..8302edd678 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -1015,7 +1015,23 @@ impl InitializedValidators { let cache = KeyCache::open_or_create(&self.validators_dir).map_err(Error::UnableToOpenKeyCache)?; - let mut key_cache = self.decrypt_key_cache(cache, &mut key_stores).await?; + + // Check if there is at least one local definition. 
+ let has_local_definitions = self.definitions.as_slice().iter().any(|def| { + matches!( + def.signing_definition, + SigningDefinition::LocalKeystore { .. } + ) + }); + + // Only decrypt cache when there is at least one local definition. + // Decrypting cache is a very expensive operation which is never used for web3signer. + let mut key_cache = if has_local_definitions { + self.decrypt_key_cache(cache, &mut key_stores).await? + } else { + // Assign an empty KeyCache if all definitions are of the Web3Signer type. + KeyCache::new() + }; let mut disabled_uuids = HashSet::new(); for def in self.definitions.as_slice() { @@ -1141,13 +1157,16 @@ impl InitializedValidators { ); } } - for uuid in disabled_uuids { - key_cache.remove(&uuid); + + if has_local_definitions { + for uuid in disabled_uuids { + key_cache.remove(&uuid); + } } let validators_dir = self.validators_dir.clone(); let log = self.log.clone(); - if key_cache.is_modified() { + if has_local_definitions && key_cache.is_modified() { tokio::task::spawn_blocking(move || { match key_cache.save(validators_dir) { Err(e) => warn!( diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 6027427677..eca0b88154 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -24,6 +24,7 @@ pub use config::Config; use initialized_validators::InitializedValidators; use lighthouse_metrics::set_gauge; use monitoring_api::{MonitoringHttpClient, ProcessType}; +use sensitive_url::SensitiveUrl; pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use crate::beacon_node_fallback::{ @@ -94,6 +95,7 @@ pub struct ProductionValidatorClient { doppelganger_service: Option>, preparation_service: PreparationService, validator_store: Arc>, + slot_clock: SystemTimeSlotClock, http_api_listen_addr: Option, config: Config, } @@ -262,60 +264,70 @@ impl ProductionValidatorClient { .checked_sub(1) .ok_or_else(|| "No beacon nodes defined.".to_string())?; + let beacon_node_setup = |x: 
(usize, &SensitiveUrl)| { + let i = x.0; + let url = x.1; + let slot_duration = Duration::from_secs(context.eth2_config.spec.seconds_per_slot); + + let mut beacon_node_http_client_builder = ClientBuilder::new(); + + // Add new custom root certificates if specified. + if let Some(certificates) = &config.beacon_nodes_tls_certs { + for cert in certificates { + beacon_node_http_client_builder = beacon_node_http_client_builder + .add_root_certificate(load_pem_certificate(cert)?); + } + } + + let beacon_node_http_client = beacon_node_http_client_builder + // Set default timeout to be the full slot duration. + .timeout(slot_duration) + .build() + .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; + + // Use quicker timeouts if a fallback beacon node exists. + let timeouts = if i < last_beacon_node_index && !config.use_long_timeouts { + info!( + log, + "Fallback endpoints are available, using optimized timeouts."; + ); + Timeouts { + attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, + attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, + liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, + proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, + proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, + sync_committee_contribution: slot_duration + / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, + sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, + get_beacon_blocks_ssz: slot_duration + / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, + get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, + get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, + } + } else { + Timeouts::set_all(slot_duration) + }; + + Ok(BeaconNodeHttpClient::from_components( + url.clone(), + beacon_node_http_client, + timeouts, + )) + }; + let beacon_nodes: Vec = config .beacon_nodes .iter() .enumerate() - .map(|(i, url)| { - let slot_duration = 
Duration::from_secs(context.eth2_config.spec.seconds_per_slot); + .map(beacon_node_setup) + .collect::, String>>()?; - let mut beacon_node_http_client_builder = ClientBuilder::new(); - - // Add new custom root certificates if specified. - if let Some(certificates) = &config.beacon_nodes_tls_certs { - for cert in certificates { - beacon_node_http_client_builder = beacon_node_http_client_builder - .add_root_certificate(load_pem_certificate(cert)?); - } - } - - let beacon_node_http_client = beacon_node_http_client_builder - // Set default timeout to be the full slot duration. - .timeout(slot_duration) - .build() - .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; - - // Use quicker timeouts if a fallback beacon node exists. - let timeouts = if i < last_beacon_node_index && !config.use_long_timeouts { - info!( - log, - "Fallback endpoints are available, using optimized timeouts."; - ); - Timeouts { - attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, - attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, - liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, - proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, - proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, - sync_committee_contribution: slot_duration - / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, - sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, - get_beacon_blocks_ssz: slot_duration - / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, - get_debug_beacon_states: slot_duration - / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, - get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, - } - } else { - Timeouts::set_all(slot_duration) - }; - - Ok(BeaconNodeHttpClient::from_components( - url.clone(), - beacon_node_http_client, - timeouts, - )) - }) + let proposer_nodes: Vec = config + .proposer_nodes + .iter() + .enumerate() + .map(beacon_node_setup) .collect::, String>>()?; let num_nodes = beacon_nodes.len(); 
@@ -324,6 +336,12 @@ impl ProductionValidatorClient { .map(CandidateBeaconNode::new) .collect(); + let proposer_nodes_num = proposer_nodes.len(); + let proposer_candidates = proposer_nodes + .into_iter() + .map(CandidateBeaconNode::new) + .collect(); + // Set the count for beacon node fallbacks excluding the primary beacon node. set_gauge( &http_metrics::metrics::ETH2_FALLBACK_CONFIGURED, @@ -348,9 +366,16 @@ impl ProductionValidatorClient { log.clone(), ); + let mut proposer_nodes: BeaconNodeFallback<_, T> = BeaconNodeFallback::new( + proposer_candidates, + config.disable_run_on_all, + context.eth2_config.spec.clone(), + log.clone(), + ); + // Perform some potentially long-running initialization tasks. let (genesis_time, genesis_validators_root) = tokio::select! { - tuple = init_from_beacon_node(&beacon_nodes, &context) => tuple?, + tuple = init_from_beacon_node(&beacon_nodes, &proposer_nodes, &context) => tuple?, () = context.executor.exit() => return Err("Shutting down".to_string()) }; @@ -366,9 +391,14 @@ impl ProductionValidatorClient { ); beacon_nodes.set_slot_clock(slot_clock.clone()); + proposer_nodes.set_slot_clock(slot_clock.clone()); + let beacon_nodes = Arc::new(beacon_nodes); start_fallback_updater_service(context.clone(), beacon_nodes.clone())?; + let proposer_nodes = Arc::new(proposer_nodes); + start_fallback_updater_service(context.clone(), proposer_nodes.clone())?; + let doppelganger_service = if config.enable_doppelganger_protection { Some(Arc::new(DoppelgangerService::new( context @@ -432,15 +462,21 @@ impl ProductionValidatorClient { ctx.shared.write().duties_service = Some(duties_service.clone()); } - let block_service = BlockServiceBuilder::new() + let mut block_service_builder = BlockServiceBuilder::new() .slot_clock(slot_clock.clone()) .validator_store(validator_store.clone()) .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) - 
.block_delay(config.block_delay) - .build()?; + .block_delay(config.block_delay); + + // If we have proposer nodes, add them to the block service builder. + if proposer_nodes_num > 0 { + block_service_builder = block_service_builder.proposer_nodes(proposer_nodes.clone()); + } + + let block_service = block_service_builder.build()?; let attestation_service = AttestationServiceBuilder::new() .duties_service(duties_service.clone()) @@ -461,7 +497,7 @@ impl ProductionValidatorClient { let sync_committee_service = SyncCommitteeService::new( duties_service.clone(), validator_store.clone(), - slot_clock, + slot_clock.clone(), beacon_nodes.clone(), context.service_context("sync_committee".into()), ); @@ -482,6 +518,7 @@ impl ProductionValidatorClient { preparation_service, validator_store, config, + slot_clock, http_api_listen_addr: None, }) } @@ -545,6 +582,7 @@ impl ProductionValidatorClient { graffiti_flag: self.config.graffiti, spec: self.context.eth2_config.spec.clone(), config: self.config.http_api.clone(), + slot_clock: self.slot_clock.clone(), log: log.clone(), _phantom: PhantomData, }); @@ -579,13 +617,32 @@ impl ProductionValidatorClient { async fn init_from_beacon_node( beacon_nodes: &BeaconNodeFallback, + proposer_nodes: &BeaconNodeFallback, context: &RuntimeContext, ) -> Result<(u64, Hash256), String> { loop { beacon_nodes.update_unready_candidates().await; + proposer_nodes.update_unready_candidates().await; + let num_available = beacon_nodes.num_available().await; let num_total = beacon_nodes.num_total(); - if num_available > 0 { + + let proposer_available = beacon_nodes.num_available().await; + let proposer_total = beacon_nodes.num_total(); + + if proposer_total > 0 && proposer_available == 0 { + warn!( + context.log(), + "Unable to connect to a proposer node"; + "retry in" => format!("{} seconds", RETRY_DELAY.as_secs()), + "total_proposers" => proposer_total, + "available_proposers" => proposer_available, + "total_beacon_nodes" => num_total, + 
"available_beacon_nodes" => num_available, + ); + } + + if num_available > 0 && proposer_available == 0 { info!( context.log(), "Initialized beacon node connections"; @@ -593,6 +650,16 @@ async fn init_from_beacon_node( "available" => num_available, ); break; + } else if num_available > 0 { + info!( + context.log(), + "Initialized beacon node connections"; + "total" => num_total, + "available" => num_available, + "proposers_available" => proposer_available, + "proposers_total" => proposer_total, + ); + break; } else { warn!( context.log(), diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index ae9df08096..0de2f2f54f 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -47,6 +47,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload = FullP }, SignedContributionAndProof(&'a ContributionAndProof), ValidatorRegistration(&'a ValidatorRegistrationData), + VoluntaryExit(&'a VoluntaryExit), } impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Payload> { @@ -67,6 +68,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Pay } => beacon_block_root.signing_root(domain), SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain), SignableMessage::ValidatorRegistration(v) => v.signing_root(domain), + SignableMessage::VoluntaryExit(exit) => exit.signing_root(domain), } } } @@ -203,6 +205,7 @@ impl SigningMethod { SignableMessage::ValidatorRegistration(v) => { Web3SignerObject::ValidatorRegistration(v) } + SignableMessage::VoluntaryExit(e) => Web3SignerObject::VoluntaryExit(e), }; // Determine the Web3Signer message type. 
diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 17e780304e..2c1f0cb3fc 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -54,15 +54,14 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> { Deposit { pubkey: PublicKeyBytes, withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] amount: u64, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] genesis_fork_version: [u8; 4], }, RandaoReveal { epoch: Epoch, }, - #[allow(dead_code)] VoluntaryExit(&'a VoluntaryExit), SyncCommitteeMessage { beacon_block_root: Hash256, diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 08c52f2f42..365f7f7347 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -22,8 +22,9 @@ use types::{ AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, - SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, + SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, + ValidatorRegistrationData, VoluntaryExit, }; use validator_dir::ValidatorDir; @@ -155,6 +156,14 @@ impl ValidatorStore { self.validators.clone() } + /// Indicates if the `voting_public_key` exists in self and is enabled. 
+ pub fn has_validator(&self, voting_public_key: &PublicKeyBytes) -> bool { + self.validators + .read() + .validator(voting_public_key) + .is_some() + } + /// Insert a new validator to `self`, where the validator is represented by an EIP-2335 /// keystore on the filesystem. #[allow(clippy::too_many_arguments)] @@ -616,6 +625,32 @@ impl ValidatorStore { } } + pub async fn sign_voluntary_exit( + &self, + validator_pubkey: PublicKeyBytes, + voluntary_exit: VoluntaryExit, + ) -> Result { + let signing_epoch = voluntary_exit.epoch; + let signing_context = self.signing_context(Domain::VoluntaryExit, signing_epoch); + let signing_method = self.doppelganger_bypassed_signing_method(validator_pubkey)?; + + let signature = signing_method + .get_signature::>( + SignableMessage::VoluntaryExit(&voluntary_exit), + signing_context, + &self.spec, + &self.task_executor, + ) + .await?; + + metrics::inc_counter_vec(&metrics::SIGNED_VOLUNTARY_EXITS_TOTAL, &[metrics::SUCCESS]); + + Ok(SignedVoluntaryExit { + message: voluntary_exit, + signature, + }) + } + pub async fn sign_validator_registration_data( &self, validator_registration_data: ValidatorRegistrationData, diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index e32f35bb7b..851510820e 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -17,8 +17,8 @@ eth2_keystore = { path = "../crypto/eth2_keystore" } account_utils = { path = "../common/account_utils" } serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" -eth2_serde_utils = "0.1.1" -tree_hash = "0.4.1" +ethereum_serde_utils = "0.5.0" +tree_hash = "0.5.0" eth2 = { path = "../common/eth2", features = ["lighthouse"]} hex = "0.4.2" tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs index be4e8ae009..cc302ea20f 100644 --- a/validator_manager/src/common.rs +++ b/validator_manager/src/common.rs @@ -293,7 
+293,7 @@ mod bytes_4_without_0x_prefix { where D: serde::Deserializer<'de>, { - let decoded = deserializer.deserialize_str(eth2_serde_utils::hex::HexVisitor)?; + let decoded = deserializer.deserialize_str(serde_utils::hex::HexVisitor)?; if decoded.len() != BYTES_LEN { return Err(D::Error::custom(format!( diff --git a/watch/.gitignore b/watch/.gitignore new file mode 100644 index 0000000000..5b6b0720c9 --- /dev/null +++ b/watch/.gitignore @@ -0,0 +1 @@ +config.yaml diff --git a/watch/Cargo.toml b/watch/Cargo.toml new file mode 100644 index 0000000000..d1793a9d06 --- /dev/null +++ b/watch/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "watch" +version = "0.1.0" +edition = "2018" + +[lib] +name = "watch" +path = "src/lib.rs" + +[[bin]] +name = "watch" +path = "src/main.rs" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +clap = "2.33.3" +log = "0.4.14" +env_logger = "0.9.0" +types = { path = "../consensus/types" } +eth2 = { path = "../common/eth2" } +beacon_node = { path = "../beacon_node"} +tokio = { version = "1.14.0", features = ["time"] } +axum = "0.5.15" +hyper = "0.14.20" +serde = "1.0.116" +serde_json = "1.0.58" +reqwest = { version = "0.11.0", features = ["json","stream"] } +url = "2.2.2" +rand = "0.7.3" +diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } +diesel_migrations = { version = "2.0.0", features = ["postgres"] } +byteorder = "1.4.3" +bls = { path = "../crypto/bls" } +hex = "0.4.2" +r2d2 = "0.8.9" +serde_yaml = "0.8.24" + +[dev-dependencies] +tokio-postgres = "0.7.5" +http_api = { path = "../beacon_node/http_api" } +beacon_chain = { path = "../beacon_node/beacon_chain" } +network = { path = "../beacon_node/network" } +testcontainers = "0.14.0" +unused_port = { path = "../common/unused_port" } diff --git a/watch/README.md b/watch/README.md new file mode 100644 index 0000000000..18bf393946 --- /dev/null +++ b/watch/README.md @@ -0,0 +1,460 @@ +## beacon.watch + 
+>beacon.watch is pre-MVP and still under active development and subject to change. + +beacon.watch is an Ethereum Beacon Chain monitoring platform whose goal is to provide fast access to +data which is: +1. Not already stored natively in the Beacon Chain +2. Too specialized for Block Explorers +3. Too sensitive for public Block Explorers + + +### Requirements +- `git` +- `rust` : https://rustup.rs/ +- `libpg` : https://www.postgresql.org/download/ +- `diesel_cli` : +``` +cargo install diesel_cli --no-default-features --features postgres +``` +- `docker` : https://docs.docker.com/engine/install/ +- `docker-compose` : https://docs.docker.com/compose/install/ + +### Setup +1. Setup the database: +``` +cd postgres_docker_compose +docker-compose up +``` + +1. Ensure the tests pass: +``` +cargo test --release +``` + +1. Drop the database (if it already exists) and run the required migrations: +``` +diesel database reset --database-url postgres://postgres:postgres@localhost/dev +``` + +1. Ensure a synced Lighthouse beacon node with historical states is available +at `localhost:5052`. +The smaller the value of `--slots-per-restore-point` the faster beacon.watch +will be able to sync to the beacon node. + +1. Run the updater daemon: +``` +cargo run --release -- run-updater +``` + +1. Start the HTTP API server: +``` +cargo run --release -- serve +``` + +1. Ensure connectivity: +``` +curl "http://localhost:5059/v1/slots/highest" +``` + +> Functionality on MacOS has not been tested. Windows is not supported. + + +### Configuration +beacon.watch can be configured through the use of a config file. +Available options can be seen in `config.yaml.default`. + +You can specify a config file during runtime: +``` +cargo run -- run-updater --config path/to/config.yaml +cargo run -- serve --config path/to/config.yaml +``` + +You can specify only the parts of the config file which you need changed. +Missing values will remain as their defaults. 
+ +For example, if you wish to run with default settings but only wish to alter `log_level` +your config file would be: +```yaml +# config.yaml +log_level = "info" +``` + +### Available Endpoints +As beacon.watch continues to develop, more endpoints will be added. + +> In these examples any data containing information from blockprint has either been redacted or fabricated. + +#### `/v1/slots/{slot}` +```bash +curl "http://localhost:5059/v1/slots/4635296" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "skipped": false, + "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/slots?start_slot={}&end_slot={}` +```bash +curl "http://localhost:5059/v1/slots?start_slot=4635296&end_slot=4635297" +``` +```json +[ + { + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "skipped": false, + "beacon_block": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182" + }, + { + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "skipped": false, + "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" + } +] +``` + +#### `/v1/slots/lowest` +```bash +curl "http://localhost:5059/v1/slots/lowest" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "skipped": false, + "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/slots/highest` +```bash +curl "http://localhost:5059/v1/slots/highest" +``` +```json +{ + "slot": "4635358", + "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", + "skipped": false, + "beacon_block": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b" +} +``` + +#### `v1/slots/{slot}/block` +```bash +curl 
"http://localhost:5059/v1/slots/4635296/block" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/{block_id}` +```bash +curl "http://localhost:5059/v1/blocks/4635296" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks?start_slot={}&end_slot={}` +```bash +curl "http://localhost:5059/v1/blocks?start_slot=4635296&end_slot=4635297" +``` +```json +[ + { + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" + }, + { + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" + } +] +``` + +#### `/v1/blocks/{block_id}/previous` +```bash +curl "http://localhost:5059/v1/blocks/4635297/previous" +# OR +curl "http://localhost:5059/v1/blocks/0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182/previous" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/{block_id}/next` +```bash +curl "http://localhost:5059/v1/blocks/4635296/next" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/next" +``` +```json +{ + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + 
"parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/blocks/lowest` +```bash +curl "http://localhost:5059/v1/blocks/lowest" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/highest` +```bash +curl "http://localhost:5059/v1/blocks/highest" +``` +```json +{ + "slot": "4635358", + "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", + "parent_root": "0xb66e05418bb5b1d4a965c994e1f0e5b5f0d7b780e0df12f3f6321510654fa1d2" +} +``` + +#### `/v1/blocks/{block_id}/proposer` +```bash +curl "http://localhost:5059/v1/blocks/4635296/proposer" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/proposer" + +``` +```json +{ + "slot": "4635296", + "proposer_index": 223126, + "graffiti": "" +} +``` + +#### `/v1/blocks/{block_id}/rewards` +```bash +curl "http://localhost:5059/v1/blocks/4635296/reward" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/reward" + +``` +```json +{ + "slot": "4635296", + "total": 25380059, + "attestation_reward": 24351867, + "sync_committee_reward": 1028192 +} +``` + +#### `/v1/blocks/{block_id}/packing` +```bash +curl "http://localhost:5059/v1/blocks/4635296/packing" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/packing" + +``` +```json +{ + "slot": "4635296", + "available": 16152, + "included": 13101, + "prior_skip_slots": 0 +} +``` + +#### `/v1/validators/{validator}` +```bash +curl "http://localhost:5059/v1/validators/1" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c" +``` +```json +{ + "index": 1, + "public_key": 
"0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c", + "status": "active_ongoing", + "client": null, + "activation_epoch": 0, + "exit_epoch": null +} +``` + +#### `/v1/validators/{validator}/attestation/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/1/attestation/144853" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c/attestation/144853" +``` +```json +{ + "index": 1, + "epoch": "144853", + "source": true, + "head": true, + "target": true +} +``` + +#### `/v1/validators/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853" +``` +```json +[ + 63, + 67, + 98, + ... +] +``` + +#### `/v1/validators/missed/{vote}/{epoch}/graffiti` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853/graffiti" +``` +```json +{ + "Mr F was here": 3, + "Lighthouse/v3.1.0-aa022f4": 5, + ... +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/clients/missed/source/144853" +``` +```json +{ + "Lighthouse": 100, + "Lodestar": 100, + "Nimbus": 100, + "Prysm": 100, + "Teku": 100, + "Unknown": 100 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages` +Note that this endpoint expresses the following: +``` +What percentage of each client implementation missed this vote? +``` + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages" +``` +```json +{ + "Lighthouse": 0.51234567890, + "Lodestar": 0.51234567890, + "Nimbus": 0.51234567890, + "Prysm": 0.09876543210, + "Teku": 0.09876543210, + "Unknown": 0.05647382910 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages/relative` +Note that this endpoint expresses the following: +``` +For the validators which did miss this vote, what percentage of them were from each client implementation? 
+``` +You can check these values against the output of `/v1/clients/percentages` to see any discrepancies. + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages/relative" +``` +```json +{ + "Lighthouse": 11.11111111111111, + "Lodestar": 11.11111111111111, + "Nimbus": 11.11111111111111, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 33.33333333333333 +} + +``` + +#### `/v1/clients` +```bash +curl "http://localhost:5059/v1/clients" +``` +```json +{ + "Lighthouse": 5000, + "Lodestar": 5000, + "Nimbus": 5000, + "Prysm": 5000, + "Teku": 5000, + "Unknown": 5000 +} +``` + +#### `/v1/clients/percentages` +```bash +curl "http://localhost:5059/v1/clients/percentages" +``` +```json +{ + "Lighthouse": 16.66666666666667, + "Lodestar": 16.66666666666667, + "Nimbus": 16.66666666666667, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 16.66666666666667 +} +``` + +### Future work +- New tables + - `skip_slots`? + + +- More API endpoints + - `/v1/proposers?start_epoch={}&end_epoch={}` and similar + - `/v1/validators/{status}/count` + + +- Concurrently backfill and forwards fill, so forwards fill is not bottlenecked by large backfills. + + +- Better/prettier (async?) logging. + + +- Connect to a range of beacon_nodes to sync different components concurrently. +Generally, processing certain api queries such as `block_packing` and `attestation_performance` take the longest to sync. 
+ + +### Architecture +Connection Pooling: +- 1 Pool for Updater (read and write) +- 1 Pool for HTTP Server (should be read only, although not sure if we can enforce this) diff --git a/watch/config.yaml.default b/watch/config.yaml.default new file mode 100644 index 0000000000..131609237c --- /dev/null +++ b/watch/config.yaml.default @@ -0,0 +1,49 @@ +--- +database: + user: "postgres" + password: "postgres" + dbname: "dev" + default_dbname: "postgres" + host: "localhost" + port: 5432 + connect_timeout_millis: 2000 + +server: + listen_addr: "127.0.0.1" + listen_port: 5059 + +updater: + # The URL of the Beacon Node to perform sync tasks with. + # Cannot yet accept multiple beacon nodes. + beacon_node_url: "http://localhost:5052" + # The number of epochs to backfill. Must be below 100. + max_backfill_size_epochs: 2 + # The epoch at which to stop backfilling. + backfill_stop_epoch: 0 + # Whether to sync the attestations table. + attestations: true + # Whether to sync the proposer_info table. + proposer_info: true + # Whether to sync the block_rewards table. + block_rewards: true + # Whether to sync the block_packing table. + block_packing: true + +blockprint: + # Whether to sync client information from blockprint. + enabled: false + # The URL of the blockprint server. + url: "" + # The username used to authenticate to the blockprint server. + username: "" + # The password used to authenticate to the blockprint server. + password: "" + +# Log level. 
+# Valid options are: +# - "trace" +# - "debug" +# - "info" +# - "warn" +# - "error" +log_level: "debug" diff --git a/watch/diesel.toml b/watch/diesel.toml new file mode 100644 index 0000000000..bfb01bccf0 --- /dev/null +++ b/watch/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/database/schema.rs" diff --git a/watch/migrations/.gitkeep b/watch/migrations/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/watch/migrations/00000000000000_diesel_initial_setup/down.sql b/watch/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 0000000000..a9f5260911 --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/watch/migrations/00000000000000_diesel_initial_setup/up.sql b/watch/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 0000000000..d68895b1a7 --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. 
+ + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/down.sql b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql new file mode 100644 index 0000000000..551ed6605c --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql @@ -0,0 +1 @@ +DROP TABLE canonical_slots diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/up.sql b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql new file mode 100644 index 0000000000..2629f11a4c --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE canonical_slots ( + slot integer PRIMARY KEY, + root bytea NOT NULL, + skipped boolean NOT NULL, + beacon_block bytea UNIQUE +) diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql new file mode 100644 index 0000000000..8901956f47 --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql @@ -0,0 +1 @@ +DROP TABLE beacon_blocks diff --git 
a/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql new file mode 100644 index 0000000000..250c667b23 --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE beacon_blocks ( + slot integer PRIMARY KEY REFERENCES canonical_slots(slot) ON DELETE CASCADE, + root bytea REFERENCES canonical_slots(beacon_block) NOT NULL, + parent_root bytea NOT NULL, + attestation_count integer NOT NULL, + transaction_count integer +) diff --git a/watch/migrations/2022-01-01-000002_validators/down.sql b/watch/migrations/2022-01-01-000002_validators/down.sql new file mode 100644 index 0000000000..17819fc349 --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/down.sql @@ -0,0 +1 @@ +DROP TABLE validators diff --git a/watch/migrations/2022-01-01-000002_validators/up.sql b/watch/migrations/2022-01-01-000002_validators/up.sql new file mode 100644 index 0000000000..69cfef6772 --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE validators ( + index integer PRIMARY KEY, + public_key bytea NOT NULL, + status text NOT NULL, + activation_epoch integer, + exit_epoch integer +) diff --git a/watch/migrations/2022-01-01-000003_proposer_info/down.sql b/watch/migrations/2022-01-01-000003_proposer_info/down.sql new file mode 100644 index 0000000000..d61330be5b --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/down.sql @@ -0,0 +1 @@ +DROP TABLE proposer_info diff --git a/watch/migrations/2022-01-01-000003_proposer_info/up.sql b/watch/migrations/2022-01-01-000003_proposer_info/up.sql new file mode 100644 index 0000000000..488aedb273 --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE proposer_info ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + proposer_index integer REFERENCES validators(index) ON DELETE CASCADE NOT NULL, + 
graffiti text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000004_active_config/down.sql b/watch/migrations/2022-01-01-000004_active_config/down.sql new file mode 100644 index 0000000000..b4304eb7b7 --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/down.sql @@ -0,0 +1 @@ +DROP TABLE active_config diff --git a/watch/migrations/2022-01-01-000004_active_config/up.sql b/watch/migrations/2022-01-01-000004_active_config/up.sql new file mode 100644 index 0000000000..476a091160 --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE active_config ( + id integer PRIMARY KEY CHECK (id=1), + config_name text NOT NULL, + slots_per_epoch integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000010_blockprint/down.sql b/watch/migrations/2022-01-01-000010_blockprint/down.sql new file mode 100644 index 0000000000..fa53325dad --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/down.sql @@ -0,0 +1 @@ +DROP TABLE blockprint diff --git a/watch/migrations/2022-01-01-000010_blockprint/up.sql b/watch/migrations/2022-01-01-000010_blockprint/up.sql new file mode 100644 index 0000000000..2d5741f50b --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/up.sql @@ -0,0 +1,4 @@ +CREATE TABLE blockprint ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + best_guess text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000011_block_rewards/down.sql b/watch/migrations/2022-01-01-000011_block_rewards/down.sql new file mode 100644 index 0000000000..2dc87995c7 --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/down.sql @@ -0,0 +1 @@ +DROP TABLE block_rewards diff --git a/watch/migrations/2022-01-01-000011_block_rewards/up.sql b/watch/migrations/2022-01-01-000011_block_rewards/up.sql new file mode 100644 index 0000000000..47cb4304f0 --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE 
block_rewards ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + total integer NOT NULL, + attestation_reward integer NOT NULL, + sync_committee_reward integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000012_block_packing/down.sql b/watch/migrations/2022-01-01-000012_block_packing/down.sql new file mode 100644 index 0000000000..e9e7755e3e --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/down.sql @@ -0,0 +1 @@ +DROP TABLE block_packing diff --git a/watch/migrations/2022-01-01-000012_block_packing/up.sql b/watch/migrations/2022-01-01-000012_block_packing/up.sql new file mode 100644 index 0000000000..63a9925f92 --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE block_packing ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + available integer NOT NULL, + included integer NOT NULL, + prior_skip_slots integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql new file mode 100644 index 0000000000..0f32b6b4f3 --- /dev/null +++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql @@ -0,0 +1 @@ +DROP TABLE suboptimal_attestations diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql new file mode 100644 index 0000000000..5352afefc8 --- /dev/null +++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql @@ -0,0 +1,8 @@ +CREATE TABLE suboptimal_attestations ( + epoch_start_slot integer CHECK (epoch_start_slot % 32 = 0) REFERENCES canonical_slots(slot) ON DELETE CASCADE, + index integer NOT NULL REFERENCES validators(index) ON DELETE CASCADE, + source boolean NOT NULL, + head boolean NOT NULL, + target boolean NOT NULL, + PRIMARY KEY(epoch_start_slot, index) +) diff --git 
a/watch/migrations/2022-01-01-000020_capella/down.sql b/watch/migrations/2022-01-01-000020_capella/down.sql new file mode 100644 index 0000000000..5903b351db --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/down.sql @@ -0,0 +1,2 @@ +ALTER TABLE beacon_blocks +DROP COLUMN withdrawal_count; diff --git a/watch/migrations/2022-01-01-000020_capella/up.sql b/watch/migrations/2022-01-01-000020_capella/up.sql new file mode 100644 index 0000000000..b52b4b0099 --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/up.sql @@ -0,0 +1,3 @@ +ALTER TABLE beacon_blocks +ADD COLUMN withdrawal_count integer; + diff --git a/watch/postgres_docker_compose/compose.yml b/watch/postgres_docker_compose/compose.yml new file mode 100644 index 0000000000..eae4de4a2b --- /dev/null +++ b/watch/postgres_docker_compose/compose.yml @@ -0,0 +1,16 @@ +version: "3" + +services: + postgres: + image: postgres:12.3-alpine + restart: always + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_USER: postgres + volumes: + - postgres:/var/lib/postgresql/data + ports: + - 127.0.0.1:5432:5432 + +volumes: + postgres: diff --git a/watch/src/block_packing/database.rs b/watch/src/block_packing/database.rs new file mode 100644 index 0000000000..f7375431cb --- /dev/null +++ b/watch/src/block_packing/database.rs @@ -0,0 +1,140 @@ +use crate::database::{ + schema::{beacon_blocks, block_packing}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_packing)] +pub struct WatchBlockPacking { + pub slot: WatchSlot, + pub available: i32, + pub included: i32, + pub prior_skip_slots: i32, +} + +/// Insert a batch of values into the `block_packing` table. +/// +/// On a conflict, it will do nothing, leaving the old value. 
+pub fn insert_batch_block_packing( + conn: &mut PgConn, + packing: Vec, +) -> Result<(), Error> { + use self::block_packing::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in packing.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_packing) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block packing inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_packing` table where `slot` is minimum. +pub fn get_lowest_block_packing(conn: &mut PgConn) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_packing` table where `slot` is maximum. +pub fn get_highest_block_packing(conn: &mut PgConn) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `root_query`. 
+pub fn get_block_packing_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_packing); + + let result = join + .select((slot, available, included, prior_skip_slots)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `slot_query`. +pub fn get_block_packing_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_packing`. +#[allow(dead_code)] +pub fn get_unknown_block_packing( + conn: &mut PgConn, + slots_per_epoch: u64, +) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_packing::dsl::block_packing; + + let join = beacon_blocks.left_join(block_packing); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block packing cannot be retrieved for epoch 0 so we need to exclude them. 
+ .filter(slot.ge(slots_per_epoch as i32)) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_packing/mod.rs b/watch/src/block_packing/mod.rs new file mode 100644 index 0000000000..5d74fc5979 --- /dev/null +++ b/watch/src/block_packing/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, + get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, + WatchBlockPacking, +}; +pub use server::block_packing_routes; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/block_packing`. +/// Formats the response into a vector of `WatchBlockPacking`. +/// +/// Will fail if `start_epoch == 0`. +pub async fn get_block_packing( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, +) -> Result, Error> { + Ok(bn + .get_lighthouse_analysis_block_packing(start_epoch, end_epoch) + .await? 
+ .into_iter() + .map(|data| WatchBlockPacking { + slot: WatchSlot::from_slot(data.slot), + available: data.available_attestations as i32, + included: data.included_attestations as i32, + prior_skip_slots: data.prior_skip_slots as i32, + }) + .collect()) +} diff --git a/watch/src/block_packing/server.rs b/watch/src/block_packing/server.rs new file mode 100644 index 0000000000..819144562a --- /dev/null +++ b/watch/src/block_packing/server.rs @@ -0,0 +1,31 @@ +use crate::block_packing::database::{ + get_block_packing_by_root, get_block_packing_by_slot, WatchBlockPacking, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_packing( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(get_block_packing_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_packing_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_packing_routes() -> Router { + Router::new().route("/v1/blocks/:block/packing", get(get_block_packing)) +} diff --git a/watch/src/block_packing/updater.rs b/watch/src/block_packing/updater.rs new file mode 100644 index 0000000000..215964901a --- /dev/null +++ b/watch/src/block_packing/updater.rs @@ -0,0 +1,211 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_packing::get_block_packing; + +use eth2::types::{Epoch, EthSpec}; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING: u64 = 50; + +impl UpdateHandler { + /// Forward fills the `block_packing` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_packing` API with: + /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest beacon block) + /// `end_epoch` -> epoch of highest beacon block + /// + /// It will resync the latest epoch if it is not fully filled. + /// That is, `if highest_filled_slot % slots_per_epoch != 31` + /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be + //// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + pub async fn fill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_packing` table. 
+ let highest_filled_slot_opt = if self.config.block_packing { + database::get_highest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let mut start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { + if highest_filled_slot.as_slot() % self.slots_per_epoch + == self.slots_per_epoch.saturating_sub(1) + { + // The whole epoch is filled so we can begin syncing the next one. + highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + 1 + } else { + // The epoch is only partially synced. Try to sync it fully. + highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = database::get_lowest_beacon_block(&mut conn)? { + lowest_beacon_block + .slot + .as_slot() + .epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not fill the `block_packing` table. + warn!("Refusing to fill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `get_block_packing` API endpoint cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut end_epoch = highest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Block packing is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. 
+ if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Since we pull a full epoch of data but are not guaranteed to have all blocks of + // that epoch available, only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_packing` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_block_packing` function with: + /// `start_epoch` -> epoch of lowest_beacon_block + /// `end_epoch` -> epoch of lowest filled `block_packing` - 1 (or epoch of highest beacon block) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be + //// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. 
+ pub async fn backfill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_packing_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `block_packing` table. + let lowest_filled_slot_opt = if self.config.block_packing { + database::get_lowest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot.as_slot() % self.slots_per_epoch == 0 { + lowest_filled_slot + .as_slot() + .epoch(self.slots_per_epoch) + .saturating_sub(Epoch::new(1)) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot().epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not backfill the `block_packing` table. + warn!("Refusing to backfill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_epoch <= 1 { + debug!("Block packing backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut start_epoch = lowest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch >= end_epoch { + debug!("Block packing is up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_packing_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. 
+ if start_epoch < end_epoch.saturating_sub(max_block_packing_backfill) { + start_epoch = end_epoch.saturating_sub(max_block_packing_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) + } + + // The `block_packing` API cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/block_rewards/database.rs b/watch/src/block_rewards/database.rs new file mode 100644 index 0000000000..a2bf49f3e4 --- /dev/null +++ b/watch/src/block_rewards/database.rs @@ -0,0 +1,137 @@ +use crate::database::{ + schema::{beacon_blocks, block_rewards}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_rewards)] +pub struct WatchBlockRewards { + pub slot: WatchSlot, + pub total: i32, + pub attestation_reward: i32, + pub sync_committee_reward: i32, +} + +/// Insert a batch of values into the `block_rewards` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_block_rewards( + conn: &mut PgConn, + rewards: Vec, +) -> Result<(), Error> { + use self::block_rewards::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in rewards.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_rewards) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block rewards inserted, count: {count}, time_taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_rewards` table where `slot` is minimum. 
+pub fn get_lowest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_rewards` table where `slot` is maximum. +pub fn get_highest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `root_query`. +pub fn get_block_rewards_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_rewards); + + let result = join + .select((slot, total, attestation_reward, sync_committee_reward)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `slot_query`. 
+pub fn get_block_rewards_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_rewards`. +#[allow(dead_code)] +pub fn get_unknown_block_rewards(conn: &mut PgConn) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_rewards::dsl::block_rewards; + + let join = beacon_blocks.left_join(block_rewards); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block rewards cannot be retrieved for `slot == 0` so we need to exclude it. + .filter(slot.ne(0)) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_rewards/mod.rs b/watch/src/block_rewards/mod.rs new file mode 100644 index 0000000000..0dac88ea58 --- /dev/null +++ b/watch/src/block_rewards/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +mod server; +mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, + get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, + WatchBlockRewards, +}; +pub use server::block_rewards_routes; + +use eth2::BeaconNodeHttpClient; +use types::Slot; + +/// Sends a request to `lighthouse/analysis/block_rewards`. +/// Formats the response into a vector of `WatchBlockRewards`. +/// +/// Will fail if `start_slot == 0`. 
+pub async fn get_block_rewards( + bn: &BeaconNodeHttpClient, + start_slot: Slot, + end_slot: Slot, +) -> Result, Error> { + Ok(bn + .get_lighthouse_analysis_block_rewards(start_slot, end_slot) + .await? + .into_iter() + .map(|data| WatchBlockRewards { + slot: WatchSlot::from_slot(data.meta.slot), + total: data.total as i32, + attestation_reward: data.attestation_rewards.total as i32, + sync_committee_reward: data.sync_committee_rewards as i32, + }) + .collect()) +} diff --git a/watch/src/block_rewards/server.rs b/watch/src/block_rewards/server.rs new file mode 100644 index 0000000000..480346e25b --- /dev/null +++ b/watch/src/block_rewards/server.rs @@ -0,0 +1,31 @@ +use crate::block_rewards::database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, WatchBlockRewards, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_rewards( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(get_block_rewards_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_rewards_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_rewards_routes() -> Router { + Router::new().route("/v1/blocks/:block/rewards", get(get_block_rewards)) +} diff --git a/watch/src/block_rewards/updater.rs b/watch/src/block_rewards/updater.rs new file mode 100644 index 0000000000..ad34b1f078 --- /dev/null +++ b/watch/src/block_rewards/updater.rs @@ -0,0 +1,157 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_rewards::get_block_rewards; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS: u64 = 1600; + +impl UpdateHandler { + /// Forward fills the `block_rewards` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> highest filled `block_rewards` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + pub async fn fill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_rewards` table. + let highest_filled_slot_opt = if self.config.block_rewards { + database::get_highest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let mut start_slot = if let Some(highest_filled_slot) = highest_filled_slot_opt { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. 
+ if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `block_rewards` table. + warn!("Refusing to fill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `block_rewards` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Block rewards are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + database::insert_batch_block_rewards(&mut conn, rewards)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_rewards` tables starting from the entry with the + /// lowest slot. + /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `block_rewards` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. 
+ pub async fn backfill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_reward_backfill = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `block_rewards` table. + let lowest_filled_slot_opt = if self.config.block_rewards { + database::get_lowest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let end_slot = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `block_rewards` table. + warn!("Refusing to backfill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Block rewards backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? { + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Block rewards are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_reward_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + if start_slot < end_slot.saturating_sub(max_block_reward_backfill) { + start_slot = end_slot.saturating_sub(max_block_reward_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) + } + + // The `block_rewards` API cannot accept `start_slot == 0`. 
+ if start_slot == 0 { + start_slot += 1 + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + + if self.config.block_rewards { + database::insert_batch_block_rewards(&mut conn, rewards)?; + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/blockprint/config.rs b/watch/src/blockprint/config.rs new file mode 100644 index 0000000000..721fa7cb19 --- /dev/null +++ b/watch/src/blockprint/config.rs @@ -0,0 +1,40 @@ +use serde::{Deserialize, Serialize}; + +pub const fn enabled() -> bool { + false +} + +pub const fn url() -> Option { + None +} + +pub const fn username() -> Option { + None +} + +pub const fn password() -> Option { + None +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "enabled")] + pub enabled: bool, + #[serde(default = "url")] + pub url: Option, + #[serde(default = "username")] + pub username: Option, + #[serde(default = "password")] + pub password: Option, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: enabled(), + url: url(), + username: username(), + password: password(), + } + } +} diff --git a/watch/src/blockprint/database.rs b/watch/src/blockprint/database.rs new file mode 100644 index 0000000000..afa35c81b6 --- /dev/null +++ b/watch/src/blockprint/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + self, + schema::{beacon_blocks, blockprint}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::sql_types::{Integer, Text}; +use diesel::{Insertable, Queryable}; +use log::debug; 
+use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Instant; + +type WatchConsensusClient = String; +pub fn list_consensus_clients() -> Vec { + vec![ + "Lighthouse".to_string(), + "Lodestar".to_string(), + "Nimbus".to_string(), + "Prysm".to_string(), + "Teku".to_string(), + "Unknown".to_string(), + ] +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = blockprint)] +pub struct WatchBlockprint { + pub slot: WatchSlot, + pub best_guess: WatchConsensusClient, +} + +#[derive(Debug, QueryableByName, diesel::FromSqlRow)] +pub struct WatchValidatorBlockprint { + #[diesel(sql_type = Integer)] + pub proposer_index: i32, + #[diesel(sql_type = Text)] + pub best_guess: WatchConsensusClient, + #[diesel(sql_type = Integer)] + pub slot: WatchSlot, +} + +/// Insert a batch of values into the `blockprint` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_blockprint( + conn: &mut PgConn, + prints: Vec, +) -> Result<(), Error> { + use self::blockprint::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in prints.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(blockprint) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Blockprint inserted, count: {count}, time_taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `blockprint` table where `slot` is minimum. +pub fn get_lowest_blockprint(conn: &mut PgConn) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `blockprint` table where `slot` is maximum. 
+pub fn get_highest_blockprint(conn: &mut PgConn) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `blockprint` table corresponding to a given `root_query`. +pub fn get_blockprint_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(blockprint); + + let result = join + .select((slot, best_guess)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `blockprint` table corresponding to a given `slot_query`. +pub fn get_blockprint_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `blockprint`. 
+#[allow(dead_code)] +pub fn get_unknown_blockprint(conn: &mut PgConn) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::blockprint::dsl::blockprint; + + let join = beacon_blocks.left_join(blockprint); + + let result = join + .select(slot) + .filter(root.is_null()) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} + +/// Constructs a HashMap of `index` -> `best_guess` for each validator's latest proposal at or before +/// `target_slot`. +/// Inserts `"Unknown" if no prior proposals exist. +pub fn construct_validator_blockprints_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result, Error> { + use self::blockprint::dsl::{blockprint, slot}; + + let total_validators = + database::count_validators_activated_before_slot(conn, target_slot, slots_per_epoch)? + as usize; + + let mut blockprint_map = HashMap::with_capacity(total_validators); + + let latest_proposals = + database::get_all_validators_latest_proposer_info_at_slot(conn, target_slot)?; + + let latest_proposal_slots: Vec = latest_proposals.clone().into_keys().collect(); + + let result = blockprint + .filter(slot.eq_any(latest_proposal_slots)) + .load::(conn)?; + + // Insert the validators which have available blockprints. + for print in result { + if let Some(proposer) = latest_proposals.get(&print.slot) { + blockprint_map.insert(*proposer, print.best_guess); + } + } + + // Insert the rest of the unknown validators. + for validator_index in 0..total_validators { + blockprint_map + .entry(validator_index as i32) + .or_insert_with(|| "Unknown".to_string()); + } + + Ok(blockprint_map) +} + +/// Counts the number of occurances of each `client` present in the `validators` table at or before some +/// `target_slot`. 
+pub fn get_validators_clients_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result, Error> { + let mut client_map: HashMap = HashMap::new(); + + // This includes all validators which were activated at or before `target_slot`. + let validator_blockprints = + construct_validator_blockprints_at_slot(conn, target_slot, slots_per_epoch)?; + + for client in list_consensus_clients() { + let count = validator_blockprints + .iter() + .filter(|(_, v)| (*v).clone() == client) + .count(); + client_map.insert(client, count); + } + + Ok(client_map) +} diff --git a/watch/src/blockprint/mod.rs b/watch/src/blockprint/mod.rs new file mode 100644 index 0000000000..b8107e5bf5 --- /dev/null +++ b/watch/src/blockprint/mod.rs @@ -0,0 +1,149 @@ +pub mod database; +pub mod server; +pub mod updater; + +mod config; + +use crate::database::WatchSlot; + +use eth2::SensitiveUrl; +use reqwest::{Client, Response, Url}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Duration; +use types::Slot; + +pub use config::Config; +pub use database::{ + get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, + get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, + list_consensus_clients, WatchBlockprint, +}; +pub use server::blockprint_routes; + +const TIMEOUT: Duration = Duration::from_secs(50); + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), + BlockprintNotSynced, + Other(String), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchBlockprintClient { + pub client: Client, + pub server: SensitiveUrl, + pub username: Option, + pub password: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintSyncingResponse { + pub greatest_block_slot: Slot, + pub 
synced: bool, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintResponse { + pub proposer_index: i32, + pub slot: Slot, + pub best_guess_single: String, +} + +impl WatchBlockprintClient { + async fn get(&self, url: Url) -> Result { + let mut builder = self.client.get(url).timeout(TIMEOUT); + if let Some(username) = &self.username { + builder = builder.basic_auth(username, self.password.as_ref()); + } + let response = builder.send().await.map_err(Error::Reqwest)?; + + if !response.status().is_success() { + return Err(Error::Other(response.text().await?)); + } + + Ok(response) + } + + // Returns the `greatest_block_slot` as reported by the Blockprint server. + // Will error if the Blockprint server is not synced. + #[allow(dead_code)] + pub async fn ensure_synced(&self) -> Result { + let url = self.server.full.join("sync/")?.join("status")?; + + let response = self.get(url).await?; + + let result = response.json::().await?; + if !result.synced { + return Err(Error::BlockprintNotSynced); + } + + Ok(result.greatest_block_slot) + } + + // Pulls the latest blockprint for all validators. + #[allow(dead_code)] + pub async fn blockprint_all_validators( + &self, + highest_validator: i32, + ) -> Result, Error> { + let url = self + .server + .full + .join("validator/")? + .join("blocks/")? + .join("latest")?; + + let response = self.get(url).await?; + + let mut result = response.json::>().await?; + result.retain(|print| print.proposer_index <= highest_validator); + + let mut map: HashMap = HashMap::with_capacity(result.len()); + for print in result { + map.insert(print.proposer_index, print.best_guess_single); + } + + Ok(map) + } + + // Construct a request to the Blockprint server for a range of slots between `start_slot` and + // `end_slot`. + pub async fn get_blockprint( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result, Error> { + let url = self + .server + .full + .join("blocks/")? 
+ .join(&format!("{start_slot}/{end_slot}"))?; + + let response = self.get(url).await?; + + let result = response + .json::>() + .await? + .iter() + .map(|response| WatchBlockprint { + slot: WatchSlot::from_slot(response.slot), + best_guess: response.best_guess_single.clone(), + }) + .collect(); + Ok(result) + } +} diff --git a/watch/src/blockprint/server.rs b/watch/src/blockprint/server.rs new file mode 100644 index 0000000000..488af15717 --- /dev/null +++ b/watch/src/blockprint/server.rs @@ -0,0 +1,31 @@ +use crate::blockprint::database::{ + get_blockprint_by_root, get_blockprint_by_slot, WatchBlockprint, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_blockprint( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(get_blockprint_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_blockprint_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn blockprint_routes() -> Router { + Router::new().route("/v1/blocks/:block/blockprint", get(get_blockprint)) +} diff --git a/watch/src/blockprint/updater.rs b/watch/src/blockprint/updater.rs new file mode 100644 index 0000000000..28c3184556 --- /dev/null +++ b/watch/src/blockprint/updater.rs @@ -0,0 +1,172 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT: u64 = 1600; + +impl UpdateHandler { + /// Forward fills the `blockprint` table starting from the entry with the + /// highest slot. 
+ /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> highest filled `blockprint` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn fill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint in enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `blockprint` table. + let mut start_slot = if let Some(highest_filled_slot) = + database::get_highest_blockprint(&mut conn)?.map(|print| print.slot) + { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `blockprint` table. + warn!("Refusing to fill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Blockprint is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table. 
+ prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in either + // `blockprint` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + + Ok(()) + } + + /// Backfill the `blockprint` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `blockprint` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn backfill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint in enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + let max_blockprint_backfill = + self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `blockprint` table. + let end_slot = if let Some(lowest_filled_slot) = + database::get_lowest_blockprint(&mut conn)?.map(|print| print.slot) + { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `blockprint` table. 
+ warn!("Refusing to backfill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Blockprint backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? { + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Blockprint are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_blockprint_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + if start_slot < end_slot.saturating_sub(max_blockprint_backfill) { + start_slot = end_slot.saturating_sub(max_blockprint_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) + } + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1 + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table. + prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the `blockprint` + // table. This is a critical failure. It usually means someone has manually tampered with the + // database tables and should not occur during normal operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + Ok(()) + } +} diff --git a/watch/src/cli.rs b/watch/src/cli.rs new file mode 100644 index 0000000000..a8e5f3716f --- /dev/null +++ b/watch/src/cli.rs @@ -0,0 +1,55 @@ +use crate::{config::Config, logger, server, updater}; +use clap::{App, Arg}; +use tokio::sync::oneshot; + +pub const SERVE: &str = "serve"; +pub const RUN_UPDATER: &str = "run-updater"; +pub const CONFIG: &str = "config"; + +fn run_updater<'a, 'b>() -> App<'a, 'b> { + App::new(RUN_UPDATER).setting(clap::AppSettings::ColoredHelp) +} + +fn serve<'a, 'b>() -> App<'a, 'b> { + App::new(SERVE).setting(clap::AppSettings::ColoredHelp) +} + +pub fn app<'a, 'b>() -> App<'a, 'b> { + App::new("beacon_watch_daemon") + .author("Sigma Prime ") + .setting(clap::AppSettings::ColoredHelp) + .arg( + Arg::with_name(CONFIG) + .long(CONFIG) + .value_name("PATH_TO_CONFIG") + .help("Path to configuration file") + .takes_value(true) + .global(true), + ) + .subcommand(run_updater()) + .subcommand(serve()) +} + +pub async fn run() -> Result<(), String> { + let matches = app().get_matches(); + + let config = match matches.value_of(CONFIG) { + Some(path) => Config::load_from_file(path.to_string())?, + None => Config::default(), + }; + + logger::init_logger(&config.log_level); + + match matches.subcommand() { + (RUN_UPDATER, Some(_)) => updater::run_updater(config) + .await + .map_err(|e| format!("Failure: {:?}", e)), + (SERVE, Some(_)) => { + let (_shutdown_tx, shutdown_rx) = oneshot::channel(); + server::serve(config, shutdown_rx) + .await + .map_err(|e| format!("Failure: {:?}", e)) + } + _ => Err("Unsupported subcommand. 
See --help".into()), + } +} diff --git a/watch/src/client.rs b/watch/src/client.rs new file mode 100644 index 0000000000..43aaccde34 --- /dev/null +++ b/watch/src/client.rs @@ -0,0 +1,178 @@ +use crate::block_packing::WatchBlockPacking; +use crate::block_rewards::WatchBlockRewards; +use crate::database::models::{ + WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator, +}; +use crate::suboptimal_attestations::WatchAttestation; + +use eth2::types::BlockId; +use reqwest::Client; +use serde::de::DeserializeOwned; +use types::Hash256; +use url::Url; + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchHttpClient { + pub client: Client, + pub server: Url, +} + +impl WatchHttpClient { + async fn get_opt(&self, url: Url) -> Result, Error> { + let response = self.client.get(url).send().await?; + + if response.status() == 404 { + Ok(None) + } else { + response + .error_for_status()? + .json() + .await + .map_err(Into::into) + } + } + + pub async fn get_beacon_blocks( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? 
+ .join(&block_id.to_string())?; + + self.get_opt(url).await + } + + pub async fn get_lowest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_lowest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_next_beacon_block( + &self, + parent: Hash256, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{parent:?}/"))? + .join("next")?; + + self.get_opt(url).await + } + + pub async fn get_validator_by_index( + &self, + index: i32, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join(&format!("{index}"))?; + + self.get_opt(url).await + } + + pub async fn get_proposer_info( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("proposer")?; + + self.get_opt(url).await + } + + pub async fn get_block_reward( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("rewards")?; + + self.get_opt(url).await + } + + pub async fn get_block_packing( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? 
+ .join("packing")?; + + self.get_opt(url).await + } + + pub async fn get_all_validators(&self) -> Result>, Error> { + let url = self.server.join("v1/")?.join("validators/")?.join("all")?; + + self.get_opt(url).await + } + + pub async fn get_attestations( + &self, + epoch: i32, + ) -> Result>, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join("all/")? + .join("attestation/")? + .join(&format!("{epoch}"))?; + + self.get_opt(url).await + } +} diff --git a/watch/src/config.rs b/watch/src/config.rs new file mode 100644 index 0000000000..4e61f9df9c --- /dev/null +++ b/watch/src/config.rs @@ -0,0 +1,50 @@ +use crate::blockprint::Config as BlockprintConfig; +use crate::database::Config as DatabaseConfig; +use crate::server::Config as ServerConfig; +use crate::updater::Config as UpdaterConfig; + +use serde::{Deserialize, Serialize}; +use std::fs::File; + +pub const LOG_LEVEL: &str = "debug"; + +fn log_level() -> String { + LOG_LEVEL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default)] + pub blockprint: BlockprintConfig, + #[serde(default)] + pub database: DatabaseConfig, + #[serde(default)] + pub server: ServerConfig, + #[serde(default)] + pub updater: UpdaterConfig, + /// The minimum severity for log messages. 
+ #[serde(default = "log_level")] + pub log_level: String, +} + +impl Default for Config { + fn default() -> Self { + Self { + blockprint: BlockprintConfig::default(), + database: DatabaseConfig::default(), + server: ServerConfig::default(), + updater: UpdaterConfig::default(), + log_level: log_level(), + } + } +} + +impl Config { + pub fn load_from_file(path_to_file: String) -> Result { + let file = + File::open(path_to_file).map_err(|e| format!("Error reading config file: {:?}", e))?; + let config: Config = serde_yaml::from_reader(file) + .map_err(|e| format!("Error parsing config file: {:?}", e))?; + Ok(config) + } +} diff --git a/watch/src/database/compat.rs b/watch/src/database/compat.rs new file mode 100644 index 0000000000..b8cda0b216 --- /dev/null +++ b/watch/src/database/compat.rs @@ -0,0 +1,49 @@ +//! Implementations of PostgreSQL compatibility traits. +use crate::database::watch_types::{WatchHash, WatchPK, WatchSlot}; +use diesel::deserialize::{self, FromSql}; +use diesel::pg::{Pg, PgValue}; +use diesel::serialize::{self, Output, ToSql}; +use diesel::sql_types::{Binary, Integer}; + +use std::convert::TryFrom; + +macro_rules! impl_to_from_sql_int { + ($type:ty) => { + impl ToSql for $type + where + i32: ToSql, + { + fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { + let v = i32::try_from(self.as_u64()).map_err(|e| Box::new(e))?; + >::to_sql(&v, &mut out.reborrow()) + } + } + + impl FromSql for $type { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + Ok(Self::new(i32::from_sql(bytes)? as u64)) + } + } + }; +} + +macro_rules! 
impl_to_from_sql_binary { + ($type:ty) => { + impl ToSql for $type { + fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { + let b = self.as_bytes(); + <&[u8] as ToSql>::to_sql(&b, &mut out.reborrow()) + } + } + + impl FromSql for $type { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + Self::from_bytes(bytes.as_bytes()).map_err(|e| e.to_string().into()) + } + } + }; +} + +impl_to_from_sql_int!(WatchSlot); +impl_to_from_sql_binary!(WatchHash); +impl_to_from_sql_binary!(WatchPK); diff --git a/watch/src/database/config.rs b/watch/src/database/config.rs new file mode 100644 index 0000000000..dc0c70832f --- /dev/null +++ b/watch/src/database/config.rs @@ -0,0 +1,74 @@ +use serde::{Deserialize, Serialize}; + +pub const USER: &str = "postgres"; +pub const PASSWORD: &str = "postgres"; +pub const DBNAME: &str = "dev"; +pub const DEFAULT_DBNAME: &str = "postgres"; +pub const HOST: &str = "localhost"; +pub const fn port() -> u16 { + 5432 +} +pub const fn connect_timeout_millis() -> u64 { + 2_000 // 2s +} + +fn user() -> String { + USER.to_string() +} + +fn password() -> String { + PASSWORD.to_string() +} + +fn dbname() -> String { + DBNAME.to_string() +} + +fn default_dbname() -> String { + DEFAULT_DBNAME.to_string() +} + +fn host() -> String { + HOST.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "user")] + pub user: String, + #[serde(default = "password")] + pub password: String, + #[serde(default = "dbname")] + pub dbname: String, + #[serde(default = "default_dbname")] + pub default_dbname: String, + #[serde(default = "host")] + pub host: String, + #[serde(default = "port")] + pub port: u16, + #[serde(default = "connect_timeout_millis")] + pub connect_timeout_millis: u64, +} + +impl Default for Config { + fn default() -> Self { + Self { + user: user(), + password: password(), + dbname: dbname(), + default_dbname: default_dbname(), + host: host(), + port: port(), + 
connect_timeout_millis: connect_timeout_millis(), + } + } +} + +impl Config { + pub fn build_database_url(&self) -> String { + format!( + "postgres://{}:{}@{}:{}/{}", + self.user, self.password, self.host, self.port, self.dbname + ) + } +} diff --git a/watch/src/database/error.rs b/watch/src/database/error.rs new file mode 100644 index 0000000000..8c5088fa13 --- /dev/null +++ b/watch/src/database/error.rs @@ -0,0 +1,55 @@ +use bls::Error as BlsError; +use diesel::result::{ConnectionError, Error as PgError}; +use eth2::SensitiveError; +use r2d2::Error as PoolError; +use std::fmt; +use types::BeaconStateError; + +#[derive(Debug)] +pub enum Error { + BeaconState(BeaconStateError), + Database(PgError), + DatabaseCorrupted, + InvalidSig(BlsError), + PostgresConnection(ConnectionError), + Pool(PoolError), + SensitiveUrl(SensitiveError), + InvalidRoot, + Other(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconState(e) + } +} + +impl From for Error { + fn from(e: ConnectionError) -> Self { + Error::PostgresConnection(e) + } +} + +impl From for Error { + fn from(e: PgError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: PoolError) -> Self { + Error::Pool(e) + } +} + +impl From for Error { + fn from(e: BlsError) -> Self { + Error::InvalidSig(e) + } +} diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs new file mode 100644 index 0000000000..b9a7a900a5 --- /dev/null +++ b/watch/src/database/mod.rs @@ -0,0 +1,782 @@ +mod config; +mod error; + +pub mod compat; +pub mod models; +pub mod schema; +pub mod utils; +pub mod watch_types; + +use self::schema::{ + active_config, beacon_blocks, canonical_slots, proposer_info, suboptimal_attestations, + validators, +}; + +use diesel::dsl::max; +use diesel::pg::PgConnection; +use diesel::prelude::*; +use diesel::r2d2::{Builder, 
ConnectionManager, Pool, PooledConnection}; +use diesel::upsert::excluded; +use log::{debug, info}; +use std::collections::HashMap; +use std::time::Instant; +use types::{EthSpec, SignedBeaconBlock}; + +pub use self::error::Error; +pub use self::models::{WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator}; +pub use self::watch_types::{WatchHash, WatchPK, WatchSlot}; + +pub use crate::block_rewards::{ + get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, + get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, + WatchBlockRewards, +}; + +pub use crate::block_packing::{ + get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, + get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, + WatchBlockPacking, +}; + +pub use crate::suboptimal_attestations::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, + get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, + WatchAttestation, WatchSuboptimalAttestation, +}; + +pub use crate::blockprint::{ + get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, + get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, + WatchBlockprint, +}; + +pub use config::Config; + +/// Batch inserts cannot exceed a certain size. +/// See https://github.com/diesel-rs/diesel/issues/2414. +/// For some reason, this seems to translate to 65535 / 5 (13107) records. +pub const MAX_SIZE_BATCH_INSERT: usize = 13107; + +pub type PgPool = Pool>; +pub type PgConn = PooledConnection>; + +/// Connect to a Postgresql database and build a connection pool. 
+pub fn build_connection_pool(config: &Config) -> Result { + let database_url = config.clone().build_database_url(); + info!("Building connection pool at: {database_url}"); + let pg = ConnectionManager::::new(&database_url); + Builder::new().build(pg).map_err(Error::Pool) +} + +/// Retrieve an idle connection from the pool. +pub fn get_connection(pool: &PgPool) -> Result { + pool.get().map_err(Error::Pool) +} + +/// Insert the active config into the database. This is used to check if the connected beacon node +/// is compatible with the database. These values will not change (except +/// `current_blockprint_checkpoint`). +pub fn insert_active_config( + conn: &mut PgConn, + new_config_name: String, + new_slots_per_epoch: u64, +) -> Result<(), Error> { + use self::active_config::dsl::*; + + diesel::insert_into(active_config) + .values(&vec![( + id.eq(1), + config_name.eq(new_config_name), + slots_per_epoch.eq(new_slots_per_epoch as i32), + )]) + .on_conflict_do_nothing() + .execute(conn)?; + + Ok(()) +} + +/// Get the active config from the database. +pub fn get_active_config(conn: &mut PgConn) -> Result, Error> { + use self::active_config::dsl::*; + Ok(active_config + .select((config_name, slots_per_epoch)) + .filter(id.eq(1)) + .first::<(String, i32)>(conn) + .optional()?) +} + +/// +/// INSERT statements +/// + +/// Inserts a single row into the `canonical_slots` table. +/// If `new_slot.beacon_block` is `None`, the value in the row will be `null`. +/// +/// On a conflict, it will do nothing, leaving the old value. 
+pub fn insert_canonical_slot(conn: &mut PgConn, new_slot: WatchCanonicalSlot) -> Result<(), Error> { + diesel::insert_into(canonical_slots::table) + .values(&new_slot) + .on_conflict_do_nothing() + .execute(conn)?; + + debug!("Canonical slot inserted: {}", new_slot.slot); + Ok(()) +} + +pub fn insert_beacon_block( + conn: &mut PgConn, + block: SignedBeaconBlock, + root: WatchHash, +) -> Result<(), Error> { + use self::canonical_slots::dsl::{beacon_block, slot as canonical_slot}; + + let block_message = block.message(); + + // Pull out relevant values from the block. + let slot = WatchSlot::from_slot(block.slot()); + let parent_root = WatchHash::from_hash(block.parent_root()); + let proposer_index = block_message.proposer_index() as i32; + let graffiti = block_message.body().graffiti().as_utf8_lossy(); + let attestation_count = block_message.body().attestations().len() as i32; + + let full_payload = block_message.execution_payload().ok(); + + let transaction_count: Option = if let Some(bellatrix_payload) = + full_payload.and_then(|payload| payload.execution_payload_merge().ok()) + { + Some(bellatrix_payload.transactions.len() as i32) + } else { + full_payload + .and_then(|payload| payload.execution_payload_capella().ok()) + .map(|payload| payload.transactions.len() as i32) + }; + + let withdrawal_count: Option = full_payload + .and_then(|payload| payload.execution_payload_capella().ok()) + .map(|payload| payload.withdrawals.len() as i32); + + let block_to_add = WatchBeaconBlock { + slot, + root, + parent_root, + attestation_count, + transaction_count, + withdrawal_count, + }; + + let proposer_info_to_add = WatchProposerInfo { + slot, + proposer_index, + graffiti, + }; + + // Update the canonical slots table. + diesel::update(canonical_slots::table) + .set(beacon_block.eq(root)) + .filter(canonical_slot.eq(slot)) + // Do not overwrite the value if it already exists. 
+ .filter(beacon_block.is_null()) + .execute(conn)?; + + diesel::insert_into(beacon_blocks::table) + .values(block_to_add) + .on_conflict_do_nothing() + .execute(conn)?; + + diesel::insert_into(proposer_info::table) + .values(proposer_info_to_add) + .on_conflict_do_nothing() + .execute(conn)?; + + debug!("Beacon block inserted at slot: {slot}, root: {root}, parent: {parent_root}"); + Ok(()) +} + +/// Insert a validator into the `validators` table +/// +/// On a conflict, it will only overwrite `status`, `activation_epoch` and `exit_epoch`. +pub fn insert_validator(conn: &mut PgConn, validator: WatchValidator) -> Result<(), Error> { + use self::validators::dsl::*; + let new_index = validator.index; + let new_public_key = validator.public_key; + + diesel::insert_into(validators) + .values(validator) + .on_conflict(index) + .do_update() + .set(( + status.eq(excluded(status)), + activation_epoch.eq(excluded(activation_epoch)), + exit_epoch.eq(excluded(exit_epoch)), + )) + .execute(conn)?; + + debug!("Validator inserted, index: {new_index}, public_key: {new_public_key}"); + Ok(()) +} + +/// Insert a batch of values into the `validators` table. +/// +/// On a conflict, it will do nothing. +/// +/// Should not be used when updating validators. +/// Validators should be updated through the `insert_validator` function which contains the correct +/// `on_conflict` clauses. +pub fn insert_batch_validators( + conn: &mut PgConn, + all_validators: Vec, +) -> Result<(), Error> { + use self::validators::dsl::*; + + let mut count = 0; + + for chunk in all_validators.chunks(1000) { + count += diesel::insert_into(validators) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + debug!("Validators inserted, count: {count}"); + Ok(()) +} + +/// +/// SELECT statements +/// + +/// Selects a single row of the `canonical_slots` table corresponding to a given `slot_query`. 
+pub fn get_canonical_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `canonical_slots` table corresponding to a given `root_query`. +/// Only returns the non-skipped slot which matches `root`. +pub fn get_canonical_slot_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(root.eq(root_query)) + .filter(skipped.eq(false)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical root requested: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `root` from a single row of the `canonical_slots` table corresponding to a given +/// `slot_query`. +#[allow(dead_code)] +pub fn get_root_at_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .select(root) + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value +/// of `slot`. 
+pub fn get_lowest_canonical_slot(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: lowest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value +/// of `slot` and where `skipped == false`. +pub fn get_lowest_non_skipped_canonical_slot( + conn: &mut PgConn, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(skipped.eq(false)) + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: lowest_non_skipped, time taken: {time_taken:?})"); + Ok(result) +} + +/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value +/// of `slot`. +pub fn get_highest_canonical_slot(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: highest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value +/// of `slot` and where `skipped == false`. 
+pub fn get_highest_non_skipped_canonical_slot( + conn: &mut PgConn, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(skipped.eq(false)) + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: highest_non_skipped, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select all rows of the `canonical_slots` table where `slot >= `start_slot && slot <= +/// `end_slot`. +pub fn get_canonical_slots_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!( + "Canonical slots by range requested, start_slot: {}, end_slot: {}, time_taken: {:?}", + start_slot.as_u64(), + end_slot.as_u64(), + time_taken + ); + Ok(result) +} + +/// Selects `root` from all rows of the `canonical_slots` table which have `beacon_block == null` +/// and `skipped == false` +pub fn get_unknown_canonical_blocks(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + + let result = canonical_slots + .select(root) + .filter(beacon_block.is_null()) + .filter(skipped.eq(false)) + .order_by(slot.desc()) + .load::(conn)?; + + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `slot` is minimum. 
+pub fn get_lowest_beacon_block(conn: &mut PgConn) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon block requested: lowest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `slot` is maximum. +pub fn get_highest_beacon_block(conn: &mut PgConn) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon block requested: highest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `beacon_blocks` table corresponding to a given `root_query`. +pub fn get_beacon_block_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + let time_taken = timer.elapsed(); + debug!("Beacon block requested: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `beacon_blocks` table corresponding to a given `slot_query`. +pub fn get_beacon_block_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + let time_taken = timer.elapsed(); + debug!("Beacon block requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `parent_root` equals the given `parent`. +/// This fetches the next block in the database. 
+/// +/// Will return `Ok(None)` if there are no matching blocks (e.g. the tip of the chain). +pub fn get_beacon_block_with_parent( + conn: &mut PgConn, + parent: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(parent_root.eq(parent)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Next beacon block requested: {parent}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select all rows of the `beacon_blocks` table where `slot >= `start_slot && slot <= +/// `end_slot`. +pub fn get_beacon_blocks_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon blocks by range requested, start_slot: {start_slot}, end_slot: {end_slot}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `proposer_info` table corresponding to a given `root_query`. +pub fn get_proposer_info_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(proposer_info); + + let result = join + .select((slot, proposer_index, graffiti)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Proposer info requested for block: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`. 
+pub fn get_proposer_info_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let result = proposer_info + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Proposer info requested for slot: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects multiple rows of the `proposer_info` table between `start_slot` and `end_slot`. +/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`. +#[allow(dead_code)] +pub fn get_proposer_info_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let result = proposer_info + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!( + "Proposer info requested for range: {start_slot} to {end_slot}, time taken: {time_taken:?}" + ); + Ok(result) +} + +pub fn get_validators_latest_proposer_info( + conn: &mut PgConn, + indices_query: Vec, +) -> Result, Error> { + use self::proposer_info::dsl::*; + + let proposers = proposer_info + .filter(proposer_index.eq_any(indices_query)) + .load::(conn)?; + + let mut result = HashMap::new(); + for proposer in proposers { + result + .entry(proposer.proposer_index) + .or_insert_with(|| proposer.clone()); + let entry = result + .get_mut(&proposer.proposer_index) + .ok_or_else(|| Error::Other("An internal error occured".to_string()))?; + if proposer.slot > entry.slot { + entry.slot = proposer.slot + } + } + + Ok(result) +} + +/// Selects the max(`slot`) and `proposer_index` of each unique index in the +/// `proposer_info` table and returns them formatted as a `HashMap`. +/// Only returns rows which have `slot <= target_slot`. 
+/// +/// Ideally, this would return the full row, but I have not found a way to do that without using +/// a much more expensive SQL query. +pub fn get_all_validators_latest_proposer_info_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, +) -> Result, Error> { + use self::proposer_info::dsl::*; + + let latest_proposals: Vec<(i32, Option)> = proposer_info + .group_by(proposer_index) + .select((proposer_index, max(slot))) + .filter(slot.le(target_slot)) + .load::<(i32, Option)>(conn)?; + + let mut result = HashMap::new(); + + for proposal in latest_proposals { + if let Some(latest_slot) = proposal.1 { + result.insert(latest_slot, proposal.0); + } + } + + Ok(result) +} + +/// Selects a single row from the `validators` table corresponding to a given +/// `validator_index_query`. +pub fn get_validator_by_index( + conn: &mut PgConn, + validator_index_query: i32, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators + .filter(index.eq(validator_index_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Validator requested: {validator_index_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `validators` table corresponding to a given +/// `public_key_query`. +pub fn get_validator_by_public_key( + conn: &mut PgConn, + public_key_query: WatchPK, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators + .filter(public_key.eq(public_key_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Validator requested: {public_key_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects all rows from the `validators` table which have an `index` contained in +/// the `indices_query`. 
+#[allow(dead_code)] +pub fn get_validators_by_indices( + conn: &mut PgConn, + indices_query: Vec, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let query_len = indices_query.len(); + let result = validators + .filter(index.eq_any(indices_query)) + .load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("{query_len} validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +// Selects all rows from the `validators` table. +pub fn get_all_validators(conn: &mut PgConn) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators.load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("All validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +/// Counts the number of rows in the `validators` table. +#[allow(dead_code)] +pub fn count_validators(conn: &mut PgConn) -> Result { + use self::validators::dsl::*; + + validators.count().get_result(conn).map_err(Error::Database) +} + +/// Counts the number of rows in the `validators` table where +/// `activation_epoch <= target_slot.epoch()`. +pub fn count_validators_activated_before_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result { + use self::validators::dsl::*; + + let target_epoch = target_slot.epoch(slots_per_epoch); + + validators + .count() + .filter(activation_epoch.le(target_epoch.as_u64() as i32)) + .get_result(conn) + .map_err(Error::Database) +} + +/// +/// DELETE statements. +/// + +/// Deletes all rows of the `canonical_slots` table which have `slot` greater than `slot_query`. +/// +/// Due to the ON DELETE CASCADE clause present in the database migration SQL, deleting rows from +/// `canonical_slots` will delete all corresponding rows in `beacon_blocks, `block_rewards`, +/// `block_packing` and `proposer_info`. 
+pub fn delete_canonical_slots_above( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result { + use self::canonical_slots::dsl::*; + + let result = diesel::delete(canonical_slots) + .filter(slot.gt(slot_query)) + .execute(conn)?; + + debug!("Deleted canonical slots above {slot_query}: {result} rows deleted"); + Ok(result) +} + +/// Deletes all rows of the `suboptimal_attestations` table which have `epoch_start_slot` greater +/// than `epoch_start_slot_query`. +pub fn delete_suboptimal_attestations_above( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result { + use self::suboptimal_attestations::dsl::*; + + let result = diesel::delete(suboptimal_attestations) + .filter(epoch_start_slot.gt(epoch_start_slot_query)) + .execute(conn)?; + + debug!("Deleted attestations above: {epoch_start_slot_query}, rows deleted: {result}"); + Ok(result) +} diff --git a/watch/src/database/models.rs b/watch/src/database/models.rs new file mode 100644 index 0000000000..f42444d661 --- /dev/null +++ b/watch/src/database/models.rs @@ -0,0 +1,67 @@ +use crate::database::{ + schema::{beacon_blocks, canonical_slots, proposer_info, validators}, + watch_types::{WatchHash, WatchPK, WatchSlot}, +}; +use diesel::{Insertable, Queryable}; +use serde::{Deserialize, Serialize}; +use std::hash::{Hash, Hasher}; + +pub type WatchEpoch = i32; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = canonical_slots)] +pub struct WatchCanonicalSlot { + pub slot: WatchSlot, + pub root: WatchHash, + pub skipped: bool, + pub beacon_block: Option, +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = beacon_blocks)] +pub struct WatchBeaconBlock { + pub slot: WatchSlot, + pub root: WatchHash, + pub parent_root: WatchHash, + pub attestation_count: i32, + pub transaction_count: Option, + pub withdrawal_count: Option, +} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = 
validators)] +pub struct WatchValidator { + pub index: i32, + pub public_key: WatchPK, + pub status: String, + pub activation_epoch: Option, + pub exit_epoch: Option, +} + +// Implement a minimal version of `Hash` and `Eq` so that we know if a validator status has changed. +impl Hash for WatchValidator { + fn hash(&self, state: &mut H) { + self.index.hash(state); + self.status.hash(state); + self.activation_epoch.hash(state); + self.exit_epoch.hash(state); + } +} + +impl PartialEq for WatchValidator { + fn eq(&self, other: &Self) -> bool { + self.index == other.index + && self.status == other.status + && self.activation_epoch == other.activation_epoch + && self.exit_epoch == other.exit_epoch + } +} +impl Eq for WatchValidator {} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = proposer_info)] +pub struct WatchProposerInfo { + pub slot: WatchSlot, + pub proposer_index: i32, + pub graffiti: String, +} diff --git a/watch/src/database/schema.rs b/watch/src/database/schema.rs new file mode 100644 index 0000000000..32f22d506d --- /dev/null +++ b/watch/src/database/schema.rs @@ -0,0 +1,102 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + active_config (id) { + id -> Int4, + config_name -> Text, + slots_per_epoch -> Int4, + } +} + +diesel::table! { + beacon_blocks (slot) { + slot -> Int4, + root -> Bytea, + parent_root -> Bytea, + attestation_count -> Int4, + transaction_count -> Nullable, + withdrawal_count -> Nullable, + } +} + +diesel::table! { + block_packing (slot) { + slot -> Int4, + available -> Int4, + included -> Int4, + prior_skip_slots -> Int4, + } +} + +diesel::table! { + block_rewards (slot) { + slot -> Int4, + total -> Int4, + attestation_reward -> Int4, + sync_committee_reward -> Int4, + } +} + +diesel::table! { + blockprint (slot) { + slot -> Int4, + best_guess -> Text, + } +} + +diesel::table! 
{ + canonical_slots (slot) { + slot -> Int4, + root -> Bytea, + skipped -> Bool, + beacon_block -> Nullable, + } +} + +diesel::table! { + proposer_info (slot) { + slot -> Int4, + proposer_index -> Int4, + graffiti -> Text, + } +} + +diesel::table! { + suboptimal_attestations (epoch_start_slot, index) { + epoch_start_slot -> Int4, + index -> Int4, + source -> Bool, + head -> Bool, + target -> Bool, + } +} + +diesel::table! { + validators (index) { + index -> Int4, + public_key -> Bytea, + status -> Text, + activation_epoch -> Nullable, + exit_epoch -> Nullable, + } +} + +diesel::joinable!(block_packing -> beacon_blocks (slot)); +diesel::joinable!(block_rewards -> beacon_blocks (slot)); +diesel::joinable!(blockprint -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> validators (proposer_index)); +diesel::joinable!(suboptimal_attestations -> canonical_slots (epoch_start_slot)); +diesel::joinable!(suboptimal_attestations -> validators (index)); + +diesel::allow_tables_to_appear_in_same_query!( + active_config, + beacon_blocks, + block_packing, + block_rewards, + blockprint, + canonical_slots, + proposer_info, + suboptimal_attestations, + validators, +); diff --git a/watch/src/database/utils.rs b/watch/src/database/utils.rs new file mode 100644 index 0000000000..7e450f0cee --- /dev/null +++ b/watch/src/database/utils.rs @@ -0,0 +1,29 @@ +#![allow(dead_code)] +use crate::database::config::Config; +use diesel::pg::PgConnection; +use diesel::prelude::*; +use diesel_migrations::{FileBasedMigrations, MigrationHarness}; + +/// Sets `config.dbname` to `config.default_dbname` and returns `(new_config, old_dbname)`. +/// +/// This is useful for creating or dropping databases, since these actions must be done by +/// logging into another database. 
+pub fn get_config_using_default_db(config: &Config) -> (Config, String) { + let mut config = config.clone(); + let new_dbname = std::mem::replace(&mut config.dbname, config.default_dbname.clone()); + (config, new_dbname) +} + +/// Runs the set of migrations as detected in the local directory. +/// Equivalent to `diesel migration run`. +/// +/// Contains `unwrap`s so is only suitable for test code. +/// TODO(mac) refactor to return Result +pub fn run_migrations(config: &Config) -> PgConnection { + let database_url = config.clone().build_database_url(); + let mut conn = PgConnection::establish(&database_url).unwrap(); + let migrations = FileBasedMigrations::find_migrations_directory().unwrap(); + conn.run_pending_migrations(migrations).unwrap(); + conn.begin_test_transaction().unwrap(); + conn +} diff --git a/watch/src/database/watch_types.rs b/watch/src/database/watch_types.rs new file mode 100644 index 0000000000..0b3ba2c304 --- /dev/null +++ b/watch/src/database/watch_types.rs @@ -0,0 +1,119 @@ +use crate::database::error::Error; +use diesel::{ + sql_types::{Binary, Integer}, + AsExpression, FromSqlRow, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::str::FromStr; +use types::{Epoch, Hash256, PublicKeyBytes, Slot}; +#[derive( + Clone, + Copy, + Debug, + AsExpression, + FromSqlRow, + Deserialize, + Serialize, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +#[diesel(sql_type = Integer)] +pub struct WatchSlot(Slot); + +impl fmt::Display for WatchSlot { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl WatchSlot { + pub fn new(slot: u64) -> Self { + Self(Slot::new(slot)) + } + + pub fn from_slot(slot: Slot) -> Self { + Self(slot) + } + + pub fn as_slot(self) -> Slot { + self.0 + } + + pub fn as_u64(self) -> u64 { + self.0.as_u64() + } + + pub fn epoch(self, slots_per_epoch: u64) -> Epoch { + self.as_slot().epoch(slots_per_epoch) + } +} + +#[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, 
Deserialize, Serialize)] +#[diesel(sql_type = Binary)] +pub struct WatchHash(Hash256); + +impl fmt::Display for WatchHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchHash { + pub fn as_hash(&self) -> Hash256 { + self.0 + } + + pub fn from_hash(hash: Hash256) -> Self { + WatchHash(hash) + } + + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } + + pub fn from_bytes(src: &[u8]) -> Result { + if src.len() == 32 { + Ok(WatchHash(Hash256::from_slice(src))) + } else { + Err(Error::InvalidRoot) + } + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, AsExpression, FromSqlRow, Serialize, Deserialize)] +#[diesel(sql_type = Binary)] +pub struct WatchPK(PublicKeyBytes); + +impl fmt::Display for WatchPK { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchPK { + pub fn as_bytes(&self) -> &[u8] { + self.0.as_serialized() + } + + pub fn from_bytes(src: &[u8]) -> Result { + Ok(WatchPK(PublicKeyBytes::deserialize(src)?)) + } + + pub fn from_pubkey(key: PublicKeyBytes) -> Self { + WatchPK(key) + } +} + +impl FromStr for WatchPK { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(WatchPK( + PublicKeyBytes::from_str(s).map_err(|e| format!("Cannot be parsed: {}", e))?, + )) + } +} diff --git a/watch/src/lib.rs b/watch/src/lib.rs new file mode 100644 index 0000000000..664c945165 --- /dev/null +++ b/watch/src/lib.rs @@ -0,0 +1,12 @@ +#![cfg(unix)] +pub mod block_packing; +pub mod block_rewards; +pub mod blockprint; +pub mod cli; +pub mod client; +pub mod config; +pub mod database; +pub mod logger; +pub mod server; +pub mod suboptimal_attestations; +pub mod updater; diff --git a/watch/src/logger.rs b/watch/src/logger.rs new file mode 100644 index 0000000000..49310b42aa --- /dev/null +++ b/watch/src/logger.rs @@ -0,0 +1,24 @@ +use env_logger::Builder; +use log::{info, LevelFilter}; +use std::process; + +pub fn init_logger(log_level: &str) { + 
let log_level = match log_level.to_lowercase().as_str() { + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "info" => LevelFilter::Info, + "warn" => LevelFilter::Warn, + "error" => LevelFilter::Error, + _ => { + eprintln!("Unsupported log level"); + process::exit(1) + } + }; + + let mut builder = Builder::new(); + builder.filter(Some("watch"), log_level); + + builder.init(); + + info!("Logger initialized with log-level: {log_level}"); +} diff --git a/watch/src/main.rs b/watch/src/main.rs new file mode 100644 index 0000000000..f971747da4 --- /dev/null +++ b/watch/src/main.rs @@ -0,0 +1,41 @@ +#[cfg(unix)] +use std::process; + +#[cfg(unix)] +mod block_packing; +#[cfg(unix)] +mod block_rewards; +#[cfg(unix)] +mod blockprint; +#[cfg(unix)] +mod cli; +#[cfg(unix)] +mod config; +#[cfg(unix)] +mod database; +#[cfg(unix)] +mod logger; +#[cfg(unix)] +mod server; +#[cfg(unix)] +mod suboptimal_attestations; +#[cfg(unix)] +mod updater; + +#[cfg(unix)] +#[tokio::main] +async fn main() { + match cli::run().await { + Ok(()) => process::exit(0), + Err(e) => { + eprintln!("Command failed with: {}", e); + drop(e); + process::exit(1) + } + } +} + +#[cfg(windows)] +fn main() { + eprintln!("Windows is not supported. 
Exiting."); +} diff --git a/watch/src/server/config.rs b/watch/src/server/config.rs new file mode 100644 index 0000000000..a7d38e706f --- /dev/null +++ b/watch/src/server/config.rs @@ -0,0 +1,28 @@ +use serde::{Deserialize, Serialize}; +use std::net::IpAddr; + +pub const LISTEN_ADDR: &str = "127.0.0.1"; + +pub const fn listen_port() -> u16 { + 5059 +} +fn listen_addr() -> IpAddr { + LISTEN_ADDR.parse().expect("Server address is not valid") +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "listen_addr")] + pub listen_addr: IpAddr, + #[serde(default = "listen_port")] + pub listen_port: u16, +} + +impl Default for Config { + fn default() -> Self { + Self { + listen_addr: listen_addr(), + listen_port: listen_port(), + } + } +} diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs new file mode 100644 index 0000000000..d1542f7841 --- /dev/null +++ b/watch/src/server/error.rs @@ -0,0 +1,50 @@ +use crate::database::Error as DbError; +use axum::Error as AxumError; +use axum::{http::StatusCode, response::IntoResponse, Json}; +use hyper::Error as HyperError; +use serde_json::json; + +#[derive(Debug)] +pub enum Error { + Axum(AxumError), + Hyper(HyperError), + Database(DbError), + BadRequest, + NotFound, + Other(String), +} + +impl IntoResponse for Error { + fn into_response(self) -> axum::response::Response { + let (status, error_message) = match self { + Self::BadRequest => (StatusCode::BAD_REQUEST, "Bad Request"), + Self::NotFound => (StatusCode::NOT_FOUND, "Not Found"), + _ => (StatusCode::INTERNAL_SERVER_ERROR, "Internal Server Error"), + }; + (status, Json(json!({ "error": error_message }))).into_response() + } +} + +impl From for Error { + fn from(e: HyperError) -> Self { + Error::Hyper(e) + } +} + +impl From for Error { + fn from(e: AxumError) -> Self { + Error::Axum(e) + } +} + +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: String) -> 
Self { + Error::Other(e) + } +} diff --git a/watch/src/server/handler.rs b/watch/src/server/handler.rs new file mode 100644 index 0000000000..6777026867 --- /dev/null +++ b/watch/src/server/handler.rs @@ -0,0 +1,266 @@ +use crate::database::{ + self, Error as DbError, PgPool, WatchBeaconBlock, WatchCanonicalSlot, WatchHash, WatchPK, + WatchProposerInfo, WatchSlot, WatchValidator, +}; +use crate::server::Error; +use axum::{ + extract::{Path, Query}, + Extension, Json, +}; +use eth2::types::BlockId; +use std::collections::HashMap; +use std::str::FromStr; + +pub async fn get_slot( + Path(slot): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_canonical_slot( + &mut conn, + WatchSlot::new(slot), + )?)) +} + +pub async fn get_slot_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slot_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slots_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_canonical_slots_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = 
database::get_connection(&pool).map_err(Error::Database)?; + let block_id: BlockId = BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)?; + match block_id { + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + BlockId::Root(root) => Ok(Json(database::get_beacon_block_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_previous( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => { + if let Some(block) = + database::get_beacon_block_by_root(&mut conn, WatchHash::from_hash(root))? + .map(|block| block.parent_root) + { + Ok(Json(database::get_beacon_block_by_root(&mut conn, block)?)) + } else { + Err(Error::NotFound) + } + } + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::new(slot.as_u64().checked_sub(1_u64).ok_or(Error::NotFound)?), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_next( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(database::get_beacon_block_with_parent( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot + 1_u64), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_blocks_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_beacon_blocks_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block_proposer( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(database::get_proposer_info_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_proposer_info_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validator( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_public_key( + &mut conn, pubkey, + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_index(&mut conn, index)?)) + } +} + +pub async fn get_all_validators( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_all_validators(&mut conn)?)) +} + +pub async fn get_validator_latest_proposal( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + let validator = + database::get_validator_by_public_key(&mut conn, pubkey)?.ok_or(Error::NotFound)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![validator.index], + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![index], + )?)) + } +} + +pub async fn get_client_breakdown( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + 
if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + Ok(Json(database::get_validators_clients_at_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?)) + } else { + Err(Error::Database(DbError::Other( + "No slots found in database.".to_string(), + ))) + } +} + +pub async fn get_client_breakdown_percentages( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + let mut result = HashMap::new(); + if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + let total = database::count_validators_activated_before_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?; + let clients = + database::get_validators_clients_at_slot(&mut conn, target_slot.slot, slots_per_epoch)?; + for (client, number) in clients.iter() { + let percentage: f64 = *number as f64 / total as f64 * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs new file mode 100644 index 0000000000..09d5ec6aac --- /dev/null +++ b/watch/src/server/mod.rs @@ -0,0 +1,134 @@ +use crate::block_packing::block_packing_routes; +use crate::block_rewards::block_rewards_routes; +use crate::blockprint::blockprint_routes; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool}; +use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes}; +use axum::{ + handler::Handler, + http::{StatusCode, Uri}, + routing::get, + Extension, Json, Router, +}; +use eth2::types::ErrorMessage; +use log::info; +use std::future::Future; +use std::net::SocketAddr; +use tokio::sync::oneshot; + +pub use config::Config; +pub use error::Error; + +mod config; +mod error; +mod handler; + +pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Result<(), Error> { + let db = 
database::build_connection_pool(&config.database)?; + let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)? + .ok_or_else(|| { + Error::Other( + "Database not found. Please run the updater prior to starting the server" + .to_string(), + ) + })?; + + let server = start_server(&config, slots_per_epoch as u64, db, async { + let _ = shutdown.await; + })?; + + server.await?; + + Ok(()) +} + +/// Creates a server that will serve requests using information from `config`. +/// +/// The server will create its own connection pool to serve connections to the database. +/// This is separate to the connection pool that is used for the `updater`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. +/// +/// ## Returns +/// +/// This function will bind the server to the address specified in the config and then return a +/// Future representing the actual server that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. 
+pub fn start_server( + config: &FullConfig, + slots_per_epoch: u64, + pool: PgPool, + shutdown: impl Future + Send + Sync + 'static, +) -> Result> + 'static, Error> { + let mut routes = Router::new() + .route("/v1/slots", get(handler::get_slots_by_range)) + .route("/v1/slots/:slot", get(handler::get_slot)) + .route("/v1/slots/lowest", get(handler::get_slot_lowest)) + .route("/v1/slots/highest", get(handler::get_slot_highest)) + .route("/v1/slots/:slot/block", get(handler::get_block)) + .route("/v1/blocks", get(handler::get_blocks_by_range)) + .route("/v1/blocks/:block", get(handler::get_block)) + .route("/v1/blocks/lowest", get(handler::get_block_lowest)) + .route("/v1/blocks/highest", get(handler::get_block_highest)) + .route( + "/v1/blocks/:block/previous", + get(handler::get_block_previous), + ) + .route("/v1/blocks/:block/next", get(handler::get_block_next)) + .route( + "/v1/blocks/:block/proposer", + get(handler::get_block_proposer), + ) + .route("/v1/validators/:validator", get(handler::get_validator)) + .route("/v1/validators/all", get(handler::get_all_validators)) + .route( + "/v1/validators/:validator/latest_proposal", + get(handler::get_validator_latest_proposal), + ) + .route("/v1/clients", get(handler::get_client_breakdown)) + .route( + "/v1/clients/percentages", + get(handler::get_client_breakdown_percentages), + ) + .merge(attestation_routes()) + .merge(blockprint_routes()) + .merge(block_packing_routes()) + .merge(block_rewards_routes()); + + if config.blockprint.enabled && config.updater.attestations { + routes = routes.merge(blockprint_attestation_routes()) + } + + let app = routes + .fallback(route_not_found.into_service()) + .layer(Extension(pool)) + .layer(Extension(slots_per_epoch)); + + let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); + + let server = axum::Server::try_bind(&addr)?.serve(app.into_make_service()); + + let server = server.with_graceful_shutdown(async { + shutdown.await; + }); + + info!("HTTP 
server listening on {}", addr); + + Ok(server) +} + +// The default route indicating that no available routes matched the request. +async fn route_not_found(uri: Uri) -> (StatusCode, Json) { + ( + StatusCode::METHOD_NOT_ALLOWED, + Json(ErrorMessage { + code: StatusCode::METHOD_NOT_ALLOWED.as_u16(), + message: format!("No route for {uri}"), + stacktraces: vec![], + }), + ) +} diff --git a/watch/src/suboptimal_attestations/database.rs b/watch/src/suboptimal_attestations/database.rs new file mode 100644 index 0000000000..cb947d250a --- /dev/null +++ b/watch/src/suboptimal_attestations/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + schema::{suboptimal_attestations, validators}, + watch_types::{WatchPK, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +use types::Epoch; + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +pub struct WatchAttestation { + pub index: i32, + pub epoch: Epoch, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchAttestation { + pub fn optimal(index: i32, epoch: Epoch) -> WatchAttestation { + WatchAttestation { + index, + epoch, + source: true, + head: true, + target: true, + } + } +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = suboptimal_attestations)] +pub struct WatchSuboptimalAttestation { + pub epoch_start_slot: WatchSlot, + pub index: i32, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchSuboptimalAttestation { + pub fn to_attestation(&self, slots_per_epoch: u64) -> WatchAttestation { + WatchAttestation { + index: self.index, + epoch: self.epoch_start_slot.epoch(slots_per_epoch), + source: self.source, + head: self.head, + target: self.target, + } + } +} + +/// Insert a batch of values into the `suboptimal_attestations` table +/// +/// Since attestations technically occur per-slot 
but we only store them per-epoch (via its +/// `start_slot`) so if any slot in the epoch changes, we need to resync the whole epoch as a +/// 'suboptimal' attestation could now be 'optimal'. +/// +/// This is handled in the update code, where in the case of a re-org, the affected epoch is +/// deleted completely. +/// +/// On a conflict, it will do nothing. +pub fn insert_batch_suboptimal_attestations( + conn: &mut PgConn, + attestations: Vec, +) -> Result<(), Error> { + use self::suboptimal_attestations::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in attestations.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(suboptimal_attestations) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Attestations inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is minimum. +pub fn get_lowest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.asc()) + .limit(1) + .first::(conn) + .optional()?) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is maximum. +pub fn get_highest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.desc()) + .limit(1) + .first::(conn) + .optional()?) +} + +/// Selects a single row from the `suboptimal_attestations` table corresponding to a given +/// `index_query` and `epoch_query`. 
+pub fn get_attestation_by_index( + conn: &mut PgConn, + index_query: i32, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + let timer = Instant::now(); + + let result = suboptimal_attestations + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(index.eq(index_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {index_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `suboptimal_attestations` table corresponding +/// to a given `pubkey_query` and `epoch_query`. +#[allow(dead_code)] +pub fn get_attestation_by_pubkey( + conn: &mut PgConn, + pubkey_query: WatchPK, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + use self::validators::dsl::{public_key, validators}; + let timer = Instant::now(); + + let join = validators.inner_join(suboptimal_attestations); + + let result = join + .select((epoch_start_slot, index, source, head, target)) + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(public_key.eq(pubkey_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {pubkey_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `source == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_source( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(source.eq(false)) + .load::(conn)?) 
+} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `head == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_head( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(head.eq(false)) + .load::(conn)?) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `target == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_target( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(target.eq(false)) + .load::(conn)?) +} + +/// Selects all rows from the `suboptimal_attestations` table for the given +/// `epoch_start_slot_query`. +pub fn get_all_suboptimal_attestations_for_epoch( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .load::(conn)?) 
+} diff --git a/watch/src/suboptimal_attestations/mod.rs b/watch/src/suboptimal_attestations/mod.rs new file mode 100644 index 0000000000..a94532e8ab --- /dev/null +++ b/watch/src/suboptimal_attestations/mod.rs @@ -0,0 +1,56 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, + get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, + WatchAttestation, WatchSuboptimalAttestation, +}; + +pub use server::{attestation_routes, blockprint_attestation_routes}; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/attestation_performance`. +/// Formats the response into a vector of `WatchSuboptimalAttestation`. +/// +/// Any attestations with `source == true && head == true && target == true` are ignored. +pub async fn get_attestation_performances( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + let mut output = Vec::new(); + let result = bn + .get_lighthouse_analysis_attestation_performance( + start_epoch, + end_epoch, + "global".to_string(), + ) + .await?; + for index in result { + for epoch in index.epochs { + if epoch.1.active { + // Check if the attestation is suboptimal. 
+ if !epoch.1.source || !epoch.1.head || !epoch.1.target { + output.push(WatchSuboptimalAttestation { + epoch_start_slot: WatchSlot::from_slot( + Epoch::new(epoch.0).start_slot(slots_per_epoch), + ), + index: index.index as i32, + source: epoch.1.source, + head: epoch.1.head, + target: epoch.1.target, + }) + } + } + } + } + Ok(output) +} diff --git a/watch/src/suboptimal_attestations/server.rs b/watch/src/suboptimal_attestations/server.rs new file mode 100644 index 0000000000..391db9a41b --- /dev/null +++ b/watch/src/suboptimal_attestations/server.rs @@ -0,0 +1,299 @@ +use crate::database::{ + get_canonical_slot, get_connection, get_validator_by_index, get_validator_by_public_key, + get_validators_clients_at_slot, get_validators_latest_proposer_info, PgPool, WatchPK, + WatchSlot, +}; + +use crate::blockprint::database::construct_validator_blockprints_at_slot; +use crate::server::Error; +use crate::suboptimal_attestations::database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, + get_validators_missed_head, get_validators_missed_source, get_validators_missed_target, + WatchAttestation, WatchSuboptimalAttestation, +}; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use std::collections::{HashMap, HashSet}; +use std::str::FromStr; +use types::Epoch; + +// Will return Ok(None) if the epoch is not synced or if the validator does not exist. +// In the future it might be worth differentiating these events. +pub async fn get_validator_attestation( + Path((validator_query, epoch_query)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + let epoch = Epoch::new(epoch_query); + + // Ensure the database has synced the target epoch. + if get_canonical_slot( + &mut conn, + WatchSlot::from_slot(epoch.end_slot(slots_per_epoch)), + )? + .is_none() + { + // Epoch is not fully synced. 
+ return Ok(Json(None)); + } + + let index = if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + get_validator_by_public_key(&mut conn, pubkey)? + .ok_or(Error::NotFound)? + .index + } else { + i32::from_str(&validator_query).map_err(|_| Error::BadRequest)? + }; + let attestation = if let Some(suboptimal_attestation) = + get_attestation_by_index(&mut conn, index, epoch, slots_per_epoch)? + { + Some(suboptimal_attestation.to_attestation(slots_per_epoch)) + } else { + // Attestation was not in database. Check if the validator was active. + match get_validator_by_index(&mut conn, index)? { + Some(validator) => { + if let Some(activation_epoch) = validator.activation_epoch { + if activation_epoch <= epoch.as_u64() as i32 { + if let Some(exit_epoch) = validator.exit_epoch { + if exit_epoch > epoch.as_u64() as i32 { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } else { + // Validator has exited. + None + } + } else { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } + } else { + // Validator is not yet active. + None + } + } else { + // Validator is not yet active. 
+ None + } + } + None => return Err(Error::Other("Validator index does not exist".to_string())), + } + }; + Ok(Json(attestation)) +} + +pub async fn get_all_validators_attestations( + Path(epoch): Path, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + Ok(Json(get_all_suboptimal_attestations_for_epoch( + &mut conn, + epoch_start_slot, + )?)) +} + +pub async fn get_validators_missed_vote( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + match vote.to_lowercase().as_str() { + "source" => Ok(Json(get_validators_missed_source( + &mut conn, + epoch_start_slot, + )?)), + "head" => Ok(Json(get_validators_missed_head( + &mut conn, + epoch_start_slot, + )?)), + "target" => Ok(Json(get_validators_missed_target( + &mut conn, + epoch_start_slot, + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validators_missed_vote_graffiti( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let Json(indices) = get_validators_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + let graffitis = get_validators_latest_proposer_info(&mut conn, indices)? 
+ .values() + .map(|info| info.graffiti.clone()) + .collect::>(); + + let mut result = HashMap::new(); + for graffiti in graffitis { + if !result.contains_key(&graffiti) { + result.insert(graffiti.clone(), 0); + } + *result + .get_mut(&graffiti) + .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? += 1; + } + + Ok(Json(result)) +} + +pub fn attestation_routes() -> Router { + Router::new() + .route( + "/v1/validators/:validator/attestation/:epoch", + get(get_validator_attestation), + ) + .route( + "/v1/validators/all/attestation/:epoch", + get(get_all_validators_attestations), + ) + .route( + "/v1/validators/missed/:vote/:epoch", + get(get_validators_missed_vote), + ) + .route( + "/v1/validators/missed/:vote/:epoch/graffiti", + get(get_validators_missed_vote_graffiti), + ) +} + +/// The functions below are dependent on Blockprint and if it is disabled, the endpoints will be +/// disabled. +pub async fn get_clients_missed_vote( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let Json(indices) = get_validators_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + // All validators which missed the vote. + let indices_map = indices.into_iter().collect::>(); + + let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + // All validators. + let client_map = + construct_validator_blockprints_at_slot(&mut conn, target_slot, slots_per_epoch)?; + + let mut result = HashMap::new(); + + for index in indices_map { + if let Some(print) = client_map.get(&index) { + if !result.contains_key(print) { + result.insert(print.clone(), 0); + } + *result + .get_mut(print) + .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? 
+= 1; + } + } + + Ok(Json(result)) +} + +pub async fn get_clients_missed_vote_percentages( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let Json(clients_counts) = get_clients_missed_vote( + Path((vote, epoch)), + Extension(pool.clone()), + Extension(slots_per_epoch), + ) + .await?; + + let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + let mut conn = get_connection(&pool)?; + let totals = get_validators_clients_at_slot(&mut conn, target_slot, slots_per_epoch)?; + + let mut result = HashMap::new(); + for (client, count) in clients_counts.iter() { + let client_total: f64 = *totals + .get(client) + .ok_or_else(|| Error::Other("Client type mismatch".to_string()))? + as f64; + // `client_total` should never be `0`, but if it is, return `0` instead of `inf`. + if client_total == 0.0 { + result.insert(client.to_string(), 0.0); + } else { + let percentage: f64 = *count as f64 / client_total * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} + +pub async fn get_clients_missed_vote_percentages_relative( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let Json(clients_counts) = get_clients_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + let mut total: u64 = 0; + for (_, count) in clients_counts.iter() { + total += *count + } + + let mut result = HashMap::new(); + for (client, count) in clients_counts.iter() { + // `total` should never be 0, but if it is, return `-` instead of `inf`. 
+ if total == 0 { + result.insert(client.to_string(), 0.0); + } else { + let percentage: f64 = *count as f64 / total as f64 * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} + +pub fn blockprint_attestation_routes() -> Router { + Router::new() + .route( + "/v1/clients/missed/:vote/:epoch", + get(get_clients_missed_vote), + ) + .route( + "/v1/clients/missed/:vote/:epoch/percentages", + get(get_clients_missed_vote_percentages), + ) + .route( + "/v1/clients/missed/:vote/:epoch/percentages/relative", + get(get_clients_missed_vote_percentages_relative), + ) +} diff --git a/watch/src/suboptimal_attestations/updater.rs b/watch/src/suboptimal_attestations/updater.rs new file mode 100644 index 0000000000..aeabff2035 --- /dev/null +++ b/watch/src/suboptimal_attestations/updater.rs @@ -0,0 +1,236 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::suboptimal_attestations::get_attestation_performances; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS: u64 = 50; + +impl UpdateHandler { + /// Forward fills the `suboptimal_attestations` table starting from the entry with the highest + /// slot. + /// + /// It construts a request to the `attestation_performance` API endpoint with: + /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest canonical slot) + /// `end_epoch` -> epoch of highest canonical slot + /// + /// It will resync the latest epoch if it is not fully filled but will not overwrite existing + /// values unless there is a re-org. + /// That is, `if highest_filled_slot % slots_per_epoch != 31`. + /// + /// In the event the most recent epoch has no suboptimal attestations, it will attempt to + /// resync that epoch. The odds of this occuring on mainnet are vanishingly small so it is not + /// accounted for. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. 
+ pub async fn fill_suboptimal_attestations(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + let highest_filled_slot_opt = if self.config.attestations { + database::get_highest_attestation(&mut conn)? + .map(|attestation| attestation.epoch_start_slot.as_slot()) + } else { + return Err(Error::NotEnabled("attestations".to_string())); + }; + + let start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { + if highest_filled_slot % self.slots_per_epoch == self.slots_per_epoch.saturating_sub(1) + { + // The whole epoch is filled so we can begin syncing the next one. + highest_filled_slot.epoch(self.slots_per_epoch) + 1 + } else { + // The epoch is only partially synced. Try to sync it fully. + highest_filled_slot.epoch(self.slots_per_epoch) + } + } else { + // No rows present in the `suboptimal_attestations` table. Use `canonical_slots` + // instead. + if let Some(lowest_canonical_slot) = database::get_lowest_canonical_slot(&mut conn)? { + lowest_canonical_slot + .slot + .as_slot() + .epoch(self.slots_per_epoch) + } else { + // There are no slots in the database, do not fill the `suboptimal_attestations` + // table. + warn!("Refusing to fill the `suboptimal_attestations` table as there are no slots in the database"); + return Ok(()); + } + }; + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut end_epoch = highest_canonical_slot.epoch(self.slots_per_epoch); + + // The `lighthouse/analysis/attestation_performance` endpoint can only retrieve attestations + // which are more than 1 epoch old. + // We assume that `highest_canonical_slot` is near the head of the chain. + end_epoch = end_epoch.saturating_sub(2_u64); + + // If end_epoch == 0 then the chain just started so we need to wait until + // `current_epoch >= 2`. 
+ if end_epoch == 0 { + debug!("Chain just begun, refusing to sync attestations"); + return Ok(()); + } + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert attestations with corresponding `canonical_slot`s. + attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest canonical slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slots` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `suboptimal_attestations` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `attestation_performance` API endpoint with: + /// `start_epoch` -> epoch of the lowest `canonical_slot`. 
+ /// `end_epoch` -> epoch of the lowest filled `suboptimal_attestation` - 1 (or epoch of highest + /// canonical slot) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// + /// In the event there are no suboptimal attestations present in the lowest epoch, it will attempt to + /// resync the epoch. The odds of this occuring on mainnet are vanishingly small so it is not + /// accounted for. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + pub async fn backfill_suboptimal_attestations(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_attestation_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `suboptimal_attestations` table. + let lowest_filled_slot_opt = if self.config.attestations { + database::get_lowest_attestation(&mut conn)? + .map(|attestation| attestation.epoch_start_slot.as_slot()) + } else { + return Err(Error::NotEnabled("attestations".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot % self.slots_per_epoch == 0 { + lowest_filled_slot + .epoch(self.slots_per_epoch) + .saturating_sub(1_u64) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.epoch(self.slots_per_epoch) + } + } else { + // No entries in the `suboptimal_attestations` table. Use `canonical_slots` instead. + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + // Subtract 2 since `end_epoch` must be less than the current epoch - 1. + // We assume that `highest_canonical_slot` is near the head of the chain. 
+ highest_canonical_slot + .epoch(self.slots_per_epoch) + .saturating_sub(2_u64) + } else { + // There are no slots in the database, do not backfill the + // `suboptimal_attestations` table. + warn!("Refusing to backfill attestations as there are no slots in the database"); + return Ok(()); + } + }; + + if end_epoch == 0 { + debug!("Attestations backfill is complete"); + return Ok(()); + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut start_epoch = lowest_canonical_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the base of the database"); + return Ok(()); + } + + // Ensure the request range does not exceed `max_attestation_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + if start_epoch < end_epoch.saturating_sub(max_attestation_backfill) { + start_epoch = end_epoch.saturating_sub(max_attestation_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) + } + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert `suboptimal_attestations` with corresponding `canonical_slots`. 
+ attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slot` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/updater/config.rs b/watch/src/updater/config.rs new file mode 100644 index 0000000000..0179be73db --- /dev/null +++ b/watch/src/updater/config.rs @@ -0,0 +1,65 @@ +use serde::{Deserialize, Serialize}; + +pub const BEACON_NODE_URL: &str = "http://127.0.0.1:5052"; + +pub const fn max_backfill_size_epochs() -> u64 { + 2 +} +pub const fn backfill_stop_epoch() -> u64 { + 0 +} +pub const fn attestations() -> bool { + true +} +pub const fn proposer_info() -> bool { + true +} +pub const fn block_rewards() -> bool { + true +} +pub const fn block_packing() -> bool { + true +} + +fn beacon_node_url() -> String { + BEACON_NODE_URL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + /// The URL of the beacon you wish to sync from. + #[serde(default = "beacon_node_url")] + pub beacon_node_url: String, + /// The maximum size each backfill iteration will allow per request (in epochs). + #[serde(default = "max_backfill_size_epochs")] + pub max_backfill_size_epochs: u64, + /// The epoch at which to never backfill past. 
+ #[serde(default = "backfill_stop_epoch")] + pub backfill_stop_epoch: u64, + /// Whether to sync the suboptimal_attestations table. + #[serde(default = "attestations")] + pub attestations: bool, + /// Whether to sync the proposer_info table. + #[serde(default = "proposer_info")] + pub proposer_info: bool, + /// Whether to sync the block_rewards table. + #[serde(default = "block_rewards")] + pub block_rewards: bool, + /// Whether to sync the block_packing table. + #[serde(default = "block_packing")] + pub block_packing: bool, +} + +impl Default for Config { + fn default() -> Self { + Self { + beacon_node_url: beacon_node_url(), + max_backfill_size_epochs: max_backfill_size_epochs(), + backfill_stop_epoch: backfill_stop_epoch(), + attestations: attestations(), + proposer_info: proposer_info(), + block_rewards: block_rewards(), + block_packing: block_packing(), + } + } +} diff --git a/watch/src/updater/error.rs b/watch/src/updater/error.rs new file mode 100644 index 0000000000..74091c8f21 --- /dev/null +++ b/watch/src/updater/error.rs @@ -0,0 +1,56 @@ +use crate::blockprint::Error as BlockprintError; +use crate::database::Error as DbError; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{Error as Eth2Error, SensitiveError}; +use std::fmt; + +#[derive(Debug)] +pub enum Error { + BeaconChain(BeaconChainError), + Eth2(Eth2Error), + SensitiveUrl(SensitiveError), + Database(DbError), + Blockprint(BlockprintError), + UnableToGetRemoteHead, + BeaconNodeSyncing, + NotEnabled(String), + NoValidatorsFound, + BeaconNodeNotCompatible(String), + InvalidConfig(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChain(e) + } +} + +impl From for Error { + fn from(e: Eth2Error) -> Self { + Error::Eth2(e) + } +} + +impl From for Error { + fn from(e: SensitiveError) -> Self { + Error::SensitiveUrl(e) + } +} 
+ +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: BlockprintError) -> Self { + Error::Blockprint(e) + } +} diff --git a/watch/src/updater/handler.rs b/watch/src/updater/handler.rs new file mode 100644 index 0000000000..1e1662bf74 --- /dev/null +++ b/watch/src/updater/handler.rs @@ -0,0 +1,471 @@ +use crate::blockprint::WatchBlockprintClient; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool, WatchCanonicalSlot, WatchHash, WatchSlot}; +use crate::updater::{Config, Error, WatchSpec}; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{ + types::{BlockId, SyncingData}, + BeaconNodeHttpClient, SensitiveUrl, +}; +use log::{debug, error, info, warn}; +use std::collections::HashSet; +use std::iter::FromIterator; +use types::{BeaconBlockHeader, EthSpec, Hash256, SignedBeaconBlock, Slot}; + +use crate::updater::{get_beacon_block, get_header, get_validators}; + +const MAX_EXPECTED_REORG_LENGTH: u64 = 32; + +/// Ensure the existing database is valid for this run. +pub async fn ensure_valid_database( + spec: &WatchSpec, + pool: &mut PgPool, +) -> Result<(), Error> { + let mut conn = database::get_connection(pool)?; + + let bn_slots_per_epoch = spec.slots_per_epoch(); + let bn_config_name = spec.network.clone(); + + if let Some((db_config_name, db_slots_per_epoch)) = database::get_active_config(&mut conn)? { + if db_config_name != bn_config_name || db_slots_per_epoch != bn_slots_per_epoch as i32 { + Err(Error::InvalidConfig( + "The config stored in the database does not match the beacon node.".to_string(), + )) + } else { + // Configs match. + Ok(()) + } + } else { + // No config exists in the DB. 
+ database::insert_active_config(&mut conn, bn_config_name, bn_slots_per_epoch)?; + Ok(()) + } +} + +pub struct UpdateHandler { + pub pool: PgPool, + pub bn: BeaconNodeHttpClient, + pub blockprint: Option, + pub config: Config, + pub slots_per_epoch: u64, + pub spec: WatchSpec, +} + +impl UpdateHandler { + pub async fn new( + bn: BeaconNodeHttpClient, + spec: WatchSpec, + config: FullConfig, + ) -> Result, Error> { + let blockprint = if config.blockprint.enabled { + if let Some(server) = config.blockprint.url { + let blockprint_url = SensitiveUrl::parse(&server).map_err(Error::SensitiveUrl)?; + Some(WatchBlockprintClient { + client: reqwest::Client::new(), + server: blockprint_url, + username: config.blockprint.username, + password: config.blockprint.password, + }) + } else { + return Err(Error::NotEnabled( + "blockprint was enabled but url was not set".to_string(), + )); + } + } else { + None + }; + + let mut pool = database::build_connection_pool(&config.database)?; + + ensure_valid_database(&spec, &mut pool).await?; + + Ok(Self { + pool, + bn, + blockprint, + config: config.updater, + slots_per_epoch: spec.slots_per_epoch(), + spec, + }) + } + + /// Gets the syncing status of the connected beacon node. + pub async fn get_bn_syncing_status(&mut self) -> Result { + Ok(self.bn.get_node_syncing().await?.data) + } + + /// Gets a list of block roots from the database which do not yet contain a corresponding + /// entry in the `beacon_blocks` table and inserts them. + pub async fn update_unknown_blocks(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let roots = database::get_unknown_canonical_blocks(&mut conn)?; + for root in roots { + let block_opt: Option> = + get_beacon_block(&self.bn, BlockId::Root(root.as_hash())).await?; + if let Some(block) = block_opt { + database::insert_beacon_block(&mut conn, block, root)?; + } + } + + Ok(()) + } + + /// Performs a head update with the following steps: + /// 1. 
Pull the latest header from the beacon node and the latest canonical slot from the + /// database. + /// 2. Loop back through the beacon node and database to find the first matching slot -> root + /// pair. + /// 3. Go back `MAX_EXPECTED_REORG_LENGTH` slots through the database ensuring it is + /// consistent with the beacon node. If a re-org occurs beyond this range, we cannot recover. + /// 4. Remove any invalid slots from the database. + /// 5. Sync all blocks between the first valid block of the database and the head of the beacon + /// chain. + /// + /// In the event there are no slots present in the database, it will sync from the head block + /// block back to the first slot of the epoch. + /// This will ensure backfills are always done in full epochs (which helps keep certain syncing + /// tasks efficient). + pub async fn perform_head_update(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + // Load the head from the beacon node. + let bn_header = get_header(&self.bn, BlockId::Head) + .await? + .ok_or(Error::UnableToGetRemoteHead)?; + let header_root = bn_header.canonical_root(); + + if let Some(latest_matching_canonical_slot) = + self.get_first_matching_block(bn_header.clone()).await? + { + // Check for reorgs. + let latest_db_slot = self.check_for_reorg(latest_matching_canonical_slot).await?; + + // Remove all slots above `latest_db_slot` from the database. + let result = database::delete_canonical_slots_above( + &mut conn, + WatchSlot::from_slot(latest_db_slot), + )?; + info!("{result} old records removed during head update"); + + if result > 0 { + // If slots were removed, we need to resync the suboptimal_attestations table for + // the epoch since they will have changed and cannot be fixed by a simple update. 
+ let epoch = latest_db_slot + .epoch(self.slots_per_epoch) + .saturating_sub(1_u64); + debug!("Preparing to resync attestations above epoch {epoch}"); + database::delete_suboptimal_attestations_above( + &mut conn, + WatchSlot::from_slot(epoch.start_slot(self.slots_per_epoch)), + )?; + } + + // Since we are syncing backwards, `start_slot > `end_slot`. + let start_slot = bn_header.slot; + let end_slot = latest_db_slot + 1; + self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) + .await?; + info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); + + // Attempt to sync new blocks with blockprint. + //self.sync_blockprint_until(start_slot).await?; + } else { + // There are no matching parent blocks. Sync from the head block back until the first + // block of the epoch. + let start_slot = bn_header.slot; + let end_slot = start_slot.saturating_sub(start_slot % self.slots_per_epoch); + self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) + .await?; + info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); + } + + Ok(()) + } + + /// Attempt to find a row in the `canonical_slots` table which matches the `canonical_root` of + /// the block header as reported by the beacon node. + /// + /// Any blocks above this value are not canonical according to the beacon node. + /// + /// Note: In the event that there are skip slots above the slot returned by the function, + /// they will not be returned, so may be pruned or re-synced by other code despite being + /// canonical. + pub async fn get_first_matching_block( + &mut self, + mut bn_header: BeaconBlockHeader, + ) -> Result, Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Load latest non-skipped canonical slot from database. + if let Some(db_canonical_slot) = + database::get_highest_non_skipped_canonical_slot(&mut conn)? 
+ { + // Check if the header or parent root matches the entry in the database. + if bn_header.parent_root == db_canonical_slot.root.as_hash() + || bn_header.canonical_root() == db_canonical_slot.root.as_hash() + { + Ok(Some(db_canonical_slot)) + } else { + // Header is not the child of the highest entry in the database. + // From here we need to iterate backwards through the database until we find + // a slot -> root pair that matches the beacon node. + loop { + // Store working `parent_root`. + let parent_root = bn_header.parent_root; + + // Try the next header. + let next_header = get_header(&self.bn, BlockId::Root(parent_root)).await?; + if let Some(header) = next_header { + bn_header = header.clone(); + if let Some(db_canonical_slot) = database::get_canonical_slot_by_root( + &mut conn, + WatchHash::from_hash(header.parent_root), + )? { + // Check if the entry in the database matches the parent of + // the header. + if header.parent_root == db_canonical_slot.root.as_hash() { + return Ok(Some(db_canonical_slot)); + } else { + // Move on to the next header. + continue; + } + } else { + // Database does not have the referenced root. Try the next header. + continue; + } + } else { + // If we get this error it means that the `parent_root` of the header + // did not reference a canonical block. + return Err(Error::BeaconChain(BeaconChainError::MissingBeaconBlock( + parent_root, + ))); + } + } + } + } else { + // There are no non-skipped blocks present in the database. + Ok(None) + } + } + + /// Given the latest slot in the database which matches a root in the beacon node, + /// traverse back through the database for `MAX_EXPECTED_REORG_LENGTH` slots to ensure the tip + /// of the database is consistent with the beacon node (in the case that reorgs have occured). + /// + /// Returns the slot before the oldest canonical_slot which has an invalid child. 
+ pub async fn check_for_reorg( + &mut self, + latest_canonical_slot: WatchCanonicalSlot, + ) -> Result { + let mut conn = database::get_connection(&self.pool)?; + + let end_slot = latest_canonical_slot.slot.as_u64(); + let start_slot = end_slot.saturating_sub(MAX_EXPECTED_REORG_LENGTH); + + for i in start_slot..end_slot { + let slot = Slot::new(i); + let db_canonical_slot_opt = + database::get_canonical_slot(&mut conn, WatchSlot::from_slot(slot))?; + if let Some(db_canonical_slot) = db_canonical_slot_opt { + let header_opt = get_header(&self.bn, BlockId::Slot(slot)).await?; + if let Some(header) = header_opt { + if header.canonical_root() == db_canonical_slot.root.as_hash() { + // The roots match (or are both skip slots). + continue; + } else { + // The block roots do not match. We need to re-sync from here. + warn!("Block {slot} does not match the beacon node. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } else if !db_canonical_slot.skipped { + // The block exists in the database, but does not exist on the beacon node. + // We need to re-sync from here. + warn!("Block {slot} does not exist on the beacon node. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } else { + // This slot does not exist in the database. + let lowest_slot = database::get_lowest_canonical_slot(&mut conn)? + .map(|canonical_slot| canonical_slot.slot.as_slot()); + if lowest_slot > Some(slot) { + // The database has not back-filled this slot yet, so skip it. + continue; + } else { + // The database does not contain this block, but has back-filled past it. + // We need to resync from here. + warn!("Slot {slot} missing from database. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } + } + + // The database is consistent with the beacon node, so return the head of the database. + Ok(latest_canonical_slot.slot.as_slot()) + } + + /// Fills the canonical slots table beginning from `start_slot` and ending at `end_slot`. 
+ /// It fills in reverse order, that is, `start_slot` is higher than `end_slot`. + /// + /// Skip slots set `root` to the root of the previous non-skipped slot and also sets + /// `skipped == true`. + /// + /// Since it uses `insert_canonical_slot` to interact with the database, it WILL NOT overwrite + /// existing rows. This means that any part of the chain within `end_slot..=start_slot` that + /// needs to be resynced, must first be deleted from the database. + pub async fn reverse_fill_canonical_slots( + &mut self, + mut header: BeaconBlockHeader, + mut header_root: Hash256, + mut skipped: bool, + start_slot: Slot, + end_slot: Slot, + ) -> Result { + let mut count = 0; + + let mut conn = database::get_connection(&self.pool)?; + + // Iterate, descending from `start_slot` (higher) to `end_slot` (lower). + for slot in (end_slot.as_u64()..=start_slot.as_u64()).rev() { + // Insert header. + database::insert_canonical_slot( + &mut conn, + WatchCanonicalSlot { + slot: WatchSlot::new(slot), + root: WatchHash::from_hash(header_root), + skipped, + beacon_block: None, + }, + )?; + count += 1; + + // Load the next header: + // We must use BlockId::Slot since we want to include skip slots. + header = if let Some(new_header) = get_header( + &self.bn, + BlockId::Slot(Slot::new(slot.saturating_sub(1_u64))), + ) + .await? + { + header_root = new_header.canonical_root(); + skipped = false; + new_header + } else { + if header.slot == 0 { + info!("Reverse fill exhausted at slot 0"); + break; + } + // Slot was skipped, so use the parent_root (most recent non-skipped block). + skipped = true; + header_root = header.parent_root; + header + }; + } + + Ok(count) + } + + /// Backfills the `canonical_slots` table starting from the lowest non-skipped slot and + /// stopping after `max_backfill_size_epochs` epochs. 
+ pub async fn backfill_canonical_slots(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let backfill_stop_slot = self.config.backfill_stop_epoch * self.slots_per_epoch; + // Check to see if we have finished backfilling. + if let Some(lowest_slot) = database::get_lowest_canonical_slot(&mut conn)? { + if lowest_slot.slot.as_slot() == backfill_stop_slot { + debug!("Backfill sync complete, all slots filled"); + return Ok(()); + } + } + + let backfill_slot_count = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + if let Some(lowest_non_skipped_canonical_slot) = + database::get_lowest_non_skipped_canonical_slot(&mut conn)? + { + // Set `start_slot` equal to the lowest non-skipped slot in the database. + // While this will attempt to resync some parts of the bottom of the chain, it reduces + // complexity when dealing with skip slots. + let start_slot = lowest_non_skipped_canonical_slot.slot.as_slot(); + let mut end_slot = lowest_non_skipped_canonical_slot + .slot + .as_slot() + .saturating_sub(backfill_slot_count); + + // Ensure end_slot doesn't go below `backfill_stop_epoch` + if end_slot <= backfill_stop_slot { + end_slot = Slot::new(backfill_stop_slot); + } + + let header_opt = get_header(&self.bn, BlockId::Slot(start_slot)).await?; + + if let Some(header) = header_opt { + let header_root = header.canonical_root(); + let count = self + .reverse_fill_canonical_slots(header, header_root, false, start_slot, end_slot) + .await?; + + info!("Backfill completed to slot: {end_slot}, records added: {count}"); + } else { + // The lowest slot of the database is inconsistent with the beacon node. + // Currently we have no way to recover from this. The entire database will need to + // be re-synced. + error!( + "Database is inconsistent with the beacon node. \ + Please ensure your beacon node is set to the right network, \ + otherwise you may need to resync" + ); + } + } else { + // There are no blocks in the database. 
Forward sync needs to happen first. + info!("Backfill was not performed since there are no blocks in the database"); + return Ok(()); + }; + + Ok(()) + } + + // Attempt to update the validator set. + // This downloads the latest validator set from the beacon node, and pulls the known validator + // set from the database. + // We then take any new or updated validators and insert them into the database (overwriting + // exiting validators). + // + // In the event there are no validators in the database, it will initialize the validator set. + pub async fn update_validator_set(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + let current_validators = database::get_all_validators(&mut conn)?; + + if !current_validators.is_empty() { + let old_validators = HashSet::from_iter(current_validators); + + // Pull the new validator set from the beacon node. + let new_validators = get_validators(&self.bn).await?; + + // The difference should only contain validators that contain either a new `exit_epoch` (implying an + // exit) or a new `index` (implying a validator activation). + let val_diff = new_validators.difference(&old_validators); + + for diff in val_diff { + database::insert_validator(&mut conn, diff.clone())?; + } + } else { + info!("No validators present in database. Initializing the validator set"); + self.initialize_validator_set().await?; + } + + Ok(()) + } + + // Initialize the validator set by downloading it from the beacon node, inserting blockprint + // data (if required) and writing it to the database. + pub async fn initialize_validator_set(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Pull all validators from the beacon node. 
+ let validators = Vec::from_iter(get_validators(&self.bn).await?); + + database::insert_batch_validators(&mut conn, validators)?; + + Ok(()) + } +} diff --git a/watch/src/updater/mod.rs b/watch/src/updater/mod.rs new file mode 100644 index 0000000000..1fbb0107ae --- /dev/null +++ b/watch/src/updater/mod.rs @@ -0,0 +1,234 @@ +use crate::config::Config as FullConfig; +use crate::database::{WatchPK, WatchValidator}; +use eth2::{ + types::{BlockId, StateId}, + BeaconNodeHttpClient, SensitiveUrl, Timeouts, +}; +use log::{debug, error, info}; +use std::collections::{HashMap, HashSet}; +use std::marker::PhantomData; +use std::time::{Duration, Instant}; +use types::{BeaconBlockHeader, EthSpec, GnosisEthSpec, MainnetEthSpec, SignedBeaconBlock}; + +pub use config::Config; +pub use error::Error; +pub use handler::UpdateHandler; + +mod config; +pub mod error; +pub mod handler; + +const FAR_FUTURE_EPOCH: u64 = u64::MAX; +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +const MAINNET: &str = "mainnet"; +const GNOSIS: &str = "gnosis"; + +pub struct WatchSpec { + network: String, + spec: PhantomData, +} + +impl WatchSpec { + fn slots_per_epoch(&self) -> u64 { + T::slots_per_epoch() + } +} + +impl WatchSpec { + pub fn mainnet(network: String) -> Self { + Self { + network, + spec: PhantomData, + } + } +} + +impl WatchSpec { + fn gnosis(network: String) -> Self { + Self { + network, + spec: PhantomData, + } + } +} + +pub async fn run_updater(config: FullConfig) -> Result<(), Error> { + let beacon_node_url = + SensitiveUrl::parse(&config.updater.beacon_node_url).map_err(Error::SensitiveUrl)?; + let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); + + let config_map = bn.get_config_spec::>().await?.data; + + let config_name = config_map + .get("CONFIG_NAME") + .ok_or_else(|| { + Error::BeaconNodeNotCompatible("No field CONFIG_NAME on beacon node spec".to_string()) + })? 
+ .clone(); + + match config_map + .get("PRESET_BASE") + .ok_or_else(|| { + Error::BeaconNodeNotCompatible("No field PRESET_BASE on beacon node spec".to_string()) + })? + .to_lowercase() + .as_str() + { + MAINNET => { + let spec = WatchSpec::mainnet(config_name); + run_once(bn, spec, config).await + } + GNOSIS => { + let spec = WatchSpec::gnosis(config_name); + run_once(bn, spec, config).await + } + _ => unimplemented!("unsupported PRESET_BASE"), + } +} + +pub async fn run_once( + bn: BeaconNodeHttpClient, + spec: WatchSpec, + config: FullConfig, +) -> Result<(), Error> { + let mut watch = UpdateHandler::new(bn, spec, config.clone()).await?; + + let sync_data = watch.get_bn_syncing_status().await?; + if sync_data.is_syncing { + error!( + "Connected beacon node is still syncing: head_slot => {:?}, distance => {}", + sync_data.head_slot, sync_data.sync_distance + ); + return Err(Error::BeaconNodeSyncing); + } + + info!("Performing head update"); + let head_timer = Instant::now(); + watch.perform_head_update().await?; + let head_timer_elapsed = head_timer.elapsed(); + debug!("Head update complete, time taken: {head_timer_elapsed:?}"); + + info!("Performing block backfill"); + let block_backfill_timer = Instant::now(); + watch.backfill_canonical_slots().await?; + let block_backfill_timer_elapsed = block_backfill_timer.elapsed(); + debug!("Block backfill complete, time taken: {block_backfill_timer_elapsed:?}"); + + info!("Updating validator set"); + let validator_timer = Instant::now(); + watch.update_validator_set().await?; + let validator_timer_elapsed = validator_timer.elapsed(); + debug!("Validator update complete, time taken: {validator_timer_elapsed:?}"); + + // Update blocks after updating the validator set since the `proposer_index` must exist in the + // `validators` table. 
+ info!("Updating unknown blocks"); + let unknown_block_timer = Instant::now(); + watch.update_unknown_blocks().await?; + let unknown_block_timer_elapsed = unknown_block_timer.elapsed(); + debug!("Unknown block update complete, time taken: {unknown_block_timer_elapsed:?}"); + + // Run additional modules + if config.updater.attestations { + info!("Updating suboptimal attestations"); + let attestation_timer = Instant::now(); + watch.fill_suboptimal_attestations().await?; + watch.backfill_suboptimal_attestations().await?; + let attestation_timer_elapsed = attestation_timer.elapsed(); + debug!("Attestation update complete, time taken: {attestation_timer_elapsed:?}"); + } + + if config.updater.block_rewards { + info!("Updating block rewards"); + let rewards_timer = Instant::now(); + watch.fill_block_rewards().await?; + watch.backfill_block_rewards().await?; + let rewards_timer_elapsed = rewards_timer.elapsed(); + debug!("Block Rewards update complete, time taken: {rewards_timer_elapsed:?}"); + } + + if config.updater.block_packing { + info!("Updating block packing statistics"); + let packing_timer = Instant::now(); + watch.fill_block_packing().await?; + watch.backfill_block_packing().await?; + let packing_timer_elapsed = packing_timer.elapsed(); + debug!("Block packing update complete, time taken: {packing_timer_elapsed:?}"); + } + + if config.blockprint.enabled { + info!("Updating blockprint"); + let blockprint_timer = Instant::now(); + watch.fill_blockprint().await?; + watch.backfill_blockprint().await?; + let blockprint_timer_elapsed = blockprint_timer.elapsed(); + debug!("Blockprint update complete, time taken: {blockprint_timer_elapsed:?}"); + } + + Ok(()) +} + +/// Queries the beacon node for a given `BlockId` and returns the `BeaconBlockHeader` if it exists. +pub async fn get_header( + bn: &BeaconNodeHttpClient, + block_id: BlockId, +) -> Result, Error> { + let resp = bn + .get_beacon_headers_block_id(block_id) + .await? 
+ .map(|resp| (resp.data.root, resp.data.header.message)); + // When quering with root == 0x000... , slot 0 will be returned with parent_root == 0x0000... + // This check escapes the loop. + if let Some((root, header)) = resp { + if root == header.parent_root { + return Ok(None); + } else { + return Ok(Some(header)); + } + } + Ok(None) +} + +pub async fn get_beacon_block( + bn: &BeaconNodeHttpClient, + block_id: BlockId, +) -> Result>, Error> { + let block = bn.get_beacon_blocks(block_id).await?.map(|resp| resp.data); + + Ok(block) +} + +/// Queries the beacon node for the current validator set. +pub async fn get_validators(bn: &BeaconNodeHttpClient) -> Result, Error> { + let mut validator_map = HashSet::new(); + + let validators = bn + .get_beacon_states_validators(StateId::Head, None, None) + .await? + .ok_or(Error::NoValidatorsFound)? + .data; + + for val in validators { + // Only store `activation_epoch` if it not the `FAR_FUTURE_EPOCH`. + let activation_epoch = if val.validator.activation_epoch.as_u64() == FAR_FUTURE_EPOCH { + None + } else { + Some(val.validator.activation_epoch.as_u64() as i32) + }; + // Only store `exit_epoch` if it is not the `FAR_FUTURE_EPOCH`. 
+ let exit_epoch = if val.validator.exit_epoch.as_u64() == FAR_FUTURE_EPOCH { + None + } else { + Some(val.validator.exit_epoch.as_u64() as i32) + }; + validator_map.insert(WatchValidator { + index: val.index as i32, + public_key: WatchPK::from_pubkey(val.validator.pubkey), + status: val.status.to_string(), + activation_epoch, + exit_epoch, + }); + } + Ok(validator_map) +} diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs new file mode 100644 index 0000000000..acdda8c306 --- /dev/null +++ b/watch/tests/tests.rs @@ -0,0 +1,1254 @@ +#![recursion_limit = "256"] +#![cfg(unix)] + +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +}; +use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; +use http_api::test_utils::{create_api_server, ApiServer}; +use network::NetworkReceivers; + +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; +use tokio::sync::oneshot; +use types::{Hash256, MainnetEthSpec, Slot}; +use url::Url; +use watch::{ + client::WatchHttpClient, + config::Config, + database::{self, Config as DatabaseConfig, PgPool, WatchSlot}, + server::{start_server, Config as ServerConfig}, + updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec}, +}; + +use log::error; +use std::net::SocketAddr; +use std::time::Duration; +use tokio::{runtime, task::JoinHandle}; +use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; +use unused_port::unused_tcp4_port; + +use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage}; + +type E = MainnetEthSpec; + +const VALIDATOR_COUNT: usize = 32; +const SLOTS_PER_EPOCH: u64 = 32; +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +fn build_test_config(config: &DatabaseConfig) -> PostgresConfig { + let mut postgres_config = PostgresConfig::new(); + postgres_config + .user(&config.user) + .password(&config.password) + .dbname(&config.default_dbname) + .host(&config.host) + 
.port(config.port) + .connect_timeout(Duration::from_millis(config.connect_timeout_millis)); + postgres_config +} + +async fn connect(config: &DatabaseConfig) -> (Client, JoinHandle<()>) { + let db_config = build_test_config(config); + let (client, conn) = db_config + .connect(NoTls) + .await + .expect("Could not connect to db"); + let connection = runtime::Handle::current().spawn(async move { + if let Err(e) = conn.await { + error!("Connection error {:?}", e); + } + }); + + (client, connection) +} + +pub async fn create_test_database(config: &DatabaseConfig) { + let (db, _) = connect(config).await; + + db.execute(&format!("CREATE DATABASE {};", config.dbname), &[]) + .await + .expect("Database creation failed"); +} + +struct TesterBuilder { + pub harness: BeaconChainHarness>, + pub config: Config, + _bn_network_rx: NetworkReceivers, + _bn_api_shutdown_tx: oneshot::Sender<()>, +} + +impl TesterBuilder { + pub async fn new() -> TesterBuilder { + let harness = BeaconChainHarness::builder(E::default()) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .build(); + + /* + * Spawn a Beacon Node HTTP API. + */ + let ApiServer { + server, + listening_socket: bn_api_listening_socket, + shutdown_tx: _bn_api_shutdown_tx, + network_rx: _bn_network_rx, + .. 
+ } = create_api_server(harness.chain.clone(), harness.logger().clone()).await; + tokio::spawn(server); + + /* + * Create a watch configuration + */ + let database_port = unused_tcp4_port().expect("Unable to find unused port."); + let server_port = unused_tcp4_port().expect("Unable to find unused port."); + let config = Config { + database: DatabaseConfig { + dbname: random_dbname(), + port: database_port, + ..Default::default() + }, + server: ServerConfig { + listen_port: server_port, + ..Default::default() + }, + updater: UpdaterConfig { + beacon_node_url: format!( + "http://{}:{}", + bn_api_listening_socket.ip(), + bn_api_listening_socket.port() + ), + ..Default::default() + }, + ..Default::default() + }; + + Self { + harness, + config, + _bn_network_rx, + _bn_api_shutdown_tx, + } + } + pub async fn build(self, pool: PgPool) -> Tester { + /* + * Spawn a Watch HTTP API. + */ + let (_watch_shutdown_tx, watch_shutdown_rx) = oneshot::channel(); + let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool, async { + let _ = watch_shutdown_rx.await; + }) + .unwrap(); + tokio::spawn(watch_server); + + let addr = SocketAddr::new( + self.config.server.listen_addr, + self.config.server.listen_port, + ); + + /* + * Create a HTTP client to talk to the watch HTTP API. + */ + let client = WatchHttpClient { + client: reqwest::Client::new(), + server: Url::parse(&format!("http://{}:{}", addr.ip(), addr.port())).unwrap(), + }; + + /* + * Create a HTTP client to talk to the Beacon Node API. 
+ */ + let beacon_node_url = SensitiveUrl::parse(&self.config.updater.beacon_node_url).unwrap(); + let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); + let spec = WatchSpec::mainnet("mainnet".to_string()); + + /* + * Build update service + */ + let updater = UpdateHandler::new(bn, spec, self.config.clone()) + .await + .unwrap(); + + Tester { + harness: self.harness, + client, + config: self.config, + updater, + _bn_network_rx: self._bn_network_rx, + _bn_api_shutdown_tx: self._bn_api_shutdown_tx, + _watch_shutdown_tx, + } + } + async fn initialize_database(&self) -> PgPool { + create_test_database(&self.config.database).await; + database::utils::run_migrations(&self.config.database); + database::build_connection_pool(&self.config.database) + .expect("Could not build connection pool") + } +} + +struct Tester { + pub harness: BeaconChainHarness>, + pub client: WatchHttpClient, + pub config: Config, + pub updater: UpdateHandler, + _bn_network_rx: NetworkReceivers, + _bn_api_shutdown_tx: oneshot::Sender<()>, + _watch_shutdown_tx: oneshot::Sender<()>, +} + +impl Tester { + /// Extend the chain on the beacon chain harness. Do not update the beacon watch database. + pub async fn extend_chain(&mut self, num_blocks: u64) -> &mut Self { + self.harness.advance_slot(); + self.harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self + } + + // Advance the slot clock without a block. This results in a skipped slot. + pub fn skip_slot(&mut self) -> &mut Self { + self.harness.advance_slot(); + self + } + + // Perform a single slot re-org. 
+ pub async fn reorg_chain(&mut self) -> &mut Self { + let previous_slot = self.harness.get_current_slot(); + self.harness.advance_slot(); + let first_slot = self.harness.get_current_slot(); + self.harness + .extend_chain( + 1, + BlockStrategy::ForkCanonicalChainAt { + previous_slot, + first_slot, + }, + AttestationStrategy::AllValidators, + ) + .await; + self + } + + /// Run the watch updater service. + pub async fn run_update_service(&mut self, num_runs: usize) -> &mut Self { + for _ in 0..num_runs { + run_updater(self.config.clone()).await.unwrap(); + } + self + } + + pub async fn perform_head_update(&mut self) -> &mut Self { + self.updater.perform_head_update().await.unwrap(); + self + } + + pub async fn perform_backfill(&mut self) -> &mut Self { + self.updater.backfill_canonical_slots().await.unwrap(); + self + } + + pub async fn update_unknown_blocks(&mut self) -> &mut Self { + self.updater.update_unknown_blocks().await.unwrap(); + self + } + + pub async fn update_validator_set(&mut self) -> &mut Self { + self.updater.update_validator_set().await.unwrap(); + self + } + + pub async fn fill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater.fill_suboptimal_attestations().await.unwrap(); + + self + } + + pub async fn backfill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater + .backfill_suboptimal_attestations() + .await + .unwrap(); + + self + } + + pub async fn fill_block_rewards(&mut self) -> &mut Self { + self.updater.fill_block_rewards().await.unwrap(); + + self + } + + pub async fn backfill_block_rewards(&mut self) -> &mut Self { + self.updater.backfill_block_rewards().await.unwrap(); + + self + } + + pub async fn fill_block_packing(&mut self) -> &mut Self { + self.updater.fill_block_packing().await.unwrap(); + + self + } + + pub async fn backfill_block_packing(&mut self) -> &mut Self { + self.updater.backfill_block_packing().await.unwrap(); + + self + } + + pub async fn assert_canonical_slots_empty(&mut self) -> &mut Self { + 
let lowest_slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .map(|slot| slot.slot.as_slot()); + + assert_eq!(lowest_slot, None); + + self + } + + pub async fn assert_lowest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_highest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_highest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_canonical_slots_not_empty(&mut self) -> &mut Self { + self.client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_slot_is_skipped(&mut self, slot: u64) -> &mut Self { + assert!(self + .client + .get_beacon_blocks(BlockId::Slot(Slot::new(slot))) + .await + .unwrap() + .is_none()); + self + } + + pub async fn assert_all_validators_exist(&mut self) -> &mut Self { + assert_eq!( + self.client + .get_all_validators() + .await + .unwrap() + .unwrap() + .len(), + VALIDATOR_COUNT + ); + self + } + + pub async fn assert_lowest_block_has_proposer_info(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + if block.slot.as_slot() == 0 { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_proposer_info(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + 
pub async fn assert_lowest_block_has_block_rewards(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + if block.slot.as_slot() == 0 { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_block_reward(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_block_rewards(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_block_reward(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_lowest_block_has_block_packing(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + while block.slot.as_slot() <= SLOTS_PER_EPOCH { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_block_packing(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_block_packing(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_block_packing(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + /// Check that the canonical chain in watch matches that of the harness. Also check that all + /// canonical blocks can be retrieved. 
+ pub async fn assert_canonical_chain_consistent(&mut self, last_slot: u64) -> &mut Self { + let head_root = self.harness.chain.head_beacon_block_root(); + let mut chain: Vec<(Hash256, Slot)> = self + .harness + .chain + .rev_iter_block_roots_from(head_root) + .unwrap() + .map(Result::unwrap) + .collect(); + + // `chain` contains skip slots, but the `watch` API will not return blocks that do not + // exist. + // We need to filter them out. + chain.reverse(); + chain.dedup_by(|(hash1, _), (hash2, _)| hash1 == hash2); + + // Remove any slots below `last_slot` since it is known that the database has not + // backfilled past it. + chain.retain(|(_, slot)| slot.as_u64() >= last_slot); + + for (root, slot) in &chain { + let block = self + .client + .get_beacon_blocks(BlockId::Root(*root)) + .await + .unwrap() + .unwrap(); + assert_eq!(block.slot.as_slot(), *slot); + } + + self + } + + /// Check that every block in the `beacon_blocks` table has corresponding entries in the + /// `proposer_info`, `block_rewards` and `block_packing` tables. 
+ pub async fn assert_all_blocks_have_metadata(&mut self) -> &mut Self { + let pool = database::build_connection_pool(&self.config.database).unwrap(); + + let mut conn = database::get_connection(&pool).unwrap(); + let highest_block_slot = database::get_highest_beacon_block(&mut conn) + .unwrap() + .unwrap() + .slot + .as_slot(); + let lowest_block_slot = database::get_lowest_beacon_block(&mut conn) + .unwrap() + .unwrap() + .slot + .as_slot(); + for slot in lowest_block_slot.as_u64()..=highest_block_slot.as_u64() { + let canonical_slot = database::get_canonical_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + if !canonical_slot.skipped { + database::get_block_rewards_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + database::get_proposer_info_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + database::get_block_packing_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + } + } + + self + } +} + +pub fn random_dbname() -> String { + let mut s: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(8) + .map(char::from) + .collect(); + // Postgres gets weird about capitals in database names. 
+ s.make_ascii_lowercase(); + format!("test_{}", s) +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(16) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_sync_starts_on_skip_slot() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .skip_slot() + .skip_slot() + .extend_chain(6) + .await + .skip_slot() + .extend_chain(6) + .await + .skip_slot() + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_canonical_chain_consistent(0) + .await + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_with_skip_slot() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(5) + .await + 
.assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_highest_canonical_slot(5) + .await + .assert_lowest_canonical_slot(0) + .await + .assert_canonical_chain_consistent(0) + .await + .skip_slot() + .extend_chain(1) + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_highest_canonical_slot(7) + .await + .assert_slot_is_skipped(6) + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_with_reorg() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(5) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_highest_canonical_slot(5) + .await + .assert_lowest_canonical_slot(0) + .await + .assert_canonical_chain_consistent(0) + .await + .skip_slot() + .reorg_chain() + .await + .extend_chain(1) + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_highest_canonical_slot(8) + .await + .assert_slot_is_skipped(6) + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + // Apply four blocks to the chain. 
+ tester + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(5) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(7) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_with_metadata() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + // Apply four blocks to the chain. + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. 
+ .assert_all_validators_exist() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + // Backfill before forward fill to ensure order is arbitrary. + .backfill_block_rewards() + .await + .fill_block_rewards() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(5) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(7) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. 
+ .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_with_metadata_and_multiple_skip_slots() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + // Apply four blocks to the chain. + tester + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + // And also backfill to the epoch boundary. + .await + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // Add multiple skip slots. + .skip_slot() + .skip_slot() + .skip_slot() + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. 
+ .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(8) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(10) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + // Backfill before forward fill to ensure order is arbitrary. + .backfill_block_rewards() + .await + .fill_block_rewards() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_to_second_epoch() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + // Apply 40 blocks to the chain. + tester + .extend_chain(40) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(40) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(32) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. 
+ .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(0) + .await + // Get block packings. + .fill_block_packing() + .await + .backfill_block_packing() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(40) + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Skip a slot + .skip_slot() + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(43) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Update new block_packing + // Backfill before forward fill to ensure order is arbitrary + .backfill_block_packing() + .await + .fill_block_packing() + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn large_chain() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + // Apply 40 blocks to the chain. + tester + .extend_chain(400) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(400) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(384) + .await + // Backfill 2 epochs as per default config. 
+ .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(384) + .await + // Get block rewards and proposer info. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // Get block packings. + .fill_block_packing() + .await + .backfill_block_packing() + .await + // Should have backfilled 2 more epochs. + .assert_lowest_canonical_slot(320) + .await + .assert_highest_canonical_slot(400) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Skip a slot + .skip_slot() + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + .perform_backfill() + .await + // Should have backfilled 2 more epochs + .assert_lowest_canonical_slot(256) + .await + .assert_highest_canonical_slot(403) + .await + // Update validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Get suboptimal attestations. + .fill_suboptimal_attestations() + .await + .backfill_suboptimal_attestations() + .await + // Get block rewards and proposer info. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // Get block packing. + // Backfill before forward fill to ensure order is arbitrary. 
+ .backfill_block_packing() + .await + .fill_block_packing() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(256) + .await + // Check every block has rewards, proposer info and packing statistics. + .assert_all_blocks_have_metadata() + .await; +}