diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 51665d86d0..98e97972a9 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -74,3 +74,10 @@ jobs: - uses: actions/checkout@v1 - name: Typecheck benchmark code without running it run: make check-benches + clippy: + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Lint code for quality and style with Clippy + run: make lint diff --git a/Cargo.lock b/Cargo.lock index bda042408e..2bc14dfad0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6,6 +6,7 @@ version = "0.0.1" dependencies = [ "bls 0.2.0", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clap_utils 0.1.0", "deposit_contract 0.2.0", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "environment 0.2.0", @@ -20,9 +21,10 @@ dependencies = [ "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "types 0.2.0", "validator_client 0.2.0", - "web3 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -80,7 +82,7 @@ dependencies = [ [[package]] name = "amcl" version = "0.2.0" -source = "git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10#38c6c33925b24c9319a1febfb621ff9bbf6d49f7" +source = "git+https://github.com/sigp/milagro_bls?tag=v1.0.1#2ccdd4b517c1ab3debe10277deed9d1b1cbbe9ce" dependencies = [ "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -139,7 +141,7 @@ name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - 
"hermit-abi 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -225,6 +227,7 @@ dependencies = [ "proto_array_fork_choice 0.2.0", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "safe_arith 0.1.0", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", @@ -367,7 +370,7 @@ dependencies = [ "eth2_ssz 0.1.2", "eth2_ssz_types 0.2.0", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "milagro_bls 1.0.1 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)", + "milagro_bls 1.0.1 (git+https://github.com/sigp/milagro_bls?tag=v1.0.1)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", @@ -498,6 +501,18 @@ dependencies = [ "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "clap_utils" +version = "0.1.0" +dependencies = [ + "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz 0.1.2", + "eth2_testnet_config 0.2.0", + "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.2.0", +] + [[package]] name = "clear_on_drop" version = "0.2.3" @@ -895,6 +910,16 @@ dependencies = [ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "derive_more" +version = "0.99.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "digest" version = "0.8.1" @@ -988,13 +1013,13 @@ dependencies = [ [[package]] name = "enr" version = "0.1.0-alpha.3" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "base64 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "ed25519-dalek 1.0.0-pre.3 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "libsecp256k1 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1085,7 +1110,7 @@ dependencies = [ "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", "types 0.2.0", - "web3 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1097,7 +1122,7 @@ dependencies = [ "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", "types 0.2.0", - "web3 0.8.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1115,7 +1140,7 @@ dependencies = [ "hashmap_delay 0.2.0", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "lighthouse_metrics 0.2.0", "lru 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1134,6 +1159,7 @@ dependencies = [ "types 0.2.0", "unsigned-varint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "version 0.2.0", + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1165,7 +1191,7 @@ dependencies = [ "eth2_hashing 0.1.1", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "milagro_bls 1.0.1 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)", + "milagro_bls 1.0.1 (git+https://github.com/sigp/milagro_bls?tag=v1.0.1)", "num-bigint 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1230,6 +1256,20 @@ dependencies = [ "tiny-keccak 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ethabi" +version = "9.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + 
"rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ethabi" version = "11.0.0" @@ -1451,61 +1491,6 @@ dependencies = [ "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "futures-executor" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "futures-io" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-macro" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "futures-sink" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-task" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "futures-util" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - 
"futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-utils 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "gcc" version = "0.3.55" @@ -1640,7 +1625,7 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.12" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1967,6 +1952,18 @@ dependencies = [ "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "jsonrpc-core" +version = "14.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "keccak" version = "0.1.0" @@ -2001,10 +1998,12 @@ name = "lcli" version = "0.2.0" dependencies = [ "clap 2.33.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "clap_utils 0.1.0", "deposit_contract 0.2.0", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "environment 0.2.0", "eth1_test_rig 0.2.0", + "eth2-libp2p 0.2.0", "eth2_ssz 0.1.2", "eth2_testnet_config 0.2.0", "futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2059,34 +2058,34 @@ dependencies = [ [[package]] name = "libp2p" version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-core-derive 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-deflate 0.5.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-discv5 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-dns 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-floodsub 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-gossipsub 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-identify 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-kad 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - 
"libp2p-mdns 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-mplex 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-noise 0.11.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-ping 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-plaintext 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-secio 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-tcp 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-uds 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-wasm-ext 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-websocket 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-yamux 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-core-derive 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-deflate 0.5.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-discv5 0.1.0 
(git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-dns 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-floodsub 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-gossipsub 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-identify 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-kad 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-mdns 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-mplex 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-noise 0.11.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-ping 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-plaintext 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-secio 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-tcp 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-uds 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-wasm-ext 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-websocket 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-yamux 0.13.0 
(git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2098,7 +2097,7 @@ dependencies = [ [[package]] name = "libp2p-core" version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "asn1_der 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2110,15 +2109,15 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libsecp256k1 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "multistream-select 0.6.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "multistream-select 0.6.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "parity-multihash 0.2.0 
(git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", - "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2133,7 +2132,7 @@ dependencies = [ [[package]] name = "libp2p-core-derive" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "syn 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2142,34 +2141,34 @@ dependencies = [ [[package]] name = "libp2p-deflate" version = "0.5.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "flate2 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 
(git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-discv5" version = "0.1.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 4.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "enr 0.1.0-alpha.3 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "enr 0.1.0-alpha.3 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "hkdf 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "libsecp256k1 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "openssl 0.10.29 
(registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2184,10 +2183,10 @@ dependencies = [ [[package]] name = "libp2p-dns" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-dns-unofficial 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2195,15 +2194,15 @@ dependencies = [ [[package]] name = "libp2p-floodsub" version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bs58 0.3.1 
(registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "cuckoofilter 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2213,7 +2212,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.1.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2221,8 +2220,8 @@ dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-swarm 0.3.0 
(git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "lru 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2238,14 +2237,14 @@ dependencies = [ [[package]] name = "libp2p-identify" version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -2257,18 +2256,18 @@ dependencies = [ [[package]] name = "libp2p-kad" version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ 
-2284,16 +2283,16 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "data-encoding 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "dns-parser 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2306,12 +2305,12 @@ dependencies = [ [[package]] name = "libp2p-mplex" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = 
"git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2322,13 +2321,13 @@ dependencies = [ [[package]] name = "libp2p-noise" version = "0.11.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "curve25519-dalek 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2342,14 +2341,14 @@ dependencies = [ [[package]] name = 
"libp2p-ping" version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2359,14 +2358,14 @@ dependencies = [ [[package]] name = "libp2p-plaintext" version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 
(git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2374,7 +2373,7 @@ dependencies = [ [[package]] name = "libp2p-secio" version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "aes-ctr 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2383,13 +2382,13 @@ dependencies = [ "hmac 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-send-wrapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 
(registry+https://github.com/rust-lang/crates.io-index)", "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", - "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2403,10 +2402,10 @@ dependencies = [ [[package]] name = "libp2p-swarm" version = "0.3.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2416,13 +2415,13 @@ dependencies = [ [[package]] name = "libp2p-tcp" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 
(registry+https://github.com/rust-lang/crates.io-index)", "get_if_addrs 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "ipnet 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2432,10 +2431,10 @@ dependencies = [ [[package]] name = "libp2p-uds" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2443,11 +2442,11 @@ dependencies = [ [[package]] name = "libp2p-wasm-ext" version = "0.6.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "js-sys 0.3.37 
(registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "parity-send-wrapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2457,13 +2456,13 @@ dependencies = [ [[package]] name = "libp2p-websocket" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "soketto 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2475,10 +2474,10 @@ dependencies = [ [[package]] name = "libp2p-yamux" version = "0.13.0" -source = 
"git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "yamux 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2517,6 +2516,7 @@ dependencies = [ "account_manager 0.0.1", "beacon_node 0.2.0", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clap_utils 0.1.0", "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "environment 0.2.0", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2642,14 +2642,15 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck_macros 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "safe_arith 0.1.0", ] [[package]] name = "milagro_bls" version = "1.0.1" -source = "git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10#38c6c33925b24c9319a1febfb621ff9bbf6d49f7" +source = "git+https://github.com/sigp/milagro_bls?tag=v1.0.1#2ccdd4b517c1ab3debe10277deed9d1b1cbbe9ce" dependencies = [ - "amcl 0.2.0 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)", + "amcl 0.2.0 (git+https://github.com/sigp/milagro_bls?tag=v1.0.1)", "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2759,7 +2760,7 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.6.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2896,7 +2897,7 @@ name = "num_cpus" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hermit-abi 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2976,14 +2977,14 @@ dependencies = [ [[package]] name = "parity-multiaddr" version = "0.6.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "data-encoding 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)", + "parity-multihash 0.2.0 
(git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)", "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "unsigned-varint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2993,7 +2994,7 @@ dependencies = [ [[package]] name = "parity-multihash" version = "0.2.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "blake2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3774,7 +3775,7 @@ dependencies = [ [[package]] name = "rw-stream-sink" version = "0.1.2" -source = "git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97#4e3003d5283040fee10da1299252dd060a838d97" +source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3786,6 +3787,10 @@ name = "ryu" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "safe_arith" +version = "0.1.0" + [[package]] name = "safemem" version = "0.3.3" @@ -4237,6 +4242,7 @@ dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "merkle_proof 0.2.0", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "safe_arith 0.1.0", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 
0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4903,6 +4909,7 @@ dependencies = [ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand_xorshift 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "safe_arith 0.1.0", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5062,6 +5069,7 @@ dependencies = [ "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", "types 0.2.0", + "web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -5276,6 +5284,34 @@ dependencies = [ "websocket 0.21.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "web3" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "derive_more 0.99.5 (registry+https://github.com/rust-lang/crates.io-index)", + "ethabi 9.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 14.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + 
"rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-uds 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "websocket 0.21.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "webpki" version = "0.21.2" @@ -5463,7 +5499,7 @@ dependencies = [ "checksum aesni 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f70a6b5f971e473091ab7cfb5ffac6cde81666c4556751d8d5620ead8abf100" "checksum ahash 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "6f33b5018f120946c1dcf279194f238a9f146725593ead1c08fa47ff22b0b5d3" "checksum aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)" = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" -"checksum amcl 0.2.0 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)" = "" +"checksum amcl 0.2.0 (git+https://github.com/sigp/milagro_bls?tag=v1.0.1)" = "" "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" "checksum arc-swap 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b585a98a234c46fc563103e9278c9391fde1f4e6850334da895d27edb9580f62" "checksum arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" @@ -5541,6 +5577,7 @@ dependencies = [ "checksum db-key 0.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = 
"b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" "checksum derivative 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3c6d883546668a3e2011b6a716a7330b82eabb0151b138217f632c8243e17135" "checksum derive_more 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a141330240c921ec6d074a3e188a7c7ef95668bb95e7d44fa0e5778ec2a7afe" +"checksum derive_more 0.99.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" "checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" "checksum dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" "checksum dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" @@ -5549,12 +5586,13 @@ dependencies = [ "checksum ed25519-dalek 1.0.0-pre.3 (registry+https://github.com/rust-lang/crates.io-index)" = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" "checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" "checksum encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" -"checksum enr 0.1.0-alpha.3 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" +"checksum enr 0.1.0-alpha.3 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" "checksum env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" "checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" "checksum error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d371106cc88ffdfb1eabd7111e432da544f16f3e2d7bf1dfe8bf575f1df045cd" "checksum ethabi 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97652a7d1f2504d6c885c87e242a06ccef5bd3054093d3fb742d8fb64806231a" "checksum ethabi 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ebdeeea85a6d217b9fcc862906d7e283c047e04114165c433756baf5dce00a6c" +"checksum ethabi 9.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "965126c64662832991f5a748893577630b558e47fa94e7f35aefcd20d737cef7" "checksum ethbloom 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3932e82d64d347a045208924002930dc105a138995ccdc1479d0f05f0359f17c" "checksum ethbloom 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "32cfe1c169414b709cf28aa30c74060bdb830a03a8ba473314d079ac79d80a5f" "checksum ethereum-types 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "62d1bc682337e2c5ec98930853674dd2b4bd5d0d246933a9e98e5280f7c76c5f" @@ -5594,7 +5632,7 @@ dependencies = [ "checksum hashbrown 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1de41fb8dba9714efd92241565cdff73f78508c95697dd56787d3cba27e2353" "checksum hashbrown 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" "checksum heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1679e6ea370dee694f91f1dc469bf94cf8f52051d147aec3e1f9497c6fc22461" -"checksum hermit-abi 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "61565ff7aaace3525556587bd2dc31d4a07071957be715e63ce7b1eccf51a8f4" +"checksum hermit-abi 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8a0d737e0f947a1864e93d33fdef4af8445a00d1ed8dc0c8ddb73139ea6abf15" "checksum hex 0.3.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" "checksum hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" "checksum hkdf 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3fa08a006102488bd9cd5b8013aabe84955cf5ae22e304c2caf655b633aefae3" @@ -5627,6 +5665,7 @@ dependencies = [ "checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" "checksum js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)" = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" "checksum jsonrpc-core 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97b83fdc5e0218128d0d270f2f2e7a5ea716f3240c8518a58bc89e6716ba8581" +"checksum jsonrpc-core 14.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "25525f6002338fb4debb5167a89a0b47f727a5a48418417545ad3429758b7fec" "checksum keccak 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" @@ -5636,28 +5675,28 @@ dependencies = [ "checksum leveldb-sys 2.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "71f46429bb70612c3e939aaeed27ffd31a24a773d21728a1a426e4089d6778d2" "checksum libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)" = "99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005" "checksum libflate 0.1.27 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d9135df43b1f5d0e333385cb6e7897ecd1a43d7d11b91ac003f4d2c2d2401fdd" -"checksum libp2p 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-core-derive 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-deflate 0.5.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-discv5 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-dns 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-floodsub 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-gossipsub 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-identify 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-kad 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-mdns 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-mplex 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-noise 0.11.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-ping 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-plaintext 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-secio 0.13.1 
(git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-tcp 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-uds 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-wasm-ext 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-websocket 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum libp2p-yamux 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" +"checksum libp2p 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-core-derive 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-deflate 0.5.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-discv5 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-dns 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-floodsub 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-gossipsub 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-identify 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-kad 0.13.2 
(git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-mdns 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-mplex 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-noise 0.11.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-ping 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-plaintext 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-secio 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-tcp 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-uds 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-wasm-ext 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-websocket 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum libp2p-yamux 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" "checksum libsecp256k1 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" "checksum libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" "checksum linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" @@ -5672,7 +5711,7 @@ dependencies = [ "checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" "checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" "checksum memoffset 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" -"checksum milagro_bls 1.0.1 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)" = "" +"checksum milagro_bls 1.0.1 (git+https://github.com/sigp/milagro_bls?tag=v1.0.1)" = "" "checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" "checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" "checksum mime_guess 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" @@ -5682,8 +5721,7 @@ dependencies = [ "checksum mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3" "checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -"checksum miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "396aa0f2003d7df8395cb93e09871561ccc3e785f0acb369170e8cc74ddf9226" -"checksum multistream-select 0.6.1 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" +"checksum multistream-select 
0.6.1 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" "checksum native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" "checksum nix 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363" @@ -5700,8 +5738,8 @@ dependencies = [ "checksum openssl-sys 0.9.55 (registry+https://github.com/rust-lang/crates.io-index)" = "7717097d810a0f2e2323f9e5d11e71608355e24828410b55b9d4f18aa5f9a5d8" "checksum owning_ref 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" "checksum parity-codec 3.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "2b9df1283109f542d8852cd6b30e9341acc2137481eb6157d2e62af68b0afec9" -"checksum parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" +"checksum parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" "checksum parity-scale-codec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "329c8f7f4244ddb5c37c103641027a76c530e65e8e4b8240b29f81ea40508b17" "checksum parity-send-wrapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" "checksum parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" @@ -5773,8 +5811,8 @@ dependencies = [ "checksum rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" "checksum rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" -"checksum rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=4e3003d5283040fee10da1299252dd060a838d97)" = "" -"checksum ryu 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1" +"checksum rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add)" = "" +"checksum ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" "checksum safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" "checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" "checksum schannel 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" @@ -5910,6 +5948,7 @@ dependencies = [ "checksum wasm-bindgen-test-macro 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)" = "cf2f86cd78a2aa7b1fb4bb6ed854eccb7f9263089c79542dca1576a1518a8467" "checksum wasm-timer 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "aa3e01d234bb71760e685cfafa5e2c96f8ad877c161a721646356651069e26ac" "checksum web-sys 0.3.37 
(registry+https://github.com/rust-lang/crates.io-index)" = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" +"checksum web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a0631c83208cf420eeb2ed9b6cb2d5fc853aa76a43619ccec2a3d52d741f1261" "checksum web3 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "076f34ed252d74a8521e3b013254b1a39f94a98f23aae7cfc85cda6e7b395664" "checksum webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" "checksum webpki-roots 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" diff --git a/Cargo.toml b/Cargo.toml index 6912e33882..453577d231 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "eth2/state_processing", "eth2/types", "eth2/utils/bls", + "eth2/utils/clap_utils", "eth2/utils/compare_fields", "eth2/utils/compare_fields_derive", "eth2/utils/deposit_contract", @@ -17,6 +18,7 @@ members = [ "eth2/utils/lighthouse_metrics", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", + "eth2/utils/safe_arith", "eth2/utils/serde_hex", "eth2/utils/slot_clock", "eth2/utils/rest_types", diff --git a/Makefile b/Makefile index d84e3237b3..bebf7f8265 100644 --- a/Makefile +++ b/Makefile @@ -2,11 +2,14 @@ EF_TESTS = "tests/ef_tests" -# Builds the entire workspace in release (optimized). +# Builds the Lighthouse binary in release (optimized). # # Binaries will most likely be found in `./target/release` install: cargo install --path lighthouse --force --locked + +# Builds the lcli binary in release (optimized). +install-lcli: cargo install --path lcli --force --locked # Runs the full workspace tests in **release**, without downloading any additional @@ -42,6 +45,11 @@ test: test-release # Runs the entire test suite, downloading test vectors if required. 
test-full: cargo-fmt test-release test-debug test-ef +# Lints the code for bad style and potentially unsafe arithmetic using Clippy. +# Clippy lints are opt-in per-crate for now, which is why we allow all by default. +lint: + cargo clippy --all -- -A clippy::all + # Runs the makefile in the `ef_tests` repo. # # May download and extract an archive of test vectors from the ethereum diff --git a/README.md b/README.md index 9b5854dd9c..08106b97f3 100644 --- a/README.md +++ b/README.md @@ -49,10 +49,10 @@ Current development overview: - ~~**April 2019**: Inital single-client testnets.~~ - ~~**September 2019**: Inter-operability with other Ethereum 2.0 clients.~~ -- ~~ **Q1 2020**: `lighthouse-0.1.0` release: All major phase 0 features implemented.~~ -- **Q1 2020**: Public, multi-client testnet with user-facing functionality. +- ~~**Q1 2020**: `lighthouse-0.1.0` release: All major phase 0 features implemented.~~ +- **Q2 2020**: Public, multi-client testnet with user-facing functionality. - **Q2 2020**: Third-party security review. -- **Q3 2020**: Production Beacon Chain testnet (tentative). +- **Q4 2020**: Production Beacon Chain testnet (tentative). 
## Documentation diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index ad91ad4efe..9588767faf 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -24,5 +24,7 @@ hex = "0.3" validator_client = { path = "../validator_client" } rayon = "1.2.0" eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" } -web3 = "0.8.0" +web3 = "0.10.0" futures = "0.1.25" +clap_utils = { path = "../eth2/utils/clap_utils" } +tokio = "0.1.22" diff --git a/account_manager/src/cli.rs b/account_manager/src/cli.rs index 07685fb706..68892b972a 100644 --- a/account_manager/src/cli.rs +++ b/account_manager/src/cli.rs @@ -1,3 +1,4 @@ +use crate::deposits; use clap::{App, Arg, SubCommand}; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { @@ -6,7 +7,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .about("Utilities for generating and managing Ethereum 2.0 accounts.") .subcommand( SubCommand::with_name("validator") - .about("Generate or manage Etheruem 2.0 validators.") + .about("Generate or manage Ethereum 2.0 validators.") + .subcommand(deposits::cli_app()) .subcommand( SubCommand::with_name("new") .about("Create a new Ethereum 2.0 validator.") @@ -52,14 +54,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .help("The password file to unlock the eth1 account (see --index)"), ) - .arg( - Arg::with_name("testnet-dir") - .long("testnet-dir") - .value_name("DIRECTORY") - .takes_value(true) - .help("The directory from which to read the deposit contract / - address. Defaults to the current Lighthouse testnet."), - ) .subcommand( SubCommand::with_name("insecure") .about("Produce insecure, ephemeral validators. 
DO NOT USE TO STORE VALUE.") diff --git a/account_manager/src/deposits.rs b/account_manager/src/deposits.rs new file mode 100644 index 0000000000..5f6cc82fc0 --- /dev/null +++ b/account_manager/src/deposits.rs @@ -0,0 +1,255 @@ +use clap::{App, Arg, ArgMatches}; +use clap_utils; +use environment::Environment; +use futures::{ + future::{self, loop_fn, Loop}, + Future, +}; +use slog::{info, Logger}; +use std::fs; +use std::path::PathBuf; +use std::time::{Duration, Instant}; +use tokio::timer::Delay; +use types::EthSpec; +use validator_client::validator_directory::ValidatorDirectoryBuilder; +use web3::{ + transports::Ipc, + types::{Address, SyncInfo, SyncState}, + Transport, Web3, +}; + +const SYNCING_STATE_RETRY_DELAY: Duration = Duration::from_secs(2); + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new("deposited") + .about("Creates new Lighthouse validator keys and directories. Each newly-created validator + will have a deposit transaction formed and submitted to the deposit contract via + --eth1-ipc. This application will only write each validator keys to disk if the deposit + transaction returns successfully from the eth1 node. The process exits immediately if any + Eth1 tx fails. Does not wait for Eth1 confirmation blocks, so there is no guarantee that a + deposit will be accepted in the Eth1 chain. Before key generation starts, this application + will wait until the eth1 indicates that it is not syncing via the eth_syncing endpoint") + .arg( + Arg::with_name("validator-dir") + .long("validator-dir") + .value_name("VALIDATOR_DIRECTORY") + .help("The path where the validator directories will be created. 
Defaults to ~/.lighthouse/validators") + .takes_value(true), + ) + .arg( + Arg::with_name("eth1-ipc") + .long("eth1-ipc") + .value_name("ETH1_IPC_PATH") + .help("Path to an Eth1 JSON-RPC IPC endpoint") + .takes_value(true) + .required(true) + ) + .arg( + Arg::with_name("from-address") + .long("from-address") + .value_name("FROM_ETH1_ADDRESS") + .help("The address that will submit the eth1 deposit. Must be unlocked on the node + at --eth1-ipc.") + .takes_value(true) + .required(true) + ) + .arg( + Arg::with_name("deposit-gwei") + .long("deposit-gwei") + .value_name("DEPOSIT_GWEI") + .help("The GWEI value of the deposit amount. Defaults to the minimum amount + required for an active validator (MAX_EFFECTIVE_BALANCE.") + .takes_value(true), + ) + .arg( + Arg::with_name("count") + .long("count") + .value_name("DEPOSIT_COUNT") + .help("The number of deposits to create, regardless of how many already exist") + .conflicts_with("limit") + .takes_value(true), + ) + .arg( + Arg::with_name("at-most") + .long("at-most") + .value_name("VALIDATOR_COUNT") + .help("Observe the number of validators in --validator-dir, only creating enough to + ensure reach the given count. Never deletes an existing validator.") + .conflicts_with("count") + .takes_value(true), + ) +} + +pub fn cli_run(matches: &ArgMatches, mut env: Environment) -> Result<(), String> { + let spec = env.core_context().eth2_config.spec; + let log = env.core_context().log; + + let validator_dir = clap_utils::parse_path_with_default_in_home_dir( + matches, + "validator_dir", + PathBuf::new().join(".lighthouse").join("validators"), + )?; + let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?; + let from_address: Address = clap_utils::parse_required(matches, "from-address")?; + let deposit_gwei = clap_utils::parse_optional(matches, "deposit-gwei")? 
+ .unwrap_or_else(|| spec.max_effective_balance); + let count: Option = clap_utils::parse_optional(matches, "count")?; + let at_most: Option = clap_utils::parse_optional(matches, "at-most")?; + + let starting_validator_count = existing_validator_count(&validator_dir)?; + + let n = match (count, at_most) { + (Some(_), Some(_)) => Err("Cannot supply --count and --at-most".to_string()), + (None, None) => Err("Must supply either --count or --at-most".to_string()), + (Some(count), None) => Ok(count), + (None, Some(at_most)) => Ok(at_most.saturating_sub(starting_validator_count)), + }?; + + if n == 0 { + info!( + log, + "No need to produce and validators, exiting"; + "--count" => count, + "--at-most" => at_most, + "existing_validators" => starting_validator_count, + ); + return Ok(()); + } + + let deposit_contract = env + .testnet + .as_ref() + .ok_or_else(|| "Unable to run account manager without a testnet dir".to_string())? + .deposit_contract_address() + .map_err(|e| format!("Unable to parse deposit contract address: {}", e))?; + + if deposit_contract == Address::zero() { + return Err("Refusing to deposit to the zero address. Check testnet configuration.".into()); + } + + let (_event_loop_handle, transport) = + Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?; + let web3 = Web3::new(transport); + + env.runtime() + .block_on(poll_until_synced(web3.clone(), log.clone()))?; + + for i in 0..n { + let tx_hash_log = log.clone(); + + env.runtime() + .block_on( + ValidatorDirectoryBuilder::default() + .spec(spec.clone()) + .custom_deposit_amount(deposit_gwei) + .thread_random_keypairs() + .submit_eth1_deposit(web3.clone(), from_address, deposit_contract) + .map(move |(builder, tx_hash)| { + info!( + tx_hash_log, + "Validator deposited"; + "eth1_tx_hash" => format!("{:?}", tx_hash), + "index" => format!("{}/{}", i + 1, n), + ); + builder + }), + )? + .create_directory(validator_dir.clone())? + .write_keypair_files()? 
+ .write_eth1_data_file()? + .build()?; + } + + let ending_validator_count = existing_validator_count(&validator_dir)?; + let delta = ending_validator_count.saturating_sub(starting_validator_count); + + info!( + log, + "Success"; + "validators_created_and_deposited" => delta, + ); + + Ok(()) +} + +/// Returns the number of validators that exist in the given `validator_dir`. +/// +/// This function just assumes any file is a validator directory, making it likely to return a +/// higher number than accurate but never a lower one. +fn existing_validator_count(validator_dir: &PathBuf) -> Result { + fs::read_dir(&validator_dir) + .map(|iter| iter.count()) + .map_err(|e| format!("Unable to read {:?}: {}", validator_dir, e)) +} + +/// Run a poll on the `eth_syncing` endpoint, blocking until the node is synced. +fn poll_until_synced(web3: Web3, log: Logger) -> impl Future + Send +where + T: Transport + Send + 'static, + ::Out: Send, +{ + loop_fn((web3.clone(), log.clone()), move |(web3, log)| { + web3.clone() + .eth() + .syncing() + .map_err(|e| format!("Unable to read syncing state from eth1 node: {:?}", e)) + .and_then::<_, Box + Send>>(move |sync_state| { + match sync_state { + SyncState::Syncing(SyncInfo { + current_block, + highest_block, + .. 
+ }) => { + info!( + log, + "Waiting for eth1 node to sync"; + "est_highest_block" => format!("{}", highest_block), + "current_block" => format!("{}", current_block), + ); + + Box::new( + Delay::new(Instant::now() + SYNCING_STATE_RETRY_DELAY) + .map_err(|e| format!("Failed to trigger delay: {:?}", e)) + .and_then(|_| future::ok(Loop::Continue((web3, log)))), + ) + } + SyncState::NotSyncing => Box::new( + web3.clone() + .eth() + .block_number() + .map_err(|e| { + format!("Unable to read block number from eth1 node: {:?}", e) + }) + .and_then::<_, Box + Send>>( + |block_number| { + if block_number > 0.into() { + info!( + log, + "Eth1 node is synced"; + "head_block" => format!("{}", block_number), + ); + Box::new(future::ok(Loop::Break((web3, log)))) + } else { + Box::new( + Delay::new(Instant::now() + SYNCING_STATE_RETRY_DELAY) + .map_err(|e| { + format!("Failed to trigger delay: {:?}", e) + }) + .and_then(|_| { + info!( + log, + "Waiting for eth1 node to sync"; + "current_block" => 0, + ); + future::ok(Loop::Continue((web3, log))) + }), + ) + } + }, + ), + ), + } + }) + }) + .map(|_| ()) +} diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index 101c7634ec..4f3c80ec76 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -1,4 +1,5 @@ mod cli; +mod deposits; use clap::ArgMatches; use deposit_contract::DEPOSIT_GAS; @@ -6,7 +7,7 @@ use environment::{Environment, RuntimeContext}; use eth2_testnet_config::Eth2TestnetConfig; use futures::{future, Future, IntoFuture, Stream}; use rayon::prelude::*; -use slog::{crit, error, info, Logger}; +use slog::{error, info, Logger}; use std::fs; use std::fs::File; use std::io::Read; @@ -21,20 +22,8 @@ use web3::{ pub use cli::cli_app; -/// Run the account manager, logging an error if the operation did not succeed. 
-pub fn run(matches: &ArgMatches, mut env: Environment) { - let log = env.core_context().log.clone(); - match run_account_manager(matches, env) { - Ok(()) => (), - Err(e) => crit!(log, "Account manager failed"; "error" => e), - } -} - /// Run the account manager, returning an error if the operation did not succeed. -fn run_account_manager( - matches: &ArgMatches, - mut env: Environment, -) -> Result<(), String> { +pub fn run(matches: &ArgMatches, mut env: Environment) -> Result<(), String> { let context = env.core_context(); let log = context.log.clone(); @@ -60,6 +49,7 @@ fn run_account_manager( match matches.subcommand() { ("validator", Some(matches)) => match matches.subcommand() { + ("deposited", Some(matches)) => deposits::cli_run(matches, env)?, ("new", Some(matches)) => run_new_validator_subcommand(matches, datadir, env)?, _ => { return Err("Invalid 'validator new' command. See --help.".to_string()); diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 7239edbf75..9244d155ae 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -11,6 +11,9 @@ path = "src/lib.rs" [dev-dependencies] node_test_rig = { path = "../tests/node_test_rig" } +[features] +write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. + [dependencies] eth2_config = { path = "../eth2/utils/eth2_config" } beacon_chain = { path = "beacon_chain" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 1de7d458c3..fe8d6d8778 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -5,7 +5,6 @@ authors = ["Paul Hauner ", "Age Manning ; - type StoreMigrator: store::Migrate; + type StoreMigrator: Migrate; type SlotClock: slot_clock::SlotClock; type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; @@ -199,7 +204,7 @@ pub struct BeaconChain { /// A handler for events generated by the beacon chain. 
pub event_handler: T::EventHandler, /// Used to track the heads of the beacon chain. - pub(crate) head_tracker: HeadTracker, + pub(crate) head_tracker: Arc, /// A cache dedicated to block processing. pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the shuffling for a given epoch and state root. @@ -497,6 +502,10 @@ impl BeaconChain { self.head_tracker.heads() } + pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool { + self.head_tracker.contains_head((*block_hash).into()) + } + /// Returns the `BeaconState` at the given slot. /// /// Returns `None` when the state is not found in the database or there is an error skipping @@ -1115,11 +1124,7 @@ impl BeaconChain { })?; let signature_set = indexed_attestation_signature_set_from_pubkeys( - |validator_index| { - pubkey_cache - .get(validator_index) - .map(|pk| Cow::Borrowed(pk.as_point())) - }, + |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), &attestation.signature, &indexed_attestation, &fork, @@ -1230,6 +1235,76 @@ impl BeaconChain { } } + /// Check that the shuffling at `block_root` is equal to one of the shufflings of `state`. + /// + /// The `target_epoch` argument determines which shuffling to check compatibility with, it + /// should be equal to the current or previous epoch of `state`, or else `false` will be + /// returned. + /// + /// The compatibility check is designed to be fast: we check that the block that + /// determined the RANDAO mix for the `target_epoch` matches the ancestor of the block + /// identified by `block_root` (at that slot). 
+ pub fn shuffling_is_compatible( + &self, + block_root: &Hash256, + target_epoch: Epoch, + state: &BeaconState, + ) -> bool { + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let shuffling_lookahead = 1 + self.spec.min_seed_lookahead.as_u64(); + + // Shuffling can't have changed if we're in the first few epochs + if state.current_epoch() < shuffling_lookahead { + return true; + } + + // Otherwise the shuffling is determined by the block at the end of the target epoch + // minus the shuffling lookahead (usually 2). We call this the "pivot". + let pivot_slot = + if target_epoch == state.previous_epoch() || target_epoch == state.current_epoch() { + (target_epoch - shuffling_lookahead).end_slot(slots_per_epoch) + } else { + return false; + }; + + let state_pivot_block_root = match state.get_block_root(pivot_slot) { + Ok(root) => *root, + Err(e) => { + warn!( + &self.log, + "Missing pivot block root for attestation"; + "slot" => pivot_slot, + "error" => format!("{:?}", e), + ); + return false; + } + }; + + // Use fork choice's view of the block DAG to quickly evaluate whether the attestation's + // pivot block is the same as the current state's pivot block. If it is, then the + // attestation's shuffling is the same as the current state's. + // To account for skipped slots, find the first block at *or before* the pivot slot. + let fork_choice_lock = self.fork_choice.core_proto_array(); + let pivot_block_root = fork_choice_lock + .iter_block_roots(block_root) + .find(|(_, slot)| *slot <= pivot_slot) + .map(|(block_root, _)| block_root); + drop(fork_choice_lock); + + match pivot_block_root { + Some(root) => root == state_pivot_block_root, + None => { + debug!( + &self.log, + "Discarding attestation because of missing ancestor"; + "pivot_slot" => pivot_slot.as_u64(), + "block_root" => format!("{:?}", block_root), + ); + false + } + } + } + /// Accept some exit and queue it for inclusion in an appropriate block. 
pub fn process_voluntary_exit( &self, @@ -1365,6 +1440,20 @@ impl BeaconChain { Err(BlockError::BlockIsAlreadyKnown) => continue, // If the block is the genesis block, simply ignore this block. Err(BlockError::GenesisBlock) => continue, + // If the block is is for a finalized slot, simply ignore this block. + // + // The block is either: + // + // 1. In the canonical finalized chain. + // 2. In some non-canonical chain at a slot that has been finalized already. + // + // In the case of (1), there's no need to re-import and later blocks in this + // segement might be useful. + // + // In the case of (2), skipping the block is valid since we should never import it. + // However, we will potentially get a `ParentUnknown` on a later block. The sync + // protocol will need to ensure this is handled gracefully. + Err(BlockError::WouldRevertFinalizedSlot { .. }) => continue, // If there was an error whilst determining if the block was invalid, return that // error. Err(BlockError::BeaconChainError(e)) => { @@ -1445,7 +1534,34 @@ impl BeaconChain { &self, block: SignedBeaconBlock, ) -> Result, BlockError> { - GossipVerifiedBlock::new(block, self) + let slot = block.message.slot; + let graffiti_string = String::from_utf8(block.message.body.graffiti[..].to_vec()) + .unwrap_or_else(|_| format!("{:?}", &block.message.body.graffiti[..])); + + match GossipVerifiedBlock::new(block, self) { + Ok(verified) => { + debug!( + self.log, + "Successfully processed gossip block"; + "graffiti" => graffiti_string, + "slot" => slot, + "root" => format!("{:?}", verified.block_root()), + ); + + Ok(verified) + } + Err(e) => { + debug!( + self.log, + "Rejected gossip block"; + "error" => format!("{:?}", e), + "graffiti" => graffiti_string, + "slot" => slot, + ); + + Err(e) + } + } } /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and @@ -1722,6 +1838,21 @@ impl BeaconChain { .deposits_for_block_inclusion(&state, ð1_data, &self.spec)? 
.into(); + // Map from attestation head block root to shuffling compatibility. + // Used to memoize the `attestation_shuffling_is_compatible` function. + let mut shuffling_filter_cache = HashMap::new(); + let attestation_filter = |att: &&Attestation| -> bool { + *shuffling_filter_cache + .entry((att.data.beacon_block_root, att.data.target.epoch)) + .or_insert_with(|| { + self.shuffling_is_compatible( + &att.data.beacon_block_root, + att.data.target.epoch, + &state, + ) + }) + }; + let mut block = SignedBeaconBlock { message: BeaconBlock { slot: state.slot, @@ -1736,7 +1867,7 @@ impl BeaconChain { attester_slashings: attester_slashings.into(), attestations: self .op_pool - .get_attestations(&state, &self.spec) + .get_attestations(&state, attestation_filter, &self.spec) .map_err(BlockProductionError::OpPoolError)? .into(), deposits, @@ -1794,6 +1925,7 @@ impl BeaconChain { let beacon_block_root = self.fork_choice.find_head(&self)?; let current_head = self.head_info()?; + let old_finalized_root = current_head.finalized_checkpoint.root; if beacon_block_root == current_head.block_root { return Ok(()); @@ -1921,7 +2053,11 @@ impl BeaconChain { }); if new_finalized_epoch != old_finalized_epoch { - self.after_finalization(old_finalized_epoch, finalized_root)?; + self.after_finalization( + old_finalized_epoch, + finalized_root, + old_finalized_root.into(), + )?; } let _ = self.event_handler.register(EventKind::BeaconHeadChanged { @@ -1950,6 +2086,7 @@ impl BeaconChain { &self, old_finalized_epoch: Epoch, finalized_block_root: Hash256, + old_finalized_root: SignedBeaconBlockHash, ) -> Result<(), Error> { let finalized_block = self .store @@ -1989,10 +2126,13 @@ impl BeaconChain { // TODO: configurable max finality distance let max_finality_distance = 0; - self.store_migrator.freeze_to_state( + self.store_migrator.process_finalization( finalized_block.state_root, finalized_state, max_finality_distance, + Arc::clone(&self.head_tracker), + old_finalized_root, + 
finalized_block_root.into(), ); let _ = self.event_handler.register(EventKind::BeaconFinalization { @@ -2076,6 +2216,100 @@ impl BeaconChain { self.slot_clock .duration_to_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) } + + pub fn dump_as_dot(&self, output: &mut W) { + let canonical_head_hash = self + .canonical_head + .try_read_for(HEAD_LOCK_TIMEOUT) + .ok_or_else(|| Error::CanonicalHeadLockTimeout) + .unwrap() + .beacon_block_root; + let mut visited: HashSet = HashSet::new(); + let mut finalized_blocks: HashSet = HashSet::new(); + + let genesis_block_hash = Hash256::zero(); + write!(output, "digraph beacon {{\n").unwrap(); + write!(output, "\t_{:?}[label=\"genesis\"];\n", genesis_block_hash).unwrap(); + + // Canonical head needs to be processed first as otherwise finalized blocks aren't detected + // properly. + let heads = { + let mut heads = self.heads(); + let canonical_head_index = heads + .iter() + .position(|(block_hash, _)| *block_hash == canonical_head_hash) + .unwrap(); + let (canonical_head_hash, canonical_head_slot) = + heads.swap_remove(canonical_head_index); + heads.insert(0, (canonical_head_hash, canonical_head_slot)); + heads + }; + + for (head_hash, _head_slot) in heads { + for (block_hash, signed_beacon_block) in + ParentRootBlockIterator::new(&*self.store, head_hash) + { + if visited.contains(&block_hash) { + break; + } + visited.insert(block_hash); + + if signed_beacon_block.slot() % T::EthSpec::slots_per_epoch() == 0 { + let block = self.get_block(&block_hash).unwrap().unwrap(); + let state = self + .get_state(&block.state_root(), Some(block.slot())) + .unwrap() + .unwrap(); + finalized_blocks.insert(state.finalized_checkpoint.root); + } + + if block_hash == canonical_head_hash { + write!( + output, + "\t_{:?}[label=\"{} ({})\" shape=box3d];\n", + block_hash, + block_hash, + signed_beacon_block.slot() + ) + .unwrap(); + } else if finalized_blocks.contains(&block_hash) { + write!( + output, + "\t_{:?}[label=\"{} ({})\" 
shape=Msquare];\n", + block_hash, + block_hash, + signed_beacon_block.slot() + ) + .unwrap(); + } else { + write!( + output, + "\t_{:?}[label=\"{} ({})\" shape=box];\n", + block_hash, + block_hash, + signed_beacon_block.slot() + ) + .unwrap(); + } + write!( + output, + "\t_{:?} -> _{:?};\n", + block_hash, + signed_beacon_block.parent_root() + ) + .unwrap(); + } + } + + write!(output, "}}\n").unwrap(); + } + + // Used for debugging + #[allow(dead_code)] + pub fn dump_dot_file(&self, file_name: &str) { + let mut file = std::fs::File::create(file_name).unwrap(); + self.dump_as_dot(&mut file); + } } impl Drop for BeaconChain { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 679dfc667a..886835c6d0 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -49,19 +49,22 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, }; use parking_lot::RwLockReadGuard; +use slog::{error, Logger}; use slot_clock::SlotClock; +use ssz::Encode; use state_processing::{ - block_signature_verifier::{ - BlockSignatureVerifier, Error as BlockSignatureVerifierError, G1Point, - }, + block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, }; use std::borrow::Cow; +use std::fs; +use std::io::Write; use store::{Error as DBError, StateBatch}; +use tree_hash::TreeHash; use types::{ BeaconBlock, BeaconState, BeaconStateError, ChainSpec, CloneConfig, EthSpec, Hash256, - RelativeEpoch, SignedBeaconBlock, Slot, + PublicKey, RelativeEpoch, SignedBeaconBlock, Slot, }; mod block_processing_outcome; @@ -71,6 +74,12 @@ pub use block_processing_outcome::BlockProcessingOutcome; /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. 
const MAXIMUM_BLOCK_SLOT_NUMBER: u64 = 4_294_967_296; // 2^32 +/// If true, every time a block is processed the pre-state, post-state and block are written to SSZ +/// files in the temp directory. +/// +/// Only useful for testing. +const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files"); + /// Returned when a block was not verified. A block is not verified for two reasons: /// /// - The block is malformed/invalid (indicated by all results other than `BeaconChainError`. @@ -304,6 +313,10 @@ impl GossipVerifiedBlock { Err(BlockError::ProposalSignatureInvalid) } } + + pub fn block_root(&self) -> Hash256 { + self.block_root + } } impl IntoFullyVerifiedBlock for GossipVerifiedBlock { @@ -517,6 +530,13 @@ impl FullyVerifiedBlock { * invalid. */ + write_state( + &format!("state_pre_block_{}", block_root), + &state, + &chain.log, + ); + write_block(&block, block_root, &chain.log); + let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); if let Err(err) = per_block_processing( @@ -547,6 +567,12 @@ impl FullyVerifiedBlock { metrics::stop_timer(state_root_timer); + write_state( + &format!("state_post_block_{}", block_root), + &state, + &chain.log, + ); + /* * Check to ensure the state root on the block matches the one we have calculated.
*/ @@ -785,7 +811,7 @@ fn get_signature_verifier<'a, E: EthSpec>( state: &'a BeaconState, validator_pubkey_cache: &'a ValidatorPubkeyCache, spec: &'a ChainSpec, -) -> BlockSignatureVerifier<'a, E, impl Fn(usize) -> Option> + Clone> { +) -> BlockSignatureVerifier<'a, E, impl Fn(usize) -> Option> + Clone> { BlockSignatureVerifier::new( state, move |validator_index| { @@ -794,7 +820,7 @@ fn get_signature_verifier<'a, E: EthSpec>( if validator_index < state.validators.len() { validator_pubkey_cache .get(validator_index) - .map(|pk| Cow::Borrowed(pk.as_point())) + .map(|pk| Cow::Borrowed(pk)) } else { None } @@ -802,3 +828,46 @@ fn get_signature_verifier<'a, E: EthSpec>( spec, ) } + +fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { + if WRITE_BLOCK_PROCESSING_SSZ { + let root = state.tree_hash_root(); + let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot, root); + let mut path = std::env::temp_dir().join("lighthouse"); + let _ = fs::create_dir_all(path.clone()); + path = path.join(filename); + + match fs::File::create(path.clone()) { + Ok(mut file) => { + let _ = file.write_all(&state.as_ssz_bytes()); + } + Err(e) => error!( + log, + "Failed to log state"; + "path" => format!("{:?}", path), + "error" => format!("{:?}", e) + ), + } + } +} + +fn write_block(block: &SignedBeaconBlock, root: Hash256, log: &Logger) { + if WRITE_BLOCK_PROCESSING_SSZ { + let filename = format!("block_slot_{}_root{}.ssz", block.message.slot, root); + let mut path = std::env::temp_dir().join("lighthouse"); + let _ = fs::create_dir_all(path.clone()); + path = path.join(filename); + + match fs::File::create(path.clone()) { + Ok(mut file) => { + let _ = file.write_all(&block.as_ssz_bytes()); + } + Err(e) => error!( + log, + "Failed to log block"; + "path" => format!("{:?}", path), + "error" => format!("{:?}", e) + ), + } + } +} diff --git a/beacon_node/beacon_chain/src/block_verification/block_processing_outcome.rs 
b/beacon_node/beacon_chain/src/block_verification/block_processing_outcome.rs index cdfa35cab9..fe0f71a50e 100644 --- a/beacon_node/beacon_chain/src/block_verification/block_processing_outcome.rs +++ b/beacon_node/beacon_chain/src/block_verification/block_processing_outcome.rs @@ -4,7 +4,7 @@ use types::{Hash256, Slot}; /// This is a legacy object that is being kept around to reduce merge conflicts. /// -/// As soon as this is merged into master, it should be removed as soon as possible. +/// TODO: Remove this as soon as this is merged into master. #[derive(Debug, PartialEq)] pub enum BlockProcessingOutcome { /// Block was valid and imported into the block graph. diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index f5df3e4ac4..7a433aab4e 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -5,6 +5,7 @@ use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::events::NullEventHandler; use crate::fork_choice::SszForkChoice; use crate::head_tracker::HeadTracker; +use crate::migrate::Migrate; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::ShufflingCache; use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; @@ -47,7 +48,7 @@ impl for Witness where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -97,7 +98,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -229,7 +230,7 @@ where .get::(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY)) .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
.ok_or_else(|| { - "No persisted beacon chain found in store. Try deleting the .lighthouse/beacon dir." + "No persisted beacon chain found in store. Try purging the beacon chain database." .to_string() })?; @@ -442,7 +443,7 @@ where event_handler: self .event_handler .ok_or_else(|| "Cannot build without an event handler".to_string())?, - head_tracker: self.head_tracker.unwrap_or_default(), + head_tracker: Arc::new(self.head_tracker.unwrap_or_default()), snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( DEFAULT_SNAPSHOT_CACHE_SIZE, canonical_head, @@ -475,7 +476,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -545,7 +546,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -583,7 +584,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -622,7 +623,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -654,12 +655,12 @@ fn genesis_block( #[cfg(test)] mod test { use super::*; + use crate::migrate::{MemoryStore, NullMigrator}; use eth2_hashing::hash; use genesis::{generate_deterministic_keypairs, interop_genesis_state}; use sloggers::{null::NullLoggerBuilder, Build}; use ssz::Encode; use std::time::Duration; - use store::{migrate::NullMigrator, MemoryStore}; use tempfile::tempdir; use types::{EthSpec, MinimalEthSpec, Slot}; diff --git 
a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index a740c514f5..ccc86227de 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -43,6 +43,14 @@ pub enum Error { /// /// The eth1 caches are stale, or a junk value was voted into the chain. UnknownPreviousEth1BlockHash, + /// An arithmetic error occurred. + ArithError(safe_arith::ArithError), +} + +impl From for Error { + fn from(e: safe_arith::ArithError) -> Self { + Self::ArithError(e) + } } #[derive(Encode, Decode, Clone)] @@ -367,7 +375,7 @@ impl> Eth1ChainBackend for CachingEth1Backend Result, Error> { let deposit_index = state.eth1_deposit_index; - let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote) { + let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote)? { new_eth1_data.deposit_count } else { state.eth1_data.deposit_count diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 7bae0ce62d..7f4e64122c 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -25,11 +25,22 @@ impl HeadTracker { /// the upstream user. pub fn register_block(&self, block_root: Hash256, block: &BeaconBlock) { let mut map = self.0.write(); - map.remove(&block.parent_root); map.insert(block_root, block.slot); } + /// Removes abandoned head. + pub fn remove_head(&self, block_root: Hash256) { + let mut map = self.0.write(); + debug_assert!(map.contains_key(&block_root)); + map.remove(&block_root); + } + + /// Returns true iff `block_root` is a recognized head. + pub fn contains_head(&self, block_root: Hash256) -> bool { + self.0.read().contains_key(&block_root) + } + /// Returns the list of heads in the chain. 
pub fn heads(&self) -> Vec<(Hash256, Slot)> { self.0 diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index c4ff203082..0ab3594ae5 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -12,6 +12,7 @@ pub mod events; mod fork_choice; mod head_tracker; mod metrics; +pub mod migrate; mod naive_aggregation_pool; mod persisted_beacon_chain; mod shuffling_cache; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs new file mode 100644 index 0000000000..5107a4b035 --- /dev/null +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -0,0 +1,349 @@ +use crate::errors::BeaconChainError; +use crate::head_tracker::HeadTracker; +use parking_lot::Mutex; +use slog::{debug, warn, Logger}; +use std::collections::{HashMap, HashSet}; +use std::iter::FromIterator; +use std::mem; +use std::sync::mpsc; +use std::sync::Arc; +use std::thread; +use store::iter::{ParentRootBlockIterator, RootsIterator}; +use store::{hot_cold_store::HotColdDBError, Error, SimpleDiskStore, Store}; +pub use store::{DiskStore, MemoryStore}; +use types::*; +use types::{BeaconState, EthSpec, Hash256, Slot}; + +/// Trait for migration processes that update the database upon finalization. +pub trait Migrate, E: EthSpec>: Send + Sync + 'static { +​ fn new(db: Arc, log: Logger) -> Self; + + fn process_finalization( + &self, + _state_root: Hash256, + _new_finalized_state: BeaconState, + _max_finality_distance: u64, + _head_tracker: Arc, + _old_finalized_block_hash: SignedBeaconBlockHash, + _new_finalized_block_hash: SignedBeaconBlockHash, + ) { + } + + /// Traverses live heads and prunes blocks and states of chains that we know can't be built + /// upon because finalization would prohibit it. This is an optimisation intended to save disk + /// space. + /// + /// Assumptions: + /// * It is called after every finalization.
+ fn prune_abandoned_forks( + store: Arc, + head_tracker: Arc, + old_finalized_block_hash: SignedBeaconBlockHash, + new_finalized_block_hash: SignedBeaconBlockHash, + new_finalized_slot: Slot, + ) -> Result<(), BeaconChainError> { + let old_finalized_slot = store + .get_block(&old_finalized_block_hash.into())? + .ok_or_else(|| BeaconChainError::MissingBeaconBlock(old_finalized_block_hash.into()))? + .slot(); + + // Collect hashes from new_finalized_block back to old_finalized_block (inclusive) + let mut found_block = false; // hack for `take_until` + let newly_finalized_blocks: HashMap = HashMap::from_iter( + ParentRootBlockIterator::new(&*store, new_finalized_block_hash.into()) + .take_while(|(block_hash, _)| { + if found_block { + false + } else { + found_block |= *block_hash == old_finalized_block_hash.into(); + true + } + }) + .map(|(block_hash, block)| (block_hash.into(), block.slot())), + ); + + // We don't know which blocks are shared among abandoned chains, so we buffer and delete + // everything in one fell swoop. + let mut abandoned_blocks: HashSet = HashSet::new(); + let mut abandoned_states: HashSet<(Slot, BeaconStateHash)> = HashSet::new(); + let mut abandoned_heads: HashSet = HashSet::new(); + + for (head_hash, head_slot) in head_tracker.heads() { + let mut potentially_abandoned_head: Option = Some(head_hash); + let mut potentially_abandoned_blocks: Vec<( + Slot, + Option, + Option, + )> = Vec::new(); + + let head_state_hash = store + .get_block(&head_hash)? + .ok_or_else(|| BeaconStateError::MissingBeaconBlock(head_hash.into()))? + .state_root(); + + let iterator = std::iter::once((head_hash, head_state_hash, head_slot)) + .chain(RootsIterator::from_block(Arc::clone(&store), head_hash)?); + for (block_hash, state_hash, slot) in iterator { + if slot < old_finalized_slot { + // We must assume here any candidate chains include old_finalized_block_hash, + // i.e. 
there aren't any forks starting at a block that is a strict ancestor of + // old_finalized_block_hash. + break; + } + match newly_finalized_blocks.get(&block_hash.into()).copied() { + // Block is not finalized, mark it and its state for deletion + None => { + potentially_abandoned_blocks.push(( + slot, + Some(block_hash.into()), + Some(state_hash.into()), + )); + } + Some(finalized_slot) => { + // Block root is finalized, and we have reached the slot it was finalized + // at: we've hit a shared part of the chain. + if finalized_slot == slot { + // The first finalized block of a candidate chain lies after (in terms + // of slots order) the newly finalized block. It's not a candidate for + // pruning. + if finalized_slot == new_finalized_slot { + potentially_abandoned_blocks.clear(); + potentially_abandoned_head.take(); + } + + break; + } + // Block root is finalized, but we're at a skip slot: delete the state only. + else { + potentially_abandoned_blocks.push(( + slot, + None, + Some(state_hash.into()), + )); + } + } + } + } + + abandoned_heads.extend(potentially_abandoned_head.into_iter()); + if !potentially_abandoned_blocks.is_empty() { + abandoned_blocks.extend( + potentially_abandoned_blocks + .iter() + .filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash), + ); + abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map( + |(slot, _, maybe_state_hash)| match maybe_state_hash { + None => None, + Some(state_hash) => Some((*slot, *state_hash)), + }, + )); + } + } + + // XXX Should be performed atomically, see + // https://github.com/sigp/lighthouse/issues/692 + for block_hash in abandoned_blocks.into_iter() { + store.delete_block(&block_hash.into())?; + } + for (slot, state_hash) in abandoned_states.into_iter() { + store.delete_state(&state_hash.into(), slot)?; + } + for head_hash in abandoned_heads.into_iter() { + head_tracker.remove_head(head_hash); + } + + Ok(()) + } +} + +/// Migrator that does nothing, for stores that don't need migration.
+pub struct NullMigrator; + +impl Migrate, E> for NullMigrator { + fn new(_: Arc>, _: Logger) -> Self { + NullMigrator + } +} + +impl Migrate, E> for NullMigrator { + fn new(_: Arc>, _: Logger) -> Self { + NullMigrator + } +} + +/// Migrator that immediately calls the store's migration function, blocking the current execution. +/// +/// Mostly useful for tests. +pub struct BlockingMigrator { + db: Arc, +} + +impl> Migrate for BlockingMigrator { + fn new(db: Arc, _: Logger) -> Self { + BlockingMigrator { db } + } + + fn process_finalization( + &self, + state_root: Hash256, + new_finalized_state: BeaconState, + _max_finality_distance: u64, + head_tracker: Arc, + old_finalized_block_hash: SignedBeaconBlockHash, + new_finalized_block_hash: SignedBeaconBlockHash, + ) { + if let Err(e) = S::process_finalization(self.db.clone(), state_root, &new_finalized_state) { + // This migrator is only used for testing, so we just log to stderr without a logger. + eprintln!("Migration error: {:?}", e); + } + + if let Err(e) = Self::prune_abandoned_forks( + self.db.clone(), + head_tracker, + old_finalized_block_hash, + new_finalized_block_hash, + new_finalized_state.slot, + ) { + eprintln!("Pruning error: {:?}", e); + } + } +} + +type MpscSender = mpsc::Sender<( + Hash256, + BeaconState, + Arc, + SignedBeaconBlockHash, + SignedBeaconBlockHash, + Slot, +)>; + +/// Migrator that runs a background thread to migrate state from the hot to the cold database. 
+pub struct BackgroundMigrator { + db: Arc>, + tx_thread: Mutex<(MpscSender, thread::JoinHandle<()>)>, + log: Logger, +} + +impl Migrate, E> for BackgroundMigrator { + fn new(db: Arc>, log: Logger) -> Self { + let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone())); + Self { db, tx_thread, log } + } + + /// Perform the freezing operation on the database, + fn process_finalization( + &self, + finalized_state_root: Hash256, + new_finalized_state: BeaconState, + max_finality_distance: u64, + head_tracker: Arc, + old_finalized_block_hash: SignedBeaconBlockHash, + new_finalized_block_hash: SignedBeaconBlockHash, + ) { + if !self.needs_migration(new_finalized_state.slot, max_finality_distance) { + return; + } + + let (ref mut tx, ref mut thread) = *self.tx_thread.lock(); + + let new_finalized_slot = new_finalized_state.slot; + if let Err(tx_err) = tx.send(( + finalized_state_root, + new_finalized_state, + head_tracker, + old_finalized_block_hash, + new_finalized_block_hash, + new_finalized_slot, + )) { + let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone()); + + drop(mem::replace(tx, new_tx)); + let old_thread = mem::replace(thread, new_thread); + + // Join the old thread, which will probably have panicked, or may have + // halted normally just now as a result of us dropping the old `mpsc::Sender`. + if let Err(thread_err) = old_thread.join() { + warn!( + self.log, + "Migration thread died, so it was restarted"; + "reason" => format!("{:?}", thread_err) + ); + } + + // Retry at most once, we could recurse but that would risk overflowing the stack. + let _ = tx.send(tx_err.0); + } + } +} + +impl BackgroundMigrator { + /// Return true if a migration needs to be performed, given a new `finalized_slot`. 
+ fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool { + let finality_distance = finalized_slot - self.db.get_split_slot(); + finality_distance > max_finality_distance + } + + /// Spawn a new child thread to run the migration process. + /// + /// Return a channel handle for sending new finalized states to the thread. + fn spawn_thread( + db: Arc>, + log: Logger, + ) -> ( + mpsc::Sender<( + Hash256, + BeaconState, + Arc, + SignedBeaconBlockHash, + SignedBeaconBlockHash, + Slot, + )>, + thread::JoinHandle<()>, + ) { + let (tx, rx) = mpsc::channel(); + let thread = thread::spawn(move || { + while let Ok(( + state_root, + state, + head_tracker, + old_finalized_block_hash, + new_finalized_block_hash, + new_finalized_slot, + )) = rx.recv() + { + match DiskStore::process_finalization(db.clone(), state_root, &state) { + Ok(()) => {} + Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { + debug!( + log, + "Database migration postponed, unaligned finalized block"; + "slot" => slot.as_u64() + ); + } + Err(e) => { + warn!( + log, + "Database migration failed"; + "error" => format!("{:?}", e) + ); + } + }; + + match Self::prune_abandoned_forks( + db.clone(), + head_tracker, + old_finalized_block_hash, + new_finalized_block_hash, + new_finalized_slot, + ) { + Ok(()) => {} + Err(e) => warn!(log, "Block pruning failed: {:?}", e), + } + } + }); + + (tx, thread) + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 05dc5258b7..91f3a833c4 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,6 +1,7 @@ pub use crate::beacon_chain::{ BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY, }; +use crate::migrate::{BlockingMigrator, Migrate, NullMigrator}; pub use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::{ builder::{BeaconChainBuilder, Witness}, @@ -14,16 +15,16 @@ use 
sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::borrow::Cow; +use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use store::{ - migrate::{BlockingMigrator, NullMigrator}, - DiskStore, MemoryStore, Migrate, Store, -}; +use store::{DiskStore, MemoryStore, Store}; use tempfile::{tempdir, TempDir}; +use tree_hash::TreeHash; use types::{ - AggregateSignature, Attestation, BeaconState, ChainSpec, Domain, EthSpec, Hash256, Keypair, - SecretKey, Signature, SignedBeaconBlock, SignedRoot, Slot, + AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, EthSpec, + Hash256, Keypair, SecretKey, Signature, SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot, + Slot, }; pub use types::test_utils::generate_deterministic_keypairs; @@ -135,7 +136,10 @@ impl BeaconChainHarness> { .logger(log.clone()) .custom_spec(spec.clone()) .store(store.clone()) - .store_migrator( as Migrate<_, E>>::new(store)) + .store_migrator( as Migrate<_, E>>::new( + store, + log.clone(), + )) .data_dir(data_dir.path().to_path_buf()) .genesis_state( interop_genesis_state::(&keypairs, HARNESS_GENESIS_TIME, &spec) @@ -175,7 +179,10 @@ impl BeaconChainHarness> { .logger(log.clone()) .custom_spec(spec) .store(store.clone()) - .store_migrator( as Migrate<_, E>>::new(store)) + .store_migrator( as Migrate<_, E>>::new( + store, + log.clone(), + )) .data_dir(data_dir.path().to_path_buf()) .resume_from_db() .expect("should resume beacon chain from db") @@ -272,6 +279,123 @@ where head_block_root.expect("did not produce any blocks") } + /// Returns current canonical head slot + pub fn get_chain_slot(&self) -> Slot { + self.chain.slot().unwrap() + } + + /// Returns current canonical head state + pub fn get_head_state(&self) -> BeaconState { + self.chain.head().unwrap().beacon_state + } + + /// Adds a single block (synchronously) onto either the canonical chain (block_strategy == + /// 
OnCanonicalHead) or a fork (block_strategy == ForkCanonicalChainAt). + pub fn add_block( + &self, + state: &BeaconState, + block_strategy: BlockStrategy, + slot: Slot, + validators: &[usize], + ) -> (SignedBeaconBlockHash, BeaconState) { + while self.chain.slot().expect("should have a slot") < slot { + self.advance_slot(); + } + + let (block, new_state) = self.build_block(state.clone(), slot, block_strategy); + + let block_root = self + .chain + .process_block(block) + .expect("should not error during block processing"); + + self.chain.fork_choice().expect("should find head"); + + let attestation_strategy = AttestationStrategy::SomeValidators(validators.to_vec()); + self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); + (block_root.into(), new_state) + } + + /// `add_block()` repeated `num_blocks` times. + pub fn add_blocks( + &self, + mut state: BeaconState, + mut slot: Slot, + num_blocks: usize, + attesting_validators: &[usize], + block_strategy: BlockStrategy, + ) -> ( + HashMap, + HashMap, + Slot, + SignedBeaconBlockHash, + BeaconState, + ) { + let mut blocks: HashMap = HashMap::with_capacity(num_blocks); + let mut states: HashMap = HashMap::with_capacity(num_blocks); + for _ in 0..num_blocks { + let (new_root_hash, new_state) = + self.add_block(&state, block_strategy, slot, attesting_validators); + blocks.insert(slot, new_root_hash); + states.insert(slot, new_state.tree_hash_root().into()); + state = new_state; + slot += 1; + } + let head_hash = blocks[&(slot - 1)]; + (blocks, states, slot, head_hash, state) + } + + /// A wrapper on `add_blocks()` to avoid passing enums explicitly. 
+ pub fn add_canonical_chain_blocks( + &self, + state: BeaconState, + slot: Slot, + num_blocks: usize, + attesting_validators: &[usize], + ) -> ( + HashMap, + HashMap, + Slot, + SignedBeaconBlockHash, + BeaconState, + ) { + let block_strategy = BlockStrategy::OnCanonicalHead; + self.add_blocks( + state, + slot, + num_blocks, + attesting_validators, + block_strategy, + ) + } + + /// A wrapper on `add_blocks()` to avoid passing enums explicitly. + pub fn add_stray_blocks( + &self, + state: BeaconState, + slot: Slot, + num_blocks: usize, + attesting_validators: &[usize], + ) -> ( + HashMap, + HashMap, + Slot, + SignedBeaconBlockHash, + BeaconState, + ) { + let block_strategy = BlockStrategy::ForkCanonicalChainAt { + previous_slot: slot, + first_slot: slot + 2, + }; + self.add_blocks( + state, + slot + 2, + num_blocks, + attesting_validators, + block_strategy, + ) + } + /// Returns a newly created block, signed by the proposer for the given slot. fn build_block( &self, @@ -347,7 +471,9 @@ where .process_attestation(attestation, AttestationType::Aggregated) .expect("should not error during attestation processing") { - AttestationProcessingOutcome::Processed => (), + // PastEpoch can occur if we fork over several epochs + AttestationProcessingOutcome::Processed + | AttestationProcessingOutcome::PastEpoch { .. 
} => (), other => panic!("did not successfully process attestation: {:?}", other), } }); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 3b84a78ff5..5a305e7ccc 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -40,8 +40,8 @@ fn produces_attestations() { let state = &harness.chain.head().expect("should get head").beacon_state; assert_eq!(state.slot, num_blocks_produced, "head should have updated"); - assert!( - state.finalized_checkpoint.epoch > 0, + assert_ne!( + state.finalized_checkpoint.epoch, 0, "head should have updated" ); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index d9cfdbc203..c5a855fc06 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -6,9 +6,12 @@ extern crate lazy_static; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; -use beacon_chain::{AttestationProcessingOutcome, AttestationType}; +use beacon_chain::BeaconSnapshot; +use beacon_chain::{AttestationProcessingOutcome, AttestationType, StateSkipConfig}; use rand::Rng; use sloggers::{null::NullLoggerBuilder, Build}; +use std::collections::HashMap; +use std::collections::HashSet; use std::sync::Arc; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, @@ -20,11 +23,12 @@ use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; // Should ideally be divisible by 3. -pub const VALIDATOR_COUNT: usize = 24; +pub const LOW_VALIDATOR_COUNT: usize = 24; +pub const HIGH_VALIDATOR_COUNT: usize = 64; lazy_static! { /// A cached set of keys. 
- static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); + static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT); } type E = MinimalEthSpec; @@ -57,7 +61,7 @@ fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); harness.extend_chain( num_blocks_produced as usize, @@ -77,7 +81,7 @@ fn randomised_skips() { let mut num_blocks_produced = 0; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let rng = &mut XorShiftRng::from_seed([42; 16]); let mut head_slot = 0; @@ -113,14 +117,16 @@ fn randomised_skips() { fn long_skip() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Number of blocks to create in the first run, intentionally not falling on an epoch // boundary in order to check that the DB hot -> cold migration is capable of reaching // back across the skip distance, and correctly migrating those extra non-finalized states. let initial_blocks = E::slots_per_epoch() * 5 + E::slots_per_epoch() / 2; let skip_slots = E::slots_per_historical_root() as u64 * 8; - let final_blocks = E::slots_per_epoch() * 4; + // Create the minimum ~2.5 epochs of extra blocks required to re-finalize the chain. + // Having this set lower ensures that we start justifying and finalizing quickly after a skip. 
+ let final_blocks = 2 * E::slots_per_epoch() + E::slots_per_epoch() / 2; harness.extend_chain( initial_blocks as usize, @@ -223,7 +229,7 @@ fn split_slot_restore() { let split_slot = { let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let num_blocks = 4 * E::slots_per_epoch(); @@ -251,10 +257,10 @@ fn epoch_boundary_state_attestation_processing() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let late_validators = vec![0, 1]; - let timely_validators = (2..VALIDATOR_COUNT).collect::>(); + let timely_validators = (2..LOW_VALIDATOR_COUNT).collect::>(); let mut late_attestations = vec![]; @@ -333,7 +339,7 @@ fn epoch_boundary_state_attestation_processing() { fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let harness = get_harness(store.clone(), VALIDATOR_COUNT); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let unforked_blocks = 4 * E::slots_per_epoch(); @@ -345,13 +351,11 @@ fn delete_blocks_and_states() { ); // Create a fork post-finalization. 
- let two_thirds = (VALIDATOR_COUNT / 3) * 2; + let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2; let honest_validators: Vec = (0..two_thirds).collect(); - let faulty_validators: Vec = (two_thirds..VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (two_thirds..LOW_VALIDATOR_COUNT).collect(); - // NOTE: should remove this -1 and/or write a similar test once #845 is resolved - // https://github.com/sigp/lighthouse/issues/845 - let fork_blocks = 2 * E::slots_per_epoch() - 1; + let fork_blocks = 2 * E::slots_per_epoch(); let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block( &honest_validators, @@ -425,6 +429,825 @@ fn delete_blocks_and_states() { check_chain_dump(&harness, unforked_blocks + fork_blocks + 1); } +// Check that we never produce invalid blocks when there is deep forking that changes the shuffling. +// See https://github.com/sigp/lighthouse/issues/845 +fn multi_epoch_fork_valid_blocks_test( + initial_blocks: usize, + num_fork1_blocks: usize, + num_fork2_blocks: usize, + num_fork1_validators: usize, +) -> (TempDir, TestHarness, Hash256, Hash256) { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Create the initial portion of the chain + if initial_blocks > 0 { + harness.extend_chain( + initial_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + } + + assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT); + let fork1_validators: Vec = (0..num_fork1_validators).collect(); + let fork2_validators: Vec = (num_fork1_validators..LOW_VALIDATOR_COUNT).collect(); + + let (head1, head2) = harness.generate_two_forks_by_skipping_a_block( + &fork1_validators, + &fork2_validators, + num_fork1_blocks, + num_fork2_blocks, + ); + + (db_path, harness, head1, head2) +} + +// This is the minimal test of block production with different shufflings. 
+#[test] +fn block_production_different_shuffling_early() { + let slots_per_epoch = E::slots_per_epoch() as usize; + multi_epoch_fork_valid_blocks_test( + slots_per_epoch - 2, + slots_per_epoch + 3, + slots_per_epoch + 3, + LOW_VALIDATOR_COUNT / 2, + ); +} + +#[test] +fn block_production_different_shuffling_long() { + let slots_per_epoch = E::slots_per_epoch() as usize; + multi_epoch_fork_valid_blocks_test( + 2 * slots_per_epoch - 2, + 3 * slots_per_epoch, + 3 * slots_per_epoch, + LOW_VALIDATOR_COUNT / 2, + ); +} + +// Check that the op pool safely includes multiple attestations per block when necessary. +// This checks the correctness of the shuffling compatibility memoization. +#[test] +fn multiple_attestations_per_block() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store, HIGH_VALIDATOR_COUNT); + let chain = &harness.chain; + + harness.extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let head = chain.head().unwrap(); + let committees_per_slot = head + .beacon_state + .get_committee_count_at_slot(head.beacon_state.slot) + .unwrap(); + assert!(committees_per_slot > 1); + + for snapshot in chain.chain_dump().unwrap() { + assert_eq!( + snapshot.beacon_block.message.body.attestations.len() as u64, + if snapshot.beacon_block.slot() <= 1 { + 0 + } else { + committees_per_slot + } + ); + } +} + +#[test] +fn shuffling_compatible_linear_chain() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Skip the block at the end of the first epoch. 
+ let head_block_root = harness.extend_chain( + 4 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + check_shuffling_compatible( + &harness, + &get_state_for_block(&harness, head_block_root), + head_block_root, + true, + true, + None, + None, + ); +} + +#[test] +fn shuffling_compatible_missing_pivot_block() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Skip the block at the end of the first epoch. + harness.extend_chain( + E::slots_per_epoch() as usize - 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + harness.advance_slot(); + harness.advance_slot(); + let head_block_root = harness.extend_chain( + 2 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + check_shuffling_compatible( + &harness, + &get_state_for_block(&harness, head_block_root), + head_block_root, + true, + true, + Some(E::slots_per_epoch() - 2), + Some(E::slots_per_epoch() - 2), + ); +} + +#[test] +fn shuffling_compatible_simple_fork() { + let slots_per_epoch = E::slots_per_epoch() as usize; + let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( + 2 * slots_per_epoch, + 3 * slots_per_epoch, + 3 * slots_per_epoch, + LOW_VALIDATOR_COUNT / 2, + ); + + let head1_state = get_state_for_block(&harness, head1); + let head2_state = get_state_for_block(&harness, head2); + + check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None); + check_shuffling_compatible(&harness, &head1_state, head2, false, false, None, None); + check_shuffling_compatible(&harness, &head2_state, head1, false, false, None, None); + check_shuffling_compatible(&harness, &head2_state, head2, true, true, None, None); + + drop(db_path); +} + +#[test] +fn shuffling_compatible_short_fork() { + let slots_per_epoch = E::slots_per_epoch() as usize; + 
let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( + 2 * slots_per_epoch - 2, + slots_per_epoch + 2, + slots_per_epoch + 2, + LOW_VALIDATOR_COUNT / 2, + ); + + let head1_state = get_state_for_block(&harness, head1); + let head2_state = get_state_for_block(&harness, head2); + + check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None); + check_shuffling_compatible(&harness, &head1_state, head2, false, true, None, None); + // NOTE: don't check this case, as block 14 from the first chain appears valid on the second + // chain due to it matching the second chain's block 15. + // check_shuffling_compatible(&harness, &head2_state, head1, false, true, None, None); + check_shuffling_compatible( + &harness, + &head2_state, + head2, + true, + true, + // Required because of the skipped slot. + Some(2 * E::slots_per_epoch() - 2), + None, + ); + + drop(db_path); +} + +fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconState { + let head_block = harness.chain.get_block(&block_root).unwrap().unwrap(); + harness + .chain + .get_state(&head_block.state_root(), Some(head_block.slot())) + .unwrap() + .unwrap() +} + +/// Check the invariants that apply to `shuffling_is_compatible`. 
+fn check_shuffling_compatible( + harness: &TestHarness, + head_state: &BeaconState, + head_block_root: Hash256, + current_epoch_valid: bool, + previous_epoch_valid: bool, + current_epoch_cutoff_slot: Option, + previous_epoch_cutoff_slot: Option, +) { + let shuffling_lookahead = harness.chain.spec.min_seed_lookahead.as_u64() + 1; + let current_pivot_slot = + (head_state.current_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch()); + let previous_pivot_slot = + (head_state.previous_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch()); + + for (block_root, slot) in harness + .chain + .rev_iter_block_roots_from(head_block_root) + .unwrap() + { + // Shuffling is compatible targeting the current epoch, + // iff slot is greater than or equal to the current epoch pivot block + assert_eq!( + harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch(), + &head_state + ), + current_epoch_valid + && slot >= current_epoch_cutoff_slot.unwrap_or(current_pivot_slot.as_u64()) + ); + // Similarly for the previous epoch + assert_eq!( + harness.chain.shuffling_is_compatible( + &block_root, + head_state.previous_epoch(), + &head_state + ), + previous_epoch_valid + && slot >= previous_epoch_cutoff_slot.unwrap_or(previous_pivot_slot.as_u64()) + ); + // Targeting the next epoch should always return false + assert_eq!( + harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch() + 1, + &head_state + ), + false + ); + // Targeting two epochs before the current epoch should also always return false + if head_state.current_epoch() >= 2 { + assert_eq!( + harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch() - 2, + &head_state + ), + false + ); + } + } +} + +// Ensure blocks from abandoned forks are pruned from the Hot DB +#[test] +fn prunes_abandoned_fork_between_two_finalized_checkpoints() { + const VALIDATOR_COUNT: usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + 
let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + + let slot = harness.get_chain_slot(); + let state = harness.get_head_state(); + let (canonical_blocks_pre_finalization, _, slot, _, state) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks( + harness.get_head_state(), + slot, + slots_per_epoch - 1, + &faulty_validators, + ); + + // Precondition: Ensure all stray_blocks blocks are still known + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness + .chain + .get_state(&state_hash.into(), Some(slot)) + .unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + // Precondition: Only genesis is finalized + let chain_dump = harness.chain.chain_dump().unwrap(); + assert_eq!( + get_finalized_epoch_boundary_blocks(&chain_dump), + vec![Hash256::zero().into()].into_iter().collect(), + ); + + assert!(harness.chain.knows_head(&stray_head)); + + // Trigger finalization + let (canonical_blocks_post_finalization, _, _, _, _) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 5, &honest_validators); + + // Postcondition: New blocks got finalized + let chain_dump = harness.chain.chain_dump().unwrap(); + let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); + 
assert_eq!( + finalized_blocks, + vec![ + Hash256::zero().into(), + canonical_blocks_pre_finalization[&Slot::new(slots_per_epoch as u64)], + canonical_blocks_post_finalization[&Slot::new((slots_per_epoch * 2) as u64)], + ] + .into_iter() + .collect() + ); + + // Postcondition: Ensure all stray_blocks blocks have been pruned + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_none(), + "abandoned block {} should have been pruned", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_none(), + "stray state {} at slot {} should have been deleted", + state_hash, + slot + ); + } + + assert!(!harness.chain.knows_head(&stray_head)); +} + +#[test] +fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { + const VALIDATOR_COUNT: usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let all_validators: Vec = (0..VALIDATOR_COUNT).collect(); + let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + + // Fill up 0th epoch + let slot = harness.get_chain_slot(); + let state = harness.get_head_state(); + let (canonical_blocks_zeroth_epoch, _, slot, _, state) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + + // Fill up 1st epoch + let (_, _, canonical_slot, shared_head, canonical_state) = + harness.add_canonical_chain_blocks(state, slot, 1, &all_validators); + let (stray_blocks, stray_states, _, stray_head, _) = 
harness.add_stray_blocks( + canonical_state.clone(), + canonical_slot, + 1, + &faulty_validators, + ); + + // Preconditions + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + let chain_dump = harness.chain.chain_dump().unwrap(); + assert_eq!( + get_finalized_epoch_boundary_blocks(&chain_dump), + vec![Hash256::zero().into()].into_iter().collect(), + ); + + assert!(get_blocks(&chain_dump).contains(&shared_head)); + + // Trigger finalization + let (canonical_blocks, _, _, _, _) = harness.add_canonical_chain_blocks( + canonical_state, + canonical_slot, + slots_per_epoch * 5, + &honest_validators, + ); + + // Postconditions + let chain_dump = harness.chain.chain_dump().unwrap(); + let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); + assert_eq!( + finalized_blocks, + vec![ + Hash256::zero().into(), + canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)], + canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)], + ] + .into_iter() + .collect() + ); + + for &block_hash in stray_blocks.values() { + assert!( + harness + .chain + .get_block(&block_hash.into()) + .unwrap() + .is_none(), + "stray block {} should have been pruned", + block_hash, + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_none(), + "stray state {} at slot {} should have been deleted", + state_hash, + slot + ); + } + + assert!(!harness.chain.knows_head(&stray_head)); + assert!(get_blocks(&chain_dump).contains(&shared_head)); +} + +#[test] +fn 
pruning_does_not_touch_blocks_prior_to_finalization() { + const VALIDATOR_COUNT: usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + + // Fill up 0th epoch with canonical chain blocks + let slot = harness.get_chain_slot(); + let state = harness.get_head_state(); + let (canonical_blocks_zeroth_epoch, _, slot, _, state) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + + // Fill up 1st epoch. Contains a fork. + let (stray_blocks, stray_states, _, stray_head, _) = + harness.add_stray_blocks(state.clone(), slot, slots_per_epoch - 1, &faulty_validators); + + // Preconditions + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + let chain_dump = harness.chain.chain_dump().unwrap(); + assert_eq!( + get_finalized_epoch_boundary_blocks(&chain_dump), + vec![Hash256::zero().into()].into_iter().collect(), + ); + + // Trigger finalization + let (_, _, _, _, _) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 4, &honest_validators); + + // Postconditions + let chain_dump = harness.chain.chain_dump().unwrap(); + let finalized_blocks = 
get_finalized_epoch_boundary_blocks(&chain_dump); + assert_eq!( + finalized_blocks, + vec![ + Hash256::zero().into(), + canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)], + ] + .into_iter() + .collect() + ); + + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + assert!(harness.chain.knows_head(&stray_head)); +} + +#[test] +fn prunes_fork_running_past_finalized_checkpoint() { + const VALIDATOR_COUNT: usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + + // Fill up 0th epoch with canonical chain blocks + let slot = harness.get_chain_slot(); + let state = harness.get_head_state(); + let (canonical_blocks_zeroth_epoch, _, slot, _, state) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + + // Fill up 1st epoch. Contains a fork. 
+ let (stray_blocks_first_epoch, stray_states_first_epoch, stray_slot, _, stray_state) = + harness.add_stray_blocks(state.clone(), slot, slots_per_epoch, &faulty_validators); + + let (canonical_blocks_first_epoch, _, canonical_slot, _, canonical_state) = + harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators); + + // Fill up 2nd epoch. Extends both the canonical chain and the fork. + let (stray_blocks_second_epoch, stray_states_second_epoch, _, stray_head, _) = harness + .add_stray_blocks( + stray_state, + stray_slot, + slots_per_epoch - 1, + &faulty_validators, + ); + + // Precondition: Ensure all stray_blocks blocks are still known + let stray_blocks: HashMap = stray_blocks_first_epoch + .into_iter() + .chain(stray_blocks_second_epoch.into_iter()) + .collect(); + + let stray_states: HashMap = stray_states_first_epoch + .into_iter() + .chain(stray_states_second_epoch.into_iter()) + .collect(); + + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + // Precondition: Only genesis is finalized + let chain_dump = harness.chain.chain_dump().unwrap(); + assert_eq!( + get_finalized_epoch_boundary_blocks(&chain_dump), + vec![Hash256::zero().into()].into_iter().collect(), + ); + + assert!(harness.chain.knows_head(&stray_head)); + + // Trigger finalization + let (canonical_blocks_second_epoch, _, _, _, _) = harness.add_canonical_chain_blocks( + canonical_state, + canonical_slot, + slots_per_epoch * 4, + &honest_validators, + ); + + // Postconditions + let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch + .into_iter() + 
.chain(canonical_blocks_first_epoch.into_iter()) + .chain(canonical_blocks_second_epoch.into_iter()) + .collect(); + + // Postcondition: New blocks got finalized + let chain_dump = harness.chain.chain_dump().unwrap(); + let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); + assert_eq!( + finalized_blocks, + vec![ + Hash256::zero().into(), + canonical_blocks[&Slot::new(slots_per_epoch as u64)], + canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)], + ] + .into_iter() + .collect() + ); + + // Postcondition: Ensure all stray_blocks blocks have been pruned + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_none(), + "abandoned block {} should have been pruned", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_none(), + "stray state {} at slot {} should have been deleted", + state_hash, + slot + ); + } + + assert!(!harness.chain.knows_head(&stray_head)); +} + +// This is to check if states outside of normal block processing are pruned correctly. +#[test] +fn prunes_skipped_slots_states() { + const VALIDATOR_COUNT: usize = 24; + const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT); + const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY; + let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); + let faulty_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); + let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize; + + // Arrange skipped slots so as to cross the epoch boundary. That way, we exercise the code + // responsible for storing state outside of normal block processing.
+ + let canonical_slot = harness.get_chain_slot(); + let canonical_state = harness.get_head_state(); + let (canonical_blocks_zeroth_epoch, _, canonical_slot, _, canonical_state) = harness + .add_canonical_chain_blocks( + canonical_state, + canonical_slot, + slots_per_epoch - 1, + &honest_validators, + ); + + let (stray_blocks, stray_states, stray_slot, _, _) = harness.add_stray_blocks( + canonical_state.clone(), + canonical_slot, + slots_per_epoch, + &faulty_validators, + ); + + // Preconditions + for &block_hash in stray_blocks.values() { + let block = harness.chain.get_block(&block_hash.into()).unwrap(); + assert!( + block.is_some(), + "stray block {} should be still present", + block_hash + ); + } + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_some(), + "stray state {} at slot {} should be still present", + state_hash, + slot + ); + } + + let chain_dump = harness.chain.chain_dump().unwrap(); + assert_eq!( + get_finalized_epoch_boundary_blocks(&chain_dump), + vec![Hash256::zero().into()].into_iter().collect(), + ); + + // Make sure slots were skipped + let stray_state = harness + .chain + .state_at_slot(stray_slot, StateSkipConfig::WithoutStateRoots) + .unwrap(); + let block_root = stray_state.get_block_root(canonical_slot - 1); + assert_eq!(stray_state.get_block_root(canonical_slot), block_root); + assert_eq!(stray_state.get_block_root(canonical_slot + 1), block_root); + + let skipped_slots = vec![canonical_slot, canonical_slot + 1]; + for &slot in &skipped_slots { + assert_eq!(stray_state.get_block_root(slot), block_root); + let state_hash = stray_state.get_state_root(slot).unwrap(); + assert!( + harness + .chain + .get_state(&state_hash, Some(slot)) + .unwrap() + .is_some(), + "skipped slots state should be still present" + ); + } + + // Trigger finalization + let (canonical_blocks_post_finalization, _, _, _, _) = harness.add_canonical_chain_blocks( + 
canonical_state, + canonical_slot, + slots_per_epoch * 5, + &honest_validators, + ); + + // Postconditions + let chain_dump = harness.chain.chain_dump().unwrap(); + let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump); + let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch + .into_iter() + .chain(canonical_blocks_post_finalization.into_iter()) + .collect(); + assert_eq!( + finalized_blocks, + vec![ + Hash256::zero().into(), + canonical_blocks[&Slot::new(slots_per_epoch as u64)], + ] + .into_iter() + .collect() + ); + + for (&slot, &state_hash) in &stray_states { + let state = harness.chain.get_state(&state_hash.into(), None).unwrap(); + assert!( + state.is_none(), + "stray state {} at slot {} should have been deleted", + state_hash, + slot + ); + } + + for &slot in &skipped_slots { + assert_eq!(stray_state.get_block_root(slot), block_root); + let state_hash = stray_state.get_state_root(slot).unwrap(); + assert!( + harness + .chain + .get_state(&state_hash, Some(slot)) + .unwrap() + .is_none(), + "skipped slot states should have been pruned" + ); + } +} + /// Check that the head state's slot matches `expected_slot`. 
fn check_slot(harness: &TestHarness, expected_slot: u64) { let state = &harness.chain.head().expect("should get head").beacon_state; @@ -548,3 +1371,19 @@ fn check_iterators(harness: &TestHarness) { Some(Slot::new(0)) ); } + +fn get_finalized_epoch_boundary_blocks( + dump: &[BeaconSnapshot], +) -> HashSet { + dump.iter() + .cloned() + .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into()) + .collect() +} + +fn get_blocks(dump: &[BeaconSnapshot]) -> HashSet { + dump.iter() + .cloned() + .map(|checkpoint| checkpoint.beacon_block_root.into()) + .collect() +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index eeee91230f..ad6443bd6d 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -4,11 +4,9 @@ use crate::Client; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::{CachingEth1Backend, Eth1Chain}, + migrate::{BackgroundMigrator, Migrate, NullMigrator}, slot_clock::{SlotClock, SystemTimeSlotClock}, - store::{ - migrate::{BackgroundMigrator, Migrate, NullMigrator}, - DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig, - }, + store::{DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler, }; use environment::RuntimeContext; @@ -68,7 +66,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + Clone + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -403,7 +401,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + Clone + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -450,7 +448,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: 
EthSpec + 'static, @@ -498,7 +496,7 @@ impl > where TSlotClock: SlotClock + 'static, - TStoreMigrator: store::Migrate, TEthSpec> + 'static, + TStoreMigrator: Migrate, TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -540,7 +538,7 @@ impl > where TSlotClock: SlotClock + 'static, - TStoreMigrator: store::Migrate, TEthSpec> + 'static, + TStoreMigrator: Migrate, TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -600,10 +598,15 @@ where TEventHandler: EventHandler + 'static, { pub fn background_migrator(mut self) -> Result { + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "disk_store requires a log".to_string())? + .service_context("freezer_db".into()); let store = self.store.clone().ok_or_else(|| { "background_migrator requires the store to be initialized".to_string() })?; - self.store_migrator = Some(BackgroundMigrator::new(store)); + self.store_migrator = Some(BackgroundMigrator::new(store, context.log.clone())); Ok(self) } } @@ -621,7 +624,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -727,7 +730,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: store::Migrate, + TStoreMigrator: Migrate, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 825c76c7e8..38da10261e 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -48,18 +48,18 @@ pub fn spawn_notifier( let speedo = Mutex::new(Speedo::default()); - // Note: `interval_at` panics when interval_duration is 0 - // TODO: `Return type of closure passed to `for_each` is restricted 
to `Future` - // Hence, shifting the .then() error logs into the `for_each` closure. - // Can be solved with `TryStreamExt::try_for_each` if `Interval` implemented `TryStream`. - // Check if this can be refactored better. - let interval_future = interval_at(start_instant, interval_duration).for_each(|_| { - let connected_peer_count = network.connected_peers(); + let interval_future = Interval::new(start_instant, interval_duration) + .map_err( + move |e| error!(log_1, "Slot notifier timer failed"; "error" => format!("{:?}", e)), + ) + .for_each(move |_| { + let log = log_2.clone(); - let head_info = match beacon_chain.head_info() { - Ok(head) => head, - Err(e) => { - error!( + let connected_peer_count = network.connected_peers(); + let sync_state = network.sync_state(); + + let head_info = beacon_chain.head_info() + .map_err(|e| error!( log, "Notifier failed to notify, Failed to get beacon chain head info"; "error" => format!("{:?}", e) @@ -68,11 +68,8 @@ pub fn spawn_notifier( } }; - let head_slot = head_info.slot; - let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); - let current_slot = match beacon_chain.slot() { - Ok(slot) => slot, - Err(e) => { + let head_slot = head_info.slot; + let current_slot = beacon_chain.slot().map_err(|e| { error!( log, "Notify failed to notify, Unable to read current slot"; @@ -124,19 +121,45 @@ pub fn spawn_notifier( log, "Syncing"; "peers" => peer_count_pretty(connected_peer_count), - "distance" => distance, - "speed" => sync_speed_pretty(speedo.slots_per_second()), - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), + "finalized_root" => format!("{}", finalized_root), + "finalized_epoch" => finalized_epoch, + "head_block" => format!("{}", head_root), + "head_slot" => head_slot, + "current_slot" => current_slot, + "sync_state" =>format!("{}", sync_state) ); - return futures::future::ready(()); - }; - macro_rules! 
not_quite_synced_log { - ($message: expr) => { + // Log if we are syncing + if sync_state.is_syncing() { + let distance = format!( + "{} slots ({})", + head_distance.as_u64(), + slot_distance_pretty(head_distance, slot_duration) + ); + info!( + log, + "Syncing"; + "peers" => peer_count_pretty(connected_peer_count), + "distance" => distance, + "speed" => sync_speed_pretty(speedo.slots_per_second()), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), + ); + } else { + if sync_state.is_synced() { info!( - log, - $message; + log_2, + "Synced"; + "peers" => peer_count_pretty(connected_peer_count), + "finalized_root" => format!("{}", finalized_root), + "finalized_epoch" => finalized_epoch, + "epoch" => current_epoch, + "slot" => current_slot, + ); + } else { + info!( + log_2, + "Searching for peers"; "peers" => peer_count_pretty(connected_peer_count), "finalized_root" => format!("{}", finalized_root), "finalized_epoch" => finalized_epoch, @@ -145,25 +168,19 @@ pub fn spawn_notifier( ); } } - - if head_epoch + 1 == current_epoch { - not_quite_synced_log!("Synced to previous epoch") - } else if head_slot != current_slot { - not_quite_synced_log!("Synced to current epoch") - } else { - info!( - log, - "Synced"; - "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, - "epoch" => current_epoch, - "slot" => current_slot, - ); - }; - - futures::future::ready(()) - }); + Ok(()) + }) + .then(move |result| { + match result { + Ok(()) => Ok(()), + Err(e) => { + error!( + log_3, + "Notifier failed to notify"; + "error" => format!("{:?}", e) + ); + Ok(()) + } } }); let (exit_signal, exit) = tokio::sync::oneshot::channel(); diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index bf2e78fa85..f0171a39a2 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" hex 
= "0.3" # rust-libp2p is presently being sourced from a Sigma Prime fork of the # `libp2p/rust-libp2p` repository. -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "4e3003d5283040fee10da1299252dd060a838d97" } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "71cf486b4d992862f5a05f9f4ef5e5c1631f4add" } types = { path = "../../eth2/types" } hashmap_delay = { path = "../../eth2/utils/hashmap_delay" } eth2_ssz_types = { path = "../../eth2/utils/ssz_types" } @@ -33,6 +33,7 @@ parking_lot = "0.9.0" sha2 = "0.8.0" base64 = "0.11.0" snap = "1" +void = "1.0.2" [dev-dependencies] slog-stdlog = "4.0.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index d4b54f516a..bc8be50141 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -49,6 +49,7 @@ pub struct Behaviour { /// A cache of recently seen gossip messages. This is used to filter out any possible /// duplicates that may still be seen over gossipsub. #[behaviour(ignore)] + // TODO: Remove this seen_gossip_messages: LruCache, /// A collections of variables accessible outside the network service. 
#[behaviour(ignore)] @@ -297,14 +298,17 @@ impl Behaviour } } } else { - warn!(self.log, "A duplicate gossipsub message was received"; "message" => format!("{:?}", gs_msg)); + match PubsubMessage::::decode(&gs_msg.topics, &gs_msg.data) { + Err(e) => { + debug!(self.log, "Could not decode gossipsub message"; "error" => format!("{}", e)) + } + Ok(msg) => { + crit!(self.log, "A duplicate gossipsub message was received"; "message_source" => format!("{}", gs_msg.source), "propagated_peer" => format!("{}",propagation_source), "message" => format!("{}", msg)); + } + } } } GossipsubEvent::Subscribed { peer_id, topic } => { @@ -417,7 +428,8 @@ impl RPCEvent::Request(id, RPCRequest::Ping(ping)) => { // inform the peer manager and send the response self.peer_manager.ping_request(&peer_id, ping.data); - self.send_ping(id, peer_id); + // send a ping response + self.send_ping(id, peer_id, false); } RPCEvent::Request(id, RPCRequest::MetaData(_)) => { // send the requested meta-data @@ -466,16 +478,16 @@ impl Behaviour { - // send a ping to this peer - self.send_ping(RequestId::from(0usize), peer_id); + // send a ping request to this peer + self.send_ping(RequestId::from(0usize), peer_id, true); } PeerManagerEvent::MetaData(peer_id) => { self.send_meta_data_request(peer_id); } - PeerManagerEvent::DisconnectPeer(_peer_id) => { + PeerManagerEvent::_DisconnectPeer(_peer_id) => { //TODO: Implement } - PeerManagerEvent::BanPeer(_peer_id) => { + PeerManagerEvent::_BanPeer(_peer_id) => { //TODO: Implement } }, diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 95e78e4136..41395cef9e 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -101,7 +101,7 @@ impl Default for Config { // parameter. 
let gs_config = GossipsubConfigBuilder::new() .max_transmit_size(GOSSIP_MAX_SIZE) - .heartbeat_interval(Duration::from_secs(20)) // TODO: Reduce for mainnet + .heartbeat_interval(Duration::from_secs(1)) .manual_propagation() // require validation before propagation .no_source_id() .message_id_fn(gossip_message_id) @@ -114,7 +114,8 @@ impl Default for Config { .enr_update(true) // update IP based on PONG responses .enr_peer_update_min(2) // prevents NAT's should be raised for mainnet .query_parallelism(5) - .query_timeout(Duration::from_secs(2)) + .query_timeout(Duration::from_secs(60)) + .query_peer_timeout(Duration::from_secs(2)) .ip_limit(false) // limits /24 IP's in buckets. Enable for mainnet .ping_interval(Duration::from_secs(300)) .build(); diff --git a/beacon_node/eth2-libp2p/src/discovery/enr.rs b/beacon_node/eth2-libp2p/src/discovery/enr.rs index 6cd3beac41..edd08bc9ac 100644 --- a/beacon_node/eth2-libp2p/src/discovery/enr.rs +++ b/beacon_node/eth2-libp2p/src/discovery/enr.rs @@ -1,10 +1,11 @@ //! Helper functions and an extension trait for Ethereum 2 ENRs. 
+pub use libp2p::{core::identity::Keypair, discv5::enr::CombinedKey}; + use super::ENR_FILENAME; use crate::types::{Enr, EnrBitfield}; use crate::NetworkConfig; -use libp2p::core::identity::Keypair; -use libp2p::discv5::enr::{CombinedKey, EnrBuilder}; +use libp2p::discv5::enr::EnrBuilder; use slog::{debug, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; diff --git a/beacon_node/eth2-libp2p/src/discovery/mod.rs b/beacon_node/eth2-libp2p/src/discovery/mod.rs index a30f860921..2cd40b8a15 100644 --- a/beacon_node/eth2-libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2-libp2p/src/discovery/mod.rs @@ -2,21 +2,21 @@ pub(crate) mod enr; // Allow external use of the lighthouse ENR builder -pub use enr::build_enr; +pub use enr::{build_enr, CombinedKey, Keypair}; use crate::metrics; use crate::{error, Enr, NetworkConfig, NetworkGlobals}; use enr::{Eth2Enr, BITFIELD_ENR_KEY, ETH2_ENR_KEY}; use futures::prelude::*; -use libp2p::core::{identity::Keypair, ConnectedPoint, Multiaddr, PeerId}; +use libp2p::core::{ConnectedPoint, Multiaddr, PeerId}; use libp2p::discv5::enr::NodeId; use libp2p::discv5::{Discv5, Discv5Event}; use libp2p::multiaddr::Protocol; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use slog::{crit, debug, info, trace, warn}; +use slog::{crit, debug, info, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; -use std::collections::HashSet; +use std::collections::{HashSet, VecDeque}; use std::net::SocketAddr; use std::path::Path; use std::sync::Arc; @@ -30,13 +30,16 @@ const MAX_TIME_BETWEEN_PEER_SEARCHES: u64 = 120; /// Initial delay between peer searches. const INITIAL_SEARCH_DELAY: u64 = 5; /// Local ENR storage filename. -const ENR_FILENAME: &str = "enr.dat"; +pub const ENR_FILENAME: &str = "enr.dat"; /// Number of peers we'd like to have connected to a given long-lived subnet. const TARGET_SUBNET_PEERS: u64 = 3; /// Lighthouse discovery behaviour. 
This provides peer management and discovery using the Discv5 /// libp2p protocol. pub struct Discovery { + /// Events to be processed by the behaviour. + events: VecDeque>, + /// The currently banned peers. banned_peers: HashSet, @@ -105,7 +108,7 @@ impl Discovery { "peer_id" => format!("{}", bootnode_enr.peer_id()), "ip" => format!("{:?}", bootnode_enr.ip()), "udp" => format!("{:?}", bootnode_enr.udp()), - "tcp" => format!("{:?}", bootnode_enr.udp()) + "tcp" => format!("{:?}", bootnode_enr.tcp()) ); let _ = discovery.add_enr(bootnode_enr).map_err(|e| { warn!( @@ -117,6 +120,7 @@ impl Discovery { } Ok(Self { + events: VecDeque::with_capacity(16), banned_peers: HashSet::new(), max_peers: config.max_peers, peer_discovery_delay: Delay::new(Instant::now()), @@ -409,16 +413,18 @@ where match self.discovery.poll(params) { Async::Ready(NetworkBehaviourAction::GenerateEvent(event)) => { match event { - Discv5Event::Discovered(enr) => { + Discv5Event::Discovered(_enr) => { // peers that get discovered during a query but are not contactable or // don't match a predicate can end up here. 
For debugging purposes we // log these to see if we are unnecessarily dropping discovered peers + /* if enr.eth2() == self.local_enr().eth2() { trace!(self.log, "Peer found in process of query"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); } else { // this is temporary warning for debugging the DHT warn!(self.log, "Found peer during discovery not on correct fork"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); } + */ } Discv5Event::SocketUpdated(socket) => { info!(self.log, "Address updated"; "ip" => format!("{}",socket.ip()), "udp_port" => format!("{}", socket.port())); @@ -448,19 +454,19 @@ where for peer_id in closer_peers { // if we need more peers, attempt a connection - if self.network_globals.connected_peers() < self.max_peers - && self + if self.network_globals.connected_or_dialing_peers() + < self.max_peers + && !self .network_globals .peers .read() - .peer_info(&peer_id) - .is_none() + .is_connected_or_dialing(&peer_id) && !self.banned_peers.contains(&peer_id) { - debug!(self.log, "Peer discovered"; "peer_id"=> format!("{:?}", peer_id)); - return Async::Ready(NetworkBehaviourAction::DialPeer { - peer_id, - }); + debug!(self.log, "Connecting to discovered peer"; "peer_id"=> format!("{:?}", peer_id)); + self.network_globals.peers.write().dialing_peer(&peer_id); + self.events + .push_back(NetworkBehaviourAction::DialPeer { peer_id }); } } } @@ -472,6 +478,12 @@ where Async::NotReady => break, } } + + // process any queued events + if let Some(event) = self.events.pop_front() { + return Async::Ready(event); + } + Async::NotReady } } diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 98f3ffc9f3..9230a4afb0 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -20,6 +20,6 @@ pub use config::Config as NetworkConfig; pub use libp2p::gossipsub::{MessageId, Topic, TopicHash}; pub use libp2p::{multiaddr, Multiaddr}; pub use 
libp2p::{PeerId, Swarm}; -pub use peer_manager::{PeerDB, PeerInfo, PeerSyncStatus}; +pub use peer_manager::{PeerDB, PeerInfo, PeerSyncStatus, SyncInfo}; pub use rpc::RPCEvent; -pub use service::Service; +pub use service::{Service, NETWORK_KEY_FILENAME}; diff --git a/beacon_node/eth2-libp2p/src/peer_manager/client.rs b/beacon_node/eth2-libp2p/src/peer_manager/client.rs index 3eea0707f0..3ba68faaa3 100644 --- a/beacon_node/eth2-libp2p/src/peer_manager/client.rs +++ b/beacon_node/eth2-libp2p/src/peer_manager/client.rs @@ -3,9 +3,10 @@ //! Currently using identify to fingerprint. use libp2p::identify::IdentifyInfo; +use serde::Serialize; -#[derive(Debug)] /// Various client and protocol information related to a node. +#[derive(Clone, Debug, Serialize)] pub struct Client { /// The client's name (Ex: lighthouse, prism, nimbus, etc) pub kind: ClientKind, @@ -19,7 +20,7 @@ pub struct Client { pub agent_string: Option, } -#[derive(Debug)] +#[derive(Clone, Debug, Serialize)] pub enum ClientKind { /// A lighthouse node (the best kind). Lighthouse, @@ -125,6 +126,11 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String } (kind, version, os_version) } + Some("github.com") => { + let kind = ClientKind::Prysm; + let unknown = String::from("unknown"); + (kind, unknown.clone(), unknown) + } _ => { let unknown = String::from("unknown"); (ClientKind::Unknown, unknown.clone(), unknown) diff --git a/beacon_node/eth2-libp2p/src/peer_manager/mod.rs b/beacon_node/eth2-libp2p/src/peer_manager/mod.rs index 43ff1314b5..7060e82243 100644 --- a/beacon_node/eth2-libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2-libp2p/src/peer_manager/mod.rs @@ -16,12 +16,14 @@ use types::EthSpec; mod client; mod peer_info; +mod peer_sync_status; mod peerdb; -pub use peer_info::{PeerInfo, PeerSyncStatus}; +pub use peer_info::PeerInfo; +pub use peer_sync_status::{PeerSyncStatus, SyncInfo}; /// The minimum reputation before a peer is disconnected. 
// Most likely this needs tweaking -const MINIMUM_REPUTATION_BEFORE_BAN: Rep = 20; +const _MINIMUM_REPUTATION_BEFORE_BAN: Rep = 20; /// The time in seconds between re-status's peers. const STATUS_INTERVAL: u64 = 300; /// The time in seconds between PING events. We do not send a ping if the other peer as PING'd us within @@ -48,13 +50,13 @@ pub struct PeerManager { /// Each variant has an associated reputation change. pub enum PeerAction { /// The peer timed out on an RPC request/response. - TimedOut = -10, + _TimedOut = -10, /// The peer sent and invalid request/response or encoding. - InvalidMessage = -20, + _InvalidMessage = -20, /// The peer sent something objectively malicious. - Malicious = -50, + _Malicious = -50, /// Received an expected message. - ValidMessage = 20, + _ValidMessage = 20, /// Peer disconnected. Disconnected = -30, } @@ -68,9 +70,9 @@ pub enum PeerManagerEvent { /// Request METADATA from a peer. MetaData(PeerId), /// The peer should be disconnected. - DisconnectPeer(PeerId), + _DisconnectPeer(PeerId), /// The peer should be disconnected and banned. - BanPeer(PeerId), + _BanPeer(PeerId), } impl PeerManager { @@ -89,10 +91,12 @@ impl PeerManager { /// A ping request has been received. // NOTE: The behaviour responds with a PONG automatically + // TODO: Update last seen pub fn ping_request(&mut self, peer_id: &PeerId, seq: u64) { if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { // received a ping // reset the to-ping timer for this peer + debug!(self.log, "Received a ping request"; "peer_id" => format!("{}", peer_id), "seq_no" => seq); self.ping_peers.insert(peer_id.clone()); // if the sequence number is unknown send update the meta data of the peer. @@ -114,6 +118,7 @@ impl PeerManager { } /// A PONG has been returned from a peer. 
+ // TODO: Update last seen pub fn pong_response(&mut self, peer_id: &PeerId, seq: u64) { if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { // received a pong @@ -137,11 +142,13 @@ impl PeerManager { } /// Received a metadata response from a peer. + // TODO: Update last seen pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { if let Some(known_meta_data) = &peer_info.meta_data { if known_meta_data.seq_number < meta_data.seq_number { debug!(self.log, "Updating peer's metadata"; "peer_id" => format!("{}", peer_id), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number); + peer_info.meta_data = Some(meta_data); } else { warn!(self.log, "Received old metadata"; "peer_id" => format!("{}", peer_id), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number); } @@ -162,23 +169,27 @@ impl PeerManager { /// Checks the reputation of a peer and if it is too low, bans it and /// sends the corresponding event. Informs if it got banned - fn gets_banned(&mut self, peer_id: &PeerId) -> bool { + fn _gets_banned(&mut self, peer_id: &PeerId) -> bool { // if the peer was already banned don't inform again let mut peerdb = self.network_globals.peers.write(); - if peerdb.reputation(peer_id) < MINIMUM_REPUTATION_BEFORE_BAN - && !peerdb.connection_status(peer_id).is_banned() - { - peerdb.ban(peer_id); - self.events.push(PeerManagerEvent::BanPeer(peer_id.clone())); - return true; + + if let Some(connection_status) = peerdb.connection_status(peer_id) { + if peerdb.reputation(peer_id) < _MINIMUM_REPUTATION_BEFORE_BAN + && !connection_status.is_banned() + { + peerdb.ban(peer_id); + self.events + .push(PeerManagerEvent::_BanPeer(peer_id.clone())); + return true; + } } false } /// Requests that a peer get disconnected. 
- pub fn disconnect_peer(&mut self, peer_id: &PeerId) { + pub fn _disconnect_peer(&mut self, peer_id: &PeerId) { self.events - .push(PeerManagerEvent::DisconnectPeer(peer_id.clone())); + .push(PeerManagerEvent::_DisconnectPeer(peer_id.clone())); } /// Updates the state of the peer as disconnected. @@ -213,7 +224,7 @@ impl PeerManager { } /// Provides a given peer's reputation if it exists. - pub fn get_peer_rep(&self, peer_id: &PeerId) -> Rep { + pub fn _get_peer_rep(&self, peer_id: &PeerId) -> Rep { self.network_globals.peers.read().reputation(peer_id) } @@ -235,7 +246,7 @@ impl PeerManager { /// Reports a peer for some action. /// /// If the peer doesn't exist, log a warning and insert defaults. - pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction) { + pub fn _report_peer(&mut self, peer_id: &PeerId, action: PeerAction) { self.update_reputations(); self.network_globals .peers @@ -269,7 +280,8 @@ impl PeerManager { { let mut peerdb = self.network_globals.peers.write(); - if peerdb.connection_status(peer_id).is_banned() { + if peerdb.connection_status(peer_id).map(|c| c.is_banned()) == Some(true) { + // don't connect if the peer is banned return false; } @@ -293,6 +305,11 @@ impl PeerManager { true } + + /// Notifies the peer manager that this peer is being dialed. + pub fn _dialing_peer(&mut self, peer_id: &PeerId) { + self.network_globals.peers.write().dialing_peer(peer_id); + } } impl Stream for PeerManager { @@ -304,12 +321,18 @@ impl Stream for PeerManager { while let Async::Ready(Some(peer_id)) = self.ping_peers.poll().map_err(|e| { error!(self.log, "Failed to check for peers to ping"; "error" => format!("{}",e)); })? 
{ + debug!(self.log, "Pinging peer"; "peer_id" => format!("{}", peer_id)); + // add the ping timer back + self.ping_peers.insert(peer_id.clone()); self.events.push(PeerManagerEvent::Ping(peer_id)); } - while let Async::Ready(Some(peer_id)) = self.ping_peers.poll().map_err(|e| { + while let Async::Ready(Some(peer_id)) = self.status_peers.poll().map_err(|e| { error!(self.log, "Failed to check for peers to status"; "error" => format!("{}",e)); })? { + debug!(self.log, "Sending Status to peer"; "peer_id" => format!("{}", peer_id)); + // add the status timer back + self.status_peers.insert(peer_id.clone()); self.events.push(PeerManagerEvent::Status(peer_id)); } diff --git a/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs index b9e817f8c3..611766a160 100644 --- a/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs +++ b/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs @@ -1,13 +1,19 @@ use super::client::Client; use super::peerdb::{Rep, DEFAULT_REPUTATION}; +use super::PeerSyncStatus; use crate::rpc::MetaData; use crate::Multiaddr; +use serde::{ + ser::{SerializeStructVariant, Serializer}, + Serialize, +}; use std::time::Instant; -use types::{EthSpec, Slot, SubnetId}; +use types::{EthSpec, SubnetId}; use PeerConnectionStatus::*; /// Information about a given connected peer. -#[derive(Debug)] +#[derive(Clone, Debug, Serialize)] +#[serde(bound = "T: EthSpec")] pub struct PeerInfo { /// The connection status of the peer _status: PeerStatus, @@ -54,12 +60,13 @@ impl PeerInfo { } } -#[derive(Debug)] +#[derive(Clone, Debug, Serialize)] +/// The current health status of the peer. pub enum PeerStatus { - /// The peer is healthy + /// The peer is healthy. Healthy, - /// The peer is clogged. It has not been responding to requests on time - Clogged, + /// The peer is clogged. It has not been responding to requests on time. 
+ _Clogged, } impl Default for PeerStatus { @@ -68,48 +75,65 @@ impl Default for PeerStatus { } } -/// Connection Status of the peer +/// Connection Status of the peer. #[derive(Debug, Clone)] pub enum PeerConnectionStatus { + /// The peer is connected. Connected { - /// number of ingoing connections + /// number of ingoing connections. n_in: u8, - /// number of outgoing connections + /// number of outgoing connections. n_out: u8, }, + /// The peer has disconnected. Disconnected { - /// last time the peer was connected or discovered + /// last time the peer was connected or discovered. since: Instant, }, + /// The peer has been banned and is disconnected. Banned { - /// moment when the peer was banned + /// moment when the peer was banned. since: Instant, }, - Unknown { - /// time since we know of this peer + /// We are currently dialing this peer. + Dialing { + /// time since we last communicated with the peer. since: Instant, }, } -#[derive(Debug, Clone, PartialEq)] -pub enum PeerSyncStatus { - /// At the current state as our node or ahead of us. - Synced { - /// The last known head slot from the peer's handshake. - status_head_slot: Slot, - }, - /// Is behind our current head and not useful for block downloads. - Behind { - /// The last known head slot from the peer's handshake. - status_head_slot: Slot, - }, - /// Not currently known as a STATUS handshake has not occurred. - Unknown, +/// Serialization for http requests. 
+impl Serialize for PeerConnectionStatus { + fn serialize(&self, serializer: S) -> Result { + match self { + Connected { n_in, n_out } => { + let mut s = serializer.serialize_struct_variant("", 0, "Connected", 2)?; + s.serialize_field("in", n_in)?; + s.serialize_field("out", n_out)?; + s.end() + } + Disconnected { since } => { + let mut s = serializer.serialize_struct_variant("", 1, "Disconnected", 1)?; + s.serialize_field("since", &since.elapsed().as_secs())?; + s.end() + } + Banned { since } => { + let mut s = serializer.serialize_struct_variant("", 2, "Banned", 1)?; + s.serialize_field("since", &since.elapsed().as_secs())?; + s.end() + } + Dialing { since } => { + let mut s = serializer.serialize_struct_variant("", 3, "Dialing", 1)?; + s.serialize_field("since", &since.elapsed().as_secs())?; + s.end() + } + } + } } impl Default for PeerConnectionStatus { fn default() -> Self { - PeerConnectionStatus::Unknown { + PeerConnectionStatus::Dialing { since: Instant::now(), } } @@ -124,6 +148,14 @@ impl PeerConnectionStatus { } } + /// Checks if the status is connected + pub fn is_dialing(&self) -> bool { + match self { + PeerConnectionStatus::Dialing { .. } => true, + _ => false, + } + } + /// Checks if the status is banned pub fn is_banned(&self) -> bool { match self { @@ -145,7 +177,7 @@ impl PeerConnectionStatus { pub fn connect_ingoing(&mut self) { match self { Connected { n_in, .. } => *n_in += 1, - Disconnected { .. } | Banned { .. } | Unknown { .. } => { + Disconnected { .. } | Banned { .. } | Dialing { .. } => { *self = Connected { n_in: 1, n_out: 0 } } } @@ -156,7 +188,7 @@ impl PeerConnectionStatus { pub fn connect_outgoing(&mut self) { match self { Connected { n_out, .. } => *n_out += 1, - Disconnected { .. } | Banned { .. } | Unknown { .. } => { + Disconnected { .. } | Banned { .. } | Dialing { .. 
} => { *self = Connected { n_in: 0, n_out: 1 } } } diff --git a/beacon_node/eth2-libp2p/src/peer_manager/peer_sync_status.rs b/beacon_node/eth2-libp2p/src/peer_manager/peer_sync_status.rs new file mode 100644 index 0000000000..9d0e6c1cfa --- /dev/null +++ b/beacon_node/eth2-libp2p/src/peer_manager/peer_sync_status.rs @@ -0,0 +1,104 @@ +//! Handles individual sync status for peers. + +use serde::Serialize; +use types::{Epoch, Hash256, Slot}; + +#[derive(Clone, Debug, Serialize)] +/// The current sync status of the peer. +pub enum PeerSyncStatus { + /// At the current state as our node or ahead of us. + Synced { info: SyncInfo }, + /// The peer has greater knowledge about the canonical chain than we do. + Advanced { info: SyncInfo }, + /// Is behind our current head and not useful for block downloads. + Behind { info: SyncInfo }, + /// Not currently known as a STATUS handshake has not occurred. + Unknown, +} + +/// This is stored inside the PeerSyncStatus and is very similar to `PeerSyncInfo` in the +/// `Network` crate. +#[derive(Clone, Debug, Serialize)] +pub struct SyncInfo { + pub status_head_slot: Slot, + pub status_head_root: Hash256, + pub status_finalized_epoch: Epoch, + pub status_finalized_root: Hash256, +} + +impl PeerSyncStatus { + /// Returns true if the peer has advanced knowledge of the chain. + pub fn is_advanced(&self) -> bool { + match self { + PeerSyncStatus::Advanced { .. } => true, + _ => false, + } + } + + /// Returns true if the peer is up to date with the current chain. + pub fn is_synced(&self) -> bool { + match self { + PeerSyncStatus::Synced { .. } => true, + _ => false, + } + } + + /// Returns true if the peer is behind the current chain. + pub fn is_behind(&self) -> bool { + match self { + PeerSyncStatus::Behind { .. } => true, + _ => false, + } + } + + /// Updates the sync state given a fully synced peer. + /// Returns true if the state has changed. 
+ pub fn update_synced(&mut self, info: SyncInfo) -> bool { + let new_state = PeerSyncStatus::Synced { info }; + + match self { + PeerSyncStatus::Synced { .. } | PeerSyncStatus::Unknown => { + *self = new_state; + false // state was not updated + } + _ => { + *self = new_state; + true + } + } + } + + /// Updates the sync state given a peer that is further ahead in the chain than us. + /// Returns true if the state has changed. + pub fn update_advanced(&mut self, info: SyncInfo) -> bool { + let new_state = PeerSyncStatus::Advanced { info }; + + match self { + PeerSyncStatus::Advanced { .. } | PeerSyncStatus::Unknown => { + *self = new_state; + false // state was not updated + } + _ => { + *self = new_state; + true + } + } + } + + /// Updates the sync state given a peer that is behind us in the chain. + /// Returns true if the state has changed. + pub fn update_behind(&mut self, info: SyncInfo) -> bool { + let new_state = PeerSyncStatus::Behind { info }; + + match self { + PeerSyncStatus::Behind { .. } | PeerSyncStatus::Unknown => { + *self = new_state; + false // state was not updated + } + _ => { + *self = new_state; + true + } + } + } +} diff --git a/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs index 05af9e0197..066fa3736b 100644 --- a/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs +++ b/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs @@ -1,8 +1,10 @@ -use super::peer_info::{PeerConnectionStatus, PeerInfo, PeerSyncStatus}; +use super::peer_info::{PeerConnectionStatus, PeerInfo}; +use super::peer_sync_status::PeerSyncStatus; use crate::rpc::methods::MetaData; use crate::PeerId; use slog::{crit, warn}; use std::collections::HashMap; +use std::time::Instant; use types::{EthSpec, SubnetId}; /// A peer's reputation. @@ -41,8 +43,13 @@ impl PeerDB { .map_or(DEFAULT_REPUTATION, |info| info.reputation) } + /// Returns an iterator over all peers in the db. 
+ pub fn peers(&self) -> impl Iterator)> { + self.peers.iter() + } + /// Gives the ids of all known peers. - pub fn peers(&self) -> impl Iterator { + pub fn peer_ids(&self) -> impl Iterator { self.peers.keys() } @@ -66,10 +73,27 @@ impl PeerDB { } /// Gives the ids of all known connected peers. - pub fn connected_peers(&self) -> impl Iterator { + pub fn connected_peers(&self) -> impl Iterator)> { self.peers .iter() .filter(|(_, info)| info.connection_status.is_connected()) + } + + /// Gives the ids of all known connected peers. + pub fn connected_peer_ids(&self) -> impl Iterator { + self.peers + .iter() + .filter(|(_, info)| info.connection_status.is_connected()) + .map(|(peer_id, _)| peer_id) + } + + /// Connected or dialing peers + pub fn connected_or_dialing_peers(&self) -> impl Iterator { + self.peers + .iter() + .filter(|(_, info)| { + info.connection_status.is_connected() || info.connection_status.is_dialing() + }) .map(|(peer_id, _)| peer_id) } @@ -78,7 +102,7 @@ impl PeerDB { self.peers .iter() .filter(|(_, info)| { - if let PeerSyncStatus::Synced { .. } = info.sync_status { + if info.sync_status.is_synced() || info.sync_status.is_advanced() { return info.connection_status.is_connected(); } false @@ -141,16 +165,47 @@ impl PeerDB { } /// Returns the peer's connection status. Returns unknown if the peer is not in the DB. - pub fn connection_status(&self, peer_id: &PeerId) -> PeerConnectionStatus { + pub fn connection_status(&self, peer_id: &PeerId) -> Option { self.peer_info(peer_id) - .map_or(PeerConnectionStatus::default(), |info| { - info.connection_status.clone() - }) + .map(|info| info.connection_status.clone()) + } + + /// Returns if the peer is already connected. + pub fn is_connected(&self, peer_id: &PeerId) -> bool { + if let Some(PeerConnectionStatus::Connected { .. }) = self.connection_status(peer_id) { + true + } else { + false + } + } + + /// If we are connected or currently dialing the peer returns true. 
+ pub fn is_connected_or_dialing(&self, peer_id: &PeerId) -> bool { + match self.connection_status(peer_id) { + Some(PeerConnectionStatus::Connected { .. }) + | Some(PeerConnectionStatus::Dialing { .. }) => true, + _ => false, + } } /* Setters */ - /// Sets a peer as connected with an ingoing connection + /// A peer is being dialed. + pub fn dialing_peer(&mut self, peer_id: &PeerId) { + let info = self + .peers + .entry(peer_id.clone()) + .or_insert_with(|| Default::default()); + + if info.connection_status.is_disconnected() { + self.n_dc -= 1; + } + info.connection_status = PeerConnectionStatus::Dialing { + since: Instant::now(), + }; + } + + /// Sets a peer as connected with an ingoing connection. pub fn connect_ingoing(&mut self, peer_id: &PeerId) { let info = self .peers @@ -163,7 +218,7 @@ impl PeerDB { info.connection_status.connect_ingoing(); } - /// Sets a peer as connected with an outgoing connection + /// Sets a peer as connected with an outgoing connection. pub fn connect_outgoing(&mut self, peer_id: &PeerId) { let info = self .peers @@ -176,7 +231,7 @@ impl PeerDB { info.connection_status.connect_outgoing(); } - /// Sets the peer as disconnected + /// Sets the peer as disconnected. 
pub fn disconnect(&mut self, peer_id: &PeerId) { let log_ref = &self.log; let info = self.peers.entry(peer_id.clone()).or_insert_with(|| { @@ -364,7 +419,7 @@ mod tests { } assert_eq!(pdb.n_dc, 0); - for p in pdb.connected_peers().cloned().collect::>() { + for p in pdb.connected_peer_ids().cloned().collect::>() { pdb.disconnect(&p); } diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index b3c7f2ce7b..bd8cbfd9f8 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -1,10 +1,7 @@ use crate::rpc::methods::*; use crate::rpc::{ codec::base::OutboundCodec, - protocol::{ - ProtocolId, RPCError, RPC_BLOCKS_BY_RANGE, RPC_BLOCKS_BY_ROOT, RPC_GOODBYE, RPC_META_DATA, - RPC_PING, RPC_STATUS, - }, + protocol::{Encoding, Protocol, ProtocolId, RPCError, Version}, }; use crate::rpc::{ErrorMessage, RPCErrorResponse, RPCRequest, RPCResponse}; use libp2p::bytes::{BufMut, Bytes, BytesMut}; @@ -28,7 +25,7 @@ impl SSZInboundCodec { uvi_codec.set_max_len(max_packet_size); // this encoding only applies to ssz. 
- debug_assert!(protocol.encoding.as_str() == "ssz"); + debug_assert_eq!(protocol.encoding, Encoding::SSZ); SSZInboundCodec { inner: uvi_codec, @@ -81,39 +78,34 @@ impl Decoder for SSZInboundCodec { fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { match self.inner.decode(src).map_err(RPCError::from) { - Ok(Some(packet)) => match self.protocol.message_name.as_str() { - RPC_STATUS => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes( + Ok(Some(packet)) => match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes( &packet, )?))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_GOODBYE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( + Protocol::Goodbye => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( &packet, )?))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::BlocksByRange( + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::BlocksByRange( BlocksByRangeRequest::from_ssz_bytes(&packet)?, ))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest { + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest { block_roots: Vec::from_ssz_bytes(&packet)?, }))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_PING => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::Ping(Ping { + Protocol::Ping => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Ping(Ping 
{ data: u64::from_ssz_bytes(&packet)?, }))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_META_DATA => match self.protocol.version.as_str() { - "1" => { + Protocol::MetaData => match self.protocol.version { + Version::V1 => { if packet.len() > 0 { Err(RPCError::Custom( "Get metadata request should be empty".into(), @@ -122,9 +114,7 @@ impl Decoder for SSZInboundCodec { Ok(Some(RPCRequest::MetaData(PhantomData))) } } - _ => unreachable!("Cannot negotiate an unknown version"), }, - _ => unreachable!("Cannot negotiate an unknown protocol"), }, Ok(None) => Ok(None), Err(e) => Err(e), @@ -146,7 +136,7 @@ impl SSZOutboundCodec { uvi_codec.set_max_len(max_packet_size); // this encoding only applies to ssz. - debug_assert!(protocol.encoding.as_str() == "ssz"); + debug_assert_eq!(protocol.encoding, Encoding::SSZ); SSZOutboundCodec { inner: uvi_codec, @@ -191,39 +181,35 @@ impl Decoder for SSZOutboundCodec { // the object is empty. We return the empty object if this is the case // clear the buffer and return an empty object src.clear(); - match self.protocol.message_name.as_str() { - RPC_STATUS => match self.protocol.version.as_str() { - "1" => Err(RPCError::Custom( + match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( "Status stream terminated unexpectedly".into(), )), // cannot have an empty HELLO message. The stream has terminated unexpectedly - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_GOODBYE => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), - RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() { - "1" => Err(RPCError::Custom( + Protocol::Goodbye => { + Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")) + } + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( "Status stream terminated unexpectedly, empty block".into(), )), // cannot have an empty block message. 
- _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() { - "1" => Err(RPCError::Custom( + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( "Status stream terminated unexpectedly, empty block".into(), )), // cannot have an empty block message. - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_PING => match self.protocol.version.as_str() { - "1" => Err(RPCError::Custom( + Protocol::Ping => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( "PING stream terminated unexpectedly".into(), )), // cannot have an empty block message. - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_META_DATA => match self.protocol.version.as_str() { - "1" => Err(RPCError::Custom( + Protocol::MetaData => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( "Metadata stream terminated unexpectedly".into(), )), // cannot have an empty block message. 
- _ => unreachable!("Cannot negotiate an unknown version"), }, - _ => unreachable!("Cannot negotiate an unknown protocol"), } } else { match self.inner.decode(src).map_err(RPCError::from) { @@ -231,41 +217,35 @@ impl Decoder for SSZOutboundCodec { // take the bytes from the buffer let raw_bytes = packet.take(); - match self.protocol.message_name.as_str() { - RPC_STATUS => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes( - &raw_bytes, - )?))), - _ => unreachable!("Cannot negotiate an unknown version"), + match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::Status( + StatusMessage::from_ssz_bytes(&raw_bytes)?, + ))), }, - RPC_GOODBYE => { + Protocol::Goodbye => { Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")) } - RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BlocksByRange(Box::new( + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new( SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?, )))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new( SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?, )))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_PING => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::Pong(Ping { + Protocol::Ping => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(&raw_bytes)?, }))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_META_DATA => match self.protocol.version.as_str() { - "1" => 
Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes( - &raw_bytes, - )?))), - _ => unreachable!("Cannot negotiate an unknown version"), + Protocol::MetaData => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::MetaData( + MetaData::from_ssz_bytes(&raw_bytes)?, + ))), }, - _ => unreachable!("Cannot negotiate an unknown protocol"), } } Ok(None) => Ok(None), // waiting for more bytes diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs index c345a71451..e2f0db1ff4 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs @@ -1,10 +1,7 @@ use crate::rpc::methods::*; use crate::rpc::{ codec::base::OutboundCodec, - protocol::{ - ProtocolId, RPCError, RPC_BLOCKS_BY_RANGE, RPC_BLOCKS_BY_ROOT, RPC_GOODBYE, RPC_META_DATA, - RPC_PING, RPC_STATUS, - }, + protocol::{Encoding, Protocol, ProtocolId, RPCError, Version}, }; use crate::rpc::{ErrorMessage, RPCErrorResponse, RPCRequest, RPCResponse}; use libp2p::bytes::BytesMut; @@ -34,7 +31,7 @@ impl SSZSnappyInboundCodec { pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { let uvi_codec = Uvi::default(); // this encoding only applies to ssz_snappy. 
- debug_assert!(protocol.encoding.as_str() == "ssz_snappy"); + debug_assert_eq!(protocol.encoding, Encoding::SSZSnappy); SSZSnappyInboundCodec { inner: uvi_codec, @@ -122,39 +119,34 @@ impl Decoder for SSZSnappyInboundCodec { let n = reader.get_ref().position(); self.len = None; src.split_to(n as usize); - match self.protocol.message_name.as_str() { - RPC_STATUS => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes( + match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes( &decoded_buffer, )?))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_GOODBYE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( - &decoded_buffer, - )?))), - _ => unreachable!("Cannot negotiate an unknown version"), + Protocol::Goodbye => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Goodbye( + GoodbyeReason::from_ssz_bytes(&decoded_buffer)?, + ))), }, - RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::BlocksByRange( + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::BlocksByRange( BlocksByRangeRequest::from_ssz_bytes(&decoded_buffer)?, ))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest { + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest { block_roots: Vec::from_ssz_bytes(&decoded_buffer)?, }))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_PING => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::Ping(Ping::from_ssz_bytes( + Protocol::Ping => match self.protocol.version { + Version::V1 => 
Ok(Some(RPCRequest::Ping(Ping::from_ssz_bytes( &decoded_buffer, )?))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_META_DATA => match self.protocol.version.as_str() { - "1" => { + Protocol::MetaData => match self.protocol.version { + Version::V1 => { if decoded_buffer.len() > 0 { Err(RPCError::Custom( "Get metadata request should be empty".into(), @@ -163,9 +155,7 @@ impl Decoder for SSZSnappyInboundCodec { Ok(Some(RPCRequest::MetaData(PhantomData))) } } - _ => unreachable!("Cannot negotiate an unknown version"), }, - _ => unreachable!("Cannot negotiate an unknown protocol"), } } Err(e) => match e.kind() { @@ -194,7 +184,7 @@ impl SSZSnappyOutboundCodec { pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { let uvi_codec = Uvi::default(); // this encoding only applies to ssz_snappy. - debug_assert!(protocol.encoding.as_str() == "ssz_snappy"); + debug_assert_eq!(protocol.encoding, Encoding::SSZSnappy); SSZSnappyOutboundCodec { inner: uvi_codec, @@ -279,41 +269,35 @@ impl Decoder for SSZSnappyOutboundCodec { let n = reader.get_ref().position(); self.len = None; src.split_to(n as usize); - match self.protocol.message_name.as_str() { - RPC_STATUS => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes( - &decoded_buffer, - )?))), - _ => unreachable!("Cannot negotiate an unknown version"), + match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::Status( + StatusMessage::from_ssz_bytes(&decoded_buffer)?, + ))), }, - RPC_GOODBYE => { + Protocol::Goodbye => { Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")) } - RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BlocksByRange(Box::new( + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new( SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?, 
)))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new( SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?, )))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_PING => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::Pong(Ping { + Protocol::Ping => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(&decoded_buffer)?, }))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_META_DATA => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes( + Protocol::MetaData => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes( &decoded_buffer, )?))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - _ => unreachable!("Cannot negotiate an unknown protocol"), } } Err(e) => match e.kind() { diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index fbfecaad2d..c9e86d3ecd 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -1,6 +1,7 @@ //! Available RPC methods types and ids. use crate::types::EnrBitfield; +use serde::Serialize; use ssz_derive::{Decode, Encode}; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -37,7 +38,8 @@ pub struct Ping { } /// The METADATA response structure. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] +#[derive(Encode, Decode, Clone, Debug, PartialEq, Serialize)] +#[serde(bound = "T: EthSpec")] pub struct MetaData { /// A sequential counter indicating when data gets modified. 
pub seq_number: u64, diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index 058ea78e75..76567cf466 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -34,18 +34,68 @@ const TTFB_TIMEOUT: u64 = 5; const REQUEST_TIMEOUT: u64 = 15; /// Protocol names to be used. -/// The Status protocol name. -pub const RPC_STATUS: &str = "status"; -/// The Goodbye protocol name. -pub const RPC_GOODBYE: &str = "goodbye"; -/// The `BlocksByRange` protocol name. -pub const RPC_BLOCKS_BY_RANGE: &str = "beacon_blocks_by_range"; -/// The `BlocksByRoot` protocol name. -pub const RPC_BLOCKS_BY_ROOT: &str = "beacon_blocks_by_root"; -/// The `Ping` protocol name. -pub const RPC_PING: &str = "ping"; -/// The `MetaData` protocol name. -pub const RPC_META_DATA: &str = "metadata"; +#[derive(Debug, Clone)] +pub enum Protocol { + /// The Status protocol name. + Status, + /// The Goodbye protocol name. + Goodbye, + /// The `BlocksByRange` protocol name. + BlocksByRange, + /// The `BlocksByRoot` protocol name. + BlocksByRoot, + /// The `Ping` protocol name. + Ping, + /// The `MetaData` protocol name. + MetaData, +} + +/// RPC Versions +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Version { + /// Version 1 of RPC + V1, +} + +/// RPC Encondings supported. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Encoding { + SSZ, + SSZSnappy, +} + +impl std::fmt::Display for Protocol { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let repr = match self { + Protocol::Status => "status", + Protocol::Goodbye => "goodbye", + Protocol::BlocksByRange => "beacon_blocks_by_range", + Protocol::BlocksByRoot => "beacon_blocks_by_root", + Protocol::Ping => "ping", + Protocol::MetaData => "metadata", + }; + f.write_str(repr) + } +} + +impl std::fmt::Display for Encoding { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let repr = match self { + Encoding::SSZ => "ssz", + Encoding::SSZSnappy => "ssz_snappy", + }; + f.write_str(repr) + } +} + +impl std::fmt::Display for Version { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let repr = match self { + Version::V1 => "1", + }; + f.write_str(repr) + } +} #[derive(Debug, Clone)] pub struct RPCProtocol { @@ -59,18 +109,18 @@ impl UpgradeInfo for RPCProtocol { /// The list of supported RPC protocols for Lighthouse. 
fn protocol_info(&self) -> Self::InfoIter { vec![ - ProtocolId::new(RPC_STATUS, "1", "ssz_snappy"), - ProtocolId::new(RPC_STATUS, "1", "ssz"), - ProtocolId::new(RPC_GOODBYE, "1", "ssz_snappy"), - ProtocolId::new(RPC_GOODBYE, "1", "ssz"), - ProtocolId::new(RPC_BLOCKS_BY_RANGE, "1", "ssz_snappy"), - ProtocolId::new(RPC_BLOCKS_BY_RANGE, "1", "ssz"), - ProtocolId::new(RPC_BLOCKS_BY_ROOT, "1", "ssz_snappy"), - ProtocolId::new(RPC_BLOCKS_BY_ROOT, "1", "ssz"), - ProtocolId::new(RPC_PING, "1", "ssz_snappy"), - ProtocolId::new(RPC_PING, "1", "ssz"), - ProtocolId::new(RPC_META_DATA, "1", "ssz_snappy"), - ProtocolId::new(RPC_META_DATA, "1", "ssz"), + ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZ), ] } } @@ -79,13 +129,13 @@ impl UpgradeInfo for RPCProtocol { #[derive(Clone, Debug)] pub struct ProtocolId { /// The rpc message type/name. - pub message_name: String, + pub message_name: Protocol, /// The version of the RPC. - pub version: String, + pub version: Version, /// The encoding of the RPC. - pub encoding: String, + pub encoding: Encoding, /// The protocol id that is formed from the above fields. protocol_id: String, @@ -93,16 +143,16 @@ pub struct ProtocolId { /// An RPC protocol ID. 
impl ProtocolId { - pub fn new(message_name: &str, version: &str, encoding: &str) -> Self { + pub fn new(message_name: Protocol, version: Version, encoding: Encoding) -> Self { let protocol_id = format!( "{}/{}/{}/{}", PROTOCOL_PREFIX, message_name, version, encoding ); ProtocolId { - message_name: message_name.into(), - version: version.into(), - encoding: encoding.into(), + message_name, + version: version, + encoding, protocol_id, } } @@ -154,13 +204,13 @@ where protocol: ProtocolId, ) -> Self::Future { let protocol_name = protocol.message_name.clone(); - let codec = match protocol.encoding.as_str() { - "ssz_snappy" => { + let codec = match protocol.encoding { + Encoding::SSZSnappy => { let ssz_snappy_codec = BaseInboundCodec::new(SSZSnappyInboundCodec::new(protocol, MAX_RPC_SIZE)); InboundCodec::SSZSnappy(ssz_snappy_codec) } - "ssz" | _ => { + Encoding::SSZ => { let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); InboundCodec::SSZ(ssz_codec) } @@ -171,13 +221,13 @@ where let socket = Framed::new(timed_socket, codec); // MetaData requests should be empty, return the stream - if protocol_name == RPC_META_DATA { - futures::future::Either::A(futures::future::ok(( + match protocol_name { + Protocol::MetaData => futures::future::Either::A(futures::future::ok(( RPCRequest::MetaData(PhantomData), socket, - ))) - } else { - futures::future::Either::B( + ))), + + _ => futures::future::Either::B( socket .into_future() .timeout(Duration::from_secs(REQUEST_TIMEOUT)) @@ -190,7 +240,7 @@ where )), } } as FnAndThen), - ) + ), } } } @@ -226,28 +276,28 @@ impl RPCRequest { match self { // add more protocols when versions/encodings are supported RPCRequest::Status(_) => vec![ - ProtocolId::new(RPC_STATUS, "1", "ssz_snappy"), - ProtocolId::new(RPC_STATUS, "1", "ssz"), + ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZ), ], RPCRequest::Goodbye(_) => vec![ - 
ProtocolId::new(RPC_GOODBYE, "1", "ssz_snappy"), - ProtocolId::new(RPC_GOODBYE, "1", "ssz"), + ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZ), ], RPCRequest::BlocksByRange(_) => vec![ - ProtocolId::new(RPC_BLOCKS_BY_RANGE, "1", "ssz_snappy"), - ProtocolId::new(RPC_BLOCKS_BY_RANGE, "1", "ssz"), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZ), ], RPCRequest::BlocksByRoot(_) => vec![ - ProtocolId::new(RPC_BLOCKS_BY_ROOT, "1", "ssz_snappy"), - ProtocolId::new(RPC_BLOCKS_BY_ROOT, "1", "ssz"), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZ), ], RPCRequest::Ping(_) => vec![ - ProtocolId::new(RPC_PING, "1", "ssz_snappy"), - ProtocolId::new(RPC_PING, "1", "ssz"), + ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZ), ], RPCRequest::MetaData(_) => vec![ - ProtocolId::new(RPC_META_DATA, "1", "ssz_snappy"), - ProtocolId::new(RPC_META_DATA, "1", "ssz"), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZ), ], } } @@ -316,13 +366,13 @@ where socket: upgrade::Negotiated, protocol: Self::Info, ) -> Self::Future { - let codec = match protocol.encoding.as_str() { - "ssz_snappy" => { + let codec = match protocol.encoding { + Encoding::SSZSnappy => { let ssz_snappy_codec = BaseOutboundCodec::new(SSZSnappyOutboundCodec::new(protocol, MAX_RPC_SIZE)); OutboundCodec::SSZSnappy(ssz_snappy_codec) } - "ssz" | _ => { + Encoding::SSZ => { let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, MAX_RPC_SIZE)); OutboundCodec::SSZ(ssz_codec) diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 
49b3909105..9b5e4e473c 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -27,7 +27,7 @@ use types::{EnrForkId, EthSpec}; type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>; type Libp2pBehaviour = Behaviour, TSpec>; -const NETWORK_KEY_FILENAME: &str = "key"; +pub const NETWORK_KEY_FILENAME: &str = "key"; /// The time in milliseconds to wait before banning a peer. This allows for any Goodbye messages to be /// flushed and protocols to be negotiated. const BAN_PEER_WAIT_TIMEOUT: u64 = 200; @@ -138,6 +138,11 @@ impl Service { if let Protocol::Udp(_) = components[1] { continue; } + // inform the peer manager that we are currently dialing this peer + network_globals + .peers + .write() + .dialing_peer(&bootnode_enr.peer_id()); dial_addr(multiaddr); } } diff --git a/beacon_node/eth2-libp2p/src/types/globals.rs b/beacon_node/eth2-libp2p/src/types/globals.rs index 0be9d4cb44..3912bf86c3 100644 --- a/beacon_node/eth2-libp2p/src/types/globals.rs +++ b/beacon_node/eth2-libp2p/src/types/globals.rs @@ -80,7 +80,12 @@ impl NetworkGlobals { /// Returns the number of libp2p connected peers. pub fn connected_peers(&self) -> usize { - self.peers.read().connected_peers().count() + self.peers.read().connected_peer_ids().count() + } + + /// Returns the number of libp2p peers that are either connected or being dialed. + pub fn connected_or_dialing_peers(&self) -> usize { + self.peers.read().connected_or_dialing_peers().count() } /// Returns in the node is syncing. 
diff --git a/beacon_node/eth2-libp2p/src/types/pubsub.rs b/beacon_node/eth2-libp2p/src/types/pubsub.rs index 8ed3e3ce23..279dde28b5 100644 --- a/beacon_node/eth2-libp2p/src/types/pubsub.rs +++ b/beacon_node/eth2-libp2p/src/types/pubsub.rs @@ -172,3 +172,30 @@ impl PubsubMessage { } } } + +impl std::fmt::Display for PubsubMessage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PubsubMessage::BeaconBlock(block) => write!( + f, + "Beacon Block: slot: {}, proposer_index: {}", + block.message.slot, block.message.proposer_index + ), + PubsubMessage::AggregateAndProofAttestation(att) => write!( + f, + "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}", + att.message.aggregate.data.slot, + att.message.aggregate.data.index, + att.message.aggregator_index, + ), + PubsubMessage::Attestation(data) => write!( + f, + "Attestation: subnet_id: {}, attestation_slot: {}, attestation_index: {}", + *data.0, data.1.data.slot, data.1.data.index, + ), + PubsubMessage::VoluntaryExit(_data) => write!(f, "Voluntary Exit"), + PubsubMessage::ProposerSlashing(_data) => write!(f, "Proposer Slashing"), + PubsubMessage::AttesterSlashing(_data) => write!(f, "Attester Slashing"), + } + } +} diff --git a/beacon_node/eth2-libp2p/tests/common/mod.rs b/beacon_node/eth2-libp2p/tests/common/mod.rs index 15918a21a3..45168c2e6d 100644 --- a/beacon_node/eth2-libp2p/tests/common/mod.rs +++ b/beacon_node/eth2-libp2p/tests/common/mod.rs @@ -4,6 +4,7 @@ use eth2_libp2p::Multiaddr; use eth2_libp2p::NetworkConfig; use eth2_libp2p::Service as LibP2PService; use slog::{debug, error, o, Drain}; +use std::net::{TcpListener, UdpSocket}; use std::time::Duration; use types::{EnrForkId, MinimalEthSpec}; @@ -22,6 +23,38 @@ pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { } } +// A bit of hack to find an unused port. 
+/// +/// Does not guarantee that the given port is unused after the function exists, just that it was +/// unused before the function started (i.e., it does not reserve a port). +pub fn unused_port(transport: &str) -> Result { + let local_addr = match transport { + "tcp" => { + let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { + format!("Failed to create TCP listener to find unused port: {:?}", e) + })?; + listener.local_addr().map_err(|e| { + format!( + "Failed to read TCP listener local_addr to find unused port: {:?}", + e + ) + })? + } + "udp" => { + let socket = UdpSocket::bind("127.0.0.1:0") + .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; + socket.local_addr().map_err(|e| { + format!( + "Failed to read UDP socket local_addr to find unused port: {:?}", + e + ) + })? + } + _ => return Err("Invalid transport to find unused port".into()), + }; + Ok(local_addr.port()) +} + pub fn build_config( port: u16, mut boot_nodes: Vec, @@ -45,11 +78,11 @@ pub fn build_config( } pub fn build_libp2p_instance( - port: u16, boot_nodes: Vec, secret_key: Option, log: slog::Logger, ) -> LibP2PService { + let port = unused_port("tcp").unwrap(); let config = build_config(port, boot_nodes, secret_key); // launch libp2p service LibP2PService::new(&config, EnrForkId::default(), log.clone()) @@ -66,14 +99,9 @@ pub fn get_enr(node: &LibP2PService) -> Enr { // Returns `n` libp2p peers in fully connected topology. 
#[allow(dead_code)] -pub fn build_full_mesh( - log: slog::Logger, - n: usize, - start_port: Option, -) -> Vec> { - let base_port = start_port.unwrap_or(10000); - let mut nodes: Vec> = (base_port..base_port + n as u16) - .map(|p| build_libp2p_instance(p, vec![], None, log.clone())) +pub fn build_full_mesh(log: slog::Logger, n: usize) -> Vec> { + let mut nodes: Vec> = (0..n) + .map(|_| build_libp2p_instance(vec![], None, log.clone())) .collect(); let multiaddrs: Vec = nodes .iter() @@ -96,15 +124,12 @@ pub fn build_full_mesh( // Constructs a pair of nodes with seperate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. #[allow(dead_code)] -pub fn build_node_pair( - log: &slog::Logger, - start_port: u16, -) -> (LibP2PService, LibP2PService) { +pub fn build_node_pair(log: &slog::Logger) -> (LibP2PService, LibP2PService) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); - let mut sender = build_libp2p_instance(start_port, vec![], None, sender_log); - let receiver = build_libp2p_instance(start_port + 1, vec![], None, receiver_log); + let mut sender = build_libp2p_instance(vec![], None, sender_log); + let receiver = build_libp2p_instance(vec![], None, receiver_log); let receiver_multiaddr = receiver.swarm.discovery().local_enr().clone().multiaddr()[1].clone(); match libp2p::Swarm::dial_addr(&mut sender.swarm, receiver_multiaddr) { @@ -116,10 +141,9 @@ pub fn build_node_pair( // Returns `n` peers in a linear topology #[allow(dead_code)] -pub fn build_linear(log: slog::Logger, n: usize, start_port: Option) -> Vec> { - let base_port = start_port.unwrap_or(9000); - let mut nodes: Vec> = (base_port..base_port + n as u16) - .map(|p| build_libp2p_instance(p, vec![], None, log.clone())) +pub fn build_linear(log: slog::Logger, n: usize) -> Vec> { + let mut nodes: Vec> = (0..n) + .map(|_| build_libp2p_instance(vec![], None, log.clone())) .collect(); let multiaddrs: Vec = nodes .iter() diff --git 
a/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs b/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs index 8268fc5d87..aac5387444 100644 --- a/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs +++ b/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs @@ -25,7 +25,7 @@ fn test_gossipsub_forward() { let log = common::build_log(Level::Info, false); let num_nodes = 20; - let mut nodes = common::build_linear(log.clone(), num_nodes, Some(19000)); + let mut nodes = common::build_linear(log.clone(), num_nodes); let mut received_count = 0; let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); @@ -98,7 +98,7 @@ fn test_gossipsub_full_mesh_publish() { // as nodes may get pruned out of the mesh before the gossipsub message // is published to them. let num_nodes = 12; - let mut nodes = common::build_full_mesh(log, num_nodes, Some(11320)); + let mut nodes = common::build_full_mesh(log, num_nodes); let mut publishing_node = nodes.pop().unwrap(); let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); diff --git a/beacon_node/eth2-libp2p/tests/noise.rs b/beacon_node/eth2-libp2p/tests/noise.rs index ac29f3959b..236150b632 100644 --- a/beacon_node/eth2-libp2p/tests/noise.rs +++ b/beacon_node/eth2-libp2p/tests/noise.rs @@ -125,12 +125,14 @@ fn test_secio_noise_fallback() { let log = common::build_log(log_level, enable_logging); - let noisy_config = common::build_config(56010, vec![], None); + let port = common::unused_port("tcp").unwrap(); + let noisy_config = common::build_config(port, vec![], None); let mut noisy_node = Service::new(&noisy_config, EnrForkId::default(), log.clone()) .expect("should build a libp2p instance") .1; - let secio_config = common::build_config(56011, vec![common::get_enr(&noisy_node)], None); + let port = common::unused_port("tcp").unwrap(); + let secio_config = common::build_config(port, vec![common::get_enr(&noisy_node)], None); // Building a custom Libp2pService from outside the crate isn't possible because of // 
private fields in the Libp2pService struct. A swarm is good enough for testing diff --git a/beacon_node/eth2-libp2p/tests/rpc_tests.rs b/beacon_node/eth2-libp2p/tests/rpc_tests.rs index 016e3ab1d4..74a0a7a1b7 100644 --- a/beacon_node/eth2-libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2-libp2p/tests/rpc_tests.rs @@ -25,7 +25,7 @@ fn test_status_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log, 10500); + let (mut sender, mut receiver) = common::build_node_pair(&log); // Dummy STATUS RPC message let rpc_request = RPCRequest::Status(StatusMessage { @@ -140,7 +140,7 @@ fn test_blocks_by_range_chunked_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log, 10505); + let (mut sender, mut receiver) = common::build_node_pair(&log); // BlocksByRange Request let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest { @@ -275,7 +275,7 @@ fn test_blocks_by_range_single_empty_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log, 10510); + let (mut sender, mut receiver) = common::build_node_pair(&log); // BlocksByRange Request let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest { @@ -411,7 +411,7 @@ fn test_blocks_by_root_chunked_rpc() { let spec = E::default_spec(); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log, 10515); + let (mut sender, mut receiver) = common::build_node_pair(&log); // BlocksByRoot Request let rpc_request = RPCRequest::BlocksByRoot(BlocksByRootRequest { @@ -539,7 +539,7 @@ fn test_goodbye_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log, 10520); + let (mut sender, mut receiver) = 
common::build_node_pair(&log); // Goodbye Request let rpc_request = RPCRequest::Goodbye(GoodbyeReason::ClientShutdown); diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index b6a94029bf..53fa26260a 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -346,7 +346,8 @@ impl Eth1GenesisService { .map_err(|e| format!("Error whilst processing deposit: {:?}", e)) })?; - process_activations(&mut local_state, spec); + process_activations(&mut local_state, spec) + .map_err(|e| format!("Error whilst processing activations: {:?}", e))?; let is_valid = is_valid_genesis_state(&local_state, spec); trace!( diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/attestation_service/mod.rs index e4c3195129..5c21d45a81 100644 --- a/beacon_node/network/src/attestation_service/mod.rs +++ b/beacon_node/network/src/attestation_service/mod.rs @@ -190,10 +190,24 @@ impl AttestationService { pub fn should_process_attestation( &mut self, _message_id: &MessageId, - _peer_id: &PeerId, - _subnet: &SubnetId, - _attestation: &Attestation, + peer_id: &PeerId, + subnet: &SubnetId, + attestation: &Attestation, ) -> bool { + // verify the attestation is on the correct subnet + let expected_subnet = match attestation.subnet_id(&self.beacon_chain.spec) { + Ok(v) => v, + Err(e) => { + warn!(self.log, "Could not obtain attestation subnet_id"; "error" => format!("{:?}", e)); + return false; + } + }; + + if expected_subnet != *subnet { + warn!(self.log, "Received an attestation on the wrong subnet"; "subnet_received" => format!("{:?}", subnet), "subnet_expected" => format!("{:?}",expected_subnet), "peer_id" => format!("{}", peer_id)); + return false; + } + // TODO: Correctly handle validation aggregator checks true } diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 073b38cc2f..466c70363f 100644 --- 
a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -8,7 +8,7 @@ pub mod processor; use crate::error; use crate::service::NetworkMessage; -use beacon_chain::{AttestationType, BeaconChain, BeaconChainTypes}; +use beacon_chain::{AttestationType, BeaconChain, BeaconChainTypes, BlockError}; use eth2_libp2p::{ rpc::{RPCError, RPCErrorResponse, RPCRequest, RPCResponse, RequestId, ResponseTermination}, MessageId, NetworkGlobals, PeerId, PubsubMessage, RPCEvent, @@ -60,7 +60,7 @@ impl Router { executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result>> { - let message_handler_log = log.new(o!("service"=> "msg_handler")); + let message_handler_log = log.new(o!("service"=> "router")); trace!(message_handler_log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); @@ -262,16 +262,20 @@ impl Router { AttestationType::Unaggregated { should_store: true }, ); } - PubsubMessage::BeaconBlock(block) => match self.processor.should_forward_block(block) { - Ok(verified_block) => { - self.propagate_message(id, peer_id.clone()); - self.processor.on_block_gossip(peer_id, verified_block); - } - Err(e) => { - warn!(self.log, "Could not verify block for gossip"; + PubsubMessage::BeaconBlock(block) => { + match self.processor.should_forward_block(&peer_id, block) { + Ok(verified_block) => { + self.propagate_message(id, peer_id.clone()); + self.processor.on_block_gossip(peer_id, verified_block); + } + Err(BlockError::ParentUnknown { .. 
}) => {} // performing a parent lookup + Err(e) => { + // performing a parent lookup + warn!(self.log, "Could not verify block for gossip"; "error" => format!("{:?}", e)); + } } - }, + } PubsubMessage::VoluntaryExit(_exit) => { // TODO: Apply more sophisticated validation self.propagate_message(id, peer_id.clone()); diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index f113adf70a..d309ee9bb1 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -1,5 +1,5 @@ use crate::service::NetworkMessage; -use crate::sync::SyncMessage; +use crate::sync::{PeerSyncInfo, SyncMessage}; use beacon_chain::{ AttestationProcessingOutcome, AttestationType, BeaconChain, BeaconChainTypes, BlockError, BlockProcessingOutcome, GossipVerifiedBlock, @@ -13,7 +13,8 @@ use std::sync::Arc; use store::Store; use tokio::sync::{mpsc, oneshot}; use types::{ - Attestation, Epoch, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, Slot, + Attestation, ChainSpec, Epoch, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, + Slot, }; //TODO: Rate limit requests @@ -22,34 +23,6 @@ use types::{ /// Otherwise we queue it. pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; -/// Keeps track of syncing information for known connected peers. 
-#[derive(Clone, Copy, Debug)] -pub struct PeerSyncInfo { - fork_digest: [u8; 4], - pub finalized_root: Hash256, - pub finalized_epoch: Epoch, - pub head_root: Hash256, - pub head_slot: Slot, -} - -impl From for PeerSyncInfo { - fn from(status: StatusMessage) -> PeerSyncInfo { - PeerSyncInfo { - fork_digest: status.fork_digest, - finalized_root: status.finalized_root, - finalized_epoch: status.finalized_epoch, - head_root: status.head_root, - head_slot: status.head_slot, - } - } -} - -impl PeerSyncInfo { - pub fn from_chain(chain: &Arc>) -> Option { - Some(Self::from(status_message(chain)?)) - } -} - /// Processes validated messages from the network. It relays necessary data to the syncing thread /// and processes blocks from the pubsub network. pub struct Processor { @@ -172,7 +145,7 @@ impl Processor { /// Process a `Status` response from a peer. pub fn on_status_response(&mut self, peer_id: PeerId, status: StatusMessage) { - trace!( + debug!( self.log, "Received Status Response"; "peer" => format!("{:?}", peer_id), @@ -489,9 +462,18 @@ impl Processor { /// across the network. pub fn should_forward_block( &mut self, + peer_id: &PeerId, block: Box>, ) -> Result, BlockError> { - self.chain.verify_block_for_gossip(*block) + let result = self.chain.verify_block_for_gossip(*block.clone()); + + if let Err(BlockError::ParentUnknown(block_hash)) = result { + // if we don't know the parent, start a parent lookup + // TODO: Modify the return to avoid the block clone. + debug!(self.log, "Unknown block received. Starting a parent lookup"; "block_slot" => block.message.slot, "block_hash" => format!("{}", block_hash)); + self.send_to_sync(SyncMessage::UnknownBlock(peer_id.clone(), block)); + } + result } /// Process a gossip message declaring a new block. @@ -533,7 +515,8 @@ impl Processor { } BlockProcessingOutcome::ParentUnknown { .. 
} => { // Inform the sync manager to find parents for this block - debug!(self.log, "Block with unknown parent received"; + // This should not occur. It should be checked by `should_forward_block` + error!(self.log, "Block with unknown parent attempted to be processed"; "peer_id" => format!("{:?}",peer_id)); self.send_to_sync(SyncMessage::UnknownBlock(peer_id, block)); } @@ -644,10 +627,13 @@ pub(crate) fn status_message( beacon_chain: &BeaconChain, ) -> Option { let head_info = beacon_chain.head_info().ok()?; + let genesis_validators_root = beacon_chain.genesis_validators_root; + + let fork_digest = + ChainSpec::compute_fork_digest(head_info.fork.current_version, genesis_validators_root); - // TODO: Update fork digest calculation Some(StatusMessage { - fork_digest: head_info.fork.current_version, + fork_digest, finalized_root: head_info.finalized_checkpoint.root, finalized_epoch: head_info.finalized_checkpoint.epoch, head_root: head_info.block_root, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a88e743a95..5eff3654e8 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -306,7 +306,6 @@ fn spawn_service( .map_err(|_| { debug!(log, "Failed to send peer disconnect to router");})?; } BehaviourEvent::StatusPeer(peer_id) => { - debug!(log, "Re-status peer"; "peer_id" => format!("{}", peer_id)); service.router_send .try_send(RouterMessage::StatusPeer(peer_id)) .map_err(|_| { debug!(log, "Failed to send re-status peer to router");})?; diff --git a/beacon_node/network/src/sync/block_processor.rs b/beacon_node/network/src/sync/block_processor.rs index 77d77cfe07..8c53869e40 100644 --- a/beacon_node/network/src/sync/block_processor.rs +++ b/beacon_node/network/src/sync/block_processor.rs @@ -1,9 +1,9 @@ use crate::router::processor::FUTURE_SLOT_TOLERANCE; use crate::sync::manager::SyncMessage; -use crate::sync::range_sync::BatchId; +use crate::sync::range_sync::{BatchId, ChainId}; use 
beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, ChainSegmentResult}; use eth2_libp2p::PeerId; -use slog::{crit, debug, error, trace, warn}; +use slog::{debug, error, trace, warn}; use std::sync::{Arc, Weak}; use tokio::sync::mpsc; use types::SignedBeaconBlock; @@ -12,7 +12,7 @@ use types::SignedBeaconBlock; #[derive(Clone, Debug, PartialEq)] pub enum ProcessId { /// Processing Id of a range syncing batch. - RangeBatchId(BatchId), + RangeBatchId(ChainId, BatchId), /// Processing Id of the parent lookup of a block ParentLookup(PeerId), } @@ -40,7 +40,7 @@ pub fn spawn_block_processor( std::thread::spawn(move || { match process_id { // this a request from the range sync - ProcessId::RangeBatchId(batch_id) => { + ProcessId::RangeBatchId(chain_id, batch_id) => { debug!(log, "Processing batch"; "id" => *batch_id, "blocks" => downloaded_blocks.len()); let result = match process_blocks(chain, downloaded_blocks.iter(), &log) { (_, Ok(_)) => { @@ -59,8 +59,9 @@ pub fn spawn_block_processor( }; let msg = SyncMessage::BatchProcessed { - batch_id: batch_id, - downloaded_blocks: downloaded_blocks, + chain_id, + batch_id, + downloaded_blocks, result, }; sync_send.try_send(msg).unwrap_or_else(|_| { @@ -102,8 +103,6 @@ pub fn spawn_block_processor( } /// Helper function to process blocks batches which only consumes the chain and blocks to process. -// TODO: Verify the fork choice logic and the correct error handling from `process_chain_segment`. -// Ensure fork-choice doesn't need to be run during the failed errors. fn process_blocks< 'a, T: BeaconChainTypes, @@ -125,7 +124,6 @@ fn process_blocks< "count" => imported_blocks, ); // Batch completed successfully with at least one block, run fork choice. 
- // TODO: Verify this logic run_fork_choice(chain, log); } @@ -135,8 +133,10 @@ fn process_blocks< imported_blocks, error, } => { - let r = handle_failed_chain_segment(chain, imported_blocks, error, log); - + let r = handle_failed_chain_segment(error, log); + if imported_blocks > 0 { + run_fork_choice(chain, log); + } (imported_blocks, r) } }; @@ -166,31 +166,16 @@ fn run_fork_choice(chain: Arc>, log: &slog:: } /// Helper function to handle a `BlockError` from `process_chain_segment` -fn handle_failed_chain_segment( - chain: Arc>, - imported_blocks: usize, - error: BlockError, - log: &slog::Logger, -) -> Result<(), String> { +fn handle_failed_chain_segment(error: BlockError, log: &slog::Logger) -> Result<(), String> { match error { BlockError::ParentUnknown(parent) => { // blocks should be sequential and all parents should exist - warn!( - log, "Parent block is unknown"; - "parent_root" => format!("{}", parent), - ); - - // NOTE: logic from master. TODO: check - if imported_blocks > 0 { - run_fork_choice(chain, log); - } Err(format!("Block has an unknown parent: {}", parent)) } BlockError::BlockIsAlreadyKnown => { - // TODO: Check handling of this - crit!(log, "Unknown handling of block error"); - + // This can happen for many reasons. Head sync's can download multiples and parent + // lookups can download blocks before range sync Ok(()) } BlockError::FutureSlot { @@ -206,10 +191,6 @@ fn handle_failed_chain_segment( "block_slot" => block_slot, "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, ); - // NOTE: logic from master. TODO: check - if imported_blocks > 0 { - run_fork_choice(chain, log); - } } else { // The block is in the future, but not too far. debug!( @@ -226,26 +207,15 @@ fn handle_failed_chain_segment( )) } BlockError::WouldRevertFinalizedSlot { .. } => { - //TODO: Check handling. Run fork choice? 
- debug!( - log, "Finalized or earlier block processed"; - ); - // block reached our finalized slot or was earlier, move to the next block - // TODO: How does this logic happen for the chain segment. We would want to - // continue processing in this case. + debug!( log, "Finalized or earlier block processed";); Ok(()) } BlockError::GenesisBlock => { - debug!( - log, "Genesis block was processed"; - ); - // TODO: Similarly here. Prefer to continue processing. - + debug!(log, "Genesis block was processed"); Ok(()) } BlockError::BeaconChainError(e) => { - // TODO: Run fork choice? warn!( log, "BlockProcessingFailure"; "msg" => "unexpected condition in processing block.", @@ -255,11 +225,6 @@ fn handle_failed_chain_segment( Err(format!("Internal error whilst processing block: {:?}", e)) } other => { - // TODO: Run fork choice? - // NOTE: logic from master. TODO: check - if imported_blocks > 0 { - run_fork_choice(chain, log); - } warn!( log, "Invalid block received"; "msg" => "peer sent invalid block", diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 5b9b5f4e50..47fa65d881 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -35,16 +35,15 @@ use super::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId}; use super::network_context::SyncNetworkContext; -use super::range_sync::{BatchId, RangeSync}; -use crate::router::processor::PeerSyncInfo; +use super::peer_sync_info::{PeerSyncInfo, PeerSyncType}; +use super::range_sync::{BatchId, ChainId, RangeSync}; use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::{methods::*, RequestId}; use eth2_libp2p::types::NetworkGlobals; -use eth2_libp2p::{PeerId, PeerSyncStatus}; +use eth2_libp2p::PeerId; use fnv::FnvHashMap; use futures::prelude::*; -use rand::seq::SliceRandom; use slog::{crit, debug, error, info, trace, warn, Logger}; use 
smallvec::SmallVec; use std::boxed::Box; @@ -56,9 +55,9 @@ use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a /// fully sync'd peer. -const SLOT_IMPORT_TOLERANCE: usize = 20; +pub const SLOT_IMPORT_TOLERANCE: usize = 20; /// How many attempts we try to find a parent of a block before we give up trying . -const PARENT_FAIL_TOLERANCE: usize = 3; +const PARENT_FAIL_TOLERANCE: usize = 5; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth /// is further back than the most recent head slot. @@ -99,6 +98,7 @@ pub enum SyncMessage { /// A batch has been processed by the block processor thread. BatchProcessed { + chain_id: ChainId, batch_id: BatchId, downloaded_blocks: Vec>, result: BatchProcessResult, @@ -152,7 +152,7 @@ pub struct SyncManager { /// received or not. /// /// The flag allows us to determine if the peer returned data or sent us nothing. - single_block_lookups: FnvHashMap, + single_block_lookups: FnvHashMap, /// The logger for the import manager. log: Logger, @@ -161,6 +161,23 @@ pub struct SyncManager { sync_send: mpsc::UnboundedSender>, } +/// Object representing a single block lookup request. +struct SingleBlockRequest { + /// The hash of the requested block. + pub hash: Hash256, + /// Whether a block was received from this request, or the peer returned an empty response. + pub block_returned: bool, +} + +impl SingleBlockRequest { + pub fn new(hash: Hash256) -> Self { + Self { + hash, + block_returned: false, + } + } +} + /// Spawns a new `SyncManager` thread which has a weak reference to underlying beacon /// chain. 
This allows the chain to be /// dropped during the syncing process which will gracefully end the `SyncManager`. @@ -224,7 +241,7 @@ impl SyncManager { /// ours that we consider it fully sync'd with respect to our current chain. fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) { // ensure the beacon chain still exists - let local = match PeerSyncInfo::from_chain(&self.chain) { + let local_peer_info = match PeerSyncInfo::from_chain(&self.chain) { Some(local) => local, None => { return error!( @@ -235,31 +252,45 @@ impl SyncManager { } }; - // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch/range sync, - // consider it a fully-sync'd peer. - if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { - trace!(self.log, "Peer synced to our head found"; - "peer" => format!("{:?}", peer_id), - "peer_head_slot" => remote.head_slot, - "local_head_slot" => local.head_slot, - ); - self.synced_peer(&peer_id, remote.head_slot); - // notify the range sync that a peer has been added - self.range_sync.fully_synced_peer_found(); - return; - } + match local_peer_info.peer_sync_type(&remote) { + PeerSyncType::FullySynced => { + trace!(self.log, "Peer synced to our head found"; + "peer" => format!("{:?}", peer_id), + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local_peer_info.head_slot, + ); + self.synced_peer(&peer_id, remote); + // notify the range sync that a peer has been added + self.range_sync.fully_synced_peer_found(); + } + PeerSyncType::Advanced => { + trace!(self.log, "Useful peer for sync found"; + "peer" => format!("{:?}", peer_id), + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local_peer_info.head_slot, + "peer_finalized_epoch" => remote.finalized_epoch, + "local_finalized_epoch" => local_peer_info.finalized_epoch, + ); - // Check if the peer is significantly behind us. If within `SLOT_IMPORT_TOLERANCE` - // treat them as a fully synced peer. 
If not, ignore them in the sync process - if local.head_slot.sub(remote.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { - self.synced_peer(&peer_id, remote.head_slot); - } else { - self.behind_peer(&peer_id, remote.head_slot); - return; - } + // if we don't know about the peer's chain add it to the range sync, otherwise + // consider it synced (it can be the case that the peer seems ahead of us, but we + // reject its chain). - // Add the peer to our RangeSync - self.range_sync.add_peer(&mut self.network, peer_id, remote); + if self.chain.fork_choice.contains_block(&remote.head_root) { + self.synced_peer(&peer_id, remote); + // notify the range sync that a peer has been added + self.range_sync.fully_synced_peer_found(); + } else { + // Add the peer to our RangeSync + self.range_sync + .add_peer(&mut self.network, peer_id.clone(), remote); + self.advanced_peer(&peer_id, remote); + } + } + PeerSyncType::Behind => { + self.behind_peer(&peer_id, remote); + } + } } /// The response to a `BlocksByRoot` request. 
@@ -280,12 +311,10 @@ impl SyncManager { // check if this is a single block lookup - i.e we were searching for a specific hash let mut single_block_hash = None; - if let Some((block_hash, data_received)) = - self.single_block_lookups.get_mut(&request_id) - { + if let Some(block_request) = self.single_block_lookups.get_mut(&request_id) { // update the state of the lookup indicating a block was received from the peer - *data_received = true; - single_block_hash = Some(block_hash.clone()); + block_request.block_returned = true; + single_block_hash = Some(block_request.hash.clone()); } if let Some(block_hash) = single_block_hash { self.single_block_lookup_response(peer_id, block, block_hash); @@ -316,12 +345,10 @@ impl SyncManager { // this is a stream termination // stream termination for a single block lookup, remove the key - if let Some((block_hash, data_received)) = - self.single_block_lookups.remove(&request_id) - { + if let Some(single_block_request) = self.single_block_lookups.remove(&request_id) { // the peer didn't respond with a block that it referenced - if !data_received { - warn!(self.log, "Peer didn't respond with a block it referenced"; "referenced_block_hash" => format!("{}", block_hash), "peer_id" => format!("{}", peer_id)); + if !single_block_request.block_returned { + warn!(self.log, "Peer didn't respond with a block it referenced"; "referenced_block_hash" => format!("{}", single_block_request.hash), "peer_id" => format!("{}", peer_id)); self.network.downvote_peer(peer_id); } return; @@ -410,9 +437,24 @@ impl SyncManager { /// A block has been sent to us that has an unknown parent. This begins a parent lookup search /// to find the parent or chain of parents that match our current chain. 
fn add_unknown_block(&mut self, peer_id: PeerId, block: SignedBeaconBlock) { - // If we are not synced ignore the block + // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore if !self.network_globals.sync_state.read().is_synced() { - return; + let head_slot = self + .chain + .head_info() + .map(|info| info.slot) + .unwrap_or_else(|_| Slot::from(0u64)); + let unknown_block_slot = block.message.slot; + + // if the block is far in the future, ignore it. If its within the slot tolerance of + // our current head, regardless of the syncing state, fetch it. + if (head_slot >= unknown_block_slot + && head_slot.sub(unknown_block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + || (head_slot < unknown_block_slot + && unknown_block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + { + return; + } } // Make sure this block is not already being searched for @@ -446,13 +488,23 @@ impl SyncManager { return; } + // Do not re-request a block that is already being requested + if self + .single_block_lookups + .values() + .find(|single_block_request| single_block_request.hash == block_hash) + .is_some() + { + return; + } + let request = BlocksByRootRequest { block_roots: vec![block_hash], }; if let Ok(request_id) = self.network.blocks_by_root_request(peer_id, request) { self.single_block_lookups - .insert(request_id, (block_hash, false)); + .insert(request_id, SingleBlockRequest::new(block_hash)); } } @@ -487,17 +539,14 @@ impl SyncManager { self.update_sync_state(); } + // TODO: Group these functions into one. /// Updates the syncing state of a peer to be synced. - fn synced_peer(&mut self, peer_id: &PeerId, status_head_slot: Slot) { + fn synced_peer(&mut self, peer_id: &PeerId, sync_info: PeerSyncInfo) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { - match peer_info.sync_status { - PeerSyncStatus::Synced { .. 
} => { - peer_info.sync_status = PeerSyncStatus::Synced { status_head_slot } - } // just update block - PeerSyncStatus::Behind { .. } | PeerSyncStatus::Unknown => { - peer_info.sync_status = PeerSyncStatus::Synced { status_head_slot }; - debug!(self.log, "Peer transitioned to synced status"; "peer_id" => format!("{}", peer_id)); - } + let head_slot = sync_info.head_slot; + let finalized_epoch = sync_info.finalized_epoch; + if peer_info.sync_status.update_synced(sync_info.into()) { + debug!(self.log, "Peer transitioned sync state"; "new_state" => "synced", "peer_id" => format!("{}", peer_id), "head_slot" => head_slot, "finalized_epoch" => finalized_epoch); } } else { crit!(self.log, "Status'd peer is unknown"; "peer_id" => format!("{}", peer_id)); @@ -506,21 +555,26 @@ impl SyncManager { } /// Updates the syncing state of a peer to be behind. - fn behind_peer(&mut self, peer_id: &PeerId, status_head_slot: Slot) { + fn advanced_peer(&mut self, peer_id: &PeerId, sync_info: PeerSyncInfo) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { - match peer_info.sync_status { - PeerSyncStatus::Synced { .. } => { - debug!(self.log, "Peer transitioned to from synced state to behind"; "peer_id" => format!("{}", peer_id), "head_slot" => status_head_slot); - peer_info.sync_status = PeerSyncStatus::Behind { status_head_slot } - } - PeerSyncStatus::Behind { .. 
} => { - peer_info.sync_status = PeerSyncStatus::Behind { status_head_slot } - } // just update + let head_slot = sync_info.head_slot; + let finalized_epoch = sync_info.finalized_epoch; + if peer_info.sync_status.update_advanced(sync_info.into()) { + debug!(self.log, "Peer transitioned sync state"; "new_state" => "advanced", "peer_id" => format!("{}", peer_id), "head_slot" => head_slot, "finalized_epoch" => finalized_epoch); + } + } else { + crit!(self.log, "Status'd peer is unknown"; "peer_id" => format!("{}", peer_id)); + } + self.update_sync_state(); + } - PeerSyncStatus::Unknown => { - debug!(self.log, "Peer transitioned to behind sync status"; "peer_id" => format!("{}", peer_id), "head_slot" => status_head_slot); - peer_info.sync_status = PeerSyncStatus::Behind { status_head_slot } - } + /// Updates the syncing state of a peer to be behind. + fn behind_peer(&mut self, peer_id: &PeerId, sync_info: PeerSyncInfo) { + if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { + let head_slot = sync_info.head_slot; + let finalized_epoch = sync_info.finalized_epoch; + if peer_info.sync_status.update_behind(sync_info.into()) { + debug!(self.log, "Peer transitioned sync state"; "new_state" => "behind", "peer_id" => format!("{}", peer_id), "head_slot" => head_slot, "finalized_epoch" => finalized_epoch); } } else { crit!(self.log, "Status'd peer is unknown"; "peer_id" => format!("{}", peer_id)); @@ -641,9 +695,16 @@ impl SyncManager { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE || parent_request.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { + let error = if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { + "too many failed attempts" + } else { + "reached maximum lookup-depth" + }; + debug!(self.log, "Parent import failed"; "block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()), - "ancestors_found" => parent_request.downloaded_blocks.len() + "ancestors_found" => 
parent_request.downloaded_blocks.len(), + "reason" => error ); return; // drop the request } @@ -658,20 +719,10 @@ impl SyncManager { let request = BlocksByRootRequest { block_roots: vec![parent_hash], }; - // select a random fully synced peer to attempt to download the parent block - let available_peers = self - .network_globals - .peers - .read() - .synced_peers() - .cloned() - .collect::>(); - let peer_id = if let Some(peer_id) = available_peers.choose(&mut rand::thread_rng()) { - (*peer_id).clone() - } else { - // there were no peers to choose from. We drop the lookup request - return; - }; + + // We continue to search for the chain of blocks from the same peer. Other peers are not + // guaranteed to have this chain of blocks. + let peer_id = parent_request.last_submitted_peer.clone(); if let Ok(request_id) = self.network.blocks_by_root_request(peer_id, request) { // if the request was successful add the queue back into self @@ -725,12 +776,14 @@ impl Future for SyncManager { self.inject_error(peer_id, request_id); } SyncMessage::BatchProcessed { + chain_id, batch_id, downloaded_blocks, result, } => { self.range_sync.handle_block_process_result( &mut self.network, + chain_id, batch_id, downloaded_blocks, result, diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 26274ef97f..2e68dc6e81 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -4,6 +4,8 @@ mod block_processor; pub mod manager; mod network_context; +mod peer_sync_info; mod range_sync; pub use manager::SyncMessage; +pub use peer_sync_info::PeerSyncInfo; diff --git a/beacon_node/network/src/sync/peer_sync_info.rs b/beacon_node/network/src/sync/peer_sync_info.rs new file mode 100644 index 0000000000..f03d1a1dfb --- /dev/null +++ b/beacon_node/network/src/sync/peer_sync_info.rs @@ -0,0 +1,113 @@ +use super::manager::SLOT_IMPORT_TOLERANCE; +use crate::router::processor::status_message; +use beacon_chain::{BeaconChain, 
BeaconChainTypes}; +use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::SyncInfo; +use std::ops::Sub; +use std::sync::Arc; +use types::{Epoch, Hash256, Slot}; + +/// Keeps track of syncing information for known connected peers. +#[derive(Clone, Copy, Debug)] +pub struct PeerSyncInfo { + pub fork_digest: [u8; 4], + pub finalized_root: Hash256, + pub finalized_epoch: Epoch, + pub head_root: Hash256, + pub head_slot: Slot, +} + +/// The type of peer relative to our current state. +pub enum PeerSyncType { + /// The peer is on our chain and is fully synced with respect to our chain. + FullySynced, + /// The peer has a greater knowledge of the chain that us that warrants a full sync. + Advanced, + /// A peer is behind in the sync and not useful to us for downloading blocks. + Behind, +} + +impl From for PeerSyncInfo { + fn from(status: StatusMessage) -> PeerSyncInfo { + PeerSyncInfo { + fork_digest: status.fork_digest, + finalized_root: status.finalized_root, + finalized_epoch: status.finalized_epoch, + head_root: status.head_root, + head_slot: status.head_slot, + } + } +} + +impl Into for PeerSyncInfo { + fn into(self) -> SyncInfo { + SyncInfo { + status_head_slot: self.head_slot, + status_head_root: self.head_root, + status_finalized_epoch: self.finalized_epoch, + status_finalized_root: self.finalized_root, + } + } +} + +impl PeerSyncInfo { + /// Derives the peer sync information from a beacon chain. + pub fn from_chain(chain: &Arc>) -> Option { + Some(Self::from(status_message(chain)?)) + } + + /// Given another peer's `PeerSyncInfo` this will determine how useful that peer is for us in + /// regards to syncing. This returns the peer sync type that can then be handled by the + /// `SyncManager`. 
+ pub fn peer_sync_type(&self, remote_peer_sync_info: &PeerSyncInfo) -> PeerSyncType { + // check if the peer is fully synced with our current chain + if self.is_fully_synced_peer(remote_peer_sync_info) { + PeerSyncType::FullySynced + } + // if not, check if the peer is ahead of our chain + else if self.is_advanced_peer(remote_peer_sync_info) { + PeerSyncType::Advanced + } else { + // the peer must be behind and not useful + PeerSyncType::Behind + } + } + + /// Determines if another peer is fully synced with the current peer. + /// + /// A fully synced peer is a peer whose finalized epoch and hash match our own and their + /// head is within SLOT_IMPORT_TOLERANCE of our own. + /// In this case we ignore any batch/range syncing. + fn is_fully_synced_peer(&self, remote: &PeerSyncInfo) -> bool { + // ensure we are on the same chain, with minor differing heads + if remote.finalized_epoch == self.finalized_epoch + && remote.finalized_root == self.finalized_root + { + // that we are within SLOT_IMPORT_TOLERANCE of our two heads + if (self.head_slot >= remote.head_slot + && self.head_slot.sub(remote.head_slot).as_usize() <= SLOT_IMPORT_TOLERANCE) + || (self.head_slot < remote.head_slot) + && remote.head_slot.sub(self.head_slot).as_usize() <= SLOT_IMPORT_TOLERANCE + { + return true; + } + } + false + } + + /// Determines if a peer has more knowledge about the current chain than we do. + /// + /// There are two conditions here. + /// 1) The peer could have a head slot that is greater + /// than SLOT_IMPORT_TOLERANCE of our current head. + /// 2) The peer has a greater finalized slot/epoch than our own. 
+ fn is_advanced_peer(&self, remote: &PeerSyncInfo) -> bool { + if remote.head_slot.sub(self.head_slot).as_usize() > SLOT_IMPORT_TOLERANCE + || self.finalized_epoch < remote.finalized_epoch + { + true + } else { + false + } + } +} diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 58df693399..bd8b604e3f 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,4 +1,4 @@ -use super::chain::BLOCKS_PER_BATCH; +use super::chain::EPOCHS_PER_BATCH; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::RequestId; use eth2_libp2p::PeerId; @@ -76,7 +76,10 @@ impl Batch { pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest { BlocksByRangeRequest { start_slot: self.start_slot.into(), - count: std::cmp::min(BLOCKS_PER_BATCH, self.end_slot.sub(self.start_slot).into()), + count: std::cmp::min( + T::slots_per_epoch() * EPOCHS_PER_BATCH, + self.end_slot.sub(self.start_slot).into(), + ), step: 1, } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 0edb431163..424c3a7e88 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -10,14 +10,15 @@ use slog::{crit, debug, warn}; use std::collections::HashSet; use std::sync::Arc; use tokio::sync::mpsc; -use types::{Hash256, SignedBeaconBlock, Slot}; +use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; -/// Blocks are downloaded in batches from peers. This constant specifies how many blocks per batch -/// is requested. There is a timeout for each batch request. If this value is too high, we will -/// downvote peers with poor bandwidth. This can be set arbitrarily high, in which case the +/// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of +/// blocks per batch are requested _at most_. 
A batch may request less blocks to account for +/// already requested slots. There is a timeout for each batch request. If this value is too high, +/// we will downvote peers with poor bandwidth. This can be set arbitrarily high, in which case the /// responder will fill the response up to the max request size, assuming they have the bandwidth /// to do so. -pub const BLOCKS_PER_BATCH: u64 = 64; +pub const EPOCHS_PER_BATCH: u64 = 2; /// The number of times to retry a batch before the chain is considered failed and removed. const MAX_BATCH_RETRIES: u8 = 5; @@ -38,12 +39,18 @@ pub enum ProcessingResult { RemoveChain, } +/// A chain identifier +pub type ChainId = u64; + /// A chain of blocks that need to be downloaded. Peers who claim to contain the target head /// root are grouped into the peer pool and queried for batches when downloading the /// chain. pub struct SyncingChain { + /// A random id used to identify this chain. + id: ChainId, + /// The original start slot when this chain was initialised. - pub start_slot: Slot, + pub start_epoch: Epoch, /// The target head slot. pub target_head_slot: Slot, @@ -74,8 +81,7 @@ pub struct SyncingChain { /// The current state of the chain. pub state: ChainSyncingState, - /// A random id given to a batch process request. This is None if there is no ongoing batch - /// process. + /// The current processing batch, if any. current_processing_batch: Option>, /// A send channel to the sync manager. 
This is given to the batch processor thread to report @@ -99,7 +105,8 @@ pub enum ChainSyncingState { impl SyncingChain { pub fn new( - start_slot: Slot, + id: u64, + start_epoch: Epoch, target_head_slot: Slot, target_head_root: Hash256, peer_id: PeerId, @@ -111,7 +118,8 @@ impl SyncingChain { peer_pool.insert(peer_id); SyncingChain { - start_slot, + id, + start_epoch, target_head_slot, target_head_root, pending_batches: PendingBatches::new(), @@ -130,8 +138,13 @@ impl SyncingChain { /// Returns the latest slot number that has been processed. fn current_processed_slot(&self) -> Slot { - self.start_slot - .saturating_add(self.to_be_processed_id.saturating_sub(1u64) * BLOCKS_PER_BATCH) + self.start_epoch + .start_slot(T::EthSpec::slots_per_epoch()) + .saturating_add( + self.to_be_processed_id.saturating_sub(1u64) + * T::EthSpec::slots_per_epoch() + * EPOCHS_PER_BATCH, + ) } /// A batch of blocks has been received. This function gets run on all chains and should @@ -242,11 +255,11 @@ impl SyncingChain { /// Sends a batch to the batch processor. 
fn process_batch(&mut self, mut batch: Batch) { let downloaded_blocks = std::mem::replace(&mut batch.downloaded_blocks, Vec::new()); - let batch_id = ProcessId::RangeBatchId(batch.id.clone()); + let process_id = ProcessId::RangeBatchId(self.id.clone(), batch.id.clone()); self.current_processing_batch = Some(batch); spawn_block_processor( Arc::downgrade(&self.chain.clone()), - batch_id, + process_id, downloaded_blocks, self.sync_send.clone(), self.log.clone(), @@ -258,26 +271,36 @@ impl SyncingChain { pub fn on_batch_process_result( &mut self, network: &mut SyncNetworkContext, + chain_id: ChainId, batch_id: BatchId, downloaded_blocks: &mut Option>>, result: &BatchProcessResult, ) -> Option { - if let Some(current_batch) = &self.current_processing_batch { - if current_batch.id != batch_id { - // batch process does not belong to this chain + if chain_id != self.id { + // the result does not belong to this chain + return None; + } + match &self.current_processing_batch { + Some(current_batch) if current_batch.id != batch_id => { + debug!(self.log, "Unexpected batch result"; + "chain_id" => self.id, "batch_id" => *batch_id, "expected_batch_id" => *current_batch.id); return None; } - // Continue. This is our processing request - } else { - // not waiting on a processing result - return None; + None => { + debug!(self.log, "Chain was not expecting a batch result"; + "chain_id" => self.id, "batch_id" => *batch_id); + return None; + } + _ => { + // chain_id and batch_id match, continue + } } // claim the result by consuming the option let downloaded_blocks = downloaded_blocks.take().or_else(|| { // if taken by another chain, we are no longer waiting on a result. 
self.current_processing_batch = None; - crit!(self.log, "Processed batch taken by another chain"); + crit!(self.log, "Processed batch taken by another chain"; "chain_id" => self.id); None })?; @@ -289,6 +312,7 @@ impl SyncingChain { // double check batches are processed in order TODO: Remove for prod if batch.id != self.to_be_processed_id { crit!(self.log, "Batch processed out of order"; + "chain_id" => self.id, "processed_batch_id" => *batch.id, "expected_id" => *self.to_be_processed_id); } @@ -330,7 +354,7 @@ impl SyncingChain { } BatchProcessResult::Partial => { warn!(self.log, "Batch processing failed but at least one block was imported"; - "id" => *batch.id, "peer" => format!("{}", batch.current_peer) + "chain_id" => self.id, "id" => *batch.id, "peer" => format!("{}", batch.current_peer) ); // At least one block was successfully verified and imported, so we can be sure all // previous batches are valid and we only need to download the current failed @@ -343,7 +367,8 @@ impl SyncingChain { // that it is likely all peers in this chain are are sending invalid batches // repeatedly and are either malicious or faulty. We drop the chain and // downvote all peers. - warn!(self.log, "Batch failed to download. Dropping chain and downvoting peers"; "id"=> *batch.id); + warn!(self.log, "Batch failed to download. Dropping chain and downvoting peers"; + "chain_id" => self.id, "id"=> *batch.id); for peer_id in self.peer_pool.drain() { network.downvote_peer(peer_id); } @@ -355,7 +380,8 @@ impl SyncingChain { } } BatchProcessResult::Failed => { - warn!(self.log, "Batch processing failed"; "id" => *batch.id, "peer" => format!("{}", batch.current_peer)); + warn!(self.log, "Batch processing failed"; + "chain_id" => self.id,"id" => *batch.id, "peer" => format!("{}", batch.current_peer)); // The batch processing failed // This could be because this batch is invalid, or a previous invalidated batch // is invalid. 
We need to find out which and downvote the peer that has sent us @@ -367,7 +393,8 @@ impl SyncingChain { // that it is likely all peers in this chain are are sending invalid batches // repeatedly and are either malicious or faulty. We drop the chain and // downvote all peers. - warn!(self.log, "Batch failed to download. Dropping chain and downvoting peers"; "id"=> *batch.id); + warn!(self.log, "Batch failed to download. Dropping chain and downvoting peers"; + "chain_id" => self.id, "id"=> *batch.id); for peer_id in self.peer_pool.drain() { network.downvote_peer(peer_id); } @@ -399,8 +426,9 @@ impl SyncingChain { let processed_batch = self.processed_batches.remove(0); if *processed_batch.id >= *last_batch.id { crit!(self.log, "A processed batch had a greater id than the current process id"; - "processed_id" => *processed_batch.id, - "current_id" => *last_batch.id); + "chain_id" => self.id, + "processed_id" => *processed_batch.id, + "current_id" => *last_batch.id); } if let Some(prev_hash) = processed_batch.original_hash { @@ -415,9 +443,10 @@ impl SyncingChain { // now. debug!( self.log, "Re-processed batch validated. 
Downvoting original peer"; - "batch_id" => *processed_batch.id, - "original_peer" => format!("{}",processed_batch.original_peer), - "new_peer" => format!("{}", processed_batch.current_peer) + "chain_id" => self.id, + "batch_id" => *processed_batch.id, + "original_peer" => format!("{}",processed_batch.original_peer), + "new_peer" => format!("{}", processed_batch.current_peer) ); network.downvote_peer(processed_batch.original_peer); } @@ -494,6 +523,7 @@ impl SyncingChain { batch.current_peer = new_peer.clone(); debug!(self.log, "Re-requesting batch"; + "chain_id" => self.id, "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => *batch.id, @@ -514,7 +544,7 @@ impl SyncingChain { pub fn start_syncing( &mut self, network: &mut SyncNetworkContext, - local_finalized_slot: Slot, + local_finalized_epoch: Epoch, ) { // A local finalized slot is provided as other chains may have made // progress whilst this chain was Stopped or paused. If so, update the `processed_batch_id` to @@ -525,10 +555,17 @@ impl SyncingChain { // to start from this point and re-index all subsequent batches starting from one // (effectively creating a new chain). - if local_finalized_slot > self.current_processed_slot() { + let local_finalized_slot = local_finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let current_processed_slot = self.current_processed_slot(); + + if local_finalized_slot > current_processed_slot { + // Advance the chain to account for already downloaded blocks. 
+ self.start_epoch = local_finalized_epoch; + debug!(self.log, "Updating chain's progress"; - "prev_completed_slot" => self.current_processed_slot(), - "new_completed_slot" => local_finalized_slot.as_u64()); + "chain_id" => self.id, + "prev_completed_slot" => current_processed_slot, + "new_completed_slot" => self.current_processed_slot()); // Re-index batches *self.to_be_downloaded_id = 1; *self.to_be_processed_id = 1; @@ -554,7 +591,8 @@ impl SyncingChain { self.peer_pool.insert(peer_id.clone()); // do not request blocks if the chain is not syncing if let ChainSyncingState::Stopped = self.state { - debug!(self.log, "Peer added to a non-syncing chain"; "peer_id" => format!("{}", peer_id)); + debug!(self.log, "Peer added to a non-syncing chain"; + "chain_id" => self.id, "peer_id" => format!("{}", peer_id)); return; } @@ -583,6 +621,7 @@ impl SyncingChain { ) -> Option { if let Some(batch) = self.pending_batches.remove(request_id) { warn!(self.log, "Batch failed. RPC Error"; + "chain_id" => self.id, "id" => *batch.id, "retries" => batch.retries, "peer" => format!("{:?}", peer_id)); @@ -606,10 +645,7 @@ impl SyncingChain { ) -> ProcessingResult { batch.retries += 1; - // TODO: Handle partially downloaded batches. Update this when building a new batch - // processor thread. 
- - if batch.retries > MAX_BATCH_RETRIES { + if batch.retries > MAX_BATCH_RETRIES || self.peer_pool.is_empty() { // chain is unrecoverable, remove it ProcessingResult::RemoveChain } else { @@ -623,6 +659,7 @@ impl SyncingChain { batch.current_peer = new_peer.clone(); debug!(self.log, "Re-Requesting batch"; + "chain_id" => self.id, "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => *batch.id, @@ -647,6 +684,7 @@ impl SyncingChain { if let Some(peer_id) = self.get_next_peer() { if let Some(batch) = self.get_next_batch(peer_id) { debug!(self.log, "Requesting batch"; + "chain_id" => self.id, "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => *batch.id, @@ -679,6 +717,9 @@ impl SyncingChain { /// Returns the next required batch from the chain if it exists. If there are no more batches /// required, `None` is returned. fn get_next_batch(&mut self, peer_id: PeerId) -> Option> { + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let blocks_per_batch = slots_per_epoch * EPOCHS_PER_BATCH; + // only request batches up to the buffer size limit if self .completed_batches @@ -689,16 +730,23 @@ impl SyncingChain { return None; } + let batch_start_slot = self.start_epoch.start_slot(slots_per_epoch) + + self.to_be_downloaded_id.saturating_sub(1) * blocks_per_batch; + // don't request batches beyond the target head slot - let batch_start_slot = - self.start_slot + self.to_be_downloaded_id.saturating_sub(1) * BLOCKS_PER_BATCH; if batch_start_slot > self.target_head_slot { return None; } - // truncate the batch to the target head of the chain + + // truncate the batch to the epoch containing the target head of the chain let batch_end_slot = std::cmp::min( - batch_start_slot + BLOCKS_PER_BATCH, - self.target_head_slot.saturating_add(1u64), + // request either a batch containing the max number of blocks per batch + batch_start_slot + blocks_per_batch, + // or a batch of one epoch of blocks, which contains the `target_head_slot` + 
self.target_head_slot + .saturating_add(slots_per_epoch) + .epoch(slots_per_epoch) + .start_slot(slots_per_epoch), ); let batch_id = self.to_be_downloaded_id; diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 7ce4f0552e..becfd7df24 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -4,16 +4,16 @@ //! with this struct to to simplify the logic of the other layers of sync. use super::chain::{ChainSyncingState, SyncingChain}; -use crate::router::processor::PeerSyncInfo; use crate::sync::manager::SyncMessage; use crate::sync::network_context::SyncNetworkContext; +use crate::sync::PeerSyncInfo; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::{types::SyncState, NetworkGlobals, PeerId}; use slog::{debug, error, info}; use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; -use types::{Hash256, Slot}; +use types::{Epoch, Hash256, Slot}; /// The state of the long range/batch sync. #[derive(Clone)] @@ -110,10 +110,9 @@ impl ChainCollection { } /// Updates the global sync state and logs any changes. - fn update_sync_state(&mut self, state: RangeSyncState) { + pub fn update_sync_state(&mut self) { // if there is no range sync occurring, the state is either synced or not based on // connected peers. - self.state = state; if self.state == RangeSyncState::Idle { // there is no range sync, let the state of peers determine the global node sync state @@ -150,7 +149,8 @@ impl ChainCollection { if let RangeSyncState::Head { .. } = self.state { if self.head_chains.is_empty() { // Update the global network state to either synced or stalled. 
- self.update_sync_state(RangeSyncState::Idle); + self.state = RangeSyncState::Idle; + self.update_sync_state(); } } } @@ -165,13 +165,14 @@ impl ChainCollection { .head_info() .map(|info| info.slot) .unwrap_or_else(|_| Slot::from(0u64)); + // NOTE: This will modify the /node/syncing API to show current slot for all fields // while we update peers to look for new potentially HEAD chains. let temp_head_state = RangeSyncState::Head { start_slot: current_slot, head_slot: current_slot, }; - self.update_sync_state(temp_head_state); + self.state = temp_head_state; } } @@ -206,7 +207,7 @@ impl ChainCollection { /// This removes any out-dated chains, swaps to any higher priority finalized chains and /// updates the state of the collection. pub fn update_finalized(&mut self, network: &mut SyncNetworkContext) { - let local_slot = { + let local_epoch = { let local = match PeerSyncInfo::from_chain(&self.beacon_chain) { Some(local) => local, None => { @@ -218,9 +219,7 @@ impl ChainCollection { } }; - local - .finalized_epoch - .start_slot(T::EthSpec::slots_per_epoch()) + local.finalized_epoch }; // Remove any outdated finalized chains @@ -241,20 +240,20 @@ impl ChainCollection { }) { // A chain has more peers. 
Swap the syncing chain - debug!(self.log, "Switching finalized chains to sync"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_slot"=> chain.start_slot); + debug!(self.log, "Switching finalized chains to sync"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_epoch"=> local_epoch); // update the state to a new finalized state let state = RangeSyncState::Finalized { - start_slot: chain.start_slot, + start_slot: chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()), head_slot: chain.target_head_slot, head_root: chain.target_head_root, }; - self.update_sync_state(state); + self.state = state; // Stop the current chain from syncing self.finalized_chains[index].stop_syncing(); // Start the new chain - self.finalized_chains[new_index].start_syncing(network, local_slot); + self.finalized_chains[new_index].start_syncing(network, local_epoch); } } else if let Some(chain) = self .finalized_chains @@ -262,36 +261,36 @@ impl ChainCollection { .max_by_key(|chain| chain.peer_pool.len()) { // There is no currently syncing finalization chain, starting the one with the most peers - debug!(self.log, "New finalized chain started syncing"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_slot"=> chain.start_slot); - chain.start_syncing(network, local_slot); + debug!(self.log, "New finalized chain started syncing"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_epoch"=> chain.start_epoch); + chain.start_syncing(network, local_epoch); let state = RangeSyncState::Finalized { - start_slot: chain.start_slot, + start_slot: chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()), head_slot: chain.target_head_slot, head_root: chain.target_head_root, }; - self.update_sync_state(state); + self.state = state; } else { // There are 
no finalized chains, update the state. if self.head_chains.is_empty() { - self.update_sync_state(RangeSyncState::Idle); + self.state = RangeSyncState::Idle; } else { // for the syncing API, we find the minimal start_slot and the maximum // target_slot of all head chains to report back. - let (min_slot, max_slot) = self.head_chains.iter().fold( - (Slot::from(0u64), Slot::from(0u64)), + let (min_epoch, max_slot) = self.head_chains.iter().fold( + (Epoch::from(0u64), Slot::from(0u64)), |(min, max), chain| { ( - std::cmp::min(min, chain.start_slot), + std::cmp::min(min, chain.start_epoch), std::cmp::max(max, chain.target_head_slot), ) }, ); let head_state = RangeSyncState::Head { - start_slot: min_slot, + start_slot: min_epoch.start_slot(T::EthSpec::slots_per_epoch()), head_slot: max_slot, }; - self.update_sync_state(head_state); + self.state = head_state; } } } @@ -299,14 +298,16 @@ impl ChainCollection { /// Add a new finalized chain to the collection. pub fn new_finalized_chain( &mut self, - local_finalized_slot: Slot, + local_finalized_epoch: Epoch, target_head: Hash256, target_slot: Slot, peer_id: PeerId, sync_send: mpsc::UnboundedSender>, ) { + let chain_id = rand::random(); self.finalized_chains.push(SyncingChain::new( - local_finalized_slot, + chain_id, + local_finalized_epoch, target_slot, target_head, peer_id, @@ -321,7 +322,7 @@ impl ChainCollection { pub fn new_head_chain( &mut self, network: &mut SyncNetworkContext, - remote_finalized_slot: Slot, + remote_finalized_epoch: Epoch, target_head: Hash256, target_slot: Slot, peer_id: PeerId, @@ -334,8 +335,10 @@ impl ChainCollection { }); self.head_chains.retain(|chain| !chain.peer_pool.is_empty()); + let chain_id = rand::random(); let mut new_head_chain = SyncingChain::new( - remote_finalized_slot, + chain_id, + remote_finalized_epoch, target_slot, target_head, peer_id, @@ -344,7 +347,7 @@ impl ChainCollection { self.log.clone(), ); // All head chains can sync simultaneously - 
new_head_chain.start_syncing(network, remote_finalized_slot); + new_head_chain.start_syncing(network, remote_finalized_epoch); self.head_chains.push(new_head_chain); } @@ -429,7 +432,7 @@ impl ChainCollection { .fork_choice .contains_block(&chain.target_head_root) { - debug!(log_ref, "Purging out of finalized chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot); + debug!(log_ref, "Purging out of finalized chain"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); chain.status_peers(network); false } else { @@ -442,7 +445,7 @@ impl ChainCollection { .fork_choice .contains_block(&chain.target_head_root) { - debug!(log_ref, "Purging out of date head chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot); + debug!(log_ref, "Purging out of date head chain"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); chain.status_peers(network); false } else { @@ -478,7 +481,7 @@ impl ChainCollection { chain }; - debug!(self.log, "Chain was removed"; "start_slot" => chain.start_slot.as_u64(), "end_slot" => chain.target_head_slot.as_u64()); + debug!(self.log, "Chain was removed"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); // update the state self.update_finalized(network); diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index 5d7b17c07a..77eb17f156 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -5,7 +5,9 @@ mod batch; mod chain; mod chain_collection; mod range; +mod sync_type; pub use batch::Batch; pub use batch::BatchId; +pub use chain::ChainId; pub use range::RangeSync; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 1288e4e96c..59c789f819 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ 
b/beacon_node/network/src/sync/range_sync/range.rs @@ -11,10 +11,10 @@ //! ## Finalized chain sync //! //! This occurs when a peer connects that claims to have a finalized head slot that is greater -//! than our own. In this case, we form a chain from our last finalized slot, to their claimed +//! than our own. In this case, we form a chain from our last finalized epoch, to their claimed //! finalized slot. Any peer that also claims to have this last finalized slot is added to a pool -//! of peers from which batches of blocks may be downloaded. Blocks are downloaded until -//! the finalized slot of the chain is reached. Once reached, all peers within the pool are sent a +//! of peers from which batches of blocks may be downloaded. Blocks are downloaded until the +//! finalized slot of the chain is reached. Once reached, all peers within the pool are sent a //! STATUS message to potentially start a head chain sync, or check if further finalized chains //! need to be downloaded. //! @@ -26,11 +26,11 @@ //! //! ## Head Chain Sync //! -//! If a peer joins and there is no active finalized chains being synced, and it's head is -//! beyond our `SLOT_IMPORT_TOLERANCE` a chain is formed starting from this peers finalized slot -//! (this has been necessarily downloaded by our node, otherwise we would start a finalized chain -//! sync) to this peers head slot. Any other peers that match this head slot and head root, are -//! added to this chain's peer pool, which will be downloaded in parallel. +//! If a peer joins and there is no active finalized chains being synced, and it's head is beyond +//! our `SLOT_IMPORT_TOLERANCE` a chain is formed starting from this peers finalized epoch (this +//! has been necessarily downloaded by our node, otherwise we would start a finalized chain sync) +//! to this peers head slot. Any other peers that match this head slot and head root, are added to +//! this chain's peer pool, which will be downloaded in parallel. //! //! 
Unlike finalized chains, head chains can be synced in parallel. //! @@ -39,13 +39,14 @@ //! Each chain is downloaded in batches of blocks. The batched blocks are processed sequentially //! and further batches are requested as current blocks are being processed. -use super::chain::ProcessingResult; +use super::chain::{ChainId, ProcessingResult}; use super::chain_collection::{ChainCollection, RangeSyncState}; +use super::sync_type::RangeSyncType; use super::BatchId; -use crate::router::processor::PeerSyncInfo; use crate::sync::block_processor::BatchProcessResult; use crate::sync::manager::SyncMessage; use crate::sync::network_context::SyncNetworkContext; +use crate::sync::PeerSyncInfo; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::RequestId; use eth2_libp2p::{NetworkGlobals, PeerId}; @@ -64,7 +65,7 @@ pub struct RangeSync { /// A collection of chains that need to be downloaded. This stores any head or finalized chains /// that need to be downloaded. chains: ChainCollection, - /// Peers that join whilst a finalized chain is being download, sit in this set. Once the + /// Peers that join whilst a finalized chain is being downloaded, sit in this set. Once the /// finalized chain(s) complete, these peer's get STATUS'ed to update their head slot before /// the head chains are formed and downloaded. 
awaiting_head_peers: HashSet, @@ -112,7 +113,7 @@ impl RangeSync { &mut self, network: &mut SyncNetworkContext, peer_id: PeerId, - remote: PeerSyncInfo, + remote_info: PeerSyncInfo, ) { // evaluate which chain to sync from @@ -131,90 +132,108 @@ impl RangeSync { }; // convenience variables - let remote_finalized_slot = remote + let remote_finalized_slot = remote_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); let local_finalized_slot = local_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); - // remove peer from any chains - self.remove_peer(network, &peer_id); + // NOTE: A peer that has been re-status'd may now exist in multiple finalized chains. // remove any out-of-date chains self.chains.purge_outdated_chains(network); - if remote_finalized_slot > local_info.head_slot - && !self - .beacon_chain - .fork_choice - .contains_block(&remote.finalized_root) - { - debug!(self.log, "Finalization sync peer joined"; "peer_id" => format!("{:?}", peer_id)); - // Finalized chain search + // determine which kind of sync to perform and set up the chains + match RangeSyncType::new(&self.beacon_chain, &local_info, &remote_info) { + RangeSyncType::Finalized => { + // Finalized chain search + debug!(self.log, "Finalization sync peer joined"; "peer_id" => format!("{:?}", peer_id)); - // Note: We keep current head chains. These can continue syncing whilst we complete - // this new finalized chain. + // remove the peer from the awaiting_head_peers list if it exists + self.awaiting_head_peers.remove(&peer_id); - // If a finalized chain already exists that matches, add this peer to the chain's peer - // pool. 
- if let Some(chain) = self - .chains - .get_finalized_mut(remote.finalized_root, remote_finalized_slot) - { - debug!(self.log, "Finalized chain exists, adding peer"; "peer_id" => format!("{:?}", peer_id), "target_root" => format!("{}", chain.target_head_root), "end_slot" => chain.target_head_slot, "start_slot"=> chain.start_slot); + // Note: We keep current head chains. These can continue syncing whilst we complete + // this new finalized chain. - // add the peer to the chain's peer pool - chain.add_peer(network, peer_id); + // If a finalized chain already exists that matches, add this peer to the chain's peer + // pool. + if let Some(chain) = self + .chains + .get_finalized_mut(remote_info.finalized_root, remote_finalized_slot) + { + debug!(self.log, "Finalized chain exists, adding peer"; "peer_id" => format!("{:?}", peer_id), "target_root" => format!("{}", chain.target_head_root), "end_slot" => chain.target_head_slot, "start_epoch"=> chain.start_epoch); - // check if the new peer's addition will favour a new syncing chain. - self.chains.update_finalized(network); - } else { - // there is no finalized chain that matches this peer's last finalized target - // create a new finalized chain - debug!(self.log, "New finalized chain added to sync"; "peer_id" => format!("{:?}", peer_id), "start_slot" => local_finalized_slot.as_u64(), "end_slot" => remote_finalized_slot.as_u64(), "finalized_root" => format!("{}", remote.finalized_root)); - - self.chains.new_finalized_chain( - local_finalized_slot, - remote.finalized_root, - remote_finalized_slot, - peer_id, - self.sync_send.clone(), - ); + // add the peer to the chain's peer pool + chain.add_peer(network, peer_id); + + // check if the new peer's addition will favour a new syncing chain. 
+ self.chains.update_finalized(network); + // update the global sync state if necessary + self.chains.update_sync_state(); + } else { + // there is no finalized chain that matches this peer's last finalized target + // create a new finalized chain + debug!(self.log, "New finalized chain added to sync"; "peer_id" => format!("{:?}", peer_id), "start_epoch" => local_finalized_slot, "end_slot" => remote_finalized_slot, "finalized_root" => format!("{}", remote_info.finalized_root)); + + self.chains.new_finalized_chain( + local_info.finalized_epoch, + remote_info.finalized_root, + remote_finalized_slot, + peer_id, + self.sync_send.clone(), + ); + self.chains.update_finalized(network); + // update the global sync state + self.chains.update_sync_state(); + } + } + RangeSyncType::Head => { + // This peer requires a head chain sync + + if self.chains.is_finalizing_sync() { + // If there are finalized chains to sync, finish these first, before syncing head + // chains. This allows us to re-sync all known peers + trace!(self.log, "Waiting for finalized sync to complete"; "peer_id" => format!("{:?}", peer_id)); + // store the peer to re-status after all finalized chains complete + self.awaiting_head_peers.insert(peer_id); + return; + } + + // if the peer existed in any other head chain, remove it. + self.remove_peer(network, &peer_id); + + // The new peer has the same finalized (earlier filters should prevent a peer with an + // earlier finalized chain from reaching here). 
+ debug!(self.log, "New peer added for recent head sync"; "peer_id" => format!("{:?}", peer_id)); + + // search if there is a matching head chain, then add the peer to the chain + if let Some(chain) = self + .chains + .get_head_mut(remote_info.head_root, remote_info.head_slot) + { + debug!(self.log, "Adding peer to the existing head chain peer pool"; "head_root" => format!("{}",remote_info.head_root), "head_slot" => remote_info.head_slot, "peer_id" => format!("{:?}", peer_id)); + + // add the peer to the head's pool + chain.add_peer(network, peer_id); + } else { + // There are no other head chains that match this peer's status, create a new one, and + let start_epoch = std::cmp::min(local_info.head_slot, remote_finalized_slot) + .epoch(T::EthSpec::slots_per_epoch()); + debug!(self.log, "Creating a new syncing head chain"; "head_root" => format!("{}",remote_info.head_root), "start_epoch" => start_epoch, "head_slot" => remote_info.head_slot, "peer_id" => format!("{:?}", peer_id)); + + self.chains.new_head_chain( + network, + start_epoch, + remote_info.head_root, + remote_info.head_slot, + peer_id, + self.sync_send.clone(), + ); + } self.chains.update_finalized(network); + self.chains.update_sync_state(); } - } else { - if self.chains.is_finalizing_sync() { - // If there are finalized chains to sync, finish these first, before syncing head - // chains. This allows us to re-sync all known peers - trace!(self.log, "Waiting for finalized sync to complete"; "peer_id" => format!("{:?}", peer_id)); - return; - } - - // The new peer has the same finalized (earlier filters should prevent a peer with an - // earlier finalized chain from reaching here). 
- debug!(self.log, "New peer added for recent head sync"; "peer_id" => format!("{:?}", peer_id)); - - // search if there is a matching head chain, then add the peer to the chain - if let Some(chain) = self.chains.get_head_mut(remote.head_root, remote.head_slot) { - debug!(self.log, "Adding peer to the existing head chain peer pool"; "head_root" => format!("{}",remote.head_root), "head_slot" => remote.head_slot, "peer_id" => format!("{:?}", peer_id)); - - // add the peer to the head's pool - chain.add_peer(network, peer_id); - } else { - // There are no other head chains that match this peer's status, create a new one, and - let start_slot = std::cmp::min(local_info.head_slot, remote_finalized_slot); - debug!(self.log, "Creating a new syncing head chain"; "head_root" => format!("{}",remote.head_root), "start_slot" => start_slot, "head_slot" => remote.head_slot, "peer_id" => format!("{:?}", peer_id)); - self.chains.new_head_chain( - network, - start_slot, - remote.head_root, - remote.head_slot, - peer_id, - self.sync_send.clone(), - ); - } - self.chains.update_finalized(network); } } @@ -252,6 +271,7 @@ impl RangeSync { pub fn handle_block_process_result( &mut self, network: &mut SyncNetworkContext, + chain_id: ChainId, batch_id: BatchId, downloaded_blocks: Vec>, result: BatchProcessResult, @@ -260,20 +280,28 @@ impl RangeSync { let mut downloaded_blocks = Some(downloaded_blocks); match self.chains.finalized_request(|chain| { - chain.on_batch_process_result(network, batch_id, &mut downloaded_blocks, &result) + chain.on_batch_process_result( + network, + chain_id, + batch_id, + &mut downloaded_blocks, + &result, + ) }) { Some((index, ProcessingResult::RemoveChain)) => { let chain = self.chains.remove_finalized_chain(index); - debug!(self.log, "Finalized chain removed"; "start_slot" => chain.start_slot.as_u64(), "end_slot" => chain.target_head_slot.as_u64()); - // the chain is complete, re-status it's peers - chain.status_peers(network); - + debug!(self.log, "Finalized 
chain removed"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); // update the state of the collection self.chains.update_finalized(network); - // set the state to a head sync, to inform the manager that we are awaiting a + // the chain is complete, re-status it's peers + chain.status_peers(network); + + // set the state to a head sync if there are no finalized chains, to inform the manager that we are awaiting a // head chain. self.chains.set_head_sync(); + // Update the global variables + self.chains.update_sync_state(); // if there are no more finalized chains, re-status all known peers awaiting a head // sync @@ -291,6 +319,7 @@ impl RangeSync { match self.chains.head_request(|chain| { chain.on_batch_process_result( network, + chain_id, batch_id, &mut downloaded_blocks, &result, @@ -298,12 +327,14 @@ impl RangeSync { }) { Some((index, ProcessingResult::RemoveChain)) => { let chain = self.chains.remove_head_chain(index); - debug!(self.log, "Head chain completed"; "start_slot" => chain.start_slot.as_u64(), "end_slot" => chain.target_head_slot.as_u64()); + debug!(self.log, "Head chain completed"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot); // the chain is complete, re-status it's peers and remove it chain.status_peers(network); // update the state of the collection self.chains.update_finalized(network); + // update the global state and log any change + self.chains.update_sync_state(); } Some((_, ProcessingResult::KeepChain)) => {} None => { @@ -331,6 +362,8 @@ impl RangeSync { // update the state of the collection self.chains.update_finalized(network); + // update the global state and inform the user + self.chains.update_sync_state(); } /// When a peer gets removed, both the head and finalized chains need to be searched to check which pool the peer is in. 
The chain may also have a batch or batches awaiting diff --git a/beacon_node/network/src/sync/range_sync/sync_type.rs b/beacon_node/network/src/sync/range_sync/sync_type.rs new file mode 100644 index 0000000000..4b08b8b046 --- /dev/null +++ b/beacon_node/network/src/sync/range_sync/sync_type.rs @@ -0,0 +1,40 @@ +//! Contains logic about identifying which Sync to perform given PeerSyncInfo of ourselves and +//! of a remote. + +use crate::sync::PeerSyncInfo; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use std::sync::Arc; + +/// The type of Range sync that should be done relative to our current state. +pub enum RangeSyncType { + /// A finalized chain sync should be started with this peer. + Finalized, + /// A head chain sync should be started with this peer. + Head, +} + +impl RangeSyncType { + /// Determines the type of sync given our local `PeerSyncInfo` and the remote's + /// `PeerSyncInfo`. + pub fn new( + chain: &Arc>, + local_info: &PeerSyncInfo, + remote_info: &PeerSyncInfo, + ) -> RangeSyncType { + // Check for finalized chain sync + // + // The condition is: + // - The remotes finalized epoch is greater than our current finalized epoch and we have + // not seen the finalized hash before. 
+ + if remote_info.finalized_epoch > local_info.finalized_epoch + && !chain + .fork_choice + .contains_block(&remote_info.finalized_root) + { + RangeSyncType::Finalized + } else { + RangeSyncType::Head + } + } +} diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 893d583c27..661b561c87 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -9,7 +9,7 @@ use network::NetworkMessage; use ssz::Decode; use store::{iter::AncestorIter, Store}; use types::{ - Attestation, BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, + Attestation, BeaconState, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, SignedBeaconBlock, Slot, }; @@ -251,15 +251,22 @@ pub fn publish_beacon_block_to_network( pub fn publish_raw_attestations_to_network( mut chan: NetworkChannel, attestations: Vec>, + spec: &ChainSpec, ) -> Result<(), ApiError> { let messages = attestations .into_iter() .map(|attestation| { // create the gossip message to send to the network - let subnet_id = attestation.subnet_id(); - PubsubMessage::Attestation(Box::new((subnet_id, attestation))) + let subnet_id = attestation + .subnet_id(spec) + .map_err(|e| ApiError::ServerError(format!("Unable to get subnet id: {:?}", e)))?; + + Ok(PubsubMessage::Attestation(Box::new(( + subnet_id, + attestation, + )))) }) - .collect::>(); + .collect::, ApiError>>()?; // Publish the attestations to the p2p network via gossipsub. 
if let Err(e) = chan.try_send(NetworkMessage::Publish { messages }) { diff --git a/beacon_node/rest_api/src/lighthouse.rs b/beacon_node/rest_api/src/lighthouse.rs index 48a64e3fba..556046ab37 100644 --- a/beacon_node/rest_api/src/lighthouse.rs +++ b/beacon_node/rest_api/src/lighthouse.rs @@ -2,12 +2,57 @@ use crate::response_builder::ResponseBuilder; use crate::ApiResult; -use eth2_libp2p::NetworkGlobals; +use eth2_libp2p::{NetworkGlobals, PeerInfo}; use hyper::{Body, Request}; +use serde::Serialize; use std::sync::Arc; use types::EthSpec; /// The syncing state of the beacon node. -pub fn syncing(req: Request, network: Arc>) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz(&network.sync_state()) +pub fn syncing( + req: Request, + network_globals: Arc>, +) -> ApiResult { + ResponseBuilder::new(&req)?.body_no_ssz(&network_globals.sync_state()) +} + +/// Returns all known peers and corresponding information +pub fn peers(req: Request, network_globals: Arc>) -> ApiResult { + let peers: Vec> = network_globals + .peers + .read() + .peers() + .map(|(peer_id, peer_info)| Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect(); + ResponseBuilder::new(&req)?.body_no_ssz(&peers) +} + +/// Returns all known connected peers and their corresponding information +pub fn connected_peers( + req: Request, + network_globals: Arc>, +) -> ApiResult { + let peers: Vec> = network_globals + .peers + .read() + .connected_peers() + .map(|(peer_id, peer_info)| Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect(); + ResponseBuilder::new(&req)?.body_no_ssz(&peers) +} + +/// Information returned by `peers` and `connected_peers`. +#[derive(Clone, Debug, Serialize)] +#[serde(bound = "T: EthSpec")] +struct Peer { + /// The Peer's ID + peer_id: String, + /// The PeerInfo associated with the peer. 
+ peer_info: PeerInfo, } diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 43b48b2a12..ae2486d341 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -65,7 +65,7 @@ pub fn get_peer_list( let connected_peers: Vec = network .peers .read() - .connected_peers() + .connected_peer_ids() .map(PeerId::to_string) .collect(); ResponseBuilder::new(&req)?.body_no_ssz(&connected_peers) diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index 2995957e02..ffd07f8f10 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -41,10 +41,3 @@ pub fn syncing( sync_status, }) } - -pub fn lighthouse_syncing( - req: Request, - network: Arc>, -) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz(&network.sync_state()) -} diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs index ee391ae0db..1c86e8ebc7 100644 --- a/beacon_node/rest_api/src/router.rs +++ b/beacon_node/rest_api/src/router.rs @@ -203,7 +203,6 @@ pub fn route( (&Method::GET, "/advanced/operation_pool") => { into_boxfut(advanced::get_operation_pool::(req, beacon_chain)) } - (&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::( req, beacon_chain, @@ -215,7 +214,12 @@ pub fn route( (&Method::GET, "/lighthouse/syncing") => { into_boxfut(lighthouse::syncing::(req, network_globals)) } - + (&Method::GET, "/lighthouse/peers") => { + into_boxfut(lighthouse::peers::(req, network_globals)) + } + (&Method::GET, "/lighthouse/connected_peers") => into_boxfut( + lighthouse::connected_peers::(req, network_globals), + ), _ => Box::new(futures::future::err(ApiError::NotFound( "Request path and/or method not found.".to_owned(), ))), diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 55ab8ccefc..609a52e647 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ 
-507,10 +507,11 @@ pub fn publish_attestations( } } })?; - Ok(attestations) + + Ok((attestations, beacon_chain)) }) - .and_then(|attestations| { - publish_raw_attestations_to_network::(network_chan, attestations) + .and_then(|(attestations, beacon_chain)| { + publish_raw_attestations_to_network::(network_chan, attestations, &beacon_chain.spec) }) .and_then(|_| response_builder?.body_no_ssz(&())), ) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 637dc289e0..9a2dfc1d49 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -109,7 +109,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .arg( Arg::with_name("disable-enr-auto-update") - .short("s") + .short("x") .long("disable-enr-auto-update") .help("Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. \ This disables this feature, fixing the ENR's IP/PORT to those specified on boot.") @@ -235,4 +235,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Specifies how many states the database should cache in memory [default: 5]") .takes_value(true) ) + /* + * Purge. + */ + .arg( + Arg::with_name("purge-db") + .long("purge-db") + .help("If present, the chain database will be deleted. Use with caution.") + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index a48569e53a..59b1457deb 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,3 +1,4 @@ +use beacon_chain::builder::PUBKEY_CACHE_FILENAME; use clap::ArgMatches; use client::{config::DEFAULT_DATADIR, ClientConfig, ClientGenesis}; use eth2_libp2p::{Enr, Multiaddr}; @@ -33,6 +34,32 @@ pub fn get_config( client_config.data_dir = get_data_dir(cli_args); + // If necessary, remove any existing database and configuration + if client_config.data_dir.exists() && cli_args.is_present("purge-db") { + // Remove the chain_db. 
+ fs::remove_dir_all( + client_config + .get_db_path() + .ok_or("Failed to get db_path".to_string())?, + ) + .map_err(|err| format!("Failed to remove chain_db: {}", err))?; + + // Remove the freezer db. + fs::remove_dir_all( + client_config + .get_freezer_db_path() + .ok_or("Failed to get freezer db path".to_string())?, + ) + .map_err(|err| format!("Failed to remove chain_db: {}", err))?; + + // Remove the pubkey cache file if it exists + let pubkey_cache_file = client_config.data_dir.join(PUBKEY_CACHE_FILENAME); + if pubkey_cache_file.exists() { + fs::remove_file(&pubkey_cache_file) + .map_err(|e| format!("Failed to remove {:?}: {:?}", pubkey_cache_file, e))?; + } + } + // Create `datadir` and any non-existing parent directories. fs::create_dir_all(&client_config.data_dir) .map_err(|e| format!("Failed to create data dir: {}", e))?; @@ -85,7 +112,6 @@ pub fn get_config( .map_err(|_| format!("Invalid port: {}", port_str))?; client_config.network.libp2p_port = port; client_config.network.discovery_port = port; - dbg!(&client_config.network.discovery_port); } if let Some(port_str) = cli_args.value_of("discovery-port") { @@ -293,24 +319,7 @@ pub fn get_config( } /* - * Load the eth2 testnet dir to obtain some addition config values. - */ - let eth2_testnet_config: Eth2TestnetConfig = - get_eth2_testnet_config(&client_config.testnet_dir)?; - - client_config.eth1.deposit_contract_address = - format!("{:?}", eth2_testnet_config.deposit_contract_address()?); - client_config.eth1.deposit_contract_deploy_block = - eth2_testnet_config.deposit_contract_deploy_block; - client_config.eth1.lowest_cached_block_number = - client_config.eth1.deposit_contract_deploy_block; - - if let Some(mut boot_nodes) = eth2_testnet_config.boot_enr { - client_config.network.boot_nodes.append(&mut boot_nodes) - } - - /* - * Load the eth2 testnet dir to obtain some addition config values. + * Load the eth2 testnet dir to obtain some additional config values. 
*/ let eth2_testnet_config: Eth2TestnetConfig = get_eth2_testnet_config(&client_config.testnet_dir)?; diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 2f325b5efb..6c37b67944 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -10,6 +10,7 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir}; pub use eth2_config::Eth2Config; +use beacon_chain::migrate::{BackgroundMigrator, DiskStore}; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender, slot_clock::SystemTimeSlotClock, @@ -20,7 +21,6 @@ use environment::RuntimeContext; use futures::{Future, IntoFuture}; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; -use store::{migrate::BackgroundMigrator, DiskStore}; use types::EthSpec; /// A type-alias to the tighten the definition of a production-intended `Client`. diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 362b866ba7..194461868d 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -204,7 +204,7 @@ impl Store for HotColdDB { } /// Advance the split point of the store, moving new finalized states to the freezer. 
- fn freeze_to_state( + fn process_finalization( store: Arc, frozen_head_root: Hash256, frozen_head: &BeaconState, diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 43bdd164d7..772f0ae309 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -1,4 +1,4 @@ -use crate::Store; +use crate::{Error, Store}; use std::borrow::Cow; use std::marker::PhantomData; use std::sync::Arc; @@ -43,12 +43,95 @@ impl<'a, U: Store, E: EthSpec> AncestorIter { + inner: RootsIterator<'a, T, U>, +} + +impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> { + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { + Self { + inner: RootsIterator::new(store, beacon_state), + } + } + + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { + Self { + inner: RootsIterator::owned(store, beacon_state), + } + } +} + +impl<'a, T: EthSpec, U: Store> Iterator for StateRootsIterator<'a, T, U> { + type Item = (Hash256, Slot); + + fn next(&mut self) -> Option { + self.inner + .next() + .map(|(_, state_root, slot)| (state_root, slot)) + } +} + +/// Iterates backwards through block roots. If any specified slot is unable to be retrieved, the +/// iterator returns `None` indefinitely. +/// +/// Uses the `block_roots` field of `BeaconState` as the source of block roots and will +/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been +/// exhausted. +/// +/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. 
+pub struct BlockRootsIterator<'a, T: EthSpec, U> { + inner: RootsIterator<'a, T, U>, +} + +impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl<'a, T: EthSpec, U: Store> BlockRootsIterator<'a, T, U> { + /// Create a new iterator over all block roots in the given `beacon_state` and prior states. + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { + Self { + inner: RootsIterator::new(store, beacon_state), + } + } + + /// Create a new iterator over all block roots in the given `beacon_state` and prior states. + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { + Self { + inner: RootsIterator::owned(store, beacon_state), + } + } +} + +impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { + type Item = (Hash256, Slot); + + fn next(&mut self) -> Option { + self.inner + .next() + .map(|(block_root, _, slot)| (block_root, slot)) + } +} + +/// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`. +pub struct RootsIterator<'a, T: EthSpec, U> { store: Arc, beacon_state: Cow<'a, BeaconState>, slot: Slot, } -impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> { +impl<'a, T: EthSpec, U> Clone for RootsIterator<'a, T, U> { fn clone(&self) -> Self { Self { store: self.store.clone(), @@ -58,7 +141,7 @@ impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> { } } -impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> { +impl<'a, T: EthSpec, U: Store> RootsIterator<'a, T, U> { pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { Self { store, @@ -74,10 +157,21 @@ impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> { beacon_state: Cow::Owned(beacon_state), } } + + pub fn from_block(store: Arc, block_hash: Hash256) -> Result { + let block = store + .get_block(&block_hash)? 
+ .ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?; + let state = store + .get_state(&block.state_root(), Some(block.slot()))? + .ok_or_else(|| BeaconStateError::MissingBeaconState(block.state_root().into()))?; + Ok(Self::owned(store, state)) + } } -impl<'a, T: EthSpec, U: Store> Iterator for StateRootsIterator<'a, T, U> { - type Item = (Hash256, Slot); +impl<'a, T: EthSpec, U: Store> Iterator for RootsIterator<'a, T, U> { + /// (block_root, state_root, slot) + type Item = (Hash256, Hash256, Slot); fn next(&mut self) -> Option { if self.slot == 0 || self.slot > self.beacon_state.slot { @@ -86,18 +180,22 @@ impl<'a, T: EthSpec, U: Store> Iterator for StateRootsIterator<'a, T, U> { self.slot -= 1; - match self.beacon_state.get_state_root(self.slot) { - Ok(root) => Some((*root, self.slot)), - Err(BeaconStateError::SlotOutOfBounds) => { + match ( + self.beacon_state.get_block_root(self.slot), + self.beacon_state.get_state_root(self.slot), + ) { + (Ok(block_root), Ok(state_root)) => Some((*block_root, *state_root, self.slot)), + (Err(BeaconStateError::SlotOutOfBounds), Err(BeaconStateError::SlotOutOfBounds)) => { // Read a `BeaconState` from the store that has access to prior historical roots. let beacon_state = next_historical_root_backtrack_state(&*self.store, &self.beacon_state)?; self.beacon_state = Cow::Owned(beacon_state); - let root = self.beacon_state.get_state_root(self.slot).ok()?; + let block_root = *self.beacon_state.get_block_root(self.slot).ok()?; + let state_root = *self.beacon_state.get_state_root(self.slot).ok()?; - Some((*root, self.slot)) + Some((block_root, state_root, self.slot)) } _ => None, } @@ -165,79 +263,7 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> { fn next(&mut self) -> Option { let (root, _slot) = self.roots.next()?; - self.roots.store.get_block(&root).ok()? - } -} - -/// Iterates backwards through block roots. 
If any specified slot is unable to be retrieved, the -/// iterator returns `None` indefinitely. -/// -/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will -/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been -/// exhausted. -/// -/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. -pub struct BlockRootsIterator<'a, T: EthSpec, U> { - store: Arc, - beacon_state: Cow<'a, BeaconState>, - slot: Slot, -} - -impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> { - fn clone(&self) -> Self { - Self { - store: self.store.clone(), - beacon_state: self.beacon_state.clone(), - slot: self.slot, - } - } -} - -impl<'a, T: EthSpec, U: Store> BlockRootsIterator<'a, T, U> { - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { - Self { - store, - slot: beacon_state.slot, - beacon_state: Cow::Borrowed(beacon_state), - } - } - - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { - Self { - store, - slot: beacon_state.slot, - beacon_state: Cow::Owned(beacon_state), - } - } -} - -impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { - type Item = (Hash256, Slot); - - fn next(&mut self) -> Option { - if self.slot == 0 || self.slot > self.beacon_state.slot { - return None; - } - - self.slot -= 1; - - match self.beacon_state.get_block_root(self.slot) { - Ok(root) => Some((*root, self.slot)), - Err(BeaconStateError::SlotOutOfBounds) => { - // Read a `BeaconState` from the store that has access to prior historical roots. 
- let beacon_state = - next_historical_root_backtrack_state(&*self.store, &self.beacon_state)?; - - self.beacon_state = Cow::Owned(beacon_state); - - let root = self.beacon_state.get_block_root(self.slot).ok()?; - - Some((*root, self.slot)) - } - _ => None, - } + self.roots.inner.store.get_block(&root).ok()? } } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index a8220b08c9..31c948eb0f 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -15,7 +15,7 @@ pub mod chunked_vector; pub mod config; mod errors; mod forwards_iter; -mod hot_cold_store; +pub mod hot_cold_store; mod impls; mod leveldb_store; mod memory_store; @@ -24,7 +24,6 @@ mod partial_beacon_state; mod state_batch; pub mod iter; -pub mod migrate; use std::sync::Arc; @@ -32,7 +31,6 @@ pub use self::config::StoreConfig; pub use self::hot_cold_store::{HotColdDB as DiskStore, HotStateSummary}; pub use self::leveldb_store::LevelDB as SimpleDiskStore; pub use self::memory_store::MemoryStore; -pub use self::migrate::Migrate; pub use self::partial_beacon_state::PartialBeaconState; pub use errors::Error; pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; @@ -132,7 +130,7 @@ pub trait Store: Sync + Send + Sized + 'static { } /// (Optionally) Move all data before the frozen slot to the freezer database. 
- fn freeze_to_state( + fn process_finalization( _store: Arc, _frozen_head_root: Hash256, _frozen_head: &BeaconState, diff --git a/beacon_node/store/src/migrate.rs b/beacon_node/store/src/migrate.rs deleted file mode 100644 index 5fd617a226..0000000000 --- a/beacon_node/store/src/migrate.rs +++ /dev/null @@ -1,153 +0,0 @@ -use crate::{ - hot_cold_store::HotColdDBError, DiskStore, Error, MemoryStore, SimpleDiskStore, Store, -}; -use parking_lot::Mutex; -use slog::{debug, warn}; -use std::mem; -use std::sync::mpsc; -use std::sync::Arc; -use std::thread; -use types::{BeaconState, EthSpec, Hash256, Slot}; - -/// Trait for migration processes that update the database upon finalization. -pub trait Migrate: Send + Sync + 'static { - fn new(db: Arc) -> Self; - - fn freeze_to_state( - &self, - _state_root: Hash256, - _state: BeaconState, - _max_finality_distance: u64, - ) { - } -} - -/// Migrator that does nothing, for stores that don't need migration. -pub struct NullMigrator; - -impl Migrate, E> for NullMigrator { - fn new(_: Arc>) -> Self { - NullMigrator - } -} - -impl Migrate, E> for NullMigrator { - fn new(_: Arc>) -> Self { - NullMigrator - } -} - -/// Migrator that immediately calls the store's migration function, blocking the current execution. -/// -/// Mostly useful for tests. -pub struct BlockingMigrator(Arc); - -impl> Migrate for BlockingMigrator { - fn new(db: Arc) -> Self { - BlockingMigrator(db) - } - - fn freeze_to_state( - &self, - state_root: Hash256, - state: BeaconState, - _max_finality_distance: u64, - ) { - if let Err(e) = S::freeze_to_state(self.0.clone(), state_root, &state) { - // This migrator is only used for testing, so we just log to stderr without a logger. - eprintln!("Migration error: {:?}", e); - } - } -} - -type MpscSender = mpsc::Sender<(Hash256, BeaconState)>; - -/// Migrator that runs a background thread to migrate state from the hot to the cold database. 
-pub struct BackgroundMigrator { - db: Arc>, - tx_thread: Mutex<(MpscSender, thread::JoinHandle<()>)>, -} - -impl Migrate, E> for BackgroundMigrator { - fn new(db: Arc>) -> Self { - let tx_thread = Mutex::new(Self::spawn_thread(db.clone())); - Self { db, tx_thread } - } - - /// Perform the freezing operation on the database, - fn freeze_to_state( - &self, - finalized_state_root: Hash256, - finalized_state: BeaconState, - max_finality_distance: u64, - ) { - if !self.needs_migration(finalized_state.slot, max_finality_distance) { - return; - } - - let (ref mut tx, ref mut thread) = *self.tx_thread.lock(); - - if let Err(tx_err) = tx.send((finalized_state_root, finalized_state)) { - let (new_tx, new_thread) = Self::spawn_thread(self.db.clone()); - - drop(mem::replace(tx, new_tx)); - let old_thread = mem::replace(thread, new_thread); - - // Join the old thread, which will probably have panicked, or may have - // halted normally just now as a result of us dropping the old `mpsc::Sender`. - if let Err(thread_err) = old_thread.join() { - warn!( - self.db.log, - "Migration thread died, so it was restarted"; - "reason" => format!("{:?}", thread_err) - ); - } - - // Retry at most once, we could recurse but that would risk overflowing the stack. - let _ = tx.send(tx_err.0); - } - } -} - -impl BackgroundMigrator { - /// Return true if a migration needs to be performed, given a new `finalized_slot`. - fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool { - let finality_distance = finalized_slot - self.db.get_split_slot(); - finality_distance > max_finality_distance - } - - /// Spawn a new child thread to run the migration process. - /// - /// Return a channel handle for sending new finalized states to the thread. 
- fn spawn_thread( - db: Arc>, - ) -> ( - mpsc::Sender<(Hash256, BeaconState)>, - thread::JoinHandle<()>, - ) { - let (tx, rx) = mpsc::channel(); - let thread = thread::spawn(move || { - while let Ok((state_root, state)) = rx.recv() { - match DiskStore::freeze_to_state(db.clone(), state_root, &state) { - Ok(()) => {} - Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { - debug!( - db.log, - "Database migration postponed, unaligned finalized block"; - "slot" => slot.as_u64() - ); - } - Err(e) => { - warn!( - db.log, - "Database migration failed"; - "error" => format!("{:?}", e) - ); - } - } - } - }); - - (tx, thread) - } -} diff --git a/book/src/http/lighthouse.md b/book/src/http/lighthouse.md index cc6ec2a119..d80c0f694a 100644 --- a/book/src/http/lighthouse.md +++ b/book/src/http/lighthouse.md @@ -7,6 +7,8 @@ The `/lighthouse` endpoints provide lighthouse-specific information about the be HTTP Path | Description | | --- | -- | [`/lighthouse/syncing`](#lighthousesyncing) | Get the node's syncing status +[`/lighthouse/peers`](#lighthousepeers) | Get the peers info known by the beacon node +[`/lighthouse/connected_peers`](#lighthousepeers) | Get the connected_peers known by the beacon node ## `/lighthouse/syncing` @@ -52,3 +54,129 @@ If the node is synced "Synced" } ``` + +## `/lighthouse/peers` + +Get all known peers info from the beacon node. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/lighthouse/peers` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +[ +{ + "peer_id" : "16Uiu2HAmTEinipUS3haxqucrn7d7SmCKx5XzAVbAZCiNW54ncynG", + "peer_info" : { + "_status" : "Healthy", + "client" : { + "agent_string" : "github.com/libp2p/go-libp2p", + "kind" : "Prysm", + "os_version" : "unknown", + "protocol_version" : "ipfs/0.1.0", + "version" : "unknown" + }, + "connection_status" : { + "Disconnected" : { + "since" : 3 + } + }, + "listening_addresses" : [ + "/ip4/10.3.58.241/tcp/9001", + "/ip4/35.172.14.146/tcp/9001", + "/ip4/35.172.14.146/tcp/9001" + ], + "meta_data" : { + "attnets" : "0x0000000000000000", + "seq_number" : 0 + }, + "reputation" : 20, + "sync_status" : { + "Synced" : { + "status_head_slot" : 18146 + } + } + } + }, + { + "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ", + "peer_info" : { + "_status" : "Healthy", + "client" : { + "agent_string" : null, + "kind" : "Unknown", + "os_version" : "unknown", + "protocol_version" : "unknown", + "version" : "unknown" + }, + "connection_status" : { + "Disconnected" : { + "since" : 5 + } + }, + "listening_addresses" : [], + "meta_data" : { + "attnets" : "0x0900000000000000", + "seq_number" : 0 + }, + "reputation" : 20, + "sync_status" : "Unknown" + } + }, +] +``` + +## `/lighthouse/connected_peers` + +Get all known peers info from the beacon node. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/lighthouse/connected_peers` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +[ + { + "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ", + "peer_info" : { + "_status" : "Healthy", + "client" : { + "agent_string" : null, + "kind" : "Unknown", + "os_version" : "unknown", + "protocol_version" : "unknown", + "version" : "unknown" + }, + "connection_status" : { + "Connected" : { + "in" : 5, + "out" : 2 + } + }, + "listening_addresses" : [], + "meta_data" : { + "attnets" : "0x0900000000000000", + "seq_number" : 0 + }, + "reputation" : 20, + "sync_status" : "Unknown" + } + }, + ] +``` diff --git a/book/src/local-testnets.md b/book/src/local-testnets.md index 387df1dd45..3b15cefdd6 100644 --- a/book/src/local-testnets.md +++ b/book/src/local-testnets.md @@ -18,6 +18,7 @@ TL;DR isn't adequate. ## TL;DR ```bash +make install-lcli lcli new-testnet lcli interop-genesis 128 lighthouse bn --testnet-dir ~/.lighthouse/testnet --dummy-eth1 --http --enr-match @@ -40,7 +41,7 @@ used for starting testnets and debugging. Install `lcli` from the root directory of this repository with: ```bash -cargo install --path lcli --force +make install-lcli ``` ### 1.2 Create a testnet directory diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 7718c227ad..ac2c1ed319 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -98,10 +98,14 @@ impl OperationPool { /// Get a list of attestations for inclusion in a block. /// - /// NOTE: Assumes that all attestations in the operation_pool are valid. + /// The `validity_filter` is a closure that provides extra filtering of the attestations + /// before an approximately optimal bundle is constructed. 
We use it to provide access + /// to the fork choice data from the `BeaconChain` struct that doesn't logically belong + /// in the operation pool. pub fn get_attestations( &self, state: &BeaconState, + validity_filter: impl FnMut(&&Attestation) -> bool, spec: &ChainSpec, ) -> Result>, OpPoolError> { // Attestations for the current fork, which may be from the current or previous epoch. @@ -143,6 +147,7 @@ impl OperationPool { ) .is_ok() }) + .filter(validity_filter) .flat_map(|att| AttMaxCover::new(att, state, total_active_balance, spec)); Ok(maximum_cover( @@ -584,7 +589,7 @@ mod release_tests { state.slot -= 1; assert_eq!( op_pool - .get_attestations(state, spec) + .get_attestations(state, |_| true, spec) .expect("should have attestations") .len(), 0 @@ -594,7 +599,7 @@ mod release_tests { state.slot += spec.min_attestation_inclusion_delay; let block_attestations = op_pool - .get_attestations(state, spec) + .get_attestations(state, |_| true, spec) .expect("Should have block attestations"); assert_eq!(block_attestations.len(), committees.len()); @@ -764,7 +769,7 @@ mod release_tests { state.slot += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(state, spec) + .get_attestations(state, |_| true, spec) .expect("should have best attestations"); assert_eq!(best_attestations.len(), max_attestations); @@ -839,7 +844,7 @@ mod release_tests { state.slot += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(state, spec) + .get_attestations(state, |_| true, spec) .expect("should have valid best attestations"); assert_eq!(best_attestations.len(), max_attestations); diff --git a/eth2/proto_array_fork_choice/src/proto_array.rs b/eth2/proto_array_fork_choice/src/proto_array.rs index ece8648bff..8516f80298 100644 --- a/eth2/proto_array_fork_choice/src/proto_array.rs +++ b/eth2/proto_array_fork_choice/src/proto_array.rs @@ -407,4 +407,41 @@ impl ProtoArray { && (node.finalized_epoch == 
self.finalized_epoch || self.finalized_epoch == Epoch::new(0)) } + + /// Return a reverse iterator over the nodes which comprise the chain ending at `block_root`. + pub fn iter_nodes<'a>(&'a self, block_root: &Hash256) -> Iter<'a> { + let next_node_index = self.indices.get(block_root).copied(); + Iter { + next_node_index, + proto_array: self, + } + } + + /// Return a reverse iterator over the block roots of the chain ending at `block_root`. + /// + /// Note that unlike many other iterators, this one WILL NOT yield anything at skipped slots. + pub fn iter_block_roots<'a>( + &'a self, + block_root: &Hash256, + ) -> impl Iterator + 'a { + self.iter_nodes(block_root) + .map(|node| (node.root, node.slot)) + } +} + +/// Reverse iterator over one path through a `ProtoArray`. +pub struct Iter<'a> { + next_node_index: Option, + proto_array: &'a ProtoArray, +} + +impl<'a> Iterator for Iter<'a> { + type Item = &'a ProtoNode; + + fn next(&mut self) -> Option { + let next_node_index = self.next_node_index?; + let node = self.proto_array.nodes.get(next_node_index)?; + self.next_node_index = node.parent; + Some(node) + } } diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index e407b18b2a..6387a59aef 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -27,6 +27,7 @@ eth2_ssz = "0.1.2" eth2_ssz_types = { path = "../utils/ssz_types" } merkle_proof = { path = "../utils/merkle_proof" } log = "0.4.8" +safe_arith = { path = "../utils/safe_arith" } tree_hash = "0.1.0" tree_hash_derive = "0.2" types = { path = "../types" } diff --git a/eth2/state_processing/src/common/deposit_data_tree.rs b/eth2/state_processing/src/common/deposit_data_tree.rs index e2d92d56d7..319c437eee 100644 --- a/eth2/state_processing/src/common/deposit_data_tree.rs +++ b/eth2/state_processing/src/common/deposit_data_tree.rs @@ -1,6 +1,7 @@ use eth2_hashing::hash; use int_to_bytes::int_to_bytes32; use merkle_proof::{MerkleTree, MerkleTreeError}; +use 
safe_arith::SafeArith; use types::Hash256; /// Emulates the eth1 deposit contract merkle tree. @@ -46,7 +47,7 @@ impl DepositDataTree { /// Add a deposit to the merkle tree. pub fn push_leaf(&mut self, leaf: Hash256) -> Result<(), MerkleTreeError> { self.tree.push_leaf(leaf, self.depth)?; - self.mix_in_length += 1; + self.mix_in_length.increment()?; Ok(()) } } diff --git a/eth2/state_processing/src/common/get_base_reward.rs b/eth2/state_processing/src/common/get_base_reward.rs index 1fcb5b64eb..ba7e696d60 100644 --- a/eth2/state_processing/src/common/get_base_reward.rs +++ b/eth2/state_processing/src/common/get_base_reward.rs @@ -1,4 +1,5 @@ use integer_sqrt::IntegerSquareRoot; +use safe_arith::SafeArith; use types::*; /// Returns the base reward for some validator. @@ -14,10 +15,10 @@ pub fn get_base_reward( if total_active_balance == 0 { Ok(0) } else { - Ok( - state.get_effective_balance(index, spec)? * spec.base_reward_factor - / total_active_balance.integer_sqrt() - / spec.base_rewards_per_epoch, - ) + Ok(state + .get_effective_balance(index, spec)? + .safe_mul(spec.base_reward_factor)? + .safe_div(total_active_balance.integer_sqrt())? + .safe_div(spec.base_rewards_per_epoch)?) } } diff --git a/eth2/state_processing/src/common/slash_validator.rs b/eth2/state_processing/src/common/slash_validator.rs index 7b1d31061a..9c67315351 100644 --- a/eth2/state_processing/src/common/slash_validator.rs +++ b/eth2/state_processing/src/common/slash_validator.rs @@ -1,4 +1,5 @@ use crate::common::initiate_validator_exit; +use safe_arith::SafeArith; use std::cmp; use types::{BeaconStateError as Error, *}; @@ -27,18 +28,21 @@ pub fn slash_validator( let validator_effective_balance = state.get_effective_balance(slashed_index, spec)?; state.set_slashings( epoch, - state.get_slashings(epoch)? + validator_effective_balance, + state + .get_slashings(epoch)? 
+ .safe_add(validator_effective_balance)?, )?; safe_sub_assign!( state.balances[slashed_index], - validator_effective_balance / spec.min_slashing_penalty_quotient + validator_effective_balance.safe_div(spec.min_slashing_penalty_quotient)? ); // Apply proposer and whistleblower rewards let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); - let whistleblower_reward = validator_effective_balance / spec.whistleblower_reward_quotient; - let proposer_reward = whistleblower_reward / spec.proposer_reward_quotient; + let whistleblower_reward = + validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; + let proposer_reward = whistleblower_reward.safe_div(spec.proposer_reward_quotient)?; safe_add_assign!(state.balances[proposer_index], proposer_reward); safe_add_assign!( diff --git a/eth2/state_processing/src/genesis.rs b/eth2/state_processing/src/genesis.rs index 71c66cba80..9ae9fabdc3 100644 --- a/eth2/state_processing/src/genesis.rs +++ b/eth2/state_processing/src/genesis.rs @@ -1,5 +1,6 @@ use super::per_block_processing::{errors::BlockProcessingError, process_deposit}; use crate::common::DepositDataTree; +use safe_arith::SafeArith; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; use types::*; @@ -14,8 +15,9 @@ pub fn initialize_beacon_state_from_eth1( deposits: Vec, spec: &ChainSpec, ) -> Result, BlockProcessingError> { - let genesis_time = - eth1_timestamp - eth1_timestamp % spec.min_genesis_delay + 2 * spec.min_genesis_delay; + let genesis_time = eth1_timestamp + .safe_sub(eth1_timestamp.safe_rem(spec.min_genesis_delay)?)? 
+ .safe_add(2.safe_mul(spec.min_genesis_delay)?)?; let eth1_data = Eth1Data { // Temporary deposit root deposit_root: Hash256::zero(), @@ -37,7 +39,7 @@ pub fn initialize_beacon_state_from_eth1( process_deposit(&mut state, &deposit, spec, true)?; } - process_activations(&mut state, spec); + process_activations(&mut state, spec)?; // Now that we have our validators, initialize the caches (including the committees) state.build_all_caches(spec)?; @@ -60,11 +62,14 @@ pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSp /// Activate genesis validators, if their balance is acceptable. /// /// Spec v0.11.1 -pub fn process_activations(state: &mut BeaconState, spec: &ChainSpec) { +pub fn process_activations( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { for (index, validator) in state.validators.iter_mut().enumerate() { let balance = state.balances[index]; validator.effective_balance = std::cmp::min( - balance - balance % spec.effective_balance_increment, + balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ); if validator.effective_balance == spec.max_effective_balance { @@ -72,4 +77,5 @@ pub fn process_activations(state: &mut BeaconState, spec: &ChainS validator.activation_epoch = T::genesis_epoch(); } } + Ok(()) } diff --git a/eth2/state_processing/src/lib.rs b/eth2/state_processing/src/lib.rs index 63c5e25500..86dc2294f0 100644 --- a/eth2/state_processing/src/lib.rs +++ b/eth2/state_processing/src/lib.rs @@ -1,3 +1,5 @@ +#![deny(clippy::integer_arithmetic)] + #[macro_use] mod macros; diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 2f3a12da55..8634bb0e12 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -1,6 +1,7 @@ use crate::common::{initiate_validator_exit, slash_validator}; use errors::{BlockOperationError, BlockProcessingError, 
HeaderInvalid, IntoWithIndex}; use rayon::prelude::*; +use safe_arith::{ArithError, SafeArith}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; use std::convert::TryInto; use tree_hash::TreeHash; @@ -239,7 +240,7 @@ pub fn process_eth1_data( state: &mut BeaconState, eth1_data: &Eth1Data, ) -> Result<(), Error> { - if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data) { + if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data)? { state.eth1_data = new_eth1_data; } @@ -248,14 +249,14 @@ pub fn process_eth1_data( Ok(()) } -/// Returns `Some(eth1_data)` if adding the given `eth1_data` to `state.eth1_data_votes` would +/// Returns `Ok(Some(eth1_data))` if adding the given `eth1_data` to `state.eth1_data_votes` would /// result in a change to `state.eth1_data`. /// /// Spec v0.11.1 pub fn get_new_eth1_data( state: &BeaconState, eth1_data: &Eth1Data, -) -> Option { +) -> Result, ArithError> { let num_votes = state .eth1_data_votes .iter() @@ -263,10 +264,10 @@ pub fn get_new_eth1_data( .count(); // The +1 is to account for the `eth1_data` supplied to the function. - if 2 * (num_votes + 1) > T::SlotsPerEth1VotingPeriod::to_usize() { - Some(eth1_data.clone()) + if num_votes.safe_add(1)?.safe_mul(2)? > T::SlotsPerEth1VotingPeriod::to_usize() { + Ok(Some(eth1_data.clone())) } else { - None + Ok(None) } } @@ -318,7 +319,8 @@ pub fn process_attester_slashings( ) -> Result<(), BlockProcessingError> { // Verify the `IndexedAttestation`s in parallel (these are the resource-consuming objects, not // the `AttesterSlashing`s themselves). 
- let mut indexed_attestations: Vec<&_> = Vec::with_capacity(attester_slashings.len() * 2); + let mut indexed_attestations: Vec<&_> = + Vec::with_capacity(attester_slashings.len().safe_mul(2)?); for attester_slashing in attester_slashings { indexed_attestations.push(&attester_slashing.attestation_1); indexed_attestations.push(&attester_slashing.attestation_2); @@ -432,8 +434,13 @@ pub fn process_deposits( .par_iter() .enumerate() .try_for_each(|(i, deposit)| { - verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index + i as u64, spec) - .map_err(|e| e.into_with_index(i)) + verify_deposit_merkle_proof( + state, + deposit, + state.eth1_deposit_index.safe_add(i as u64)?, + spec, + ) + .map_err(|e| e.into_with_index(i)) })?; // Update the state in series. @@ -459,7 +466,7 @@ pub fn process_deposit( .map_err(|e| e.into_with_index(deposit_index))?; } - state.eth1_deposit_index += 1; + state.eth1_deposit_index.increment()?; // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the // depositing validator already exists in the registry. 
@@ -495,7 +502,7 @@ pub fn process_deposit( exit_epoch: spec.far_future_epoch, withdrawable_epoch: spec.far_future_epoch, effective_balance: std::cmp::min( - amount - amount % spec.effective_balance_increment, + amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ), slashed: false, diff --git a/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs b/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs index d29f49d8bc..13f6d099d2 100644 --- a/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -1,7 +1,9 @@ +#![allow(clippy::integer_arithmetic)] + use super::signature_sets::{Error as SignatureSetError, Result as SignatureSetResult, *}; use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; -use bls::{verify_signature_sets, SignatureSet}; +use bls::{verify_signature_sets, PublicKey, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ @@ -9,8 +11,6 @@ use types::{ SignedBeaconBlock, }; -pub use bls::G1Point; - pub type Result = std::result::Result; #[derive(Debug, PartialEq)] @@ -51,18 +51,18 @@ impl From> for Error { pub struct BlockSignatureVerifier<'a, T, F> where T: EthSpec, - F: Fn(usize) -> Option> + Clone, + F: Fn(usize) -> Option> + Clone, { get_pubkey: F, state: &'a BeaconState, spec: &'a ChainSpec, - sets: Vec>, + sets: Vec, } impl<'a, T, F> BlockSignatureVerifier<'a, T, F> where T: EthSpec, - F: Fn(usize) -> Option> + Clone, + F: Fn(usize) -> Option> + Clone, { /// Create a new verifier without any included signatures. 
See the `include...` functions to /// add signatures, and the `verify` @@ -114,7 +114,7 @@ where .sets .into_par_iter() .chunks(num_chunks) - .map(|chunk| verify_signature_sets(chunk.into_iter())) + .map(|chunk| verify_signature_sets(chunk)) .reduce(|| true, |current, this| current && this); if result { diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index efe7691b4f..514699ee69 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -1,9 +1,10 @@ use super::signature_sets::Error as SignatureSetError; use merkle_proof::MerkleTreeError; +use safe_arith::ArithError; use types::*; /// The error returned from the `per_block_processing` function. Indicates that a block is either -/// invalid, or we were unable to determine it's validity (we encountered an unexpected error). +/// invalid, or we were unable to determine its validity (we encountered an unexpected error). /// /// Any of the `...Error` variants indicate that at some point during block (and block operation) /// verification, there was an error. 
There is no indication as to _where_ that error happened @@ -48,6 +49,7 @@ pub enum BlockProcessingError { SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), MerkleTreeError(MerkleTreeError), + ArithError(ArithError), } impl From for BlockProcessingError { @@ -68,6 +70,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(e: ArithError) -> Self { + BlockProcessingError::ArithError(e) + } +} + impl From> for BlockProcessingError { fn from(e: BlockOperationError) -> BlockProcessingError { match e { @@ -75,6 +83,7 @@ impl From> for BlockProcessingError { BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e), + BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e), } } } @@ -101,6 +110,7 @@ macro_rules! impl_into_block_processing_error_with_index { BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e), BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e), + BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e), } } } @@ -130,6 +140,7 @@ pub enum BlockOperationError { BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), + ArithError(ArithError), } impl BlockOperationError { @@ -155,6 +166,12 @@ impl From for BlockOperationError { } } +impl From for BlockOperationError { + fn from(e: ArithError) -> Self { + BlockOperationError::ArithError(e) + } +} + #[derive(Debug, PartialEq, Clone)] pub enum HeaderInvalid { ProposalSignatureInvalid, @@ -267,6 +284,7 @@ impl From> BlockOperationError::BeaconStateError(e) => BlockOperationError::BeaconStateError(e), 
BlockOperationError::SignatureSetError(e) => BlockOperationError::SignatureSetError(e), BlockOperationError::SszTypesError(e) => BlockOperationError::SszTypesError(e), + BlockOperationError::ArithError(e) => BlockOperationError::ArithError(e), } } } diff --git a/eth2/state_processing/src/per_block_processing/signature_sets.rs b/eth2/state_processing/src/per_block_processing/signature_sets.rs index 8a58cebd5d..20dba7de3d 100644 --- a/eth2/state_processing/src/per_block_processing/signature_sets.rs +++ b/eth2/state_processing/src/per_block_processing/signature_sets.rs @@ -2,7 +2,7 @@ //! validated individually, or alongside in others in a potentially cheaper bulk operation. //! //! This module exposes one function to extract each type of `SignatureSet` from a `BeaconBlock`. -use bls::{G1Point, G1Ref, SignatureSet, SignedMessage}; +use bls::SignatureSet; use ssz::DecodeError; use std::borrow::Cow; use std::convert::TryInto; @@ -44,7 +44,7 @@ impl From for Error { pub fn get_pubkey_from_state<'a, T>( state: &'a BeaconState, validator_index: usize, -) -> Option> +) -> Option> where T: EthSpec, { @@ -55,7 +55,7 @@ where let pk: Option = (&v.pubkey).try_into().ok(); pk }) - .map(|pk| Cow::Owned(pk.into_point())) + .map(Cow::Owned) } /// A signature set that is valid if a block was signed by the expected block producer. 
@@ -65,10 +65,10 @@ pub fn block_proposal_signature_set<'a, T, F>( signed_block: &'a SignedBeaconBlock, block_root: Option, spec: &'a ChainSpec, -) -> Result> +) -> Result where T: EthSpec, - F: Fn(usize) -> Option>, + F: Fn(usize) -> Option>, { let block = &signed_block.message; let proposer_index = state.get_beacon_proposer_index(block.slot, spec)?; @@ -103,10 +103,10 @@ pub fn randao_signature_set<'a, T, F>( get_pubkey: F, block: &'a BeaconBlock, spec: &'a ChainSpec, -) -> Result> +) -> Result where T: EthSpec, - F: Fn(usize) -> Option>, + F: Fn(usize) -> Option>, { let proposer_index = state.get_beacon_proposer_index(block.slot, spec)?; @@ -132,10 +132,10 @@ pub fn proposer_slashing_signature_set<'a, T, F>( get_pubkey: F, proposer_slashing: &'a ProposerSlashing, spec: &'a ChainSpec, -) -> Result<(SignatureSet<'a>, SignatureSet<'a>)> +) -> Result<(SignatureSet, SignatureSet)> where T: EthSpec, - F: Fn(usize) -> Option>, + F: Fn(usize) -> Option>, { let proposer_index = proposer_slashing.signed_header_1.message.proposer_index as usize; @@ -161,9 +161,9 @@ where fn block_header_signature_set<'a, T: EthSpec>( state: &'a BeaconState, signed_header: &'a SignedBeaconBlockHeader, - pubkey: Cow<'a, G1Point>, + pubkey: Cow<'a, PublicKey>, spec: &'a ChainSpec, -) -> Result> { +) -> Result { let domain = spec.get_domain( signed_header.message.slot.epoch(T::slots_per_epoch()), Domain::BeaconProposer, @@ -191,10 +191,10 @@ pub fn indexed_attestation_signature_set<'a, 'b, T, F>( signature: &'a AggregateSignature, indexed_attestation: &'b IndexedAttestation, spec: &'a ChainSpec, -) -> Result> +) -> Result where T: EthSpec, - F: Fn(usize) -> Option>, + F: Fn(usize) -> Option>, { let pubkeys = indexed_attestation .attesting_indices @@ -213,9 +213,9 @@ where ); let message = indexed_attestation.data.signing_root(domain); - let signed_message = SignedMessage::new(pubkeys, message.as_bytes().to_vec()); + let message = message.as_bytes().to_vec(); - Ok(SignatureSet::new(signature, 
vec![signed_message])) + Ok(SignatureSet::new(signature, pubkeys, message)) } /// Returns the signature set for the given `indexed_attestation` but pubkeys are supplied directly @@ -227,10 +227,10 @@ pub fn indexed_attestation_signature_set_from_pubkeys<'a, 'b, T, F>( fork: &Fork, genesis_validators_root: Hash256, spec: &'a ChainSpec, -) -> Result> +) -> Result where T: EthSpec, - F: Fn(usize) -> Option>, + F: Fn(usize) -> Option>, { let pubkeys = indexed_attestation .attesting_indices @@ -249,9 +249,9 @@ where ); let message = indexed_attestation.data.signing_root(domain); - let signed_message = SignedMessage::new(pubkeys, message.as_bytes().to_vec()); + let message = message.as_bytes().to_vec(); - Ok(SignatureSet::new(signature, vec![signed_message])) + Ok(SignatureSet::new(signature, pubkeys, message)) } /// Returns the signature set for the given `attester_slashing` and corresponding `pubkeys`. @@ -260,10 +260,10 @@ pub fn attester_slashing_signature_sets<'a, T, F>( get_pubkey: F, attester_slashing: &'a AttesterSlashing, spec: &'a ChainSpec, -) -> Result<(SignatureSet<'a>, SignatureSet<'a>)> +) -> Result<(SignatureSet, SignatureSet)> where T: EthSpec, - F: Fn(usize) -> Option> + Clone, + F: Fn(usize) -> Option> + Clone, { Ok(( indexed_attestation_signature_set( @@ -305,12 +305,12 @@ pub fn deposit_pubkey_signature_message( /// `deposit_pubkey_signature_message`. pub fn deposit_signature_set<'a>( pubkey_signature_message: &'a (PublicKey, Signature, Vec), -) -> SignatureSet<'a> { +) -> SignatureSet { let (pubkey, signature, message) = pubkey_signature_message; // Note: Deposits are valid across forks, thus the deposit domain is computed - // with the fork zeroed. - SignatureSet::single(signature, pubkey.g1_ref(), message.clone()) + // with the fork zeroed. 
+ SignatureSet::single(&signature, Cow::Borrowed(pubkey), message.clone()) } /// Returns a signature set that is valid if the `SignedVoluntaryExit` was signed by the indicated @@ -320,10 +320,10 @@ pub fn exit_signature_set<'a, T, F>( get_pubkey: F, signed_exit: &'a SignedVoluntaryExit, spec: &'a ChainSpec, -) -> Result> +) -> Result where T: EthSpec, - F: Fn(usize) -> Option>, + F: Fn(usize) -> Option>, { let exit = &signed_exit.message; let proposer_index = exit.validator_index as usize; diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index cd92ee895f..33478399bb 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -3,6 +3,7 @@ use crate::per_block_processing::signature_sets::{ deposit_pubkey_signature_message, deposit_signature_set, }; use merkle_proof::verify_merkle_proof; +use safe_arith::SafeArith; use tree_hash::TreeHash; use types::*; @@ -59,7 +60,7 @@ pub fn verify_deposit_merkle_proof( verify_merkle_proof( leaf, &deposit.proof[..], - spec.deposit_contract_tree_depth as usize + 1, + spec.deposit_contract_tree_depth.safe_add(1)? 
as usize, deposit_index as usize, state.eth1_data.deposit_root, ), diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index cc995a8ac0..99ac78a772 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,4 +1,5 @@ use errors::EpochProcessingError as Error; +use safe_arith::SafeArith; use tree_hash::TreeHash; use types::*; @@ -90,7 +91,11 @@ pub fn process_justification_and_finalization( state.previous_justified_checkpoint = state.current_justified_checkpoint.clone(); state.justification_bits.shift_up(1)?; - if total_balances.previous_epoch_target_attesters() * 3 >= total_balances.current_epoch() * 2 { + if total_balances + .previous_epoch_target_attesters() + .safe_mul(3)? + >= total_balances.current_epoch().safe_mul(2)? + { state.current_justified_checkpoint = Checkpoint { epoch: previous_epoch, root: *state.get_block_root_at_epoch(previous_epoch)?, @@ -98,7 +103,11 @@ pub fn process_justification_and_finalization( state.justification_bits.set(1, true)?; } // If the current epoch gets justified, fill the last bit. - if total_balances.current_epoch_target_attesters() * 3 >= total_balances.current_epoch() * 2 { + if total_balances + .current_epoch_target_attesters() + .safe_mul(3)? + >= total_balances.current_epoch().safe_mul(2)? + { state.current_justified_checkpoint = Checkpoint { epoch: current_epoch, root: *state.get_block_root_at_epoch(current_epoch)?, @@ -152,17 +161,19 @@ pub fn process_final_updates( } // Update effective balances with hysteresis (lag). 
- let hysteresis_increment = spec.effective_balance_increment / spec.hysteresis_quotient; - let downward_threshold = hysteresis_increment * spec.hysteresis_downward_multiplier; - let upward_threshold = hysteresis_increment * spec.hysteresis_upward_multiplier; + let hysteresis_increment = spec + .effective_balance_increment + .safe_div(spec.hysteresis_quotient)?; + let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; + let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; for (index, validator) in state.validators.iter_mut().enumerate() { let balance = state.balances[index]; - if balance + downward_threshold < validator.effective_balance - || validator.effective_balance + upward_threshold < balance + if balance.safe_add(downward_threshold)? < validator.effective_balance + || validator.effective_balance.safe_add(upward_threshold)? < balance { validator.effective_balance = std::cmp::min( - balance - balance % spec.effective_balance_increment, + balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ); } @@ -175,7 +186,11 @@ pub fn process_final_updates( state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?; // Set historical root accumulator - if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / T::slots_per_epoch()) == 0 { + if next_epoch + .as_u64() + .safe_rem(T::SlotsPerHistoricalRoot::to_u64().safe_div(T::slots_per_epoch())?)? 
+ == 0 + { let historical_batch = state.historical_batch(); state .historical_roots diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index 1f79680fa1..4fad653776 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -1,6 +1,7 @@ use super::super::common::get_base_reward; use super::validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; use super::Error; +use safe_arith::SafeArith; use types::*; @@ -13,21 +14,21 @@ pub struct Delta { impl Delta { /// Reward the validator with the `reward`. - pub fn reward(&mut self, reward: u64) { - self.rewards += reward; + pub fn reward(&mut self, reward: u64) -> Result<(), Error> { + self.rewards = self.rewards.safe_add(reward)?; + Ok(()) } /// Penalize the validator with the `penalty`. - pub fn penalize(&mut self, penalty: u64) { - self.penalties += penalty; + pub fn penalize(&mut self, penalty: u64) -> Result<(), Error> { + self.penalties = self.penalties.safe_add(penalty)?; + Ok(()) } -} -impl std::ops::AddAssign for Delta { - /// Use wrapping addition as that is how it's defined in the spec. - fn add_assign(&mut self, other: Delta) { - self.rewards += other.rewards; - self.penalties += other.penalties; + /// Combine two deltas. + fn combine(&mut self, other: Delta) -> Result<(), Error> { + self.reward(other.rewards)?; + self.penalize(other.penalties) } } @@ -56,9 +57,10 @@ pub fn process_rewards_and_penalties( get_proposer_deltas(&mut deltas, state, validator_statuses, spec)?; - // Apply the deltas, over-flowing but not under-flowing (saturating at 0 instead). + // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 + // instead). 
for (i, delta) in deltas.iter().enumerate() { - state.balances[i] += delta.rewards; + state.balances[i] = state.balances[i].safe_add(delta.rewards)?; state.balances[i] = state.balances[i].saturating_sub(delta.penalties); } @@ -91,7 +93,8 @@ fn get_proposer_deltas( return Err(Error::ValidatorStatusesInconsistent); } - deltas[inclusion.proposer_index].reward(base_reward / spec.proposer_reward_quotient); + deltas[inclusion.proposer_index] + .reward(base_reward.safe_div(spec.proposer_reward_quotient)?)?; } } @@ -123,9 +126,9 @@ fn get_attestation_deltas( base_reward, finality_delay, spec, - ); + )?; - deltas[index] += delta; + deltas[index].combine(delta)?; } Ok(()) @@ -140,7 +143,7 @@ fn get_attestation_delta( base_reward: u64, finality_delay: u64, spec: &ChainSpec, -) -> Delta { +) -> Result { let mut delta = Delta::default(); // Is this validator eligible to be rewarded or penalized? @@ -149,7 +152,7 @@ fn get_attestation_delta( || (validator.is_slashed && !validator.is_withdrawable_in_current_epoch); if !is_eligible { - return delta; + return Ok(delta); } // Handle integer overflow by dividing these quantities by EFFECTIVE_BALANCE_INCREMENT @@ -157,59 +160,78 @@ fn get_attestation_delta( // - increment = EFFECTIVE_BALANCE_INCREMENT // - reward_numerator = get_base_reward(state, index) * (attesting_balance // increment) // - rewards[index] = reward_numerator // (total_balance // increment) - let total_balance_ebi = total_balances.current_epoch() / spec.effective_balance_increment; - let total_attesting_balance_ebi = - total_balances.previous_epoch_attesters() / spec.effective_balance_increment; - let matching_target_balance_ebi = - total_balances.previous_epoch_target_attesters() / spec.effective_balance_increment; - let matching_head_balance_ebi = - total_balances.previous_epoch_head_attesters() / spec.effective_balance_increment; + let total_balance_ebi = total_balances + .current_epoch() + .safe_div(spec.effective_balance_increment)?; + let 
total_attesting_balance_ebi = total_balances + .previous_epoch_attesters() + .safe_div(spec.effective_balance_increment)?; + let matching_target_balance_ebi = total_balances + .previous_epoch_target_attesters() + .safe_div(spec.effective_balance_increment)?; + let matching_head_balance_ebi = total_balances + .previous_epoch_head_attesters() + .safe_div(spec.effective_balance_increment)?; // Expected FFG source. // Spec: // - validator index in `get_unslashed_attesting_indices(state, matching_source_attestations)` if validator.is_previous_epoch_attester && !validator.is_slashed { - delta.reward(base_reward * total_attesting_balance_ebi / total_balance_ebi); + delta.reward( + base_reward + .safe_mul(total_attesting_balance_ebi)? + .safe_div(total_balance_ebi)?, + )?; // Inclusion speed bonus - let proposer_reward = base_reward / spec.proposer_reward_quotient; - let max_attester_reward = base_reward - proposer_reward; + let proposer_reward = base_reward.safe_div(spec.proposer_reward_quotient)?; + let max_attester_reward = base_reward.safe_sub(proposer_reward)?; let inclusion = validator .inclusion_info .expect("It is a logic error for an attester not to have an inclusion delay."); - delta.reward(max_attester_reward / inclusion.delay); + delta.reward(max_attester_reward.safe_div(inclusion.delay)?)?; } else { - delta.penalize(base_reward); + delta.penalize(base_reward)?; } // Expected FFG target. // Spec: // - validator index in `get_unslashed_attesting_indices(state, matching_target_attestations)` if validator.is_previous_epoch_target_attester && !validator.is_slashed { - delta.reward(base_reward * matching_target_balance_ebi / total_balance_ebi); + delta.reward( + base_reward + .safe_mul(matching_target_balance_ebi)? + .safe_div(total_balance_ebi)?, + )?; } else { - delta.penalize(base_reward); + delta.penalize(base_reward)?; } // Expected head. 
// Spec: // - validator index in `get_unslashed_attesting_indices(state, matching_head_attestations)` if validator.is_previous_epoch_head_attester && !validator.is_slashed { - delta.reward(base_reward * matching_head_balance_ebi / total_balance_ebi); + delta.reward( + base_reward + .safe_mul(matching_head_balance_ebi)? + .safe_div(total_balance_ebi)?, + )?; } else { - delta.penalize(base_reward); + delta.penalize(base_reward)?; } // Inactivity penalty if finality_delay > spec.min_epochs_to_inactivity_penalty { // All eligible validators are penalized - delta.penalize(spec.base_rewards_per_epoch * base_reward); + delta.penalize(spec.base_rewards_per_epoch.safe_mul(base_reward)?)?; // Additionally, all validators whose FFG target didn't match are penalized extra if !validator.is_previous_epoch_target_attester { delta.penalize( - validator.current_epoch_effective_balance * finality_delay - / spec.inactivity_penalty_quotient, - ); + validator + .current_epoch_effective_balance + .safe_mul(finality_delay)? + .safe_div(spec.inactivity_penalty_quotient)?, + )?; } } @@ -218,5 +240,5 @@ fn get_attestation_delta( // This function only computes the delta for a single validator, so it cannot also return a // delta for a validator. 
- delta + Ok(delta) } diff --git a/eth2/state_processing/src/per_epoch_processing/errors.rs b/eth2/state_processing/src/per_epoch_processing/errors.rs index 98e012e906..245935c1d7 100644 --- a/eth2/state_processing/src/per_epoch_processing/errors.rs +++ b/eth2/state_processing/src/per_epoch_processing/errors.rs @@ -18,6 +18,7 @@ pub enum EpochProcessingError { BeaconStateError(BeaconStateError), InclusionError(InclusionError), SszTypesError(ssz_types::Error), + ArithError(safe_arith::ArithError), } impl From for EpochProcessingError { @@ -38,6 +39,12 @@ impl From for EpochProcessingError { } } +impl From for EpochProcessingError { + fn from(e: safe_arith::ArithError) -> EpochProcessingError { + EpochProcessingError::ArithError(e) + } +} + #[derive(Debug, PartialEq)] pub enum InclusionError { /// The validator did not participate in an attestation in this period. diff --git a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs index 4bf24468f2..c291d3f09c 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs @@ -1,3 +1,4 @@ +use safe_arith::SafeArith; use types::{BeaconStateError as Error, *}; /// Process slashings. @@ -13,12 +14,17 @@ pub fn process_slashings( for (index, validator) in state.validators.iter().enumerate() { if validator.slashed - && epoch + T::EpochsPerSlashingsVector::to_u64() / 2 == validator.withdrawable_epoch + && epoch + T::EpochsPerSlashingsVector::to_u64().safe_div(2)? + == validator.withdrawable_epoch { let increment = spec.effective_balance_increment; - let penalty_numerator = validator.effective_balance / increment - * std::cmp::min(sum_slashings * 3, total_balance); - let penalty = penalty_numerator / total_balance * increment; + let penalty_numerator = validator + .effective_balance + .safe_div(increment)? 
+ .safe_mul(std::cmp::min(sum_slashings.safe_mul(3)?, total_balance))?; + let penalty = penalty_numerator + .safe_div(total_balance)? + .safe_mul(increment)?; safe_sub_assign!(state.balances[index], penalty); } diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index 8877ca8157..269e366adf 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -1,4 +1,5 @@ use crate::common::get_attesting_indices; +use safe_arith::SafeArith; use types::*; /// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self` @@ -198,12 +199,16 @@ impl ValidatorStatuses { if validator.is_active_at(state.current_epoch()) { status.is_active_in_current_epoch = true; - total_balances.current_epoch += effective_balance; + total_balances + .current_epoch + .safe_add_assign(effective_balance)?; } if validator.is_active_at(state.previous_epoch()) { status.is_active_in_previous_epoch = true; - total_balances.previous_epoch += effective_balance; + total_balances + .previous_epoch + .safe_add_assign(effective_balance)?; } statuses.push(status); @@ -275,19 +280,29 @@ impl ValidatorStatuses { let validator_balance = state.get_effective_balance(index, spec)?; if v.is_current_epoch_attester { - self.total_balances.current_epoch_attesters += validator_balance; + self.total_balances + .current_epoch_attesters + .safe_add_assign(validator_balance)?; } if v.is_current_epoch_target_attester { - self.total_balances.current_epoch_target_attesters += validator_balance; + self.total_balances + .current_epoch_target_attesters + .safe_add_assign(validator_balance)?; } if v.is_previous_epoch_attester { - self.total_balances.previous_epoch_attesters += validator_balance; + self.total_balances + .previous_epoch_attesters + .safe_add_assign(validator_balance)?; } if 
v.is_previous_epoch_target_attester { - self.total_balances.previous_epoch_target_attesters += validator_balance; + self.total_balances + .previous_epoch_target_attesters + .safe_add_assign(validator_balance)?; } if v.is_previous_epoch_head_attester { - self.total_balances.previous_epoch_head_attesters += validator_balance; + self.total_balances + .previous_epoch_head_attesters + .safe_add_assign(validator_balance)?; } } } diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index fdd97736f4..48c98a67f0 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -23,6 +23,7 @@ log = "0.4.8" merkle_proof = { path = "../utils/merkle_proof" } rayon = "1.2.0" rand = "0.7.2" +safe_arith = { path = "../utils/safe_arith" } serde = "1.0.102" serde_derive = "1.0.102" slog = "2.5.2" diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 8fb38c837a..34c11f26f8 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -3,6 +3,7 @@ use super::{ Signature, SignedRoot, SubnetId, }; use crate::{test_utils::TestRandom, Hash256}; +use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -13,6 +14,7 @@ use tree_hash_derive::TreeHash; pub enum Error { SszTypesError(ssz_types::Error), AlreadySigned(usize), + SubnetCountIsZero(ArithError), } /// Details an attestation that can be slashable. @@ -86,8 +88,12 @@ impl Attestation { /// /// Note, this will return the subnet id for an aggregated attestation. This is done /// to avoid checking aggregate bits every time we wish to get an id. 
- pub fn subnet_id(&self) -> SubnetId { - SubnetId::new(self.data.index % T::default_spec().attestation_subnet_count) + pub fn subnet_id(&self, spec: &ChainSpec) -> Result { + self.data + .index + .safe_rem(spec.attestation_subnet_count) + .map(SubnetId::new) + .map_err(Error::SubnetCountIsZero) } } diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index a9589b4a0f..4f980a0191 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -7,11 +7,13 @@ use compare_fields_derive::CompareFields; use eth2_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use pubkey_cache::PubkeyCache; +use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use ssz::ssz_encode; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; use std::convert::TryInto; +use std::fmt; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -76,6 +78,12 @@ pub enum Error { deposit_count: u64, deposit_index: u64, }, + /// An arithmetic operation occurred which would have overflowed or divided by 0. + /// + /// This represents a serious bug in either the spec or Lighthouse! + ArithError(ArithError), + MissingBeaconBlock(SignedBeaconBlockHash), + MissingBeaconState(BeaconStateHash), } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. 
@@ -94,6 +102,33 @@ impl AllowNextEpoch { } } +#[derive(PartialEq, Eq, Hash, Clone, Copy)] +pub struct BeaconStateHash(Hash256); + +impl fmt::Debug for BeaconStateHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "BeaconStateHash({:?})", self.0) + } +} + +impl fmt::Display for BeaconStateHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for BeaconStateHash { + fn from(hash: Hash256) -> BeaconStateHash { + BeaconStateHash(hash) + } +} + +impl From for Hash256 { + fn from(beacon_state_hash: BeaconStateHash) -> Hash256 { + beacon_state_hash.0 + } +} + /// The state of the `BeaconChain` at some slot. /// /// Spec v0.11.1 @@ -413,7 +448,7 @@ impl BeaconState { let mut i = 0; loop { let candidate_index = indices[compute_shuffled_index( - i % indices.len(), + i.safe_rem(indices.len())?, indices.len(), seed, spec.shuffle_round_count, @@ -421,17 +456,19 @@ impl BeaconState { .ok_or(Error::UnableToShuffle)?]; let random_byte = { let mut preimage = seed.to_vec(); - preimage.append(&mut int_to_bytes8((i / 32) as u64)); + preimage.append(&mut int_to_bytes8(i.safe_div(32)? as u64)); let hash = hash(&preimage); - hash[i % 32] + hash[i.safe_rem(32)?] }; let effective_balance = self.validators[candidate_index].effective_balance; - if effective_balance * MAX_RANDOM_BYTE - >= spec.max_effective_balance * u64::from(random_byte) + if effective_balance.safe_mul(MAX_RANDOM_BYTE)? + >= spec + .max_effective_balance + .safe_mul(u64::from(random_byte))? 
{ return Ok(candidate_index); } - i += 1; + i.increment()?; } } @@ -448,7 +485,7 @@ impl BeaconState { let committee = self.get_beacon_committee(slot, index)?; let modulo = std::cmp::max( 1, - committee.committee.len() as u64 / spec.target_aggregators_per_committee, + (committee.committee.len() as u64).safe_div(spec.target_aggregators_per_committee)?, ); let signature_hash = hash(&slot_signature.as_bytes()); let signature_hash_int = u64::from_le_bytes( @@ -456,7 +493,8 @@ impl BeaconState { .try_into() .expect("first 8 bytes of signature should always convert to fixed array"), ); - Ok(signature_hash_int % modulo == 0) + + Ok(signature_hash_int.safe_rem(modulo)? == 0) } /// Returns the beacon proposer index for the `slot` in the given `relative_epoch`. @@ -502,8 +540,8 @@ impl BeaconState { /// /// Spec v0.11.1 fn get_latest_block_roots_index(&self, slot: Slot) -> Result { - if (slot < self.slot) && (self.slot <= slot + self.block_roots.len() as u64) { - Ok(slot.as_usize() % self.block_roots.len()) + if slot < self.slot && self.slot <= slot + self.block_roots.len() as u64 { + Ok(slot.as_usize().safe_rem(self.block_roots.len())?) } else { Err(BeaconStateError::SlotOutOfBounds) } @@ -555,7 +593,7 @@ impl BeaconState { let len = T::EpochsPerHistoricalVector::to_u64(); if current_epoch < epoch + len && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { - Ok(epoch.as_usize() % len as usize) + Ok(epoch.as_usize().safe_rem(len as usize)?) 
} else { Err(Error::EpochOutOfBounds) } @@ -569,7 +607,9 @@ impl BeaconState { /// /// Spec v0.11.1 pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> { - let i = epoch.as_usize() % T::EpochsPerHistoricalVector::to_usize(); + let i = epoch + .as_usize() + .safe_rem(T::EpochsPerHistoricalVector::to_usize())?; let signature_hash = Hash256::from_slice(&hash(&ssz_encode(signature))); @@ -599,8 +639,8 @@ impl BeaconState { /// /// Spec v0.11.1 fn get_latest_state_roots_index(&self, slot: Slot) -> Result { - if (slot < self.slot) && (self.slot <= slot + Slot::from(self.state_roots.len())) { - Ok(slot.as_usize() % self.state_roots.len()) + if slot < self.slot && self.slot <= slot + self.state_roots.len() as u64 { + Ok(slot.as_usize().safe_rem(self.state_roots.len())?) } else { Err(BeaconStateError::SlotOutOfBounds) } @@ -631,6 +671,14 @@ impl BeaconState { Ok(&self.block_roots[i]) } + pub fn get_block_state_roots( + &self, + slot: Slot, + ) -> Result<(SignedBeaconBlockHash, BeaconStateHash), Error> { + let i = self.get_latest_block_roots_index(slot)?; + Ok((self.block_roots[i].into(), self.state_roots[i].into())) + } + /// Sets the latest state root for slot. /// /// Spec v0.11.1 @@ -654,7 +702,9 @@ impl BeaconState { if current_epoch < epoch + T::EpochsPerSlashingsVector::to_u64() && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { - Ok(epoch.as_usize() % T::EpochsPerSlashingsVector::to_usize()) + Ok(epoch + .as_usize() + .safe_rem(T::EpochsPerSlashingsVector::to_usize())?) } else { Err(Error::EpochOutOfBounds) } @@ -713,20 +763,20 @@ impl BeaconState { // == 0`. let mix = { let i = epoch + T::EpochsPerHistoricalVector::to_u64() - spec.min_seed_lookahead - 1; - self.randao_mixes[i.as_usize() % self.randao_mixes.len()] + self.randao_mixes[i.as_usize().safe_rem(self.randao_mixes.len())?] 
}; let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type)); let epoch_bytes = int_to_bytes8(epoch.as_u64()); const NUM_DOMAIN_BYTES: usize = 4; const NUM_EPOCH_BYTES: usize = 8; + const MIX_OFFSET: usize = NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES; const NUM_MIX_BYTES: usize = 32; let mut preimage = [0; NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES + NUM_MIX_BYTES]; preimage[0..NUM_DOMAIN_BYTES].copy_from_slice(&domain_bytes); - preimage[NUM_DOMAIN_BYTES..NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES] - .copy_from_slice(&epoch_bytes); - preimage[NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES..].copy_from_slice(mix.as_bytes()); + preimage[NUM_DOMAIN_BYTES..MIX_OFFSET].copy_from_slice(&epoch_bytes); + preimage[MIX_OFFSET..].copy_from_slice(mix.as_bytes()); Ok(Hash256::from_slice(&hash(&preimage))) } @@ -760,9 +810,10 @@ impl BeaconState { pub fn get_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(std::cmp::max( spec.min_per_epoch_churn_limit, - self.committee_cache(RelativeEpoch::Current)? - .active_validator_count() as u64 - / spec.churn_limit_quotient, + (self + .committee_cache(RelativeEpoch::Current)? 
+ .active_validator_count() as u64) + .safe_div(spec.churn_limit_quotient)?, )) } @@ -792,7 +843,7 @@ impl BeaconState { ) -> Result { validator_indices.iter().try_fold(0_u64, |acc, i| { self.get_effective_balance(*i, spec) - .and_then(|bal| Ok(bal + acc)) + .and_then(|bal| Ok(acc.safe_add(bal)?)) }) } @@ -1098,3 +1149,9 @@ impl From for Error { Error::TreeHashError(e) } } + +impl From for Error { + fn from(e: ArithError) -> Error { + Error::ArithError(e) + } +} diff --git a/eth2/types/src/beacon_state/committee_cache.rs b/eth2/types/src/beacon_state/committee_cache.rs index 5783bdbc4c..d5d89d3112 100644 --- a/eth2/types/src/beacon_state/committee_cache.rs +++ b/eth2/types/src/beacon_state/committee_cache.rs @@ -1,3 +1,5 @@ +#![allow(clippy::integer_arithmetic)] + use super::BeaconState; use crate::*; use core::num::NonZeroUsize; @@ -43,7 +45,7 @@ impl CommitteeCache { } let committees_per_slot = - T::get_committee_count_per_slot(active_validator_indices.len(), spec) as u64; + T::get_committee_count_per_slot(active_validator_indices.len(), spec)? as u64; let seed = state.get_seed(epoch, Domain::BeaconAttester, spec)?; @@ -56,7 +58,7 @@ impl CommitteeCache { .ok_or_else(|| Error::UnableToShuffle)?; // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. - if state.validators.len() > usize::max_value() - 1 { + if state.validators.len() == usize::max_value() { return Err(Error::TooManyValidators); } diff --git a/eth2/types/src/beacon_state/exit_cache.rs b/eth2/types/src/beacon_state/exit_cache.rs index 4908194fab..aff05baaf7 100644 --- a/eth2/types/src/beacon_state/exit_cache.rs +++ b/eth2/types/src/beacon_state/exit_cache.rs @@ -1,4 +1,5 @@ use super::{BeaconStateError, ChainSpec, Epoch, Validator}; +use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use std::collections::HashMap; @@ -50,7 +51,10 @@ impl ExitCache { /// Must only be called once per exiting validator. 
pub fn record_validator_exit(&mut self, exit_epoch: Epoch) -> Result<(), BeaconStateError> { self.check_initialized()?; - *self.exits_per_epoch.entry(exit_epoch).or_insert(0) += 1; + self.exits_per_epoch + .entry(exit_epoch) + .or_insert(0) + .increment()?; Ok(()) } diff --git a/eth2/types/src/beacon_state/pubkey_cache.rs b/eth2/types/src/beacon_state/pubkey_cache.rs index 0063758a25..a4a38ebc47 100644 --- a/eth2/types/src/beacon_state/pubkey_cache.rs +++ b/eth2/types/src/beacon_state/pubkey_cache.rs @@ -23,6 +23,7 @@ impl PubkeyCache { /// /// The added index must equal the number of validators already added to the map. This ensures /// that an index is never skipped. + #[allow(clippy::integer_arithmetic)] pub fn insert(&mut self, pubkey: PublicKeyBytes, index: ValidatorIndex) -> bool { if index == self.len { self.map.insert(pubkey, index); diff --git a/eth2/types/src/beacon_state/tree_hash_cache.rs b/eth2/types/src/beacon_state/tree_hash_cache.rs index 75b14e8eef..d77987acd1 100644 --- a/eth2/types/src/beacon_state/tree_hash_cache.rs +++ b/eth2/types/src/beacon_state/tree_hash_cache.rs @@ -1,3 +1,5 @@ +#![allow(clippy::integer_arithmetic)] + use super::Error; use crate::{BeaconState, EthSpec, Hash256, Unsigned, Validator}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; @@ -195,7 +197,7 @@ impl ValidatorsListTreeHashCache { /// This function makes assumptions that the `validators` list will only change in accordance /// with valid per-block/per-slot state transitions. 
fn recalculate_tree_hash_root(&mut self, validators: &[Validator]) -> Result { - let mut list_arena = std::mem::replace(&mut self.list_arena, CacheArena::default()); + let mut list_arena = std::mem::take(&mut self.list_arena); let leaves = self .values diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 6c4401e3b0..40cda04639 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -270,10 +270,10 @@ impl ChainSpec { /* * Gwei values */ - min_deposit_amount: u64::pow(2, 0) * u64::pow(10, 9), - max_effective_balance: u64::pow(2, 5) * u64::pow(10, 9), - ejection_balance: u64::pow(2, 4) * u64::pow(10, 9), - effective_balance_increment: u64::pow(2, 0) * u64::pow(10, 9), + min_deposit_amount: u64::pow(2, 0).saturating_mul(u64::pow(10, 9)), + max_effective_balance: u64::pow(2, 5).saturating_mul(u64::pow(10, 9)), + ejection_balance: u64::pow(2, 4).saturating_mul(u64::pow(10, 9)), + effective_balance_increment: u64::pow(2, 0).saturating_mul(u64::pow(10, 9)), /* * Initial Values @@ -355,6 +355,7 @@ impl ChainSpec { persistent_committee_period: 128, min_genesis_delay: 300, milliseconds_per_slot: 6_000, + safe_slots_to_update_justified: 2, network_id: 2, // lighthouse testnet network id boot_nodes, ..ChainSpec::mainnet() @@ -560,6 +561,7 @@ impl Default for YamlConfig { /// Spec v0.11.1 impl YamlConfig { + #[allow(clippy::integer_arithmetic)] pub fn from_spec(spec: &ChainSpec) -> Self { Self { // ChainSpec @@ -595,7 +597,7 @@ impl YamlConfig { proposer_reward_quotient: spec.proposer_reward_quotient, inactivity_penalty_quotient: spec.inactivity_penalty_quotient, min_slashing_penalty_quotient: spec.min_slashing_penalty_quotient, - genesis_fork_version: spec.genesis_fork_version.clone(), + genesis_fork_version: spec.genesis_fork_version, safe_slots_to_update_justified: spec.safe_slots_to_update_justified, domain_beacon_proposer: spec.domain_beacon_proposer, domain_beacon_attester: spec.domain_beacon_attester, @@ -624,9 +626,9 @@ 
impl YamlConfig { // Validator eth1_follow_distance: spec.eth1_follow_distance, - target_aggregators_per_committee: 0, - random_subnets_per_validator: 0, - epochs_per_random_subnet_subscription: 0, + target_aggregators_per_committee: spec.target_aggregators_per_committee, + random_subnets_per_validator: spec.random_subnets_per_validator, + epochs_per_random_subnet_subscription: spec.epochs_per_random_subnet_subscription, seconds_per_eth1_block: spec.seconds_per_eth1_block, } } @@ -680,7 +682,7 @@ impl YamlConfig { effective_balance_increment: self.effective_balance_increment, genesis_slot: Slot::from(self.genesis_slot), bls_withdrawal_prefix_byte: self.bls_withdrawal_prefix, - milliseconds_per_slot: self.seconds_per_slot * 1000, + milliseconds_per_slot: self.seconds_per_slot.saturating_mul(1000), min_attestation_inclusion_delay: self.min_attestation_inclusion_delay, min_seed_lookahead: Epoch::from(self.min_seed_lookahead), max_seed_lookahead: Epoch::from(self.max_seed_lookahead), @@ -700,7 +702,7 @@ impl YamlConfig { domain_deposit: self.domain_deposit, domain_voluntary_exit: self.domain_voluntary_exit, boot_nodes: chain_spec.boot_nodes.clone(), - genesis_fork_version: self.genesis_fork_version.clone(), + genesis_fork_version: self.genesis_fork_version, eth1_follow_distance: self.eth1_follow_distance, ..*chain_spec }) diff --git a/eth2/types/src/eth_spec.rs b/eth2/types/src/eth_spec.rs index b5bf71e5b8..18ee3e5a91 100644 --- a/eth2/types/src/eth_spec.rs +++ b/eth2/types/src/eth_spec.rs @@ -1,4 +1,5 @@ use crate::*; +use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use ssz_types::typenum::{ Unsigned, U0, U1, U1024, U1099511627776, U128, U16, U16777216, U2, U2048, U32, U4, U4096, U64, @@ -64,16 +65,21 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { /// the `active_validator_count` during the slot's epoch. 
/// /// Spec v0.11.1 - fn get_committee_count_per_slot(active_validator_count: usize, spec: &ChainSpec) -> usize { + fn get_committee_count_per_slot( + active_validator_count: usize, + spec: &ChainSpec, + ) -> Result { let slots_per_epoch = Self::SlotsPerEpoch::to_usize(); - std::cmp::max( + Ok(std::cmp::max( 1, std::cmp::min( spec.max_committees_per_slot, - active_validator_count / slots_per_epoch / spec.target_committee_size, + active_validator_count + .safe_div(slots_per_epoch)? + .safe_div(spec.target_committee_size)?, ), - ) + )) } /// Returns the minimum number of validators required for this spec. diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index bc8cddf0bb..97e3710060 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -2,6 +2,8 @@ // Required for big type-level numbers #![recursion_limit = "128"] +// Clippy lint set up +#![deny(clippy::integer_arithmetic)] #[macro_use] pub mod test_utils; @@ -76,11 +78,11 @@ pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::selection_proof::SelectionProof; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; -pub use crate::signed_beacon_block::SignedBeaconBlock; +pub use crate::signed_beacon_block::{SignedBeaconBlock, SignedBeaconBlockHash}; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_root::{SignedRoot, SigningRoot}; -pub use crate::slot_epoch::{Epoch, Slot, FAR_FUTURE_EPOCH}; +pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::subnet_id::SubnetId; pub use crate::validator::Validator; pub use crate::voluntary_exit::VoluntaryExit; diff --git a/eth2/types/src/selection_proof.rs b/eth2/types/src/selection_proof.rs index 02d24821a4..18c62ba400 100644 --- a/eth2/types/src/selection_proof.rs +++ b/eth2/types/src/selection_proof.rs @@ -1,4 +1,5 @@ use crate::{ChainSpec, Domain, 
EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, Slot}; +use safe_arith::{ArithError, SafeArith}; use std::convert::TryInto; use tree_hash::TreeHash; @@ -24,7 +25,7 @@ impl SelectionProof { Self(Signature::new(message.as_bytes(), secret_key)) } - pub fn is_aggregator(&self, modulo: u64) -> bool { + pub fn is_aggregator(&self, modulo: u64) -> Result { let signature_hash = self.0.tree_hash_root(); let signature_hash_int = u64::from_le_bytes( signature_hash[0..8] @@ -33,7 +34,7 @@ impl SelectionProof { .expect("first 8 bytes of signature should always convert to fixed array"), ); - signature_hash_int % modulo == 0 + signature_hash_int.safe_rem(modulo).map(|rem| rem == 0) } } diff --git a/eth2/types/src/signed_beacon_block.rs b/eth2/types/src/signed_beacon_block.rs index 02f376fc23..81abc0dfd1 100644 --- a/eth2/types/src/signed_beacon_block.rs +++ b/eth2/types/src/signed_beacon_block.rs @@ -1,11 +1,40 @@ use crate::{test_utils::TestRandom, BeaconBlock, EthSpec, Hash256, Slot}; use bls::Signature; +use std::fmt; + use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; +#[derive(PartialEq, Eq, Hash, Clone, Copy)] +pub struct SignedBeaconBlockHash(Hash256); + +impl fmt::Debug for SignedBeaconBlockHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "SignedBeaconBlockHash({:?})", self.0) + } +} + +impl fmt::Display for SignedBeaconBlockHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for SignedBeaconBlockHash { + fn from(hash: Hash256) -> SignedBeaconBlockHash { + SignedBeaconBlockHash(hash) + } +} + +impl From for Hash256 { + fn from(signed_beacon_block_hash: SignedBeaconBlockHash) -> Hash256 { + signed_beacon_block_hash.0 + } +} + /// A `BeaconBlock` and a signature from its proposer. 
/// /// Spec v0.11.1 diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 6261497673..7f3b45ce41 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -14,7 +14,6 @@ use crate::test_utils::TestRandom; use crate::SignedRoot; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use slog; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use std::cmp::{Ord, Ordering}; use std::fmt; @@ -22,13 +21,11 @@ use std::hash::{Hash, Hasher}; use std::iter::Iterator; use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; -pub const FAR_FUTURE_EPOCH: Epoch = Epoch(u64::max_value()); - -#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] +#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)] #[serde(transparent)] pub struct Slot(u64); -#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] +#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)] pub struct Epoch(u64); impl_common!(Slot); @@ -40,7 +37,7 @@ impl Slot { } pub fn epoch(self, slots_per_epoch: u64) -> Epoch { - Epoch::from(self.0 / slots_per_epoch) + Epoch::from(self.0) / Epoch::from(slots_per_epoch) } pub fn max_value() -> Slot { @@ -79,8 +76,8 @@ impl Epoch { let start = self.start_slot(slots_per_epoch); let end = self.end_slot(slots_per_epoch); - if (slot >= start) && (slot <= end) { - Some(slot.as_usize() - start.as_usize()) + if slot >= start && slot <= end { + slot.as_usize().checked_sub(start.as_usize()) } else { None } @@ -113,7 +110,7 @@ impl<'a> Iterator for SlotIter<'a> { } else { let start_slot = self.epoch.start_slot(self.slots_per_epoch); let previous = self.current_iteration; - self.current_iteration += 1; + self.current_iteration = self.current_iteration.checked_add(1)?; Some(start_slot + previous) } } diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 49a1b8e0d5..15263f654e 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ 
b/eth2/types/src/slot_epoch_macros.rs @@ -107,20 +107,21 @@ macro_rules! impl_math_between { fn div(self, rhs: $other) -> $main { let rhs: u64 = rhs.into(); - if rhs == 0 { - panic!("Cannot divide by zero-valued Slot/Epoch") - } - $main::from(self.0 / rhs) + $main::from( + self.0 + .checked_div(rhs) + .expect("Cannot divide by zero-valued Slot/Epoch"), + ) } } impl DivAssign<$other> for $main { fn div_assign(&mut self, rhs: $other) { let rhs: u64 = rhs.into(); - if rhs == 0 { - panic!("Cannot divide by zero-valued Slot/Epoch") - } - self.0 = self.0 / rhs + self.0 = self + .0 + .checked_div(rhs) + .expect("Cannot divide by zero-valued Slot/Epoch"); } } @@ -129,7 +130,11 @@ macro_rules! impl_math_between { fn rem(self, modulus: $other) -> $main { let modulus: u64 = modulus.into(); - $main::from(self.0 % modulus) + $main::from( + self.0 + .checked_rem(modulus) + .expect("Cannot divide by zero-valued Slot/Epoch"), + ) } } }; @@ -190,6 +195,16 @@ macro_rules! impl_display { }; } +macro_rules! impl_debug { + ($type: ident) => { + impl fmt::Debug for $type { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}({:?})", stringify!($type), self.0) + } + } + }; +} + macro_rules! impl_ssz { ($type: ident) => { impl Encode for $type { @@ -234,7 +249,7 @@ macro_rules! impl_ssz { } fn tree_hash_packing_factor() -> usize { - 32 / 8 + 32usize.wrapping_div(8) } fn tree_hash_root(&self) -> tree_hash::Hash256 { @@ -270,6 +285,7 @@ macro_rules! 
impl_common { impl_math_between!($type, u64); impl_math!($type); impl_display!($type); + impl_debug!($type); impl_ssz!($type); impl_hash!($type); }; diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 333f221ad7..c4580a48fb 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -2,7 +2,6 @@ use super::super::{generate_deterministic_keypairs, KeypairsFile}; use crate::test_utils::{AttestationTestTask, TestingPendingAttestationBuilder}; use crate::*; use bls::get_withdrawal_credentials; -use dirs; use log::debug; use rayon::prelude::*; use std::path::{Path, PathBuf}; diff --git a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs index aad5f20986..69da25997e 100644 --- a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs @@ -36,7 +36,7 @@ impl TestingDepositBuilder { let mut secret_key = keypair.sk.clone(); match test_task { - DepositTestTask::BadPubKey => pubkeybytes = PublicKeyBytes::from(new_key.pk.clone()), + DepositTestTask::BadPubKey => pubkeybytes = PublicKeyBytes::from(new_key.pk), DepositTestTask::InvalidPubKey => { // Creating invalid public key bytes let mut public_key_bytes: Vec = vec![0; 48]; diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index a969843fad..88e84a3995 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -50,7 +50,7 @@ impl TestingProposerSlashingBuilder { message: BeaconBlockHeader { parent_root: hash_2, slot: slot_2, - ..signed_header_1.message.clone() + 
..signed_header_1.message }, signature: Signature::empty_signature(), }; diff --git a/eth2/types/src/test_utils/keypairs_file.rs b/eth2/types/src/test_utils/keypairs_file.rs index 13b1b17f2d..8d45b7f51f 100644 --- a/eth2/types/src/test_utils/keypairs_file.rs +++ b/eth2/types/src/test_utils/keypairs_file.rs @@ -5,7 +5,7 @@ use std::io::{Error, ErrorKind, Read, Write}; use std::path::Path; pub const PUBLIC_KEY_BYTES_LEN: usize = 96; -pub const SECRET_KEY_BYTES_LEN: usize = 48; +pub const SECRET_KEY_BYTES_LEN: usize = 32; pub const BATCH_SIZE: usize = 1_000; // ~15MB diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 593be11ab2..719fd2e3fc 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -1,3 +1,5 @@ +#![allow(clippy::integer_arithmetic)] + #[macro_use] mod macros; mod builders; diff --git a/eth2/types/src/tree_hash_impls.rs b/eth2/types/src/tree_hash_impls.rs index 0b08a550b7..787d62d760 100644 --- a/eth2/types/src/tree_hash_impls.rs +++ b/eth2/types/src/tree_hash_impls.rs @@ -32,12 +32,10 @@ impl CachedTreeHash for Validator { // Fields pubkey and withdrawal_credentials are constant if (i == 0 || i == 1) && cache.initialized { None + } else if process_field_by_index(self, i, leaf, !cache.initialized) { + Some(i) } else { - if process_field_by_index(self, i, leaf, !cache.initialized) { - Some(i) - } else { - None - } + None } }) .collect(); diff --git a/eth2/types/src/utils/serde_utils.rs b/eth2/types/src/utils/serde_utils.rs index 8d8e7dff04..d2d3e5655c 100644 --- a/eth2/types/src/utils/serde_utils.rs +++ b/eth2/types/src/utils/serde_utils.rs @@ -1,4 +1,3 @@ -use hex; use serde::de::Error; use serde::{Deserialize, Deserializer, Serializer}; diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index f22263e313..25a577834c 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -milagro_bls = 
{ git = "https://github.com/sigp/milagro_bls", branch = "eth2.0-v0.10" } +milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.0.1" } eth2_hashing = "0.1.0" hex = "0.3" rand = "0.7.2" diff --git a/eth2/utils/bls/src/aggregate_public_key.rs b/eth2/utils/bls/src/aggregate_public_key.rs index 4f4040d120..6389915c4f 100644 --- a/eth2/utils/bls/src/aggregate_public_key.rs +++ b/eth2/utils/bls/src/aggregate_public_key.rs @@ -1,5 +1,5 @@ use super::{PublicKey, BLS_PUBLIC_KEY_BYTE_SIZE}; -use milagro_bls::{AggregatePublicKey as RawAggregatePublicKey, G1Point}; +use milagro_bls::AggregatePublicKey as RawAggregatePublicKey; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; @@ -19,9 +19,7 @@ impl AggregatePublicKey { pub fn from_bytes(bytes: &[u8]) -> Result { let pubkey = RawAggregatePublicKey::from_bytes(&bytes).map_err(|_| { - DecodeError::BytesInvalid( - format!("Invalid AggregatePublicKey bytes: {:?}", bytes).to_string(), - ) + DecodeError::BytesInvalid(format!("Invalid AggregatePublicKey bytes: {:?}", bytes)) })?; Ok(AggregatePublicKey(pubkey)) @@ -39,10 +37,6 @@ impl AggregatePublicKey { self.0.add(public_key.as_raw()) } - pub fn add_point(&mut self, point: &G1Point) { - self.0.point.add(point) - } - /// Returns the underlying public key. 
pub fn as_raw(&self) -> &RawAggregatePublicKey { &self.0 diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index 8ea582a4c7..8ff6c8bd8b 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -1,8 +1,5 @@ use super::*; -use milagro_bls::{ - AggregatePublicKey as RawAggregatePublicKey, AggregateSignature as RawAggregateSignature, - G2Point, -}; +use milagro_bls::{AggregateSignature as RawAggregateSignature, G2Point}; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; @@ -32,16 +29,19 @@ impl AggregateSignature { /// Add (aggregate) a signature to the `AggregateSignature`. pub fn add(&mut self, signature: &Signature) { - if self.is_empty { - self.aggregate_signature = RawAggregateSignature::new(); - self.is_empty = false; - } + // Only empty if both are empty + self.is_empty = self.is_empty && signature.is_empty(); + // Note: empty signatures will have point at infinity which is equivalent of adding 0. self.aggregate_signature.add(signature.as_raw()) } /// Add (aggregate) another `AggregateSignature`. pub fn add_aggregate(&mut self, agg_signature: &AggregateSignature) { + // Only empty if both are empty + self.is_empty = self.is_empty && agg_signature.is_empty(); + + // Note: empty signatures will have point at infinity which is equivalent of adding 0. self.aggregate_signature .add_aggregate(&agg_signature.aggregate_signature) } @@ -55,32 +55,32 @@ impl AggregateSignature { return false; } self.aggregate_signature - .verify(msg, aggregate_public_key.as_raw()) + .fast_aggregate_verify_pre_aggregated(msg, aggregate_public_key.as_raw()) } - /// Verify this AggregateSignature against multiple AggregatePublickeys with multiple Messages. + /// Verify the `AggregateSignature` against an `AggregatePublicKey`. 
/// - /// All PublicKeys related to a Message should be aggregated into one AggregatePublicKey. - /// Each AggregatePublicKey has a 1:1 ratio with a 32 byte Message. - pub fn verify_multiple( - &self, - messages: &[&[u8]], - aggregate_public_keys: &[&AggregatePublicKey], - ) -> bool { + /// Only returns `true` if the set of keys in the `AggregatePublicKey` match the set of keys + /// that signed the `AggregateSignature`. + pub fn verify_unaggregated(&self, msg: &[u8], public_keys: &[&PublicKey]) -> bool { if self.is_empty { return false; } - let aggregate_public_keys: Vec<&RawAggregatePublicKey> = - aggregate_public_keys.iter().map(|pk| pk.as_raw()).collect(); - - // Messages are concatenated into one long message. - let mut msgs: Vec> = vec![]; - for message in messages { - msgs.push(message.to_vec()); - } - + let public_key_refs: Vec<_> = public_keys.iter().map(|pk| pk.as_raw()).collect(); self.aggregate_signature - .verify_multiple(&msgs, &aggregate_public_keys[..]) + .fast_aggregate_verify(msg, &public_key_refs) + } + + /// Verify this AggregateSignature against multiple AggregatePublickeys and Messages. + /// + /// Each AggregatePublicKey has a 1:1 ratio with a 32 byte Message. + pub fn verify_multiple(&self, messages: &[&[u8]], public_keys: &[&PublicKey]) -> bool { + if self.is_empty { + return false; + } + let public_keys_refs: Vec<_> = public_keys.iter().map(|pk| pk.as_raw()).collect(); + self.aggregate_signature + .aggregate_verify(&messages, &public_keys_refs) } /// Return AggregateSignature as bytes diff --git a/eth2/utils/bls/src/fake_aggregate_public_key.rs b/eth2/utils/bls/src/fake_aggregate_public_key.rs index 4eed94d654..fc9b7db5ad 100644 --- a/eth2/utils/bls/src/fake_aggregate_public_key.rs +++ b/eth2/utils/bls/src/fake_aggregate_public_key.rs @@ -67,6 +67,17 @@ impl FakeAggregatePublicKey { // No nothing. 
} + pub fn aggregate(_pks: &[&PublicKey]) -> Self { + Self::new() + } + + pub fn from_public_key(public_key: &PublicKey) -> Self { + Self { + bytes: public_key.as_bytes(), + point: public_key.point.clone(), + } + } + pub fn as_raw(&self) -> &Self { &self } diff --git a/eth2/utils/bls/src/fake_aggregate_signature.rs b/eth2/utils/bls/src/fake_aggregate_signature.rs index 19fe400387..401c448788 100644 --- a/eth2/utils/bls/src/fake_aggregate_signature.rs +++ b/eth2/utils/bls/src/fake_aggregate_signature.rs @@ -1,6 +1,6 @@ use super::{ - fake_aggregate_public_key::FakeAggregatePublicKey, fake_signature::FakeSignature, - BLS_AGG_SIG_BYTE_SIZE, + fake_aggregate_public_key::FakeAggregatePublicKey, fake_public_key::FakePublicKey, + fake_signature::FakeSignature, BLS_AGG_SIG_BYTE_SIZE, }; use milagro_bls::G2Point; use serde::de::{Deserialize, Deserializer}; @@ -47,6 +47,11 @@ impl FakeAggregateSignature { // Do nothing. } + /// Does glorious nothing. + pub fn aggregate(&mut self, _agg_sig: &FakeAggregateSignature) { + // Do nothing. + } + /// _Always_ returns `true`. pub fn verify(&self, _msg: &[u8], _aggregate_public_key: &FakeAggregatePublicKey) -> bool { true @@ -56,11 +61,28 @@ impl FakeAggregateSignature { pub fn verify_multiple( &self, _messages: &[&[u8]], - _aggregate_public_keys: &[&FakeAggregatePublicKey], + _aggregate_public_keys: &[&FakePublicKey], ) -> bool { true } + /// _Always_ returns `true`. + pub fn fast_aggregate_verify_pre_aggregated( + &self, + _messages: &[u8], + _aggregate_public_keys: &FakeAggregatePublicKey, + ) -> bool { + true + } + + /// _Always_ returns `true`. 
+ pub fn from_signature(signature: &FakeSignature) -> Self { + Self { + bytes: signature.as_bytes(), + point: signature.point.clone(), + } + } + /// Convert bytes to fake BLS aggregate signature pub fn from_bytes(bytes: &[u8]) -> Result { if bytes.len() != BLS_AGG_SIG_BYTE_SIZE { diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index 27196fcc27..2d6b56a646 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -13,8 +13,8 @@ pub use crate::keypair::Keypair; pub use crate::public_key_bytes::PublicKeyBytes; pub use crate::secret_key::SecretKey; pub use crate::signature_bytes::SignatureBytes; -pub use milagro_bls::{compress_g2, hash_on_g2, G1Point}; -pub use signature_set::{verify_signature_sets, G1Ref, SignatureSet, SignedMessage}; +pub use milagro_bls::{compress_g2, hash_to_curve_g2}; +pub use signature_set::{verify_signature_sets, SignatureSet}; #[cfg(feature = "fake_crypto")] mod fake_aggregate_public_key; @@ -56,7 +56,7 @@ mod reals { pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96; pub const BLS_SIG_BYTE_SIZE: usize = 96; -pub const BLS_SECRET_KEY_BYTE_SIZE: usize = 48; +pub const BLS_SECRET_KEY_BYTE_SIZE: usize = 32; pub const BLS_PUBLIC_KEY_BYTE_SIZE: usize = 48; use eth2_hashing::hash; diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index a930e65cc0..893ab16698 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -80,9 +80,8 @@ mod tests { #[test] pub fn test_ssz_round_trip() { let byte_key = [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 211, 210, 129, 231, 69, 162, 234, - 16, 15, 244, 214, 126, 201, 0, 85, 28, 239, 82, 121, 208, 190, 223, 6, 169, 202, 86, - 236, 197, 218, 3, 69, + 3, 211, 210, 129, 231, 69, 162, 234, 16, 15, 244, 214, 126, 201, 0, 85, 28, 239, 82, + 121, 208, 190, 223, 6, 169, 202, 86, 236, 197, 218, 3, 69, ]; let original = SecretKey::from_bytes(&byte_key).unwrap(); diff --git a/eth2/utils/bls/src/signature_set.rs 
b/eth2/utils/bls/src/signature_set.rs index ab49d1414a..76a82e96d0 100644 --- a/eth2/utils/bls/src/signature_set.rs +++ b/eth2/utils/bls/src/signature_set.rs @@ -1,179 +1,74 @@ -use crate::{AggregatePublicKey, AggregateSignature, PublicKey, Signature}; -use milagro_bls::{G1Point, G2Point}; +use crate::{AggregateSignature, PublicKey, Signature}; use std::borrow::Cow; #[cfg(not(feature = "fake_crypto"))] -use milagro_bls::AggregateSignature as RawAggregateSignature; +use milagro_bls::{ + AggregatePublicKey as RawAggregatePublicKey, AggregateSignature as RawAggregateSignature, + PublicKey as RawPublicKey, +}; + +#[cfg(feature = "fake_crypto")] +use crate::fakes::{ + AggregatePublicKey as RawAggregatePublicKey, AggregateSignature as RawAggregateSignature, + PublicKey as RawPublicKey, +}; type Message = Vec; #[derive(Clone, Debug)] -pub struct SignedMessage<'a> { - signing_keys: Vec>, +pub struct SignatureSet { + pub signature: RawAggregateSignature, + signing_keys: RawAggregatePublicKey, message: Message, } -impl<'a> SignedMessage<'a> { - pub fn new(signing_keys: Vec>, message: Message) -> Self { +impl SignatureSet { + pub fn single(signature: &Signature, signing_key: Cow, message: Message) -> Self { Self { - signing_keys, + signature: RawAggregateSignature::from_signature(signature.as_raw()), + signing_keys: RawAggregatePublicKey::from_public_key(signing_key.as_raw()), message, } } -} -#[derive(Clone, Debug)] -pub struct SignatureSet<'a> { - pub signature: &'a G2Point, - signed_messages: Vec>, -} - -impl<'a> SignatureSet<'a> { - pub fn single(signature: &'a S, signing_key: Cow<'a, G1Point>, message: Message) -> Self - where - S: G2Ref, - { - Self { - signature: signature.g2_ref(), - signed_messages: vec![SignedMessage::new(vec![signing_key], message)], - } - } - - pub fn dual( - signature: &'a S, - message_0: Message, - message_0_signing_keys: Vec>, - message_1: Message, - message_1_signing_keys: Vec>, + pub fn new( + signature: &AggregateSignature, + signing_keys: 
Vec>, + message: Message, ) -> Self - where - T: G1Ref + Clone, - S: G2Ref, - { +where { + let signing_keys_refs: Vec<&RawPublicKey> = + signing_keys.iter().map(|pk| pk.as_raw()).collect(); Self { - signature: signature.g2_ref(), - signed_messages: vec![ - SignedMessage::new(message_0_signing_keys, message_0), - SignedMessage::new(message_1_signing_keys, message_1), - ], - } - } - - pub fn new(signature: &'a S, signed_messages: Vec>) -> Self - where - S: G2Ref, - { - Self { - signature: signature.g2_ref(), - signed_messages, + signature: signature.as_raw().clone(), + signing_keys: RawAggregatePublicKey::aggregate(&signing_keys_refs), + message, } } pub fn is_valid(&self) -> bool { - let sig = milagro_bls::AggregateSignature { - point: self.signature.clone(), - }; - - let mut messages: Vec> = vec![]; - let mut pubkeys = vec![]; - - self.signed_messages.iter().for_each(|signed_message| { - messages.push(signed_message.message.clone()); - - let point = if signed_message.signing_keys.len() == 1 { - signed_message.signing_keys[0].clone().into_owned() - } else { - aggregate_public_keys(&signed_message.signing_keys) - }; - - pubkeys.push(milagro_bls::AggregatePublicKey { point }); - }); - - let pubkey_refs: Vec<&milagro_bls::AggregatePublicKey> = - pubkeys.iter().map(std::borrow::Borrow::borrow).collect(); - - sig.verify_multiple(&messages, &pubkey_refs) + self.signature + .fast_aggregate_verify_pre_aggregated(&self.message, &self.signing_keys) } } +type VerifySet<'a> = ( + &'a RawAggregateSignature, + &'a RawAggregatePublicKey, + &'a [u8], +); + #[cfg(not(feature = "fake_crypto"))] -pub fn verify_signature_sets<'a>(iter: impl Iterator>) -> bool { +pub fn verify_signature_sets<'a>(sets: Vec) -> bool { let rng = &mut rand::thread_rng(); - RawAggregateSignature::verify_multiple_signatures(rng, iter.map(Into::into)) + let verify_set: Vec = sets + .iter() + .map(|ss| (&ss.signature, &ss.signing_keys, ss.message.as_slice())) + .collect(); + 
RawAggregateSignature::verify_multiple_aggregate_signatures(rng, verify_set.into_iter()) } #[cfg(feature = "fake_crypto")] -pub fn verify_signature_sets<'a>(_iter: impl Iterator>) -> bool { +pub fn verify_signature_sets<'a>(sets: Vec) -> bool { true } - -type VerifySet<'a> = (G2Point, Vec, Vec>); - -impl<'a> Into> for SignatureSet<'a> { - fn into(self) -> VerifySet<'a> { - let signature = self.signature.clone(); - - let (pubkeys, messages): (Vec, Vec) = self - .signed_messages - .into_iter() - .map(|signed_message| { - let key = if signed_message.signing_keys.len() == 1 { - signed_message.signing_keys[0].clone().into_owned() - } else { - aggregate_public_keys(&signed_message.signing_keys) - }; - - (key, signed_message.message) - }) - .unzip(); - - (signature, pubkeys, messages) - } -} - -/// Create an aggregate public key for a list of validators, failing if any key can't be found. -fn aggregate_public_keys<'a>(public_keys: &'a [Cow<'a, G1Point>]) -> G1Point { - let mut aggregate = - public_keys - .iter() - .fold(AggregatePublicKey::new(), |mut aggregate, pubkey| { - aggregate.add_point(&pubkey); - aggregate - }); - - aggregate.affine(); - - aggregate.into_raw().point -} - -pub trait G1Ref { - fn g1_ref(&self) -> Cow<'_, G1Point>; -} - -impl G1Ref for AggregatePublicKey { - fn g1_ref(&self) -> Cow<'_, G1Point> { - Cow::Borrowed(&self.as_raw().point) - } -} - -impl G1Ref for PublicKey { - fn g1_ref(&self) -> Cow<'_, G1Point> { - Cow::Borrowed(&self.as_raw().point) - } -} - -pub trait G2Ref { - fn g2_ref(&self) -> &G2Point; -} - -impl G2Ref for AggregateSignature { - fn g2_ref(&self) -> &G2Point { - &self.as_raw().point - } -} - -impl G2Ref for Signature { - fn g2_ref(&self) -> &G2Point { - &self.as_raw().point - } -} diff --git a/eth2/utils/cached_tree_hash/src/cache.rs b/eth2/utils/cached_tree_hash/src/cache.rs index 782cedcbfc..9c546e0ff6 100644 --- a/eth2/utils/cached_tree_hash/src/cache.rs +++ b/eth2/utils/cached_tree_hash/src/cache.rs @@ -164,8 +164,8 @@ impl 
TreeHashCache { fn lift_dirty(dirty_indices: &[usize]) -> SmallVec8 { let mut new_dirty = SmallVec8::with_capacity(dirty_indices.len()); - for i in 0..dirty_indices.len() { - new_dirty.push(dirty_indices[i] / 2) + for index in dirty_indices { + new_dirty.push(index / 2) } new_dirty.dedup(); diff --git a/eth2/utils/cached_tree_hash/src/cache_arena.rs b/eth2/utils/cached_tree_hash/src/cache_arena.rs index 5923b386b5..b6ade14743 100644 --- a/eth2/utils/cached_tree_hash/src/cache_arena.rs +++ b/eth2/utils/cached_tree_hash/src/cache_arena.rs @@ -89,6 +89,7 @@ impl CacheArena { /// To reiterate, the given `range` should be relative to the given `alloc_id`, not /// `self.backing`. E.g., if the allocation has an offset of `20` and the range is `0..1`, then /// the splice will translate to `self.backing[20..21]`. + #[allow(clippy::comparison_chain)] fn splice_forgetful>( &mut self, alloc_id: usize, diff --git a/eth2/utils/clap_utils/Cargo.toml b/eth2/utils/clap_utils/Cargo.toml new file mode 100644 index 0000000000..f1916c4ba4 --- /dev/null +++ b/eth2/utils/clap_utils/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "clap_utils" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +clap = "2.33.0" +hex = "0.3" +dirs = "2.0" +types = { path = "../../types" } +eth2_testnet_config = { path = "../eth2_testnet_config" } +eth2_ssz = { path = "../ssz" } diff --git a/eth2/utils/clap_utils/src/lib.rs b/eth2/utils/clap_utils/src/lib.rs new file mode 100644 index 0000000000..d8002d76fa --- /dev/null +++ b/eth2/utils/clap_utils/src/lib.rs @@ -0,0 +1,113 @@ +//! A helper library for parsing values from `clap::ArgMatches`. 
+ +use clap::ArgMatches; +use eth2_testnet_config::Eth2TestnetConfig; +use hex; +use ssz::Decode; +use std::path::PathBuf; +use std::str::FromStr; +use types::EthSpec; + +/// Attempts to load the testnet dir at the path if `name` is in `matches`, returning an error if +/// the path cannot be found or the testnet dir is invalid. +/// +/// If `name` is not in `matches`, attempts to return the "hard coded" testnet dir. +pub fn parse_testnet_dir_with_hardcoded_default( + matches: &ArgMatches, + name: &'static str, +) -> Result, String> { + if let Some(path) = parse_optional::(matches, name)? { + Eth2TestnetConfig::load(path.clone()) + .map_err(|e| format!("Unable to open testnet dir at {:?}: {}", path, e)) + } else { + Eth2TestnetConfig::hard_coded().map_err(|e| { + format!( + "The hard-coded testnet directory was invalid. \ + This happens when Lighthouse is migrating between spec versions. \ + Error : {}", + e + ) + }) + } +} + +/// If `name` is in `matches`, parses the value as a path. Otherwise, attempts to find the user's +/// home directory and appends `default` to it. +pub fn parse_path_with_default_in_home_dir( + matches: &ArgMatches, + name: &'static str, + default: PathBuf, +) -> Result { + matches + .value_of(name) + .map(|dir| { + dir.parse::() + .map_err(|e| format!("Unable to parse {}: {}", name, e)) + }) + .unwrap_or_else(|| { + dirs::home_dir() + .map(|home| home.join(default)) + .ok_or_else(|| format!("Unable to locate home directory. Try specifying {}", name)) + }) +} + +/// Returns the value of `name` or an error if it is not in `matches` or does not parse +/// successfully using `std::string::FromStr`. +pub fn parse_required(matches: &ArgMatches, name: &'static str) -> Result +where + T: FromStr, + ::Err: std::fmt::Display, +{ + parse_optional(matches, name)?.ok_or_else(|| format!("{} not specified", name)) +} + +/// Returns the value of `name` (if present) or an error if it does not parse successfully using +/// `std::string::FromStr`. 
+pub fn parse_optional(matches: &ArgMatches, name: &'static str) -> Result, String> +where + T: FromStr, + ::Err: std::fmt::Display, +{ + matches + .value_of(name) + .map(|val| { + val.parse() + .map_err(|e| format!("Unable to parse {}: {}", name, e)) + }) + .transpose() +} + +/// Returns the value of `name` or an error if it is not in `matches` or does not parse +/// successfully using `ssz::Decode`. +/// +/// Expects the value of `name` to be 0x-prefixed ASCII-hex. +pub fn parse_ssz_required( + matches: &ArgMatches, + name: &'static str, +) -> Result { + parse_ssz_optional(matches, name)?.ok_or_else(|| format!("{} not specified", name)) +} + +/// Returns the value of `name` (if present) or an error if it does not parse successfully using +/// `ssz::Decode`. +/// +/// Expects the value of `name` (if any) to be 0x-prefixed ASCII-hex. +pub fn parse_ssz_optional( + matches: &ArgMatches, + name: &'static str, +) -> Result, String> { + matches + .value_of(name) + .map(|val| { + if val.starts_with("0x") { + let vec = hex::decode(&val[2..]) + .map_err(|e| format!("Unable to parse {} as hex: {:?}", name, e))?; + + T::from_ssz_bytes(&vec) + .map_err(|e| format!("Unable to parse {} as SSZ: {:?}", name, e)) + } else { + Err(format!("Unable to parse {}, must have 0x prefix", name)) + } + }) + .transpose() +} diff --git a/eth2/utils/deposit_contract/src/lib.rs b/eth2/utils/deposit_contract/src/lib.rs index 11a831c0fa..2a5ea514e9 100644 --- a/eth2/utils/deposit_contract/src/lib.rs +++ b/eth2/utils/deposit_contract/src/lib.rs @@ -22,7 +22,7 @@ impl From for DecodeError { } pub const CONTRACT_DEPLOY_GAS: usize = 4_000_000; -pub const DEPOSIT_GAS: usize = 4_000_000; +pub const DEPOSIT_GAS: usize = 400_000; pub const ABI: &[u8] = include_bytes!("../contracts/v0.11.1_validator_registration.json"); pub const BYTECODE: &[u8] = include_bytes!("../contracts/v0.11.1_validator_registration.bytecode"); pub const DEPOSIT_DATA_LEN: usize = 420; // lol diff --git 
a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index 641f82c257..30509a8f97 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -11,7 +11,7 @@ lazy_static = "1.4.0" num-bigint = "0.2.3" eth2_hashing = "0.1.0" hex = "0.3" -milagro_bls = { git = "https://github.com/sigp/milagro_bls", branch = "eth2.0-v0.10" } +milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.0.1" } serde_yaml = "0.8.11" serde = "1.0.102" serde_derive = "1.0.102" diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 94b0b207fe..f80b45c18a 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -27,7 +27,7 @@ use std::convert::TryInto; use std::fs::File; use std::path::PathBuf; -pub const PRIVATE_KEY_BYTES: usize = 48; +pub const PRIVATE_KEY_BYTES: usize = 32; pub const PUBLIC_KEY_BYTES: usize = 48; pub const HASH_BYTES: usize = 32; diff --git a/eth2/utils/eth2_testnet_config/src/lib.rs b/eth2/utils/eth2_testnet_config/src/lib.rs index 20ab80f079..0b3d476749 100644 --- a/eth2/utils/eth2_testnet_config/src/lib.rs +++ b/eth2/utils/eth2_testnet_config/src/lib.rs @@ -64,9 +64,11 @@ impl Eth2TestnetConfig { }) } - // Write the files to the directory, only if the directory doesn't already exist. - pub fn write_to_file(&self, base_dir: PathBuf) -> Result<(), String> { - if base_dir.exists() { + // Write the files to the directory. + // + // Overwrites files if specified to do so. 
+ pub fn write_to_file(&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> { + if base_dir.exists() && !overwrite { return Err("Testnet directory already exists".to_string()); } @@ -252,7 +254,7 @@ mod tests { }; testnet - .write_to_file(base_dir.clone()) + .write_to_file(base_dir.clone(), false) .expect("should write to file"); let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct"); diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index 2692fced30..d4127a53f8 100644 --- a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" ethereum-types = "0.8.0" eth2_hashing = "0.1.0" lazy_static = "1.4.0" +safe_arith = { path = "../safe_arith" } [dev-dependencies] quickcheck = "0.9.0" diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index 64f744be80..f80ee2a2d6 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -1,6 +1,7 @@ use eth2_hashing::{hash, hash32_concat, ZERO_HASHES}; use ethereum_types::H256; use lazy_static::lazy_static; +use safe_arith::ArithError; const MAX_TREE_DEPTH: usize = 32; const EMPTY_SLICE: &[H256] = &[]; @@ -38,6 +39,8 @@ pub enum MerkleTreeError { Invalid, // Incorrect Depth provided DepthTooSmall, + // Overflow occurred + ArithError, } impl MerkleTree { @@ -232,6 +235,12 @@ fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usi H256::from_slice(&merkle_root) } +impl From for MerkleTreeError { + fn from(_: ArithError) -> Self { + MerkleTreeError::ArithError + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/eth2/utils/remote_beacon_node/src/lib.rs b/eth2/utils/remote_beacon_node/src/lib.rs index c5e8fd28a7..b9f2325f79 100644 --- a/eth2/utils/remote_beacon_node/src/lib.rs +++ b/eth2/utils/remote_beacon_node/src/lib.rs @@ -20,8 +20,8 @@ pub use operation_pool::PersistedOperationPool; pub use 
proto_array_fork_choice::core::ProtoArray; pub use rest_types::{ CanonicalHeadResponse, Committee, HeadBeaconBlock, IndividualVotesRequest, - IndividualVotesResponse, ValidatorDutiesRequest, ValidatorDutyBytes, ValidatorRequest, - ValidatorResponse, ValidatorSubscription, + IndividualVotesResponse, SyncingResponse, ValidatorDutiesRequest, ValidatorDutyBytes, + ValidatorRequest, ValidatorResponse, ValidatorSubscription, }; // Setting a long timeout for debug ensures that crypto-heavy operations can still succeed. @@ -611,6 +611,13 @@ impl Node { let url = self.url("version")?; client.json_get(url, vec![]).await } + + pub fn syncing_status(&self) -> impl Future { + let client = self.0.clone(); + self.url("syncing") + .into_future() + .and_then(move |url| client.json_get(url, vec![])) + } } /// Provides the functions on the `/advanced` endpoint of the node. diff --git a/eth2/utils/safe_arith/Cargo.toml b/eth2/utils/safe_arith/Cargo.toml new file mode 100644 index 0000000000..7784a03929 --- /dev/null +++ b/eth2/utils/safe_arith/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "safe_arith" +version = "0.1.0" +authors = ["Michael Sproul "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/eth2/utils/safe_arith/src/lib.rs b/eth2/utils/safe_arith/src/lib.rs new file mode 100644 index 0000000000..90387b2238 --- /dev/null +++ b/eth2/utils/safe_arith/src/lib.rs @@ -0,0 +1,161 @@ +//! Library for safe arithmetic on integers, avoiding overflow and division by zero. + +/// Error representing the failure of an arithmetic operation. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum ArithError { + Overflow, + DivisionByZero, +} + +type Result = std::result::Result; + +macro_rules! 
assign_method { + ($name:ident, $op:ident, $doc_op:expr) => { + assign_method!($name, $op, Self, $doc_op); + }; + ($name:ident, $op:ident, $rhs_ty:ty, $doc_op:expr) => { + #[doc = "Safe variant of `"] + #[doc = $doc_op] + #[doc = "`."] + fn $name(&mut self, other: $rhs_ty) -> Result<()> { + *self = self.$op(other)?; + Ok(()) + } + }; +} + +/// Trait providing safe arithmetic operations for built-in types. +pub trait SafeArith: Sized + Copy { + const ZERO: Self; + const ONE: Self; + + /// Safe variant of `+` that guards against overflow. + fn safe_add(&self, other: Self) -> Result; + + /// Safe variant of `-` that guards against overflow. + fn safe_sub(&self, other: Self) -> Result; + + /// Safe variant of `*` that guards against overflow. + fn safe_mul(&self, other: Self) -> Result; + + /// Safe variant of `/` that guards against division by 0. + fn safe_div(&self, other: Self) -> Result; + + /// Safe variant of `%` that guards against division by 0. + fn safe_rem(&self, other: Self) -> Result; + + /// Safe variant of `<<` that guards against overflow. + fn safe_shl(&self, other: u32) -> Result; + + /// Safe variant of `>>` that guards against overflow. + fn safe_shr(&self, other: u32) -> Result; + + assign_method!(safe_add_assign, safe_add, "+="); + assign_method!(safe_sub_assign, safe_sub, "-="); + assign_method!(safe_mul_assign, safe_mul, "*="); + assign_method!(safe_div_assign, safe_div, "/="); + assign_method!(safe_rem_assign, safe_rem, "%="); + assign_method!(safe_shl_assign, safe_shl, u32, "<<="); + assign_method!(safe_shr_assign, safe_shr, u32, ">>="); + + /// Mutate `self` by adding 1, erroring on overflow. + fn increment(&mut self) -> Result<()> { + self.safe_add_assign(Self::ONE) + } +} + +macro_rules! 
impl_safe_arith { + ($typ:ty) => { + impl SafeArith for $typ { + const ZERO: Self = 0; + const ONE: Self = 1; + + fn safe_add(&self, other: Self) -> Result { + self.checked_add(other).ok_or(ArithError::Overflow) + } + + fn safe_sub(&self, other: Self) -> Result { + self.checked_sub(other).ok_or(ArithError::Overflow) + } + + fn safe_mul(&self, other: Self) -> Result { + self.checked_mul(other).ok_or(ArithError::Overflow) + } + + fn safe_div(&self, other: Self) -> Result { + self.checked_div(other).ok_or(ArithError::DivisionByZero) + } + + fn safe_rem(&self, other: Self) -> Result { + self.checked_rem(other).ok_or(ArithError::DivisionByZero) + } + + fn safe_shl(&self, other: u32) -> Result { + self.checked_shl(other).ok_or(ArithError::Overflow) + } + + fn safe_shr(&self, other: u32) -> Result { + self.checked_shr(other).ok_or(ArithError::Overflow) + } + } + }; +} + +impl_safe_arith!(u8); +impl_safe_arith!(u16); +impl_safe_arith!(u32); +impl_safe_arith!(u64); +impl_safe_arith!(usize); +impl_safe_arith!(i8); +impl_safe_arith!(i16); +impl_safe_arith!(i32); +impl_safe_arith!(i64); +impl_safe_arith!(isize); + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn basic() { + let x = 10u32; + let y = 11; + assert_eq!(x.safe_add(y), Ok(x + y)); + assert_eq!(y.safe_sub(x), Ok(y - x)); + assert_eq!(x.safe_mul(y), Ok(x * y)); + assert_eq!(x.safe_div(y), Ok(x / y)); + assert_eq!(x.safe_rem(y), Ok(x % y)); + + assert_eq!(x.safe_shl(1), Ok(x << 1)); + assert_eq!(x.safe_shr(1), Ok(x >> 1)); + } + + #[test] + fn mutate() { + let mut x = 0u8; + x.increment().unwrap(); + x.increment().unwrap(); + assert_eq!(x, 2); + x.safe_sub_assign(1).unwrap(); + assert_eq!(x, 1); + x.safe_shl_assign(1).unwrap(); + assert_eq!(x, 2); + x.safe_mul_assign(3).unwrap(); + assert_eq!(x, 6); + x.safe_div_assign(4).unwrap(); + assert_eq!(x, 1); + x.safe_shr_assign(1).unwrap(); + assert_eq!(x, 0); + } + + #[test] + fn errors() { + assert!(u32::max_value().safe_add(1).is_err()); + 
assert!(u32::min_value().safe_sub(1).is_err()); + assert!(u32::max_value().safe_mul(2).is_err()); + assert!(u32::max_value().safe_div(0).is_err()); + assert!(u32::max_value().safe_rem(0).is_err()); + assert!(u32::max_value().safe_shl(32).is_err()); + assert!(u32::max_value().safe_shr(32).is_err()); + } +} diff --git a/eth2/utils/serde_hex/src/lib.rs b/eth2/utils/serde_hex/src/lib.rs index dd76601ab7..7b254cf88c 100644 --- a/eth2/utils/serde_hex/src/lib.rs +++ b/eth2/utils/serde_hex/src/lib.rs @@ -1,4 +1,3 @@ -use hex; use hex::ToHex; use serde::de::{self, Visitor}; use std::fmt; diff --git a/eth2/utils/ssz/src/decode.rs b/eth2/utils/ssz/src/decode.rs index 8a2fc5351a..ec87935b40 100644 --- a/eth2/utils/ssz/src/decode.rs +++ b/eth2/utils/ssz/src/decode.rs @@ -21,10 +21,72 @@ pub enum DecodeError { /// length items (i.e., `length[0] < BYTES_PER_LENGTH_OFFSET`). /// - When decoding variable-length items, the `n`'th offset was less than the `n-1`'th offset. OutOfBoundsByte { i: usize }, + /// An offset points “backwards” into the fixed-bytes portion of the message, essentially + /// double-decoding bytes that will also be decoded as fixed-length. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#1-Offset-into-fixed-portion + OffsetIntoFixedPortion(usize), + /// The first offset does not point to the byte that follows the fixed byte portion, + /// essentially skipping a variable-length byte. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#2-Skip-first-variable-byte + OffsetSkipsVariableBytes(usize), + /// An offset points to bytes prior to the previous offset. Depending on how you look at it, + /// this either double-decodes bytes or makes the first offset a negative-length. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#3-Offsets-are-decreasing + OffsetsAreDecreasing(usize), + /// An offset references byte indices that do not exist in the source bytes. 
+ /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#4-Offsets-are-out-of-bounds + OffsetOutOfBounds(usize), + /// A variable-length list does not have a fixed portion that is cleanly divisible by + /// `BYTES_PER_LENGTH_OFFSET`. + InvalidListFixedBytesLen(usize), + /// Some item has a `ssz_fixed_len` of zero. This is illegal. + ZeroLengthItem, /// The given bytes were invalid for some application-level reason. BytesInvalid(String), } +/// Performs checks on the `offset` based upon the other parameters provided. +/// +/// ## Detail +/// +/// - `offset`: the offset bytes (e.g., result of `read_offset(..)`). +/// - `previous_offset`: unless this is the first offset in the SSZ object, the value of the +/// previously-read offset. Used to ensure offsets are not decreasing. +/// - `num_bytes`: the total number of bytes in the SSZ object. Used to ensure the offset is not +/// out of bounds. +/// - `num_fixed_bytes`: the number of fixed-bytes in the struct, if it is known. Used to ensure +/// that the first offset doesn't skip any variable bytes. +/// +/// ## References +/// +/// The checks here are derived from this document: +/// +/// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view +pub fn sanitize_offset( + offset: usize, + previous_offset: Option, + num_bytes: usize, + num_fixed_bytes: Option, +) -> Result { + if num_fixed_bytes.map_or(false, |fixed_bytes| offset < fixed_bytes) { + Err(DecodeError::OffsetIntoFixedPortion(offset)) + } else if previous_offset.is_none() + && num_fixed_bytes.map_or(false, |fixed_bytes| offset != fixed_bytes) + { + Err(DecodeError::OffsetSkipsVariableBytes(offset)) + } else if offset > num_bytes { + Err(DecodeError::OffsetOutOfBounds(offset)) + } else if previous_offset.map_or(false, |prev| prev > offset) { + Err(DecodeError::OffsetsAreDecreasing(offset)) + } else { + Ok(offset) + } +} + /// Provides SSZ decoding (de-serialization) via the `from_ssz_bytes(&bytes)` method. 
/// /// See `examples/` for manual implementations or the crate root for implementations using @@ -97,21 +159,14 @@ impl<'a> SszDecoderBuilder<'a> { self.items.push(slice); } else { - let offset = read_offset(&self.bytes[self.items_index..])?; - - let previous_offset = self - .offsets - .last() - .map(|o| o.offset) - .unwrap_or_else(|| BYTES_PER_LENGTH_OFFSET); - - if (previous_offset > offset) || (offset > self.bytes.len()) { - return Err(DecodeError::OutOfBoundsByte { i: offset }); - } - self.offsets.push(Offset { position: self.items.len(), - offset, + offset: sanitize_offset( + read_offset(&self.bytes[self.items_index..])?, + self.offsets.last().map(|o| o.offset), + self.bytes.len(), + None, + )?, }); // Push an empty slice into items; it will be replaced later. @@ -124,13 +179,13 @@ impl<'a> SszDecoderBuilder<'a> { } fn finalize(&mut self) -> Result<(), DecodeError> { - if !self.offsets.is_empty() { + if let Some(first_offset) = self.offsets.first().map(|o| o.offset) { // Check to ensure the first offset points to the byte immediately following the // fixed-length bytes. - if self.offsets[0].offset != self.items_index { - return Err(DecodeError::OutOfBoundsByte { - i: self.offsets[0].offset, - }); + if first_offset < self.items_index { + return Err(DecodeError::OffsetIntoFixedPortion(first_offset)); + } else if first_offset > self.items_index { + return Err(DecodeError::OffsetSkipsVariableBytes(first_offset)); } // Iterate through each pair of offsets, grabbing the slice between each of the offsets. diff --git a/eth2/utils/ssz/src/decode/impls.rs b/eth2/utils/ssz/src/decode/impls.rs index a33fcac189..e039e2d164 100644 --- a/eth2/utils/ssz/src/decode/impls.rs +++ b/eth2/utils/ssz/src/decode/impls.rs @@ -366,7 +366,7 @@ impl_decodable_for_u8_array!(4); impl_decodable_for_u8_array!(32); macro_rules! impl_for_vec { - ($type: ty) => { + ($type: ty, $max_len: expr) => { impl Decode for $type { fn is_ssz_fixed_len() -> bool { false @@ -381,22 +381,22 @@ macro_rules! 
impl_for_vec { .map(|chunk| T::from_ssz_bytes(chunk)) .collect() } else { - decode_list_of_variable_length_items(bytes).map(|vec| vec.into()) + decode_list_of_variable_length_items(bytes, $max_len).map(|vec| vec.into()) } } } }; } -impl_for_vec!(Vec); -impl_for_vec!(SmallVec<[T; 1]>); -impl_for_vec!(SmallVec<[T; 2]>); -impl_for_vec!(SmallVec<[T; 3]>); -impl_for_vec!(SmallVec<[T; 4]>); -impl_for_vec!(SmallVec<[T; 5]>); -impl_for_vec!(SmallVec<[T; 6]>); -impl_for_vec!(SmallVec<[T; 7]>); -impl_for_vec!(SmallVec<[T; 8]>); +impl_for_vec!(Vec, None); +impl_for_vec!(SmallVec<[T; 1]>, Some(1)); +impl_for_vec!(SmallVec<[T; 2]>, Some(2)); +impl_for_vec!(SmallVec<[T; 3]>, Some(3)); +impl_for_vec!(SmallVec<[T; 4]>, Some(4)); +impl_for_vec!(SmallVec<[T; 5]>, Some(5)); +impl_for_vec!(SmallVec<[T; 6]>, Some(6)); +impl_for_vec!(SmallVec<[T; 7]>, Some(7)); +impl_for_vec!(SmallVec<[T; 8]>, Some(8)); /// Decodes `bytes` as if it were a list of variable-length items. /// @@ -405,43 +405,52 @@ impl_for_vec!(SmallVec<[T; 8]>); /// differing types. pub fn decode_list_of_variable_length_items( bytes: &[u8], + max_len: Option, ) -> Result, DecodeError> { - let mut next_variable_byte = read_offset(bytes)?; - - // The value of the first offset must not point back into the same bytes that defined - // it. - if next_variable_byte < BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::OutOfBoundsByte { - i: next_variable_byte, - }); + if bytes.is_empty() { + return Ok(vec![]); } - let num_items = next_variable_byte / BYTES_PER_LENGTH_OFFSET; + let first_offset = read_offset(bytes)?; + sanitize_offset(first_offset, None, bytes.len(), Some(first_offset))?; - // The fixed-length section must be a clean multiple of `BYTES_PER_LENGTH_OFFSET`. 
- if next_variable_byte != num_items * BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::InvalidByteLength { - len: next_variable_byte, - expected: num_items * BYTES_PER_LENGTH_OFFSET, - }); + if first_offset % BYTES_PER_LENGTH_OFFSET != 0 || first_offset < BYTES_PER_LENGTH_OFFSET { + return Err(DecodeError::InvalidListFixedBytesLen(first_offset)); } - let mut values = Vec::with_capacity(num_items); + let num_items = first_offset / BYTES_PER_LENGTH_OFFSET; + + if max_len.map_or(false, |max| num_items > max) { + return Err(DecodeError::BytesInvalid(format!( + "Variable length list of {} items exceeds maximum of {:?}", + num_items, max_len + ))); + } + + // Only initialize the vec with a capacity if a maximum length is provided. + // + // We assume that if a max length is provided then the application is able to handle an + // allocation of this size. + let mut values = if max_len.is_some() { + Vec::with_capacity(num_items) + } else { + vec![] + }; + + let mut offset = first_offset; for i in 1..=num_items { let slice_option = if i == num_items { - bytes.get(next_variable_byte..) + bytes.get(offset..) 
} else { - let offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; + let start = offset; - let start = next_variable_byte; - next_variable_byte = offset; + let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; + offset = sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; - bytes.get(start..next_variable_byte) + bytes.get(start..offset) }; - let slice = slice_option.ok_or_else(|| DecodeError::OutOfBoundsByte { - i: next_variable_byte, - })?; + let slice = slice_option.ok_or_else(|| DecodeError::OutOfBoundsByte { i: offset })?; values.push(T::from_ssz_bytes(slice)?); } @@ -519,26 +528,34 @@ mod tests { ); } + #[test] + fn empty_list() { + let vec: Vec> = vec![]; + let bytes = vec.as_ssz_bytes(); + assert!(bytes.is_empty()); + assert_eq!(Vec::from_ssz_bytes(&bytes), Ok(vec),); + } + #[test] fn first_length_points_backwards() { assert_eq!( >>::from_ssz_bytes(&[0, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 0 }) + Err(DecodeError::InvalidListFixedBytesLen(0)) ); assert_eq!( >>::from_ssz_bytes(&[1, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 1 }) + Err(DecodeError::InvalidListFixedBytesLen(1)) ); assert_eq!( >>::from_ssz_bytes(&[2, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 2 }) + Err(DecodeError::InvalidListFixedBytesLen(2)) ); assert_eq!( >>::from_ssz_bytes(&[3, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 3 }) + Err(DecodeError::InvalidListFixedBytesLen(3)) ); } @@ -546,7 +563,7 @@ mod tests { fn lengths_are_decreasing() { assert_eq!( >>::from_ssz_bytes(&[12, 0, 0, 0, 14, 0, 0, 0, 12, 0, 0, 0, 1, 0, 1, 0]), - Err(DecodeError::OutOfBoundsByte { i: 12 }) + Err(DecodeError::OffsetsAreDecreasing(12)) ); } @@ -554,10 +571,7 @@ mod tests { fn awkward_fixed_length_portion() { assert_eq!( >>::from_ssz_bytes(&[10, 0, 0, 0, 10, 0, 0, 0, 0, 0]), - Err(DecodeError::InvalidByteLength { - len: 10, - expected: 8 - }) + Err(DecodeError::InvalidListFixedBytesLen(10)) ); } @@ -565,14 +579,15 @@ mod 
tests { fn length_out_of_bounds() { assert_eq!( >>::from_ssz_bytes(&[5, 0, 0, 0]), - Err(DecodeError::InvalidByteLength { - len: 5, - expected: 4 - }) + Err(DecodeError::OffsetOutOfBounds(5)) ); assert_eq!( >>::from_ssz_bytes(&[8, 0, 0, 0, 9, 0, 0, 0]), - Err(DecodeError::OutOfBoundsByte { i: 9 }) + Err(DecodeError::OffsetOutOfBounds(9)) + ); + assert_eq!( + >>::from_ssz_bytes(&[8, 0, 0, 0, 16, 0, 0, 0]), + Err(DecodeError::OffsetOutOfBounds(16)) ); } diff --git a/eth2/utils/ssz/tests/tests.rs b/eth2/utils/ssz/tests/tests.rs index f82e5d6e3f..2eada5c51c 100644 --- a/eth2/utils/ssz/tests/tests.rs +++ b/eth2/utils/ssz/tests/tests.rs @@ -152,7 +152,7 @@ mod round_trip { assert_eq!( VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OutOfBoundsByte { i: 9 }) + Err(DecodeError::OffsetIntoFixedPortion(9)) ); } @@ -182,7 +182,7 @@ mod round_trip { assert_eq!( VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OutOfBoundsByte { i: 11 }) + Err(DecodeError::OffsetSkipsVariableBytes(11)) ); } @@ -284,7 +284,7 @@ mod round_trip { assert_eq!( ThreeVariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OutOfBoundsByte { i: 14 }) + Err(DecodeError::OffsetsAreDecreasing(14)) ); } diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs index 8b341f38a1..04ef8b9826 100644 --- a/eth2/utils/ssz_derive/src/lib.rs +++ b/eth2/utils/ssz_derive/src/lib.rs @@ -86,6 +86,7 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { let field_types_f = field_types_a.clone(); let output = quote! { + #[allow(clippy::integer_arithmetic)] impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { fn is_ssz_fixed_len() -> bool { #( @@ -221,6 +222,7 @@ pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { } let output = quote! 
{ + #[allow(clippy::integer_arithmetic)] impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { fn is_ssz_fixed_len() -> bool { #( diff --git a/eth2/utils/ssz_types/src/fixed_vector.rs b/eth2/utils/ssz_types/src/fixed_vector.rs index 91b6912f81..dffd2ad753 100644 --- a/eth2/utils/ssz_types/src/fixed_vector.rs +++ b/eth2/utils/ssz_types/src/fixed_vector.rs @@ -224,29 +224,44 @@ where } fn from_ssz_bytes(bytes: &[u8]) -> Result { + let fixed_len = N::to_usize(); + if bytes.is_empty() { Err(ssz::DecodeError::InvalidByteLength { len: 0, expected: 1, }) } else if T::is_ssz_fixed_len() { + let num_items = bytes + .len() + .checked_div(T::ssz_fixed_len()) + .ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?; + + if num_items != fixed_len { + return Err(ssz::DecodeError::BytesInvalid(format!( + "FixedVector of {} items has {} items", + num_items, fixed_len + ))); + } + bytes .chunks(T::ssz_fixed_len()) .map(|chunk| T::from_ssz_bytes(chunk)) .collect::, _>>() .and_then(|vec| { - if vec.len() == N::to_usize() { + if vec.len() == fixed_len { Ok(vec.into()) } else { Err(ssz::DecodeError::BytesInvalid(format!( - "wrong number of vec elements, got: {}, expected: {}", + "Wrong number of FixedVector elements, got: {}, expected: {}", vec.len(), N::to_usize() ))) } }) } else { - ssz::decode_list_of_variable_length_items(bytes).and_then(|vec| Ok(vec.into())) + ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len)) + .and_then(|vec| Ok(vec.into())) } } } diff --git a/eth2/utils/ssz_types/src/variable_list.rs b/eth2/utils/ssz_types/src/variable_list.rs index 65f72f236d..c5cb185772 100644 --- a/eth2/utils/ssz_types/src/variable_list.rs +++ b/eth2/utils/ssz_types/src/variable_list.rs @@ -218,22 +218,41 @@ where } } -impl ssz::Decode for VariableList +impl ssz::Decode for VariableList where T: ssz::Decode, + N: Unsigned, { fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() + false } fn 
from_ssz_bytes(bytes: &[u8]) -> Result { - let vec = >::from_ssz_bytes(bytes)?; + let max_len = N::to_usize(); - Self::new(vec).map_err(|e| ssz::DecodeError::BytesInvalid(format!("VariableList {:?}", e))) + if bytes.is_empty() { + Ok(vec![].into()) + } else if T::is_ssz_fixed_len() { + let num_items = bytes + .len() + .checked_div(T::ssz_fixed_len()) + .ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?; + + if num_items > max_len { + return Err(ssz::DecodeError::BytesInvalid(format!( + "VariableList of {} items exceeds maximum of {}", + num_items, max_len + ))); + } + + bytes + .chunks(T::ssz_fixed_len()) + .map(|chunk| T::from_ssz_bytes(chunk)) + .collect::, _>>() + .map(Into::into) + } else { + ssz::decode_list_of_variable_length_items(bytes, Some(max_len)).map(|vec| vec.into()) + } } } diff --git a/eth2/utils/tree_hash/src/merkle_hasher.rs b/eth2/utils/tree_hash/src/merkle_hasher.rs index 9c921c0751..02c349eb8e 100644 --- a/eth2/utils/tree_hash/src/merkle_hasher.rs +++ b/eth2/utils/tree_hash/src/merkle_hasher.rs @@ -274,22 +274,20 @@ impl MerkleHasher { loop { if let Some(root) = self.root { break Ok(root); + } else if let Some(node) = self.half_nodes.last() { + let right_child = node.id * 2 + 1; + self.process_right_node(right_child, self.zero_hash(right_child)); + } else if self.next_leaf == 1 { + // The next_leaf can only be 1 if the tree has a depth of one. If have been no + // leaves supplied, assume a root of zero. + break Ok(Hash256::zero()); } else { - if let Some(node) = self.half_nodes.last() { - let right_child = node.id * 2 + 1; - self.process_right_node(right_child, self.zero_hash(right_child)); - } else if self.next_leaf == 1 { - // The next_leaf can only be 1 if the tree has a depth of one. If have been no - // leaves supplied, assume a root of zero. - break Ok(Hash256::zero()); - } else { - // The only scenario where there are (a) no half nodes and (b) a tree of depth - // two or more is where no leaves have been supplied at all. 
- // - // Once we supply this first zero-hash leaf then all future operations will be - // triggered via the `process_right_node` branch. - self.process_left_node(self.next_leaf, self.zero_hash(self.next_leaf)) - } + // The only scenario where there are (a) no half nodes and (b) a tree of depth + // two or more is where no leaves have been supplied at all. + // + // Once we supply this first zero-hash leaf then all future operations will be + // triggered via the `process_right_node` branch. + self.process_left_node(self.next_leaf, self.zero_hash(self.next_leaf)) } } } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index d4afbdbeac..e63b20c376 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -28,3 +28,5 @@ genesis = { path = "../beacon_node/genesis" } deposit_contract = { path = "../eth2/utils/deposit_contract" } tree_hash = { path = "../eth2/utils/tree_hash" } tokio = { version = "0.2", features = ["full"] } +clap_utils = { path = "../eth2/utils/clap_utils" } +eth2-libp2p = { path = "../beacon_node/eth2-libp2p" } diff --git a/lcli/src/check_deposit_data.rs b/lcli/src/check_deposit_data.rs index af2b6fb478..56f18f9988 100644 --- a/lcli/src/check_deposit_data.rs +++ b/lcli/src/check_deposit_data.rs @@ -1,12 +1,12 @@ -use crate::helpers::{parse_hex_bytes, parse_u64}; use clap::ArgMatches; +use clap_utils::{parse_required, parse_ssz_required}; use deposit_contract::{decode_eth1_tx_data, DEPOSIT_DATA_LEN}; use tree_hash::TreeHash; use types::EthSpec; pub fn run(matches: &ArgMatches) -> Result<(), String> { - let rlp_bytes = parse_hex_bytes(matches, "deposit-data")?; - let amount = parse_u64(matches, "deposit-amount")?; + let rlp_bytes = parse_ssz_required::>(matches, "deposit-data")?; + let amount = parse_required(matches, "deposit-amount")?; if rlp_bytes.len() != DEPOSIT_DATA_LEN { return Err(format!( diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs index 91faaf1ae5..39339c54b8 100644 --- a/lcli/src/deploy_deposit_contract.rs 
+++ b/lcli/src/deploy_deposit_contract.rs @@ -1,31 +1,35 @@ use clap::ArgMatches; -use eth1_test_rig::DepositContract; -use futures::compat::Future01CompatExt; -use std::fs::File; -use std::io::Read; +use clap_utils; +use deposit_contract::{ + testnet::{ABI, BYTECODE}, + CONTRACT_DEPLOY_GAS, +}; +use environment::Environment; +use futures::{Future, IntoFuture}; +use std::path::PathBuf; use types::EthSpec; -use web3::{transports::Http, Web3}; +use web3::{ + contract::{Contract, Options}, + transports::Ipc, + types::{Address, U256}, + Web3, +}; -pub async fn run(matches: &ArgMatches<'_>) -> Result<(), String> { - let confirmations = matches - .value_of("confirmations") - .ok_or_else(|| "Confirmations not specified")? - .parse::() - .map_err(|e| format!("Failed to parse confirmations: {}", e))?; +pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { + let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?; + let from_address: Address = clap_utils::parse_required(matches, "from-address")?; + let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?; - let password = parse_password(matches)?; + let (_event_loop_handle, transport) = + Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?; + let web3 = Web3::new(transport); - let endpoint = matches - .value_of("eth1-endpoint") - .ok_or_else(|| "eth1-endpoint not specified")?; - - let (_event_loop, transport) = Http::new(&endpoint).map_err(|e| { + let bytecode = String::from_utf8(BYTECODE.to_vec()).map_err(|e| { format!( - "Failed to start HTTP transport connected to ganache: {:?}", + "Unable to parse deposit contract bytecode as utf-8: {:?}", e ) })?; - let web3 = Web3::new(transport); // It's unlikely that this will be the _actual_ deployment block, however it'll be close // enough to serve our purposes. 
@@ -39,49 +43,26 @@ pub async fn run(matches: &ArgMatches<'_>) -> Result<(), String> { .await .map_err(|e| format!("Failed to get block number: {}", e))?; - info!("Present eth1 block number is {}", deploy_block); + let address = env.runtime().block_on( + Contract::deploy(web3.eth(), &ABI) + .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? + .confirmations(confirmations) + .options(Options { + gas: Some(U256::from(CONTRACT_DEPLOY_GAS)), + ..Options::default() + }) + .execute(bytecode, (), from_address) + .into_future() + .map_err(|e| format!("Unable to execute deployment: {:?}", e)) + .and_then(|pending| { + pending.map_err(|e| format!("Unable to await pending contract: {:?}", e)) + }) + .map(|tx_receipt| tx_receipt.address()) + .map_err(|e| format!("Failed to execute deployment: {:?}", e)), + )?; - info!("Deploying the bytecode at https://github.com/sigp/unsafe-eth2-deposit-contract",); - - info!( - "Submitting deployment transaction, waiting for {} confirmations", - confirmations - ); - - let deposit_contract = DepositContract::deploy_testnet(web3, confirmations, password) - .await - .map_err(|e| format!("Failed to deploy contract: {}", e))?; - - info!( - "Deposit contract deployed. address: {}, deploy_block: {}", - deposit_contract.address(), - deploy_block - ); + println!("deposit_contract_address: {:?}", address); + println!("deposit_contract_deploy_block: {}", deploy_block); Ok(()) } - -pub fn parse_password(matches: &ArgMatches) -> Result, String> { - if let Some(password_path) = matches.value_of("password") { - Ok(Some( - File::open(password_path) - .map_err(|e| format!("Unable to open password file: {:?}", e)) - .and_then(|mut file| { - let mut password = String::new(); - file.read_to_string(&mut password) - .map_err(|e| format!("Unable to read password file to string: {:?}", e)) - .map(|_| password) - }) - .map(|password| { - // Trim the linefeed from the end. 
- if password.ends_with('\n') { - password[0..password.len() - 1].to_string() - } else { - password - } - })?, - )) - } else { - Ok(None) - } -} diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs new file mode 100644 index 0000000000..2d6e685a62 --- /dev/null +++ b/lcli/src/generate_bootnode_enr.rs @@ -0,0 +1,60 @@ +use clap::ArgMatches; +use eth2_libp2p::{ + discovery::{build_enr, CombinedKey, Keypair, ENR_FILENAME}, + NetworkConfig, NETWORK_KEY_FILENAME, +}; +use std::convert::TryInto; +use std::fs; +use std::fs::File; +use std::io::Write; +use std::net::IpAddr; +use std::path::PathBuf; +use types::{EnrForkId, EthSpec}; + +pub fn run(matches: &ArgMatches) -> Result<(), String> { + let ip: IpAddr = clap_utils::parse_required(matches, "ip")?; + let udp_port: u16 = clap_utils::parse_required(matches, "udp-port")?; + let tcp_port: u16 = clap_utils::parse_required(matches, "tcp-port")?; + let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?; + + if output_dir.exists() { + return Err(format!( + "{:?} already exists, will not override", + output_dir + )); + } + + let mut config = NetworkConfig::default(); + config.enr_address = Some(ip); + config.enr_udp_port = Some(udp_port); + config.enr_tcp_port = Some(tcp_port); + + let local_keypair = Keypair::generate_secp256k1(); + let enr_key: CombinedKey = local_keypair + .clone() + .try_into() + .map_err(|e| format!("Unable to convert keypair: {:?}", e))?; + let enr = build_enr::(&enr_key, &config, EnrForkId::default()) + .map_err(|e| format!("Unable to create ENR: {:?}", e))?; + + fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; + + let mut enr_file = File::create(output_dir.join(ENR_FILENAME)) + .map_err(|e| format!("Unable to create {}: {:?}", ENR_FILENAME, e))?; + enr_file + .write_all(&enr.to_base64().as_bytes()) + .map_err(|e| format!("Unable to write ENR to {}: {:?}", ENR_FILENAME, e))?; + + let secret_bytes = match 
local_keypair { + Keypair::Secp256k1(key) => key.secret().to_bytes(), + _ => return Err("Key is not a secp256k1 key".into()), + }; + + let mut key_file = File::create(output_dir.join(NETWORK_KEY_FILENAME)) + .map_err(|e| format!("Unable to create {}: {:?}", NETWORK_KEY_FILENAME, e))?; + key_file + .write_all(&secret_bytes) + .map_err(|e| format!("Unable to write key to {}: {:?}", NETWORK_KEY_FILENAME, e))?; + + Ok(()) +} diff --git a/lcli/src/helpers.rs b/lcli/src/helpers.rs index 6f7014f3a6..441059cd19 100644 --- a/lcli/src/helpers.rs +++ b/lcli/src/helpers.rs @@ -29,6 +29,14 @@ pub fn parse_path_with_default_in_home_dir( }) } +pub fn parse_path(matches: &ArgMatches, name: &'static str) -> Result { + matches + .value_of(name) + .ok_or_else(|| format!("{} not specified", name))? + .parse::() + .map_err(|e| format!("Unable to parse {}: {}", name, e)) +} + pub fn parse_u64(matches: &ArgMatches, name: &'static str) -> Result { matches .value_of(name) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index ce756dfdfa..ebda88f93e 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -5,7 +5,7 @@ mod change_genesis_time; mod check_deposit_data; mod deploy_deposit_contract; mod eth1_genesis; -mod helpers; +mod generate_bootnode_enr; mod interop_genesis; mod new_testnet; mod parse_hex; @@ -18,6 +18,7 @@ use log::Level; use parse_hex::run_parse_hex; use std::fs::File; use std::path::PathBuf; +use std::process; use std::time::{SystemTime, UNIX_EPOCH}; use transition_blocks::run_transition_blocks; use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, MinimalEthSpec}; @@ -28,8 +29,7 @@ async fn main() { let matches = App::new("Lighthouse CLI Tool") .about( - "Performs various testing-related tasks, modelled after zcli. 
\ - by @protolambda.", + "Performs various testing-related tasks, including defining testnets.", ) .arg( Arg::with_name("spec") @@ -41,6 +41,15 @@ async fn main() { .possible_values(&["minimal", "mainnet"]) .default_value("mainnet") ) + .arg( + Arg::with_name("testnet-dir") + .short("d") + .long("testnet-dir") + .value_name("PATH") + .takes_value(true) + .global(true) + .help("The testnet dir. Defaults to ~/.lighthouse/testnet"), + ) .subcommand( SubCommand::with_name("genesis_yaml") .about("Generates a genesis YAML file") @@ -120,13 +129,22 @@ async fn main() { "Deploy a testing eth1 deposit contract.", ) .arg( - Arg::with_name("eth1-endpoint") + Arg::with_name("eth1-ipc") + .long("eth1-ipc") .short("e") - .long("eth1-endpoint") - .value_name("HTTP_SERVER") + .value_name("ETH1_IPC_PATH") + .help("Path to an Eth1 JSON-RPC IPC endpoint") .takes_value(true) - .default_value("http://localhost:8545") - .help("The URL to the eth1 JSON-RPC http API."), + .required(true) + ) + .arg( + Arg::with_name("from-address") + .long("from-address") + .short("f") + .value_name("FROM_ETH1_ADDRESS") + .help("The address that will submit the contract creation. Must be unlocked.") + .takes_value(true) + .required(true) ) .arg( Arg::with_name("confirmations") @@ -136,13 +154,6 @@ async fn main() { .default_value("3") .help("The number of block confirmations before declaring the contract deployed."), ) - .arg( - Arg::with_name("password") - .long("password") - .value_name("FILE") - .takes_value(true) - .help("The password file to unlock the eth1 account (see --index)"), - ) ) .subcommand( SubCommand::with_name("refund-deposit-contract") @@ -150,37 +161,32 @@ async fn main() { "Calls the steal() function on a testnet eth1 contract.", ) .arg( - Arg::with_name("testnet-dir") - .short("d") - .long("testnet-dir") - .value_name("PATH") - .takes_value(true) - .help("The testnet dir. 
Defaults to ~/.lighthouse/testnet"), - ) - .arg( - Arg::with_name("eth1-endpoint") + Arg::with_name("eth1-ipc") + .long("eth1-ipc") .short("e") - .long("eth1-endpoint") - .value_name("HTTP_SERVER") + .value_name("ETH1_IPC_PATH") + .help("Path to an Eth1 JSON-RPC IPC endpoint") .takes_value(true) - .default_value("http://localhost:8545") - .help("The URL to the eth1 JSON-RPC http API."), + .required(true) ) .arg( - Arg::with_name("password") - .long("password") - .value_name("FILE") + Arg::with_name("from-address") + .long("from-address") + .short("f") + .value_name("FROM_ETH1_ADDRESS") + .help("The address that will submit the contract creation. Must be unlocked.") .takes_value(true) - .help("The password file to unlock the eth1 account (see --index)"), + .required(true) ) .arg( - Arg::with_name("account-index") - .short("i") - .long("account-index") - .value_name("INDEX") + Arg::with_name("contract-address") + .long("contract-address") + .short("c") + .value_name("CONTRACT_ETH1_ADDRESS") + .help("The address of the contract to be refunded. Its owner must match + --from-address.") .takes_value(true) - .default_value("0") - .help("The eth1 accounts[] index which will send the transaction"), + .required(true) ) ) .subcommand( @@ -188,14 +194,6 @@ async fn main() { .about( "Listens to the eth1 chain and finds the genesis beacon state", ) - .arg( - Arg::with_name("testnet-dir") - .short("d") - .long("testnet-dir") - .value_name("PATH") - .takes_value(true) - .help("The testnet dir. Defaults to ~/.lighthouse/testnet"), - ) .arg( Arg::with_name("eth1-endpoint") .short("e") @@ -211,14 +209,6 @@ async fn main() { .about( "Produces an interop-compatible genesis state using deterministic keypairs", ) - .arg( - Arg::with_name("testnet-dir") - .short("d") - .long("testnet-dir") - .value_name("PATH") - .takes_value(true) - .help("The testnet dir. 
Defaults to ~/.lighthouse/testnet"), - ) .arg( Arg::with_name("validator-count") .long("validator-count") @@ -262,14 +252,15 @@ async fn main() { .subcommand( SubCommand::with_name("new-testnet") .about( - "Produce a new testnet directory.", + "Produce a new testnet directory. If any of the optional flags are not + supplied the values will remain the default for the --spec flag", ) .arg( - Arg::with_name("testnet-dir") - .long("testnet-dir") - .value_name("DIRECTORY") - .takes_value(true) - .help("The output path for the new testnet directory. Defaults to ~/.lighthouse/testnet"), + Arg::with_name("force") + .long("force") + .short("f") + .takes_value(false) + .help("Overwrites any previous testnet configurations"), ) .arg( Arg::with_name("min-genesis-time") @@ -284,7 +275,6 @@ async fn main() { .long("min-genesis-active-validator-count") .value_name("INTEGER") .takes_value(true) - .default_value("16384") .help("The number of validators required to trigger eth2 genesis."), ) .arg( @@ -292,7 +282,6 @@ async fn main() { .long("min-genesis-delay") .value_name("SECONDS") .takes_value(true) - .default_value("3600") // 10 minutes .help("The delay between sufficient eth1 deposits and eth2 genesis."), ) .arg( @@ -300,7 +289,6 @@ async fn main() { .long("min-deposit-amount") .value_name("GWEI") .takes_value(true) - .default_value("100000000") // 0.1 Eth .help("The minimum permitted deposit amount."), ) .arg( @@ -308,7 +296,6 @@ async fn main() { .long("max-effective-balance") .value_name("GWEI") .takes_value(true) - .default_value("3200000000") // 3.2 Eth .help("The amount required to become a validator."), ) .arg( @@ -316,7 +303,6 @@ async fn main() { .long("effective-balance-increment") .value_name("GWEI") .takes_value(true) - .default_value("100000000") // 0.1 Eth .help("The steps in effective balance calculation."), ) .arg( @@ -324,7 +310,6 @@ async fn main() { .long("ejection-balance") .value_name("GWEI") .takes_value(true) - .default_value("1600000000") // 1.6 Eth 
.help("The balance at which a validator gets ejected."), ) .arg( @@ -332,7 +317,6 @@ async fn main() { .long("eth1-follow-distance") .value_name("ETH1_BLOCKS") .takes_value(true) - .default_value("16") .help("The distance to follow behind the eth1 chain head."), ) .arg( @@ -340,7 +324,6 @@ async fn main() { .long("genesis-fork-version") .value_name("HEX") .takes_value(true) - .default_value("0x00000000") .help("Used to avoid reply attacks between testnets. Recommended to set to non-default."), ) @@ -349,7 +332,7 @@ async fn main() { .long("deposit-contract-address") .value_name("ETH1_ADDRESS") .takes_value(true) - .default_value("0x0000000000000000000000000000000000000000") + .required(true) .help("The address of the deposit contract."), ) .arg( @@ -385,11 +368,55 @@ async fn main() { function signature."), ) ) + .subcommand( + SubCommand::with_name("generate-bootnode-enr") + .about( + "Generates an ENR address to be used as a pre-genesis boot node..", + ) + .arg( + Arg::with_name("ip") + .long("ip") + .value_name("IP_ADDRESS") + .takes_value(true) + .required(true) + .help("The IP address to be included in the ENR and used for discovery"), + ) + .arg( + Arg::with_name("udp-port") + .long("udp-port") + .value_name("UDP_PORT") + .takes_value(true) + .required(true) + .help("The UDP port to be included in the ENR and used for discovery"), + ) + .arg( + Arg::with_name("tcp-port") + .long("tcp-port") + .value_name("TCP_PORT") + .takes_value(true) + .required(true) + .help("The TCP port to be included in the ENR and used for application comms"), + ) + .arg( + Arg::with_name("output-dir") + .long("output-dir") + .value_name("OUTPUT_DIRECTORY") + .takes_value(true) + .required(true) + .help("The directory in which to create the network dir"), + ) + ) .get_matches(); macro_rules! 
run_with_spec { ($env_builder: expr) => { - run($env_builder, &matches) + match run($env_builder, &matches) { + Ok(()) => process::exit(0), + Err(e) => { + println!("Failed to run lcli: {}", e); + process::exit(1) + } + } }; } @@ -404,14 +431,14 @@ async fn main() { } } -async fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches<'_>) { +fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> Result<(), String> { let env = env_builder .multi_threaded_tokio_runtime() - .expect("should start tokio runtime") + .map_err(|e| format!("should start tokio runtime: {:?}", e))? .async_logger("trace", None) - .expect("should start null logger") + .map_err(|e| format!("should start null logger: {:?}", e))? .build() - .expect("should build env"); + .map_err(|e| format!("should build env: {:?}", e))?; match matches.subcommand() { ("genesis_yaml", Some(matches)) => { @@ -450,31 +477,34 @@ async fn run(env_builder: EnvironmentBuilder, matches: &ArgMatche _ => unreachable!("guarded by slog possible_values"), }; info!("Genesis state YAML file created. 
Exiting successfully."); + Ok(()) } ("transition-blocks", Some(matches)) => run_transition_blocks::(matches) - .unwrap_or_else(|e| error!("Failed to transition blocks: {}", e)), - ("pretty-hex", Some(matches)) => run_parse_hex::(matches) - .unwrap_or_else(|e| error!("Failed to pretty print hex: {}", e)), - ("deploy-deposit-contract", Some(matches)) => deploy_deposit_contract::run::(matches) - .await - .unwrap_or_else(|e| error!("Failed to run deploy-deposit-contract command: {}", e)), + .map_err(|e| format!("Failed to transition blocks: {}", e)), + ("pretty-hex", Some(matches)) => { + run_parse_hex::(matches).map_err(|e| format!("Failed to pretty print hex: {}", e)) + } + ("deploy-deposit-contract", Some(matches)) => { + deploy_deposit_contract::run::(env, matches) + .map_err(|e| format!("Failed to run deploy-deposit-contract command: {}", e)) + } ("refund-deposit-contract", Some(matches)) => { refund_deposit_contract::run::(env, matches) - .await - .unwrap_or_else(|e| error!("Failed to run refund-deposit-contract command: {}", e)) + .map_err(|e| format!("Failed to run refund-deposit-contract command: {}", e)) } ("eth1-genesis", Some(matches)) => eth1_genesis::run::(env, matches) - .await - .unwrap_or_else(|e| error!("Failed to run eth1-genesis command: {}", e)), + .map_err(|e| format!("Failed to run eth1-genesis command: {}", e)), ("interop-genesis", Some(matches)) => interop_genesis::run::(env, matches) - .unwrap_or_else(|e| error!("Failed to run interop-genesis command: {}", e)), + .map_err(|e| format!("Failed to run interop-genesis command: {}", e)), ("change-genesis-time", Some(matches)) => change_genesis_time::run::(matches) - .unwrap_or_else(|e| error!("Failed to run change-genesis-time command: {}", e)), + .map_err(|e| format!("Failed to run change-genesis-time command: {}", e)), ("new-testnet", Some(matches)) => new_testnet::run::(matches) - .unwrap_or_else(|e| error!("Failed to run new_testnet command: {}", e)), + .map_err(|e| format!("Failed to run 
new_testnet command: {}", e)), ("check-deposit-data", Some(matches)) => check_deposit_data::run::(matches) - .unwrap_or_else(|e| error!("Failed to run check-deposit-data command: {}", e)), - (other, _) => error!("Unknown subcommand {}. See --help.", other), + .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), + ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::(matches) + .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), + (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), } } diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 447bffdfc3..6e2eea4030 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,8 +1,10 @@ -use crate::helpers::*; use clap::ArgMatches; +use clap_utils::{ + parse_optional, parse_path_with_default_in_home_dir, parse_required, parse_ssz_optional, +}; use eth2_testnet_config::Eth2TestnetConfig; use std::path::PathBuf; -use types::{EthSpec, YamlConfig}; +use types::{Address, EthSpec, YamlConfig}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let testnet_dir_path = parse_path_with_default_in_home_dir( @@ -10,40 +12,44 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { "testnet-dir", PathBuf::from(".lighthouse/testnet"), )?; - let min_genesis_time = parse_u64_opt(matches, "min-genesis-time")?; - let min_genesis_delay = parse_u64(matches, "min-genesis-delay")?; - let min_genesis_active_validator_count = - parse_u64(matches, "min-genesis-active-validator-count")?; - let min_deposit_amount = parse_u64(matches, "min-deposit-amount")?; - let max_effective_balance = parse_u64(matches, "max-effective-balance")?; - let effective_balance_increment = parse_u64(matches, "effective-balance-increment")?; - let ejection_balance = parse_u64(matches, "ejection-balance")?; - let eth1_follow_distance = parse_u64(matches, "eth1-follow-distance")?; - let deposit_contract_deploy_block = parse_u64(matches, 
"deposit-contract-deploy-block")?; - let genesis_fork_version = parse_fork_opt(matches, "genesis-fork-version")?; - let deposit_contract_address = parse_address(matches, "deposit-contract-address")?; + let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; + let deposit_contract_deploy_block = parse_required(matches, "deposit-contract-deploy-block")?; + + let overwrite_files = matches.is_present("force"); if testnet_dir_path.exists() { - return Err(format!( - "{:?} already exists, will not overwrite", - testnet_dir_path - )); + if !overwrite_files { + return Err(format!( + "{:?} already exists, will not overwrite. Use --force to overwrite", + testnet_dir_path + )); + } } let mut spec = T::default_spec(); - if let Some(time) = min_genesis_time { - spec.min_genesis_time = time; - } else { - spec.min_genesis_time = time_now()?; + + // Update the spec value if the flag was defined. Otherwise, leave it as the default. + macro_rules! maybe_update { + ($flag: tt, $var: ident) => { + if let Some(val) = parse_optional(matches, $flag)? 
{ + spec.$var = val + } + }; } - spec.min_deposit_amount = min_deposit_amount; - spec.min_genesis_active_validator_count = min_genesis_active_validator_count; - spec.max_effective_balance = max_effective_balance; - spec.effective_balance_increment = effective_balance_increment; - spec.ejection_balance = ejection_balance; - spec.eth1_follow_distance = eth1_follow_distance; - spec.min_genesis_delay = min_genesis_delay; - if let Some(v) = genesis_fork_version { + + maybe_update!("min-genesis-time", min_genesis_time); + maybe_update!("min-deposit-amount", min_deposit_amount); + maybe_update!( + "min-genesis-active-validator-count", + min_genesis_active_validator_count + ); + maybe_update!("max-effective-balance", max_effective_balance); + maybe_update!("effective-balance-increment", effective_balance_increment); + maybe_update!("ejection-balance", ejection_balance); + maybe_update!("eth1-follow_distance", eth1_follow_distance); + maybe_update!("min-genesis-delay", min_genesis_delay); + + if let Some(v) = parse_ssz_optional(matches, "genesis-fork-version")? 
{ spec.genesis_fork_version = v; } @@ -55,5 +61,5 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { yaml_config: Some(YamlConfig::from_spec::(&spec)), }; - testnet.write_to_file(testnet_dir_path) + testnet.write_to_file(testnet_dir_path, overwrite_files) } diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs new file mode 100644 index 0000000000..a6359c8b01 --- /dev/null +++ b/lcli/src/parse_ssz.rs @@ -0,0 +1,40 @@ +use crate::helpers::parse_path; +use clap::ArgMatches; +use serde::Serialize; +use ssz::Decode; +use std::fs::File; +use std::io::Read; +use types::{EthSpec, SignedBeaconBlock}; + +pub fn run(matches: &ArgMatches) -> Result<(), String> { + let type_str = matches + .value_of("type") + .ok_or_else(|| "No type supplied".to_string())?; + let path = parse_path(matches, "path")?; + + info!("Type: {:?}", type_str); + + let mut bytes = vec![]; + let mut file = File::open(&path).map_err(|e| format!("Unable to open {:?}: {}", path, e))?; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {:?}: {}", path, e))?; + + match type_str { + "SignedBeaconBlock" => decode_and_print::>(&bytes)?, + other => return Err(format!("Unknown type: {}", other)), + }; + + Ok(()) +} + +fn decode_and_print(bytes: &[u8]) -> Result<(), String> { + let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e))?; + + println!( + "{}", + serde_yaml::to_string(&item) + .map_err(|e| format!("Unable to write object to YAML: {:?}", e))? 
+ ); + + Ok(()) +} diff --git a/lcli/src/refund_deposit_contract.rs b/lcli/src/refund_deposit_contract.rs index fb085ba707..719a8ef1b0 100644 --- a/lcli/src/refund_deposit_contract.rs +++ b/lcli/src/refund_deposit_contract.rs @@ -1,12 +1,10 @@ -use crate::deploy_deposit_contract::parse_password; use clap::ArgMatches; use environment::Environment; -use eth2_testnet_config::Eth2TestnetConfig; -use futures::compat::Future01CompatExt; +use futures::Future; use std::path::PathBuf; use types::EthSpec; use web3::{ - transports::Http, + transports::Ipc, types::{Address, TransactionRequest, U256}, Web3, }; @@ -14,97 +12,29 @@ use web3::{ /// `keccak("steal()")[0..4]` pub const STEAL_FN_SIGNATURE: &[u8] = &[0xcf, 0x7a, 0x89, 0x65]; -pub async fn run(_env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { - let endpoint = matches - .value_of("eth1-endpoint") - .ok_or_else(|| "eth1-endpoint not specified")?; - - let account_index = matches - .value_of("account-index") - .ok_or_else(|| "No account-index".to_string())? 
- .parse::() - .map_err(|e| format!("Unable to parse account-index: {}", e))?; - - let password_opt = parse_password(matches)?; - - let testnet_dir = matches - .value_of("testnet-dir") - .ok_or_else(|| ()) - .and_then(|dir| dir.parse::().map_err(|_| ())) - .unwrap_or_else(|_| { - dirs::home_dir() - .map(|home| home.join(".lighthouse").join("testnet")) - .expect("should locate home directory") - }); - - let eth2_testnet_config: Eth2TestnetConfig = Eth2TestnetConfig::load(testnet_dir)?; - - let (_event_loop, transport) = Http::new(&endpoint).map_err(|e| { - format!( - "Failed to start HTTP transport connected to ganache: {:?}", - e - ) - })?; +pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { + let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?; + let from: Address = clap_utils::parse_required(matches, "from-address")?; + let contract_address: Address = clap_utils::parse_required(matches, "contract-address")?; + let (_event_loop_handle, transport) = + Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?; let web3 = Web3::new(transport); - // Convert from `types::Address` to `web3::types::Address`. - let deposit_contract = Address::from_slice( - eth2_testnet_config - .deposit_contract_address()? - .as_fixed_bytes(), - ); - - let from_address = web3 - .eth() - .accounts() - .compat() - .await - .map_err(|e| format!("Failed to get accounts: {:?}", e)) - .and_then(|accounts| { - accounts - .get(account_index) - .cloned() - .ok_or_else(|| "Insufficient accounts for deposit".to_string()) - })?; - - let from = if let Some(password) = password_opt { - // Unlock for only a single transaction. 
- let duration = None; - - let result = web3 - .personal() - .unlock_account(from_address, &password, duration) - .compat() - .await; - match result { - Ok(true) => from_address, - Ok(false) => return Err("Eth1 node refused to unlock account".to_string()), - Err(e) => return Err(format!("Eth1 unlock request failed: {:?}", e)), - } - } else { - from_address - }; - - let tx_request = TransactionRequest { - from, - to: Some(deposit_contract), - gas: Some(U256::from(400_000)), - gas_price: None, - value: Some(U256::zero()), - data: Some(STEAL_FN_SIGNATURE.into()), - nonce: None, - condition: None, - }; - - let tx = web3 - .eth() - .send_transaction(tx_request) - .compat() - .await - .map_err(|e| format!("Failed to call deposit fn: {:?}", e))?; - - info!("Refund transaction submitted: eth1_tx_hash: {:?}", tx); + env.runtime().block_on( + web3.eth() + .send_transaction(TransactionRequest { + from, + to: Some(contract_address), + gas: Some(U256::from(400_000)), + gas_price: None, + value: Some(U256::zero()), + data: Some(STEAL_FN_SIGNATURE.into()), + nonce: None, + condition: None, + }) + .map_err(|e| format!("Failed to call deposit fn: {:?}", e)), + )?; Ok(()) } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index f22f79097a..e93dd01926 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,6 +4,9 @@ version = "0.2.0" authors = ["Sigma Prime "] edition = "2018" +[features] +write_ssz_files = ["beacon_node/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. 
+ [dependencies] beacon_node = { "path" = "../beacon_node" } tokio = "0.1.22" @@ -19,3 +22,4 @@ environment = { path = "./environment" } futures = "0.1.25" validator_client = { "path" = "../validator_client" } account_manager = { "path" = "../account_manager" } +clap_utils = { path = "../eth2/utils/clap_utils" } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 08c6d0a5d2..20e54b0979 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -28,6 +28,7 @@ pub struct EnvironmentBuilder { log: Option, eth_spec_instance: E, eth2_config: Eth2Config, + testnet: Option>, } impl EnvironmentBuilder { @@ -38,6 +39,7 @@ impl EnvironmentBuilder { log: None, eth_spec_instance: MinimalEthSpec, eth2_config: Eth2Config::minimal(), + testnet: None, } } } @@ -50,6 +52,7 @@ impl EnvironmentBuilder { log: None, eth_spec_instance: MainnetEthSpec, eth2_config: Eth2Config::mainnet(), + testnet: None, } } } @@ -62,6 +65,7 @@ impl EnvironmentBuilder { log: None, eth_spec_instance: InteropEthSpec, eth2_config: Eth2Config::interop(), + testnet: None, } } } @@ -140,7 +144,7 @@ impl EnvironmentBuilder { /// Setups eth2 config using the CLI arguments. pub fn eth2_testnet_config( mut self, - eth2_testnet_config: &Eth2TestnetConfig, + eth2_testnet_config: Eth2TestnetConfig, ) -> Result { // Create a new chain spec from the default configuration. 
self.eth2_config.spec = eth2_testnet_config @@ -155,6 +159,8 @@ impl EnvironmentBuilder { ) })?; + self.testnet = Some(eth2_testnet_config); + Ok(self) } @@ -169,6 +175,7 @@ impl EnvironmentBuilder { .ok_or_else(|| "Cannot build environment without log".to_string())?, eth_spec_instance: self.eth_spec_instance, eth2_config: self.eth2_config, + testnet: self.testnet, }) } } @@ -211,6 +218,7 @@ pub struct Environment { log: Logger, eth_spec_instance: E, pub eth2_config: Eth2Config, + pub testnet: Option>, } impl Environment { diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 3c473db2d1..dbb3c90395 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -1,8 +1,9 @@ #[macro_use] extern crate clap; -use beacon_node::{get_eth2_testnet_config, get_testnet_dir, ProductionBeaconNode}; +use beacon_node::ProductionBeaconNode; use clap::{App, Arg, ArgMatches}; +use clap_utils; use env_logger::{Builder, Env}; use environment::EnvironmentBuilder; use slog::{crit, info, warn}; @@ -123,12 +124,13 @@ fn run( .ok_or_else(|| "Expected --debug-level flag".to_string())?; let log_format = matches.value_of("log-format"); - let eth2_testnet_config = get_eth2_testnet_config(&get_testnet_dir(matches))?; + let eth2_testnet_config = + clap_utils::parse_testnet_dir_with_hardcoded_default(matches, "testnet-dir")?; let mut environment = environment_builder .async_logger(debug_level, log_format)? .multi_threaded_tokio_runtime()? - .eth2_testnet_config(ð2_testnet_config)? + .eth2_testnet_config(eth2_testnet_config)? .build()?; let log = environment.core_context().log; @@ -164,7 +166,7 @@ fn run( if let Some(sub_matches) = matches.subcommand_matches("account_manager") { // Pass the entire `environment` to the account manager so it can run blocking operations. - account_manager::run(sub_matches, environment); + account_manager::run(sub_matches, environment)?; // Exit as soon as account manager returns control. 
return Ok(()); diff --git a/tests/ef_tests/src/cases/bls_aggregate_verify.rs b/tests/ef_tests/src/cases/bls_aggregate_verify.rs index c6a2b2d4f3..7cac4e9066 100644 --- a/tests/ef_tests/src/cases/bls_aggregate_verify.rs +++ b/tests/ef_tests/src/cases/bls_aggregate_verify.rs @@ -1,12 +1,12 @@ use super::*; use crate::case_result::compare_result; use crate::cases::common::BlsCase; -use bls::{AggregatePublicKey, AggregateSignature}; +use bls::{AggregateSignature, PublicKey}; use serde_derive::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct BlsAggregatePair { - pub pubkey: AggregatePublicKey, + pub pubkey: PublicKey, pub message: String, } diff --git a/tests/ef_tests/src/cases/bls_sign_msg.rs b/tests/ef_tests/src/cases/bls_sign_msg.rs index 53b09e4fd7..3eae6eb150 100644 --- a/tests/ef_tests/src/cases/bls_sign_msg.rs +++ b/tests/ef_tests/src/cases/bls_sign_msg.rs @@ -23,7 +23,6 @@ impl Case for BlsSign { // Convert private_key and message to required types let mut sk = hex::decode(&self.input.privkey[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; - pad_to_48(&mut sk); let sk = SecretKey::from_bytes(&sk).unwrap(); let msg = hex::decode(&self.input.message[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; @@ -37,10 +36,3 @@ impl Case for BlsSign { compare_result::, Vec>(&Ok(signature.as_bytes()), &Some(decoded)) } } - -// Increase the size of an array to 48 bytes -fn pad_to_48(array: &mut Vec) { - while array.len() < 48 { - array.insert(0, 0); - } -} diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index b3a85b8ea3..3bec38af80 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -13,7 +13,9 @@ fn config_test() { .join("config.yaml"); let yaml_config = YamlConfig::from_file(&config_path).expect("config file loads OK"); let spec = E::default_spec(); + let yaml_from_spec = YamlConfig::from_spec::(&spec); assert_eq!(yaml_config.apply_to_chain_spec::(&spec), Some(spec)); + 
assert_eq!(yaml_from_spec, yaml_config); } #[test] diff --git a/tests/simulator/src/cli.rs b/tests/simulator/src/cli.rs index 4f3277fc11..444e670104 100644 --- a/tests/simulator/src/cli.rs +++ b/tests/simulator/src/cli.rs @@ -78,27 +78,31 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("s") .long("speedup") .takes_value(true) - .help("Speed up factor for eth1 blocks and slot production (default 15)"), + .default_value("15") + .help("Speed up factor for eth1 blocks and slot production"), ) .arg( Arg::with_name("initial_delay") .short("i") .long("initial_delay") .takes_value(true) - .help("Epoch delay for new beacon node to start syncing (default 50)"), + .default_value("5") + .help("Epoch delay for new beacon node to start syncing"), ) .arg( Arg::with_name("sync_timeout") .long("sync_timeout") .takes_value(true) - .help("Number of epochs after which newly added beacon nodes must be synced (default 10)"), + .default_value("10") + .help("Number of epochs after which newly added beacon nodes must be synced"), ) .arg( Arg::with_name("strategy") .long("strategy") .takes_value(true) + .default_value("all") .possible_values(&["one-node", "two-nodes", "mixed", "all"]) - .help("Sync verification strategy to run. 
(default all)"), + .help("Sync verification strategy to run."), ), ) } diff --git a/tests/simulator/src/sync_sim.rs b/tests/simulator/src/sync_sim.rs index 6e8ac4d46e..16a62fc327 100644 --- a/tests/simulator/src/sync_sim.rs +++ b/tests/simulator/src/sync_sim.rs @@ -12,14 +12,14 @@ use tokio::timer::Interval; use types::{Epoch, EthSpec}; pub fn run_syncing_sim(matches: &ArgMatches) -> Result<(), String> { - let initial_delay = value_t!(matches, "initial_delay", u64).unwrap_or(50); - let sync_delay = value_t!(matches, "sync_delay", u64).unwrap_or(10); - let speed_up_factor = value_t!(matches, "speedup", u64).unwrap_or(15); - let strategy = value_t!(matches, "strategy", String).unwrap_or("all".into()); + let initial_delay = value_t!(matches, "initial_delay", u64).unwrap(); + let sync_timeout = value_t!(matches, "sync_timeout", u64).unwrap(); + let speed_up_factor = value_t!(matches, "speedup", u64).unwrap(); + let strategy = value_t!(matches, "strategy", String).unwrap(); println!("Syncing Simulator:"); println!(" initial_delay:{}", initial_delay); - println!(" sync delay:{}", sync_delay); + println!(" sync timeout: {}", sync_timeout); println!(" speed up factor:{}", speed_up_factor); println!(" strategy:{}", strategy); @@ -29,7 +29,7 @@ pub fn run_syncing_sim(matches: &ArgMatches) -> Result<(), String> { syncing_sim( speed_up_factor, initial_delay, - sync_delay, + sync_timeout, strategy, log_level, log_format, @@ -39,7 +39,7 @@ pub fn run_syncing_sim(matches: &ArgMatches) -> Result<(), String> { fn syncing_sim( speed_up_factor: u64, initial_delay: u64, - sync_delay: u64, + sync_timeout: u64, strategy: String, log_level: &str, log_format: Option<&str>, @@ -108,7 +108,7 @@ fn syncing_sim( beacon_config.clone(), slot_duration, initial_delay, - sync_delay, + sync_timeout, )) .join(final_future) .map(|_| network) @@ -353,27 +353,24 @@ pub fn verify_syncing( pub fn check_still_syncing( network: &LocalNetwork, ) -> impl Future { - let net = network.clone(); network 
.remote_nodes() .into_future() - // get all head epochs + // get syncing status of nodes .and_then(|remote_nodes| { stream::unfold(remote_nodes.into_iter(), |mut iter| { iter.next().map(|remote_node| { remote_node .http - .beacon() - .get_head() - .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) - .map(|epoch| (epoch, iter)) - .map_err(|e| format!("Get head via http failed: {:?}", e)) + .node() + .syncing_status() + .map(|status| status.is_syncing) + .map(|status| (status, iter)) + .map_err(|e| format!("Get syncing status via http failed: {:?}", e)) }) }) .collect() }) - // find current epoch - .and_then(move |epochs| net.bootnode_epoch().map(|epoch| (epochs, epoch))) - .and_then(move |(epochs, epoch)| Ok(epochs.iter().any(|head_epoch| *head_epoch != epoch))) + .and_then(move |status| Ok(status.iter().any(|is_syncing| *is_syncing))) .map_err(|e| format!("Failed syncing check: {:?}", e)) } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 11db63302e..68dfc906d0 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -41,3 +41,4 @@ bls = { path = "../eth2/utils/bls" } remote_beacon_node = { path = "../eth2/utils/remote_beacon_node" } tempdir = "0.3" rayon = "1.2.0" +web3 = "0.10.0" diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index c8220775d8..c1842519b5 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -271,12 +271,14 @@ impl AttestationService { .validator_store .produce_selection_proof(duty.validator_pubkey(), slot)?; let modulo = duty.duty.aggregator_modulo?; - let subscription = ValidatorSubscription { validator_index, attestation_committee_index, slot, - is_aggregator: selection_proof.is_aggregator(modulo), + is_aggregator: selection_proof + .is_aggregator(modulo) + .map_err(|e| crit!(log_1, "Unable to determine aggregator: {:?}", e)) + .ok()?, }; Some((subscription, (duty, 
selection_proof))) diff --git a/validator_client/src/validator_directory.rs b/validator_client/src/validator_directory.rs index f904e7f678..197e1cb44e 100644 --- a/validator_client/src/validator_directory.rs +++ b/validator_client/src/validator_directory.rs @@ -1,5 +1,6 @@ use bls::get_withdrawal_credentials; -use deposit_contract::encode_eth1_tx_data; +use deposit_contract::{encode_eth1_tx_data, DEPOSIT_GAS}; +use futures::{Future, IntoFuture}; use hex; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -12,6 +13,10 @@ use types::{ test_utils::generate_deterministic_keypair, ChainSpec, DepositData, Hash256, Keypair, PublicKey, SecretKey, Signature, }; +use web3::{ + types::{Address, TransactionRequest, U256}, + Transport, Web3, +}; const VOTING_KEY_PREFIX: &str = "voting"; const WITHDRAWAL_KEY_PREFIX: &str = "withdrawal"; @@ -241,7 +246,7 @@ impl ValidatorDirectoryBuilder { Ok(()) } - pub fn write_eth1_data_file(mut self) -> Result { + fn get_deposit_data(&self) -> Result<(Vec, u64), String> { let voting_keypair = self .voting_keypair .as_ref() @@ -254,30 +259,35 @@ impl ValidatorDirectoryBuilder { .amount .ok_or_else(|| "write_eth1_data_file requires an amount")?; let spec = self.spec.as_ref().ok_or_else(|| "build requires a spec")?; + + let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( + &withdrawal_keypair.pk, + spec.bls_withdrawal_prefix_byte, + )); + + let mut deposit_data = DepositData { + pubkey: voting_keypair.pk.clone().into(), + withdrawal_credentials, + amount, + signature: Signature::empty_signature().into(), + }; + + deposit_data.signature = deposit_data.create_signature(&voting_keypair.sk, &spec); + + let deposit_data = encode_eth1_tx_data(&deposit_data) + .map_err(|e| format!("Unable to encode eth1 deposit tx data: {:?}", e))?; + + Ok((deposit_data, amount)) + } + + pub fn write_eth1_data_file(mut self) -> Result { let path = self .directory .as_ref() .map(|directory| directory.join(ETH1_DEPOSIT_DATA_FILE)) 
.ok_or_else(|| "write_eth1_data_filer requires a directory")?; - let deposit_data = { - let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( - &withdrawal_keypair.pk, - spec.bls_withdrawal_prefix_byte, - )); - - let mut deposit_data = DepositData { - pubkey: voting_keypair.pk.clone().into(), - withdrawal_credentials, - amount, - signature: Signature::empty_signature().into(), - }; - - deposit_data.signature = deposit_data.create_signature(&voting_keypair.sk, &spec); - - encode_eth1_tx_data(&deposit_data) - .map_err(|e| format!("Unable to encode eth1 deposit tx data: {:?}", e))? - }; + let (deposit_data, _) = self.get_deposit_data()?; if path.exists() { return Err(format!("Eth1 data file already exists at: {:?}", path)); @@ -293,6 +303,31 @@ impl ValidatorDirectoryBuilder { Ok(self) } + pub fn submit_eth1_deposit( + self, + web3: Web3, + from: Address, + deposit_contract: Address, + ) -> impl Future { + self.get_deposit_data() + .into_future() + .and_then(move |(deposit_data, deposit_amount)| { + web3.eth() + .send_transaction(TransactionRequest { + from, + to: Some(deposit_contract), + gas: Some(DEPOSIT_GAS.into()), + gas_price: None, + value: Some(from_gwei(deposit_amount)), + data: Some(deposit_data.into()), + nonce: None, + condition: None, + }) + .map_err(|e| format!("Failed to send transaction: {:?}", e)) + }) + .map(|tx| (self, tx)) + } + pub fn build(self) -> Result { Ok(ValidatorDirectory { directory: self.directory.ok_or_else(|| "build requires a directory")?, @@ -303,6 +338,11 @@ impl ValidatorDirectoryBuilder { } } +/// Converts gwei to wei. +fn from_gwei(gwei: u64) -> U256 { + U256::from(gwei) * U256::exp10(9) +} + #[cfg(test)] mod tests { use super::*;