From b6408805a2177db9cbf6e75f04d191f2de2e8ef3 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 17 May 2020 21:16:48 +1000 Subject: [PATCH] Stable futures (#879) * Port eth1 lib to use stable futures * Port eth1_test_rig to stable futures * Port eth1 tests to stable futures * Port genesis service to stable futures * Port genesis tests to stable futures * Port beacon_chain to stable futures * Port lcli to stable futures * Fix eth1_test_rig (#1014) * Fix lcli * Port timer to stable futures * Fix timer * Port websocket_server to stable futures * Port notifier to stable futures * Add TODOS * Update hashmap hashset to stable futures * Adds panic test to hashset delay * Port remote_beacon_node to stable futures * Fix lcli merge conflicts * Non rpc stuff compiles * protocol.rs compiles * Port websockets, timer and notifier to stable futures (#1035) * Fix lcli * Port timer to stable futures * Fix timer * Port websocket_server to stable futures * Port notifier to stable futures * Add TODOS * Port remote_beacon_node to stable futures * Partial eth2-libp2p stable future upgrade * Finished first round of fighting RPC types * Further progress towards porting eth2-libp2p adds caching to discovery * Update behaviour * RPC handler to stable futures * Update RPC to master libp2p * Network service additions * Fix the fallback transport construction (#1102) * Correct warning * Remove hashmap delay * Compiling version of eth2-libp2p * Update all crates versions * Fix conversion function and add tests (#1113) * Port validator_client to stable futures (#1114) * Add PH & MS slot clock changes * Account for genesis time * Add progress on duties refactor * Add simple is_aggregator bool to val subscription * Start work on attestation_verification.rs * Add progress on ObservedAttestations * Progress with ObservedAttestations * Fix tests * Add observed attestations to the beacon chain * Add attestation observation to processing code * Add progress on attestation verification * Add first 
draft of ObservedAttesters * Add more tests * Add observed attesters to beacon chain * Add observers to attestation processing * Add more attestation verification * Create ObservedAggregators map * Remove commented-out code * Add observed aggregators into chain * Add progress * Finish adding features to attestation verification * Ensure beacon chain compiles * Link attn verification into chain * Integrate new attn verification in chain * Remove old attestation processing code * Start trying to fix beacon_chain tests * Split adding into pools into two functions * Add aggregation to harness * Get test harness working again * Adjust the number of aggregators for test harness * Fix edge-case in harness * Integrate new attn processing in network * Fix compile bug in validator_client * Update validator API endpoints * Fix aggreagation in test harness * Fix enum thing * Fix attestation observation bug: * Patch failing API tests * Start adding comments to attestation verification * Remove unused attestation field * Unify "is block known" logic * Update comments * Supress fork choice errors for network processing * Add todos * Tidy * Add gossip attn tests * Disallow test harness to produce old attns * Comment out in-progress tests * Partially address pruning tests * Fix failing store test * Add aggregate tests * Add comments about which spec conditions we check * Dont re-aggregate * Split apart test harness attn production * Fix compile error in network * Make progress on commented-out test * Fix skipping attestation test * Add fork choice verification tests * Tidy attn tests, remove dead code * Remove some accidentally added code * Fix clippy lint * Rename test file * Add block tests, add cheap block proposer check * Rename block testing file * Add observed_block_producers * Tidy * Switch around block signature verification * Finish block testing * Remove gossip from signature tests * First pass of self review * Fix deviation in spec * Update test spec tags * Start moving 
over to hashset * Finish moving observed attesters to hashmap * Move aggregation pool over to hashmap * Make fc attn borrow again * Fix rest_api compile error * Fix missing comments * Fix monster test * Uncomment increasing slots test * Address remaining comments * Remove unsafe, use cfg test * Remove cfg test flag * Fix dodgy comment * Revert "Update hashmap hashset to stable futures" This reverts commit d432378a3cc5cd67fc29c0b15b96b886c1323554. * Revert "Adds panic test to hashset delay" This reverts commit 281502396fc5b90d9c421a309c2c056982c9525b. * Ported attestation_service * Ported duties_service * Ported fork_service * More ports * Port block_service * Minor fixes * VC compiles * Update TODOS * Borrow self where possible * Ignore aggregates that are already known. * Unify aggregator modulo logic * Fix typo in logs * Refactor validator subscription logic * Avoid reproducing selection proof * Skip HTTP call if no subscriptions * Rename DutyAndState -> DutyAndProof * Tidy logs * Print root as dbg * Fix compile errors in tests * Fix compile error in test * Re-Fix attestation and duties service * Minor fixes Co-authored-by: Paul Hauner * Network crate update to stable futures * Port account_manager to stable futures (#1121) * Port account_manager to stable futures * Run async fns in tokio environment * Port rest_api crate to stable futures (#1118) * Port rest_api lib to stable futures * Reduce tokio features * Update notifier to stable futures * Builder update * Further updates * Convert self referential async functions * stable futures fixes (#1124) * Fix eth1 update functions * Fix genesis and client * Fix beacon node lib * Return appropriate runtimes from environment * Fix test rig * Refactor eth1 service update * Upgrade simulator to stable futures * Lighthouse compiles on stable futures * Remove println debugging statement * Update libp2p service, start rpc test upgrade * Update network crate for new libp2p * Update tokio::codec to futures_codec (#1128) * 
Further work towards RPC corrections * Correct http timeout and network service select * Use tokio runtime for libp2p * Revert "Update tokio::codec to futures_codec (#1128)" This reverts commit e57aea924acf5cbabdcea18895ac07e38a425ed7. * Upgrade RPC libp2p tests * Upgrade secio fallback test * Upgrade gossipsub examples * Clean up RPC protocol * Test fixes (#1133) * Correct websocket timeout and run on os thread * Fix network test * Clean up PR * Correct tokio tcp move attestation service tests * Upgrade attestation service tests * Correct network test * Correct genesis test * Test corrections * Log info when block is received * Modify logs and update attester service events * Stable futures: fixes to vc, eth1 and account manager (#1142) * Add local testnet scripts * Remove whiteblock script * Rename local testnet script * Move spawns onto handle * Fix VC panic * Initial fix to block production issue * Tidy block producer fix * Tidy further * Add local testnet clean script * Run cargo fmt * Tidy duties service * Tidy fork service * Tidy ForkService * Tidy AttestationService * Tidy notifier * Ensure await is not suppressed in eth1 * Ensure await is not suppressed in account_manager * Use .ok() instead of .unwrap_or(()) * RPC decoding test for proto * Update discv5 and eth2-libp2p deps * Fix lcli double runtime issue (#1144) * Handle stream termination and dialing peer errors * Correct peer_info variant types * Remove unnecessary warnings * Handle subnet unsubscription removal and improve logigng * Add logs around ping * Upgrade discv5 and improve logging * Handle peer connection status for multiple connections * Improve network service logging * Improve logging around peer manager * Upgrade swarm poll centralise peer management * Identify clients on error * Fix `remove_peer` in sync (#1150) * remove_peer removes from all chains * Remove logs * Fix early return from loop * Improved logging, fix panic * Partially correct tests * Stable futures: Vc sync (#1149) * 
Improve syncing heuristic * Add comments * Use safer method for tolerance * Fix tests * Stable futures: Fix VC bug, update agg pool, add more metrics (#1151) * Expose epoch processing summary * Expose participation metrics to prometheus * Switch to f64 * Reduce precision * Change precision * Expose observed attesters metrics * Add metrics for agg/unagg attn counts * Add metrics for gossip rx * Add metrics for gossip tx * Adds ignored attns to prom * Add attestation timing * Add timer for aggregation pool sig agg * Add write lock timer for agg pool * Add more metrics to agg pool * Change map lock code * Add extra metric to agg pool * Change lock handling in agg pool * Change .write() to .read() * Add another agg pool timer * Fix for is_aggregator * Fix pruning bug Co-authored-by: pawan Co-authored-by: Paul Hauner --- Cargo.lock | 2165 ++++++++--------- Cargo.toml | 2 +- account_manager/Cargo.toml | 19 +- account_manager/src/deposits.rs | 128 +- account_manager/src/lib.rs | 249 +- beacon_node/Cargo.toml | 21 +- beacon_node/beacon_chain/Cargo.toml | 22 +- .../src/attestation_verification.rs | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 26 +- .../beacon_chain/src/block_verification.rs | 54 +- beacon_node/beacon_chain/src/eth1_chain.rs | 10 +- beacon_node/beacon_chain/src/metrics.rs | 141 +- .../src/naive_aggregation_pool.rs | 23 +- .../beacon_chain/src/observed_attesters.rs | 22 +- beacon_node/client/Cargo.toml | 26 +- beacon_node/client/src/builder.rs | 260 +- beacon_node/client/src/notifier.rs | 68 +- beacon_node/eth1/Cargo.toml | 27 +- beacon_node/eth1/src/http.rs | 277 +-- beacon_node/eth1/src/service.rs | 730 +++--- beacon_node/eth1/tests/test.rs | 398 ++- beacon_node/eth2-libp2p/Cargo.toml | 49 +- beacon_node/eth2-libp2p/src/behaviour.rs | 236 +- beacon_node/eth2-libp2p/src/config.rs | 2 +- beacon_node/eth2-libp2p/src/discovery/enr.rs | 11 +- .../eth2-libp2p/src/discovery/enr_ext.rs | 190 ++ beacon_node/eth2-libp2p/src/discovery/mod.rs | 170 +- 
beacon_node/eth2-libp2p/src/lib.rs | 7 +- .../eth2-libp2p/src/peer_manager/client.rs | 12 + .../eth2-libp2p/src/peer_manager/mod.rs | 148 +- .../eth2-libp2p/src/peer_manager/peer_info.rs | 11 +- .../eth2-libp2p/src/peer_manager/peerdb.rs | 35 +- beacon_node/eth2-libp2p/src/rpc/codec/base.rs | 83 +- beacon_node/eth2-libp2p/src/rpc/codec/mod.rs | 12 +- beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs | 22 +- .../eth2-libp2p/src/rpc/codec/ssz_snappy.rs | 24 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 467 ++-- beacon_node/eth2-libp2p/src/rpc/methods.rs | 12 +- beacon_node/eth2-libp2p/src/rpc/mod.rs | 105 +- beacon_node/eth2-libp2p/src/rpc/protocol.rs | 126 +- beacon_node/eth2-libp2p/src/service.rs | 264 +- beacon_node/eth2-libp2p/src/types/globals.rs | 1 + beacon_node/eth2-libp2p/src/types/mod.rs | 2 +- beacon_node/eth2-libp2p/tests/common/mod.rs | 43 +- .../eth2-libp2p/tests/gossipsub_tests.rs | 113 +- beacon_node/eth2-libp2p/tests/noise.rs | 82 +- beacon_node/eth2-libp2p/tests/rpc_tests.rs | 668 +++-- beacon_node/genesis/Cargo.toml | 23 +- .../genesis/src/eth1_genesis_service.rs | 181 +- beacon_node/genesis/tests/tests.rs | 132 +- beacon_node/network/Cargo.toml | 28 +- .../network/src/attestation_service/mod.rs | 686 +----- .../network/src/attestation_service/tests.rs | 508 ++++ beacon_node/network/src/lib.rs | 4 + beacon_node/network/src/metrics.rs | 39 + beacon_node/network/src/router/mod.rs | 29 +- beacon_node/network/src/router/processor.rs | 13 +- beacon_node/network/src/service.rs | 480 ++-- beacon_node/network/src/service/tests.rs | 20 +- .../network/src/sync/block_processor.rs | 26 +- beacon_node/network/src/sync/manager.rs | 42 +- .../network/src/sync/network_context.rs | 27 +- .../network/src/sync/range_sync/chain.rs | 5 +- .../src/sync/range_sync/chain_collection.rs | 31 +- .../network/src/sync/range_sync/range.rs | 38 +- beacon_node/rest_api/Cargo.toml | 34 +- beacon_node/rest_api/src/beacon.rs | 96 +- beacon_node/rest_api/src/consensus.rs | 33 +- 
beacon_node/rest_api/src/error.rs | 7 - beacon_node/rest_api/src/helpers.rs | 4 +- beacon_node/rest_api/src/lib.rs | 48 +- beacon_node/rest_api/src/macros.rs | 4 +- beacon_node/rest_api/src/response_builder.rs | 2 +- beacon_node/rest_api/src/router.rs | 380 ++- beacon_node/rest_api/src/validator.rs | 258 +- beacon_node/src/lib.rs | 110 +- beacon_node/store/Cargo.toml | 16 +- beacon_node/timer/Cargo.toml | 6 +- beacon_node/timer/src/lib.rs | 34 +- beacon_node/websocket_server/Cargo.toml | 10 +- beacon_node/websocket_server/src/lib.rs | 37 +- eth2/operation_pool/Cargo.toml | 8 +- eth2/proto_array_fork_choice/Cargo.toml | 8 +- eth2/state_processing/Cargo.toml | 18 +- .../src/per_epoch_processing.rs | 11 +- .../src/per_slot_processing.rs | 10 +- eth2/types/Cargo.toml | 20 +- eth2/utils/bls/Cargo.toml | 10 +- eth2/utils/cached_tree_hash/Cargo.toml | 12 +- eth2/utils/clap_utils/Cargo.toml | 6 +- eth2/utils/compare_fields_derive/Cargo.toml | 4 +- eth2/utils/compare_fields_derive/src/lib.rs | 2 +- eth2/utils/deposit_contract/Cargo.toml | 10 +- eth2/utils/deposit_contract/build.rs | 4 +- eth2/utils/eth2_config/Cargo.toml | 6 +- eth2/utils/eth2_hashing/Cargo.toml | 6 +- eth2/utils/eth2_interop_keypairs/Cargo.toml | 10 +- eth2/utils/eth2_keystore/Cargo.toml | 2 +- eth2/utils/eth2_testnet_config/Cargo.toml | 11 +- eth2/utils/eth2_testnet_config/build.rs | 15 +- eth2/utils/eth2_wallet/Cargo.toml | 2 +- eth2/utils/hashmap_delay/Cargo.toml | 9 - eth2/utils/hashmap_delay/src/hashmap_delay.rs | 161 -- eth2/utils/hashmap_delay/src/lib.rs | 21 - eth2/utils/hashset_delay/Cargo.toml | 12 + .../src/hashset_delay.rs | 73 +- eth2/utils/hashset_delay/src/lib.rs | 12 + eth2/utils/int_to_bytes/Cargo.toml | 4 +- eth2/utils/lighthouse_metrics/Cargo.toml | 2 +- eth2/utils/lighthouse_metrics/src/lib.rs | 29 +- eth2/utils/logging/Cargo.toml | 2 +- eth2/utils/merkle_proof/Cargo.toml | 6 +- eth2/utils/remote_beacon_node/Cargo.toml | 14 +- eth2/utils/remote_beacon_node/src/lib.rs | 455 ++-- 
eth2/utils/rest_types/Cargo.toml | 10 +- eth2/utils/serde_hex/Cargo.toml | 4 +- eth2/utils/serde_hex/src/lib.rs | 9 +- eth2/utils/slot_clock/Cargo.toml | 2 +- eth2/utils/ssz/Cargo.toml | 2 +- eth2/utils/ssz_derive/Cargo.toml | 4 +- eth2/utils/ssz_derive/src/lib.rs | 6 +- eth2/utils/ssz_types/Cargo.toml | 10 +- eth2/utils/swap_or_not_shuffle/Cargo.toml | 4 +- eth2/utils/test_random_derive/Cargo.toml | 4 +- eth2/utils/test_random_derive/src/lib.rs | 2 +- eth2/utils/tree_hash/Cargo.toml | 10 +- eth2/utils/tree_hash_derive/Cargo.toml | 4 +- eth2/utils/tree_hash_derive/src/lib.rs | 5 +- lcli/Cargo.toml | 15 +- lcli/src/deploy_deposit_contract.rs | 50 +- lcli/src/eth1_genesis.rs | 29 +- lcli/src/generate_bootnode_enr.rs | 8 +- lcli/src/main.rs | 5 +- lcli/src/refund_deposit_contract.rs | 17 +- lighthouse/Cargo.toml | 16 +- lighthouse/environment/Cargo.toml | 18 +- lighthouse/environment/src/lib.rs | 32 +- lighthouse/src/main.rs | 10 +- scripts/local_testnet_beacon_node.sh | 18 + scripts/local_testnet_clean.sh | 7 + scripts/local_testnet_setup.sh | 33 + scripts/local_testnet_valdiator_client.sh | 20 + scripts/whiteblock_start.sh | 98 - tests/ef_tests/Cargo.toml | 14 +- tests/ef_tests/src/cases/sanity_slots.rs | 2 +- tests/eth1_test_rig/Cargo.toml | 6 +- tests/eth1_test_rig/src/ganache.rs | 14 +- tests/eth1_test_rig/src/lib.rs | 222 +- tests/node_test_rig/Cargo.toml | 10 +- tests/node_test_rig/src/lib.rs | 45 +- tests/simulator/Cargo.toml | 6 +- tests/simulator/src/checks.rs | 170 +- tests/simulator/src/eth1_sim.rs | 209 +- tests/simulator/src/local_network.rs | 112 +- tests/simulator/src/no_eth1_sim.rs | 103 +- tests/simulator/src/sync_sim.rs | 395 ++- validator_client/Cargo.toml | 36 +- validator_client/src/attestation_service.rs | 618 +++-- validator_client/src/block_service.rs | 248 +- validator_client/src/duties_service.rs | 419 ++-- validator_client/src/fork_service.rs | 79 +- validator_client/src/is_synced.rs | 80 + validator_client/src/lib.rs | 390 ++- 
validator_client/src/notifier.rs | 61 +- validator_client/src/validator_directory.rs | 35 +- 165 files changed, 7924 insertions(+), 7733 deletions(-) create mode 100644 beacon_node/eth2-libp2p/src/discovery/enr_ext.rs create mode 100644 beacon_node/network/src/attestation_service/tests.rs create mode 100644 beacon_node/network/src/metrics.rs delete mode 100644 eth2/utils/hashmap_delay/Cargo.toml delete mode 100644 eth2/utils/hashmap_delay/src/hashmap_delay.rs delete mode 100644 eth2/utils/hashmap_delay/src/lib.rs create mode 100644 eth2/utils/hashset_delay/Cargo.toml rename eth2/utils/{hashmap_delay => hashset_delay}/src/hashset_delay.rs (70%) create mode 100644 eth2/utils/hashset_delay/src/lib.rs create mode 100755 scripts/local_testnet_beacon_node.sh create mode 100755 scripts/local_testnet_clean.sh create mode 100755 scripts/local_testnet_setup.sh create mode 100755 scripts/local_testnet_valdiator_client.sh delete mode 100755 scripts/whiteblock_start.sh create mode 100644 validator_client/src/is_synced.rs diff --git a/Cargo.lock b/Cargo.lock index 13ae0142f7..9895f3e445 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,15 +13,15 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "eth2_testnet_config", - "futures", - "hex 0.3.2", + "futures 0.3.5", + "hex 0.4.2", "libc", "rayon", "slog", "slog-async", "slog-term", "tempdir", - "tokio", + "tokio 0.2.20", "types", "validator_client", "web3", @@ -52,7 +52,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfd7e7ae3f9a1fb5c03b389fc6bb9a51400d0c13053f0dca698c832bfd893a0d" dependencies = [ "block-cipher-trait", - "byteorder 1.3.4", + "byteorder", "opaque-debug", ] @@ -103,6 +103,12 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "anyhow" +version = "1.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9a60d744a80c30fcb657dfe2c1b22bcb3e814c1a1e3674f32bf5820b570fbff" + [[package]] name = "arbitrary" version = "0.4.4" @@ -154,8 +160,20 @@ version = 
"0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" dependencies = [ - "quote 1.0.4", - "syn 1.0.19", + "quote", + "syn", +] + +[[package]] +name = "async-tls" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95fd83426b89b034bf4e9ceb9c533c2f2386b813fd3dcae0a425ec6f1837d78a" +dependencies = [ + "futures 0.3.5", + "rustls", + "webpki", + "webpki-roots 0.19.0", ] [[package]] @@ -209,19 +227,10 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" dependencies = [ - "byteorder 1.3.4", + "byteorder", "safemem", ] -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -dependencies = [ - "byteorder 1.3.4", -] - [[package]] name = "base64" version = "0.11.0" @@ -247,16 +256,16 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "eth2_ssz_types", - "futures", + "futures 0.3.5", "genesis", "integer-sqrt", "lazy_static", "lighthouse_metrics", "log 0.4.8", - "lru 0.4.3", + "lru", "merkle_proof", "operation_pool", - "parking_lot 0.9.0", + "parking_lot 0.10.2", "proto_array_fork_choice", "rand 0.7.3", "rayon", @@ -271,7 +280,7 @@ dependencies = [ "state_processing", "store", "tempfile", - "tokio", + "tokio 0.2.20", "tree_hash", "types", "websocket_server", @@ -287,14 +296,14 @@ dependencies = [ "client", "ctrlc", "dirs", - "env_logger 0.7.1", + "env_logger", "environment", "eth2-libp2p", "eth2_config", "eth2_ssz", "eth2_testnet_config", "exit-future", - "futures", + "futures 0.3.5", "genesis", "logging", "node_test_rig", @@ -304,30 +313,19 @@ dependencies = [ "slog-async", "slog-term", "store", - "tokio", - "tokio-timer 0.2.13", + "tokio 0.2.20", "toml", "types", "version", ] -[[package]] 
-name = "bigint" -version = "4.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0e8c8a600052b52482eff2cf4d810e462fdff1f656ac1ecb6232132a1ed7def" -dependencies = [ - "byteorder 1.3.4", - "crunchy 0.1.6", -] - [[package]] name = "bincode" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf" dependencies = [ - "byteorder 1.3.4", + "byteorder", "serde", ] @@ -354,15 +352,13 @@ dependencies = [ ] [[package]] -name = "blake2" -version = "0.8.1" +name = "blake2-rfc" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94cb07b0da6a73955f8fb85d24c466778e70cda767a568229b104f0264089330" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" dependencies = [ - "byte-tools", - "crypto-mac", - "digest", - "opaque-debug", + "arrayvec 0.4.12", + "constant_time_eq", ] [[package]] @@ -376,6 +372,17 @@ dependencies = [ "constant_time_eq", ] +[[package]] +name = "blake2s_simd" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab9e07352b829279624ceb7c64adb4f585dacdb81d35cafae81139ccd617cf44" +dependencies = [ + "arrayref", + "arrayvec 0.5.1", + "constant_time_eq", +] + [[package]] name = "block-buffer" version = "0.7.3" @@ -384,7 +391,7 @@ checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ "block-padding", "byte-tools", - "byteorder 1.3.4", + "byteorder", "generic-array", ] @@ -414,7 +421,7 @@ dependencies = [ "eth2_hashing", "eth2_ssz", "eth2_ssz_types", - "hex 0.3.2", + "hex 0.4.2", "milagro_bls", "rand 0.7.3", "serde", @@ -423,12 +430,6 @@ dependencies = [ "tree_hash", ] -[[package]] -name = "bs58" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95ee6bba9d950218b6cc910cf62bc9e0a171d0f4537e3627b0f54d08549b188" - [[package]] name = 
"bs58" version = "0.3.1" @@ -437,9 +438,9 @@ checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" [[package]] name = "bstr" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41" +checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" dependencies = [ "lazy_static", "memchr", @@ -465,12 +466,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" -[[package]] -name = "byteorder" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" - [[package]] name = "byteorder" version = "1.3.4" @@ -483,11 +478,17 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" dependencies = [ - "byteorder 1.3.4", + "byteorder", "either", "iovec", ] +[[package]] +name = "bytes" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" + [[package]] name = "c_linked_list" version = "1.1.1" @@ -530,6 +531,15 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "chacha20-poly1305-aead" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77d2058ba29594f69c75e8a9018e0485e3914ca5084e3613cd64529042f5423b" +dependencies = [ + "constant_time_eq", +] + [[package]] name = "chrono" version = "0.4.11" @@ -543,9 +553,9 @@ dependencies = [ [[package]] name = "clap" -version = "2.33.0" +version = "2.33.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" +checksum = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129" dependencies = [ "ansi_term", "atty", @@ -564,7 +574,7 @@ dependencies = [ "dirs", "eth2_ssz", "eth2_testnet_config", - "hex 0.3.2", + "hex 0.4.2", "types", ] @@ -589,12 +599,12 @@ dependencies = [ "eth2-libp2p", "eth2_config", "eth2_ssz", - "futures", + "futures 0.3.5", "genesis", "lazy_static", "lighthouse_metrics", "network", - "parking_lot 0.9.0", + "parking_lot 0.10.2", "prometheus", "reqwest", "rest_api", @@ -607,7 +617,7 @@ dependencies = [ "slot_clock", "store", "timer", - "tokio", + "tokio 0.2.20", "toml", "tree_hash", "types", @@ -646,8 +656,8 @@ dependencies = [ name = "compare_fields_derive" version = "0.2.0" dependencies = [ - "quote 0.6.13", - "syn 0.15.44", + "quote", + "syn", ] [[package]] @@ -686,34 +696,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "cookie" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "888604f00b3db336d2af898ec3c1d5d0ddf5e6d462220f2ededc33a87ac4bbd5" -dependencies = [ - "time", - "url 1.7.2", -] - -[[package]] -name = "cookie_store" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46750b3f362965f197996c4448e4a0935e791bf7d6631bfce9ee0af3d24c919c" -dependencies = [ - "cookie", - "failure", - "idna 0.1.5", - "log 0.4.8", - "publicsuffix", - "serde", - "serde_json", - "time", - "try_from", - "url 1.7.2", -] - [[package]] name = "core-foundation" version = "0.7.0" @@ -774,12 +756,6 @@ dependencies = [ "itertools 0.9.0", ] -[[package]] -name = "crossbeam" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bd66663db5a988098a89599d4857919b3acf7f61402e61365acfd3919857b9be" - [[package]] name = "crossbeam" version = "0.7.3" @@ -827,7 +803,7 @@ dependencies = [ "lazy_static", "maybe-uninit", "memoffset", - "scopeguard 1.1.0", + "scopeguard", ] [[package]] @@ -851,12 +827,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "crunchy" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2f4a431c5c9f662e1200b7c7f02c34e91361150e382089a8f2dec3ba680cbda" - [[package]] name = "crunchy" version = "0.2.2" @@ -915,40 +885,17 @@ dependencies = [ "winapi 0.3.8", ] -[[package]] -name = "cuckoofilter" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd43f7cfaffe0a386636a10baea2ee05cc50df3b77bea4a456c9572a939bf1f" -dependencies = [ - "byteorder 0.5.3", - "rand 0.3.23", -] - -[[package]] -name = "curve25519-dalek" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7dcd30ba50cdf88b55b033456138b7c0ac4afdc436d82e1b79f370f24cc66d" -dependencies = [ - "byteorder 1.3.4", - "clear_on_drop", - "digest", - "rand_core 0.3.1", - "subtle 2.2.2", -] - [[package]] name = "curve25519-dalek" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" dependencies = [ - "byteorder 1.3.4", + "byteorder", "digest", "rand_core 0.5.1", "subtle 2.2.2", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -977,13 +924,13 @@ dependencies = [ [[package]] name = "derivative" -version = "1.0.4" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6d883546668a3e2011b6a716a7330b82eabb0151b138217f632c8243e17135" +checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -992,9 
+939,9 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cee758ebd1c79a9c6fb95f242dcc30bdbf555c28369ae908d21fdaf81537496" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1003,9 +950,9 @@ version = "0.99.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1040,13 +987,28 @@ dependencies = [ ] [[package]] -name = "dns-parser" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" +name = "discv5" +version = "0.1.0-alpha.2" +source = "git+https://github.com/sigp/discv5?rev=7b3bd40591b62b8c002ffdb85de008aa9f82e2e5#7b3bd40591b62b8c002ffdb85de008aa9f82e2e5" dependencies = [ - "byteorder 1.3.4", - "quick-error", + "arrayvec 0.5.1", + "digest", + "enr", + "fnv", + "futures 0.3.5", + "hex 0.4.2", + "hkdf", + "libsecp256k1", + "log 0.4.8", + "net2", + "openssl", + "rand 0.7.3", + "rlp", + "sha2", + "smallvec 1.4.0", + "tokio 0.2.20", + "uint", + "zeroize", ] [[package]] @@ -1062,7 +1024,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" dependencies = [ "clear_on_drop", - "curve25519-dalek 2.0.0", + "curve25519-dalek", "rand 0.7.3", "sha2", ] @@ -1077,7 +1039,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "ethereum-types", - "hex 0.3.2", + "hex 0.4.2", "rayon", "serde", "serde_derive", @@ -1108,34 +1070,21 @@ dependencies = [ [[package]] name = "enr" -version = "0.1.0-alpha.3" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version 
= "0.1.0-alpha.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "266c26f4eec6f07787274ddb813bb91355eeea7c094113d2d81ddd6330af1334" dependencies = [ "base64 0.12.1", - "bs58 0.3.1", + "bs58", "ed25519-dalek", "hex 0.4.2", - "libp2p-core", "libsecp256k1", "log 0.4.8", "rand 0.7.3", "rlp", "serde", - "sha3", - "zeroize 1.1.0", -] - -[[package]] -name = "env_logger" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" -dependencies = [ - "atty", - "humantime", - "log 0.4.8", - "regex", - "termcolor", + "tiny-keccak 2.0.2", + "zeroize", ] [[package]] @@ -1158,18 +1107,18 @@ dependencies = [ "beacon_node", "clap", "ctrlc", - "env_logger 0.6.2", + "env_logger", "eth2_config", "eth2_testnet_config", - "futures", + "futures 0.3.5", "logging", - "parking_lot 0.7.1", + "parking_lot 0.10.2", "slog", "slog-async", "slog-json", "slog-term", "sloggers", - "tokio", + "tokio 0.2.20", "types", ] @@ -1192,19 +1141,20 @@ dependencies = [ "eth2_hashing", "eth2_ssz", "eth2_ssz_derive", - "futures", - "hex 0.3.2", + "futures 0.3.5", + "hex 0.4.2", "lazy_static", "libflate", "lighthouse_metrics", "merkle_proof", - "parking_lot 0.7.1", + "parking_lot 0.10.2", "reqwest", "serde", "serde_json", "slog", + "sloggers", "state_processing", - "tokio", + "tokio 0.2.20", "toml", "tree_hash", "types", @@ -1216,9 +1166,9 @@ name = "eth1_test_rig" version = "0.2.0" dependencies = [ "deposit_contract", - "futures", + "futures 0.3.5", "serde_json", - "tokio", + "tokio 0.2.20", "types", "web3", ] @@ -1227,35 +1177,39 @@ dependencies = [ name = "eth2-libp2p" version = "0.2.0" dependencies = [ - "base64 0.11.0", + "base64 0.12.1", "dirs", + "discv5", "error-chain", "eth2_ssz", "eth2_ssz_derive", "eth2_ssz_types", "fnv", - "futures", - "hashmap_delay", - "hex 0.3.2", + "futures 0.3.5", + "hashset_delay", + "hex 0.4.2", "lazy_static", "libp2p", + "libp2p-tcp", 
"lighthouse_metrics", - "lru 0.4.3", - "parking_lot 0.9.0", + "lru", + "parking_lot 0.10.2", "serde", "serde_derive", "sha2", "slog", "slog-async", - "slog-stdlog 4.0.0", + "slog-stdlog", "slog-term", "smallvec 1.4.0", "snap", "tempdir", - "tokio", + "tiny-keccak 2.0.2", + "tokio 0.2.20", "tokio-io-timeout", + "tokio-util", "types", - "unsigned-varint", + "unsigned-varint 0.3.3 (git+https://github.com/sigp/unsigned-varint?branch=latest-codecs)", "version", "void", ] @@ -1285,9 +1239,9 @@ dependencies = [ name = "eth2_interop_keypairs" version = "0.2.0" dependencies = [ - "base64 0.11.0", + "base64 0.12.1", "eth2_hashing", - "hex 0.3.2", + "hex 0.4.2", "lazy_static", "milagro_bls", "num-bigint", @@ -1304,7 +1258,7 @@ dependencies = [ "num-bigint-dig", "ring", "rust-crypto", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -1320,8 +1274,8 @@ dependencies = [ "serde_json", "serde_repr", "tempfile", - "uuid 0.8.1", - "zeroize 1.1.0", + "uuid", + "zeroize", ] [[package]] @@ -1337,8 +1291,8 @@ dependencies = [ name = "eth2_ssz_derive" version = "0.1.0" dependencies = [ - "quote 0.6.13", - "syn 0.15.44", + "quote", + "syn", ] [[package]] @@ -1383,7 +1337,7 @@ dependencies = [ "serde_repr", "tempfile", "tiny-bip39", - "uuid 0.8.1", + "uuid", ] [[package]] @@ -1406,7 +1360,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "befe713756981dbbda28e23f5c65c85de512915db695284342cc2ee36b7a184f" dependencies = [ - "crunchy 0.2.2", + "crunchy", "fixed-hash", "impl-rlp", "impl-serde", @@ -1429,12 +1383,11 @@ dependencies = [ [[package]] name = "exit-future" -version = "0.1.4" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8013f441e38e31c670e7f34ec8f1d5d3a2bd9d303c1ff83976ca886005e8f48" +checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures", - "parking_lot 0.7.1", + "futures 0.3.5", ] [[package]] @@ -1453,9 +1406,9 @@ version = 
"0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "proc-macro2", + "quote", + "syn", "synstructure", ] @@ -1472,12 +1425,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11498d382790b7a8f2fd211780bec78619bba81cdad3a283997c0c41f836759c" dependencies = [ "arbitrary", - "byteorder 1.3.4", + "byteorder", "rand 0.7.3", "rustc-hex", "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" + [[package]] name = "flate2" version = "1.0.14" @@ -1486,11 +1445,9 @@ checksum = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42" dependencies = [ "cfg-if", "crc32fast", - "futures", "libc", "libz-sys", "miniz_oxide", - "tokio-io", ] [[package]] @@ -1542,16 +1499,131 @@ version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" +[[package]] +name = "futures" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" + [[package]] name = "futures-cpupool" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" dependencies = [ - "futures", + "futures 0.1.29", "num_cpus", ] +[[package]] +name = "futures-executor" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" + +[[package]] +name = "futures-macro" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" + +[[package]] +name = "futures-task" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +dependencies = [ + "once_cell", +] + +[[package]] +name = "futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" + +[[package]] +name = "futures-util" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +dependencies = [ + "futures 0.1.29", + 
"futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab 0.4.2", +] + +[[package]] +name = "futures_codec" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0a73299e4718f5452e45980fc1d6957a070abe308d3700b63b8673f47e1c2b3" +dependencies = [ + "bytes 0.5.4", + "futures 0.3.5", + "memchr", + "pin-project", +] + [[package]] name = "gcc" version = "0.3.55" @@ -1577,16 +1649,16 @@ dependencies = [ "eth2_hashing", "eth2_ssz", "exit-future", - "futures", + "futures 0.3.5", "int_to_bytes", "merkle_proof", - "parking_lot 0.7.1", + "parking_lot 0.10.2", "rayon", "serde", "serde_derive", "slog", "state_processing", - "tokio", + "tokio 0.2.20", "tree_hash", "types", ] @@ -1630,11 +1702,11 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" dependencies = [ - "byteorder 1.3.4", - "bytes", + "byteorder", + "bytes 0.4.12", "fnv", - "futures", - "http", + "futures 0.1.29", + "http 0.1.21", "indexmap", "log 0.4.8", "slab 0.4.2", @@ -1643,10 +1715,23 @@ dependencies = [ ] [[package]] -name = "hashbrown" -version = "0.5.0" +name = "h2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1de41fb8dba9714efd92241565cdff73f78508c95697dd56787d3cba27e2353" +checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" +dependencies = [ + "bytes 0.5.4", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.1", + "indexmap", + "log 0.4.8", + "slab 0.4.2", + "tokio 0.2.20", + "tokio-util", +] [[package]] name = "hashbrown" @@ -1659,11 +1744,20 @@ dependencies = [ ] [[package]] -name = "hashmap_delay" +name = "hashset_delay" version = "0.2.0" dependencies = [ - "futures", - "tokio-timer 0.2.13", + "futures 
0.3.5", + "tokio 0.2.20", +] + +[[package]] +name = "heck" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +dependencies = [ + "unicode-segmentation", ] [[package]] @@ -1724,7 +1818,18 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" dependencies = [ - "bytes", + "bytes 0.4.12", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +dependencies = [ + "bytes 0.5.4", "fnv", "itoa", ] @@ -1735,12 +1840,22 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" dependencies = [ - "bytes", - "futures", - "http", + "bytes 0.4.12", + "futures 0.1.29", + "http 0.1.21", "tokio-buf", ] +[[package]] +name = "http-body" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +dependencies = [ + "bytes 0.5.4", + "http 0.2.1", +] + [[package]] name = "httparse" version = "1.3.4" @@ -1781,12 +1896,12 @@ version = "0.12.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "futures-cpupool", - "h2", - "http", - "http-body", + "h2 0.1.26", + "http 0.1.21", + "http-body 0.1.0", "httparse", "iovec", "itoa", @@ -1794,7 +1909,7 @@ dependencies = [ "net2", "rustc_version", "time", - "tokio", + "tokio 0.1.22", "tokio-buf", "tokio-executor", "tokio-io", @@ -1802,7 +1917,31 @@ dependencies = [ "tokio-tcp", 
"tokio-threadpool", "tokio-timer 0.2.13", - "want", + "want 0.2.0", +] + +[[package]] +name = "hyper" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96816e1d921eca64d208a85aab4f7798455a8e34229ee5a88c935bdee1b78b14" +dependencies = [ + "bytes 0.5.4", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.2.5", + "http 0.2.1", + "http-body 0.3.1", + "httparse", + "itoa", + "log 0.4.8", + "net2", + "pin-project", + "time", + "tokio 0.2.20", + "tower-service", + "want 0.3.0", ] [[package]] @@ -1811,13 +1950,26 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "hyper 0.12.35", "native-tls", "tokio-io", ] +[[package]] +name = "hyper-tls" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3adcd308402b9553630734e9c36b77a7e48b3821251ca2493e8cd596763aafaa" +dependencies = [ + "bytes 0.5.4", + "hyper 0.13.5", + "native-tls", + "tokio 0.2.20", + "tokio-tls 0.3.1", +] + [[package]] name = "idna" version = "0.1.5" @@ -1880,8 +2032,8 @@ dependencies = [ name = "int_to_bytes" version = "0.2.0" dependencies = [ - "bytes", - "hex 0.3.2", + "bytes 0.5.4", + "hex 0.4.2", "yaml-rust", ] @@ -1945,7 +2097,7 @@ version = "14.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25525f6002338fb4debb5167a89a0b47f727a5a48418417545ad3429758b7fec" dependencies = [ - "futures", + "futures 0.1.29", "log 0.4.8", "serde", "serde_derive", @@ -2002,15 +2154,16 @@ dependencies = [ "eth2-libp2p", "eth2_ssz", "eth2_testnet_config", - "futures", + "futures 0.3.5", "genesis", - "hex 0.3.2", + "hex 0.4.2", "log 0.4.8", "regex", "serde", "serde_yaml", "simple_logger", "state_processing", + "tokio 0.2.20", "tree_hash", "types", "web3", @@ -2044,16 +2197,22 @@ checksum = 
"99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005" [[package]] name = "libflate" -version = "0.1.27" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9135df43b1f5d0e333385cb6e7897ecd1a43d7d11b91ac003f4d2c2d2401fdd" +checksum = "a1fbe6b967a94346446d37ace319ae85be7eca261bb8149325811ac435d35d64" dependencies = [ "adler32", "crc32fast", + "libflate_lz77", "rle-decode-fast", - "take_mut", ] +[[package]] +name = "libflate_lz77" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3286f09f7d4926fc486334f28d8d2e6ebe4f7f9994494b6dab27ddfad2c9b11b" + [[package]] name = "libm" version = "0.2.1" @@ -2062,429 +2221,256 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32ea742c86405b659c358223a8f0f9f5a9eb27bb6083894c6340959b05269662" dependencies = [ - "bytes", - "futures", + "bytes 0.5.4", + "futures 0.3.5", "lazy_static", "libp2p-core", "libp2p-core-derive", - "libp2p-deflate", - "libp2p-discv5", "libp2p-dns", - "libp2p-floodsub", "libp2p-gossipsub", "libp2p-identify", - "libp2p-kad", - "libp2p-mdns", "libp2p-mplex", "libp2p-noise", - "libp2p-ping", - "libp2p-plaintext", "libp2p-secio", "libp2p-swarm", - "libp2p-tcp", - "libp2p-uds", - "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", + "multihash", "parity-multiaddr", - "parity-multihash", - "parking_lot 0.9.0", - "smallvec 0.6.13", - "tokio-codec", - "tokio-executor", - "tokio-io", + "parking_lot 0.10.2", + "pin-project", + "smallvec 1.4.0", "wasm-timer", ] [[package]] name = "libp2p-core" -version = "0.13.2" -source = 
"git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d2c17158c4dca984a77a5927aac6f0862d7f50c013470a415f93be498b5739" dependencies = [ "asn1_der", - "bs58 0.3.1", - "bytes", + "bs58", "ed25519-dalek", - "failure", + "either", "fnv", - "futures", + "futures 0.3.5", + "futures-timer", "lazy_static", "libsecp256k1", "log 0.4.8", + "multihash", "multistream-select", "parity-multiaddr", - "parity-multihash", - "parking_lot 0.9.0", - "protobuf", - "quick-error", + "parking_lot 0.10.2", + "pin-project", + "prost", + "prost-build", "rand 0.7.3", "ring", "rw-stream-sink", "sha2", - "smallvec 0.6.13", - "tokio-executor", - "tokio-io", - "unsigned-varint", - "untrusted", + "smallvec 1.4.0", + "thiserror", + "unsigned-varint 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "void", - "wasm-timer", - "zeroize 1.1.0", + "zeroize", ] [[package]] name = "libp2p-core-derive" -version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "329127858e4728db5ab60c33d5ae352a999325fdf190ed022ec7d3a4685ae2e6" dependencies = [ - "quote 1.0.4", - "syn 1.0.19", -] - -[[package]] -name = "libp2p-deflate" -version = "0.5.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "flate2", - "futures", - "libp2p-core", - "tokio-io", -] - -[[package]] -name = "libp2p-discv5" -version = "0.1.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "arrayvec 0.4.12", - "bigint", - "digest", - "enr", - "fnv", - "futures", 
- "hex 0.4.2", - "hkdf", - "libp2p-core", - "libp2p-swarm", - "libsecp256k1", - "log 0.4.8", - "openssl", - "parity-multiaddr", - "parity-multihash", - "rand 0.7.3", - "rlp", - "sha2", - "smallvec 0.6.13", - "tokio-io", - "tokio-timer 0.2.13", - "tokio-udp", - "void", - "zeroize 0.6.0", + "quote", + "syn", ] [[package]] name = "libp2p-dns" -version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0d0993481203d68e5ce2f787d033fb0cac6b850659ed6c784612db678977c71" dependencies = [ - "futures", + "futures 0.3.5", "libp2p-core", "log 0.4.8", - "tokio-dns-unofficial", -] - -[[package]] -name = "libp2p-floodsub" -version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "bs58 0.3.1", - "bytes", - "cuckoofilter", - "fnv", - "futures", - "libp2p-core", - "libp2p-swarm", - "protobuf", - "rand 0.6.5", - "smallvec 0.6.13", - "tokio-io", ] [[package]] name = "libp2p-gossipsub" -version = "0.1.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f7f3f79f060864db0317cc47641b7d35276dee52a0ffa91553fbd0c153863a3" dependencies = [ - "base64 0.10.1", - "bs58 0.2.5", - "byteorder 1.3.4", - "bytes", + "base64 0.11.0", + "byteorder", + "bytes 0.5.4", "fnv", - "futures", + "futures 0.3.5", + "futures_codec", "libp2p-core", "libp2p-swarm", "log 0.4.8", - "lru 0.1.17", - "protobuf", - "rand 0.6.5", + "lru", + "prost", + "prost-build", + "rand 0.7.3", "sha2", "smallvec 1.4.0", - "tokio-codec", - "tokio-io", - "tokio-timer 0.2.13", - "unsigned-varint", + "unsigned-varint 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a38ca3eb807789e26f41c82ca7cd2b3843c66c5587b8b5f709a2f421f3061414" dependencies = [ - "bytes", - "futures", + "futures 0.3.5", "libp2p-core", "libp2p-swarm", "log 0.4.8", - "parity-multiaddr", - "protobuf", - "smallvec 0.6.13", - "tokio-codec", - "tokio-io", - "unsigned-varint", - "wasm-timer", -] - -[[package]] -name = "libp2p-kad" -version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "arrayvec 0.5.1", - "bytes", - "either", - "fnv", - "futures", - "libp2p-core", - "libp2p-swarm", - "log 0.4.8", - "parity-multiaddr", - "parity-multihash", - "protobuf", - "rand 0.7.3", - "sha2", - "smallvec 0.6.13", - "tokio-codec", - "tokio-io", - "uint", - "unsigned-varint", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-mdns" -version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "data-encoding", - "dns-parser", - "futures", - "libp2p-core", - "libp2p-swarm", - "log 0.4.8", - "net2", - "parity-multiaddr", - "rand 0.6.5", - "smallvec 0.6.13", - "tokio-io", - "tokio-reactor", - "tokio-udp", - "void", + "prost", + "prost-build", + "smallvec 1.4.0", "wasm-timer", ] [[package]] name = "libp2p-mplex" -version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0832882b06619b2e81d74e71447753ea3c068164a0bca67847d272e856a04a02" dependencies = [ - "bytes", + "bytes 0.5.4", "fnv", - "futures", + "futures 0.3.5", + "futures_codec", "libp2p-core", "log 0.4.8", - "parking_lot 0.9.0", - "tokio-codec", - "tokio-io", - "unsigned-varint", + "parking_lot 0.10.2", + "unsigned-varint 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-noise" -version = "0.11.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "918e94a649e1139c24ee9f1f8c1f2adaba6d157b9471af787f2d9beac8c29c77" dependencies = [ - "bytes", - "curve25519-dalek 1.2.3", - "futures", + "curve25519-dalek", + "futures 0.3.5", "lazy_static", "libp2p-core", "log 0.4.8", - "protobuf", + "prost", + "prost-build", "rand 0.7.3", - "ring", + "sha2", "snow", - "tokio-io", + "static_assertions", "x25519-dalek", - "zeroize 1.1.0", -] - -[[package]] -name = "libp2p-ping" -version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "bytes", - "futures", - "libp2p-core", - "libp2p-swarm", - "log 0.4.8", - "parity-multiaddr", - "rand 0.7.3", - "tokio-io", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-plaintext" -version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "bytes", - "futures", - "libp2p-core", - "log 0.4.8", - "protobuf", - "rw-stream-sink", - "tokio-io", - "void", + "zeroize", ] [[package]] name = "libp2p-secio" -version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a0509a7e47245259954fef58b85b81bf4d29ae33a4365e38d718a866698774" dependencies = [ "aes-ctr", - "bytes", "ctr", - "futures", + "futures 0.3.5", "hmac", "js-sys", "lazy_static", "libp2p-core", "log 0.4.8", "parity-send-wrapper", - "protobuf", - "rand 0.6.5", + "pin-project", + "prost", + "prost-build", + "quicksink", + "rand 0.7.3", "ring", "rw-stream-sink", "sha2", - "tokio-codec", - "tokio-io", + "static_assertions", "twofish", - "untrusted", "wasm-bindgen", - "wasm-bindgen-futures 0.3.27", + "wasm-bindgen-futures", "web-sys", ] [[package]] name = "libp2p-swarm" -version = "0.3.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44ab289ae44cc691da0a6fe96aefa43f26c86c6c7813998e203f6d80f1860f18" dependencies = [ - "futures", + "futures 0.3.5", "libp2p-core", - "smallvec 0.6.13", - "tokio-io", + "log 0.4.8", + "rand 0.7.3", + "smallvec 1.4.0", "void", "wasm-timer", ] [[package]] name = "libp2p-tcp" -version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b37ea44823d3ed223e4605da94b50177bc520f05ae2452286700549a32d81669" dependencies = [ - "bytes", - "futures", + "futures 0.3.5", + "futures-timer", "get_if_addrs", "ipnet", "libp2p-core", "log 0.4.8", - "tokio-io", - "tokio-tcp", - "tokio-timer 0.2.13", -] - -[[package]] -name = "libp2p-uds" -version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "futures", - "libp2p-core", - "log 0.4.8", - "tokio-uds 0.2.6", -] - -[[package]] -name = "libp2p-wasm-ext" 
-version = "0.6.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "futures", - "js-sys", - "libp2p-core", - "parity-send-wrapper", - "tokio-io", - "wasm-bindgen", - "wasm-bindgen-futures 0.3.27", + "tokio 0.2.20", ] [[package]] name = "libp2p-websocket" -version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6874c9069ce93d899df9dc7b29f129c706b2a0fdc048f11d878935352b580190" dependencies = [ - "bytes", - "futures", + "async-tls", + "bytes 0.5.4", + "either", + "futures 0.3.5", "libp2p-core", "log 0.4.8", + "quicksink", + "rustls", "rw-stream-sink", "soketto", - "tokio-codec", - "tokio-io", - "tokio-rustls", "url 2.1.1", - "webpki-roots", + "webpki", + "webpki-roots 0.18.0", ] [[package]] name = "libp2p-yamux" -version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02f91aea50f6571e0bc6c058dc0e9b270afd41ec28dd94e9e4bf607e78b9ab87" dependencies = [ - "futures", + "futures 0.3.5", "libp2p-core", - "log 0.4.8", - "tokio-io", + "parking_lot 0.10.2", + "thiserror", "yamux", ] @@ -2495,7 +2481,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" dependencies = [ "arrayref", - "crunchy 0.2.2", + "crunchy", "digest", "hmac-drbg", "rand 0.7.3", @@ -2524,16 +2510,16 @@ dependencies = [ "beacon_node", "clap", "clap_utils", - "env_logger 0.6.2", + "env_logger", "environment", "eth2_testnet_config", - "futures", + "futures 0.3.5", "logging", "slog", "slog-async", "slog-term", 
"sloggers", - "tokio", + "tokio 0.2.20", "types", "validator_client", ] @@ -2552,23 +2538,13 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" -[[package]] -name = "lock_api" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" -dependencies = [ - "owning_ref", - "scopeguard 0.3.3", -] - [[package]] name = "lock_api" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ - "scopeguard 1.1.0", + "scopeguard", ] [[package]] @@ -2599,22 +2575,13 @@ dependencies = [ "slog-term", ] -[[package]] -name = "lru" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8f669d42c72d18514dfca8115689c5f6370a17d980cb5bd777a67f404594c8" -dependencies = [ - "hashbrown 0.5.0", -] - [[package]] name = "lru" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237" dependencies = [ - "hashbrown 0.6.3", + "hashbrown", ] [[package]] @@ -2665,7 +2632,7 @@ dependencies = [ "hex 0.4.2", "lazy_static", "rand 0.7.3", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -2715,7 +2682,7 @@ dependencies = [ "kernel32-sys", "libc", "log 0.4.8", - "miow", + "miow 0.2.1", "net2", "slab 0.4.2", "winapi 0.2.8", @@ -2733,6 +2700,18 @@ dependencies = [ "slab 0.4.2", ] +[[package]] +name = "mio-named-pipes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3" +dependencies = [ + "log 0.4.8", + "mio", + "miow 0.3.3", + "winapi 0.3.8", +] + [[package]] name = "mio-uds" version = "0.6.8" @@ -2757,16 +2736,48 @@ 
dependencies = [ ] [[package]] -name = "multistream-select" -version = "0.6.1" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +name = "miow" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "396aa0f2003d7df8395cb93e09871561ccc3e785f0acb369170e8cc74ddf9226" dependencies = [ - "bytes", - "futures", + "socket2", + "winapi 0.3.8", +] + +[[package]] +name = "multihash" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47fbc227f7e2b1cb701f95404579ecb2668abbdd3c7ef7a6cbb3cc0d3b236869" +dependencies = [ + "blake2b_simd", + "blake2s_simd", + "digest", + "sha-1", + "sha2", + "sha3", + "unsigned-varint 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "multimap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" + +[[package]] +name = "multistream-select" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74cdcf7cfb3402881e15a1f95116cb033d69b33c83d481e1234777f5ef0c3d2c" +dependencies = [ + "bytes 0.5.4", + "futures 0.3.5", "log 0.4.8", - "smallvec 0.6.13", - "tokio-io", - "unsigned-varint", + "pin-project", + "smallvec 1.4.0", + "unsigned-varint 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2807,13 +2818,14 @@ dependencies = [ "eth2-libp2p", "eth2_ssz", "fnv", - "futures", + "futures 0.3.5", "genesis", - "hashmap_delay", - "hex 0.3.2", + "hashset_delay", + "hex 0.4.2", "lazy_static", + "lighthouse_metrics", "matches", - "parking_lot 0.9.0", + "parking_lot 0.10.2", "rand 0.7.3", "rest_types", "rlp", @@ -2822,10 +2834,8 @@ dependencies = [ "slot_clock", "smallvec 1.4.0", "store", - "tempdir", "tempfile", - "tokio", - "tokio-timer 0.2.13", + "tokio 0.2.20", 
"tree_hash", "types", ] @@ -2850,14 +2860,14 @@ dependencies = [ "beacon_node", "environment", "eth2_config", - "futures", + "futures 0.3.5", "genesis", "remote_beacon_node", "reqwest", "serde", "tempdir", "types", - "url 1.7.2", + "url 2.1.1", "validator_client", ] @@ -2869,9 +2879,9 @@ checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" [[package]] name = "nohash-hasher" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721a2bf1c26159ebf17e0a980bc4ce61f4b2fec5ec3b42d42fddd7a84a9e538f" +checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "num-bigint" @@ -2891,7 +2901,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3d03c330f9f7a2c19e3c0b42698e48141d0809c78cd9b6219f85bd7d7e892aa" dependencies = [ "autocfg 0.1.7", - "byteorder 1.3.4", + "byteorder", "lazy_static", "libm", "num-integer", @@ -2900,7 +2910,7 @@ dependencies = [ "rand 0.7.3", "serde", "smallvec 1.4.0", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -3004,7 +3014,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "int_to_bytes", - "parking_lot 0.9.0", + "parking_lot 0.10.2", "rand 0.7.3", "serde", "serde_derive", @@ -3013,46 +3023,24 @@ dependencies = [ "types", ] -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] - [[package]] name = "parity-multiaddr" -version = "0.6.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4db35e222f783ef4e6661873f6c165c4eb7b65e0c408349818517d5705c2d7d3" dependencies = [ "arrayref", - "bs58 0.3.1", - "byteorder 1.3.4", - "bytes", 
+ "bs58", + "byteorder", "data-encoding", - "parity-multihash", + "multihash", "percent-encoding 2.1.0", "serde", - "unsigned-varint", + "static_assertions", + "unsigned-varint 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.1", ] -[[package]] -name = "parity-multihash" -version = "0.2.0" -source = "git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" -dependencies = [ - "blake2", - "bytes", - "rand 0.6.5", - "sha-1", - "sha2", - "sha3", - "unsigned-varint", -] - [[package]] name = "parity-scale-codec" version = "1.3.0" @@ -3071,23 +3059,13 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" -[[package]] -name = "parking_lot" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" -dependencies = [ - "lock_api 0.1.5", - "parking_lot_core 0.4.0", -] - [[package]] name = "parking_lot" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" dependencies = [ - "lock_api 0.3.4", + "lock_api", "parking_lot_core 0.6.2", "rustc_version", ] @@ -3098,23 +3076,10 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" dependencies = [ - "lock_api 0.3.4", + "lock_api", "parking_lot_core 0.7.2", ] -[[package]] -name = "parking_lot_core" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" -dependencies = [ - "libc", - "rand 0.6.5", - "rustc_version", - "smallvec 0.6.13", - "winapi 0.3.8", -] - [[package]] name = "parking_lot_core" version = 
"0.6.2" @@ -3150,7 +3115,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" dependencies = [ - "byteorder 1.3.4", + "byteorder", "crypto-mac", ] @@ -3166,6 +3131,48 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "petgraph" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c127eea4a29ec6c85d153c59dc1213f33ec74cead30fe4730aecc88cc1fd92" +dependencies = [ + "fixedbitset", + "indexmap", +] + +[[package]] +name = "pin-project" +version = "0.4.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81d480cb4e89522ccda96d0eed9af94180b7a5f93fb28f66e1fd7d68431663d1" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "0.4.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a82996f11efccb19b685b14b5df818de31c1edcee3daa256ab5775dd98e72feb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + [[package]] name = "pkg-config" version = "0.3.17" @@ -3210,13 +3217,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" [[package]] -name = "proc-macro2" -version = "0.4.30" +name = "proc-macro-nested" +version = "0.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid 0.1.0", -] +checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" [[package]] name = "proc-macro2" @@ -3224,21 +3228,72 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8872cf6f48eee44265156c111456a700ab3483686b3f96df4cf5481c89157319" dependencies = [ - "unicode-xid 0.2.0", + "unicode-xid", ] [[package]] name = "prometheus" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5567486d5778e2c6455b1b90ff1c558f29e751fc018130fa182e15828e728af1" +checksum = "b0575e258dab62268e7236d7307caa38848acbda7ec7ab87bd9093791e999d20" dependencies = [ "cfg-if", "fnv", "lazy_static", "protobuf", - "quick-error", "spin", + "thiserror", +] + +[[package]] +name = "prost" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" +dependencies = [ + "bytes 0.5.4", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" +dependencies = [ + "bytes 0.5.4", + "heck", + "itertools 0.8.2", + "log 0.4.8", + "multimap", + "petgraph", + "prost", + "prost-types", + "tempfile", + "which", +] + +[[package]] +name = "prost-derive" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" +dependencies = [ + "anyhow", + "itertools 0.8.2", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-types" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" +dependencies = [ + "bytes 0.5.4", + "prost", ] [[package]] @@ -3247,8 +3302,8 @@ version = "0.2.0" dependencies = [ "eth2_ssz", "eth2_ssz_derive", - "itertools 0.8.2", - "parking_lot 0.9.0", + "itertools 0.9.0", + "parking_lot 0.10.2", "serde", "serde_derive", "serde_yaml", @@ -3257,22 +3312,9 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.8.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40361836defdd5871ff7e84096c6f6444af7fc157f8ef1789f54f147687caa20" - -[[package]] -name = "publicsuffix" -version = "1.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bbaa49075179162b49acac1c6aa45fb4dafb5f13cf6794276d77bc7fd95757b" -dependencies = [ - "error-chain", - "idna 0.2.0", - "lazy_static", - "regex", - "url 2.1.1", -] +checksum = "8e86d370532557ae7573551a1ec8235a0f8d6cb276c7c9e6aa490b511c447485" [[package]] name = "quick-error" @@ -3286,7 +3328,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" dependencies = [ - "env_logger 0.7.1", + "env_logger", "log 0.4.8", "rand 0.7.3", "rand_core 0.5.1", @@ -3294,22 +3336,24 @@ dependencies = [ [[package]] name = "quickcheck_macros" -version = "0.8.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7dfc1c4a1e048f5cc7d36a4c4118dfcf31d217c79f4b9a61bad65d68185752c" +checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "quote" -version = "0.6.13" +name = "quicksink" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +checksum = 
"77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" dependencies = [ - "proc-macro2 0.4.30", + "futures-core", + "futures-sink", + "pin-project-lite", ] [[package]] @@ -3318,7 +3362,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c1f4b0efa5fc5e8ceb705136bfee52cfdb6a4e3509f770b478cd6ed434232a7" dependencies = [ - "proc-macro2 1.0.12", + "proc-macro2", ] [[package]] @@ -3363,25 +3407,6 @@ dependencies = [ "winapi 0.3.8", ] -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.7", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift 0.1.1", - "winapi 0.3.8", -] - [[package]] name = "rand" version = "0.7.3" @@ -3390,19 +3415,9 @@ checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ "getrandom", "libc", - "rand_chacha 0.2.2", + "rand_chacha", "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.3.1", + "rand_hc", ] [[package]] @@ -3439,15 +3454,6 @@ dependencies = [ "getrandom", ] -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "rand_hc" version = "0.2.0" @@ -3457,59 +3463,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi 0.3.8", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi 0.3.8", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "rand_xorshift" version = "0.2.0" @@ -3587,7 +3540,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" dependencies = [ - "byteorder 1.3.4", + "byteorder", ] [[package]] @@ -3602,8 +3555,8 @@ version = "0.2.0" dependencies = [ "eth2_config", "eth2_ssz", - "futures", - "hex 0.3.2", + "futures 0.3.5", + "hex 0.4.2", "operation_pool", "proto_array_fork_choice", "reqwest", @@ -3611,7 +3564,7 @@ dependencies = [ "serde", "serde_json", "types", - "url 1.7.2", + "url 2.1.1", ] [[package]] @@ -3625,35 +3578,37 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.9.24" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f88643aea3c1343c804950d7bf983bd2067f5ab59db6d613a08e05572f2714ab" +checksum = "02b81e49ddec5109a9dcfc5f2a317ff53377c915e9ae9d4f2fb50914b85614e2" dependencies = [ - "base64 0.10.1", - "bytes", - "cookie", - "cookie_store", + "base64 0.11.0", + "bytes 0.5.4", "encoding_rs", - "flate2", - "futures", - "http", - "hyper 0.12.35", - "hyper-tls", + "futures-core", + "futures-util", + "http 0.2.1", + "http-body 0.3.1", + "hyper 0.13.5", + "hyper-tls 0.4.1", + "js-sys", + "lazy_static", "log 0.4.8", "mime 0.3.16", "mime_guess", "native-tls", + "percent-encoding 2.1.0", + "pin-project-lite", "serde", "serde_json", "serde_urlencoded", "time", - "tokio", - "tokio-executor", - "tokio-io", - "tokio-threadpool", - "tokio-timer 0.2.13", - "url 1.7.2", - "uuid 0.7.4", + "tokio 0.2.20", + "tokio-tls 0.3.1", + "url 2.1.1", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", "winreg", ] @@ -3667,16 +3622,16 @@ dependencies = [ "eth2_config", "eth2_ssz", "eth2_ssz_derive", - "futures", - "hex 0.3.2", - "http", - "hyper 0.12.35", + "futures 0.3.5", + "hex 0.4.2", + "http 0.2.1", + "hyper 0.13.5", "lazy_static", "lighthouse_metrics", "network", "node_test_rig", "operation_pool", - "parking_lot 0.9.0", + "parking_lot 0.10.2", "rayon", "remote_beacon_node", "rest_types", @@ -3689,7 +3644,7 @@ dependencies = [ "slot_clock", "state_processing", "store", - "tokio", + "tokio 0.2.20", "tree_hash", "types", "url 2.1.1", @@ -3801,11 +3756,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" +checksum = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" dependencies = [ - "base64 0.10.1", + "base64 0.11.0", "log 0.4.8", "ring", "sct", @@ -3814,12 +3769,13 @@ dependencies = [ [[package]] name = "rw-stream-sink" -version = "0.1.2" -source = 
"git+https://github.com/SigP/rust-libp2p?rev=71cf486b4d992862f5a05f9f4ef5e5c1631f4add#71cf486b4d992862f5a05f9f4ef5e5c1631f4add" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "bytes", - "futures", - "tokio-io", + "futures 0.3.5", + "pin-project", + "static_assertions", ] [[package]] @@ -3849,9 +3805,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", "winapi 0.3.8", @@ -3869,12 +3825,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" -[[package]] -name = "scopeguard" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" - [[package]] name = "scopeguard" version = "1.1.0" @@ -3911,9 +3861,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f331b9025654145cd425b9ded0caf8f5ae0df80d418b326e2dc1c3dc5eb0620" +checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" dependencies = [ "bitflags 1.2.1", "core-foundation", @@ -3955,37 +3905,37 @@ checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" [[package]] name = "serde" -version = "1.0.106" +version = "1.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399" +checksum = 
"99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.106" +version = "1.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" +checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "serde_hex" version = "0.2.0" dependencies = [ - "hex 0.3.2", + "hex 0.4.2", "serde", ] [[package]] name = "serde_json" -version = "1.0.52" +version = "1.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7894c8ed05b7a3a279aeb79025fdec1d3158080b75b98a08faf2806bb799edd" +checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2" dependencies = [ "itoa", "ryu", @@ -3998,28 +3948,28 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd02c7587ec314570041b2754829f84d873ced14a96d1fd1823531e11db40573" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "serde_urlencoded" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642dd69105886af2efd227f75a520ec9b44a820d65bc133a9131f7d229fd165a" +checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" dependencies = [ "dtoa", "itoa", "serde", - "url 1.7.2", + "url 2.1.1", ] [[package]] name = "serde_yaml" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "691b17f19fc1ec9d94ec0b5864859290dff279dbd7b03f017afda54eb36c3c35" +checksum = "16c7a592a1ec97c9c1c68d75b6e537dcbf60c7618e038e7841e00af1d9ccf0c4" dependencies = [ "dtoa", "linked-hash-map", @@ -4070,6 +4020,16 @@ dependencies = [ 
"opaque-debug", ] +[[package]] +name = "signal-hook-registry" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +dependencies = [ + "arc-swap", + "libc", +] + [[package]] name = "simple_logger" version = "1.6.0" @@ -4088,12 +4048,12 @@ name = "simulator" version = "0.2.0" dependencies = [ "clap", - "env_logger 0.7.1", + "env_logger", "eth1_test_rig", - "futures", + "futures 0.3.5", "node_test_rig", - "parking_lot 0.9.0", - "tokio", + "parking_lot 0.10.2", + "tokio 0.2.20", "types", "validator_client", ] @@ -4161,25 +4121,13 @@ dependencies = [ "slog", ] -[[package]] -name = "slog-stdlog" -version = "3.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1c469573d1e3f36f9eee66cd132206caf47b50c94b1f6c6e7b4d8235e9ecf01" -dependencies = [ - "crossbeam 0.2.12", - "log 0.3.9", - "slog", - "slog-scope", -] - [[package]] name = "slog-stdlog" version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4d87903baf655da2d82bc3ac3f7ef43868c58bf712b3a661fda72009304c23" dependencies = [ - "crossbeam 0.7.3", + "crossbeam", "log 0.4.8", "slog", "slog-scope", @@ -4200,20 +4148,19 @@ dependencies = [ [[package]] name = "sloggers" -version = "0.3.6" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31bef221d42166d6708aa1e9b0182324b37a0a7517ff590ec201dbfe1cfa46ef" +checksum = "f01d37507aa6f37490cfa08d71e2639b16906e84c285ae4b9f7ec7ca35756d69" dependencies = [ "chrono", "libflate", "regex", "serde", - "serde_derive", "slog", "slog-async", "slog-kvfilter", "slog-scope", - "slog-stdlog 3.0.5", + "slog-stdlog", "slog-term", "trackable", ] @@ -4224,7 +4171,7 @@ version = "0.2.0" dependencies = [ "lazy_static", "lighthouse_metrics", - "parking_lot 0.9.0", + "parking_lot 0.10.2", "types", ] @@ -4256,30 +4203,47 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "afb767eee7d257ba202f0b9b08673bc13b22281632ef45267b19f13100accd2f" dependencies = [ "arrayref", + "blake2-rfc", + "chacha20-poly1305-aead", + "rand 0.7.3", "rand_core 0.5.1", "ring", "rustc_version", + "sha2", "subtle 2.2.2", + "x25519-dalek", +] + +[[package]] +name = "socket2" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "winapi 0.3.8", ] [[package]] name = "soketto" -version = "0.2.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bceb1a3a15232d013d9a3b7cac9e5ce8e2313f348f01d4bc1097e5e53aa07095" +checksum = "1c9dab3f95c9ebdf3a88268c19af668f637a3c5039c2c56ff2d40b1b2d64a25b" dependencies = [ - "base64 0.10.1", - "bytes", + "base64 0.11.0", + "bytes 0.5.4", "flate2", - "futures", - "http", + "futures 0.3.5", + "http 0.2.1", "httparse", "log 0.4.8", - "rand 0.6.5", + "rand 0.7.3", "sha1", - "smallvec 0.6.13", - "tokio-codec", - "tokio-io", + "smallvec 1.4.0", + "static_assertions", + "thiserror", ] [[package]] @@ -4288,12 +4252,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "stable_deref_trait" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" - [[package]] name = "state_processing" version = "0.2.0" @@ -4302,13 +4260,13 @@ dependencies = [ "beacon_chain", "bls", "criterion", - "env_logger 0.7.1", + "env_logger", "eth2_hashing", "eth2_ssz", "eth2_ssz_types", "int_to_bytes", "integer-sqrt", - "itertools 0.8.2", + "itertools 0.9.0", "lazy_static", "log 0.4.8", "merkle_proof", @@ -4346,12 +4304,12 @@ dependencies = [ "db-key", 
"eth2_ssz", "eth2_ssz_derive", - "itertools 0.8.2", + "itertools 0.9.0", "lazy_static", "leveldb", "lighthouse_metrics", - "lru 0.4.3", - "parking_lot 0.9.0", + "lru", + "parking_lot 0.10.2", "rayon", "serde", "serde_derive", @@ -4378,7 +4336,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" dependencies = [ - "bytes", + "bytes 0.4.12", ] [[package]] @@ -4406,30 +4364,19 @@ dependencies = [ "criterion", "eth2_hashing", "ethereum-types", - "hex 0.3.2", + "hex 0.4.2", "yaml-rust", ] [[package]] name = "syn" -version = "0.15.44" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +checksum = "dd1b5e337360b1fae433c59fcafa0c6b77c605e92540afa5221a7b81a9eca91d" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid 0.1.0", -] - -[[package]] -name = "syn" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e5aa70697bb26ee62214ae3288465ecec0000f05182f039b477001f08f5ae7" -dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", - "unicode-xid 0.2.0", + "proc-macro2", + "quote", + "unicode-xid", ] [[package]] @@ -4438,10 +4385,10 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", - "unicode-xid 0.2.0", + "proc-macro2", + "quote", + "syn", + "unicode-xid", ] [[package]] @@ -4503,8 +4450,8 @@ dependencies = [ name = "test_random_derive" version = "0.2.0" dependencies = [ - "quote 0.6.13", - "syn 0.15.44", + "quote", + "syn", ] [[package]] @@ -4516,6 +4463,26 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "thiserror" +version = "1.0.16" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d12a1dae4add0f0d568eebc7bf142f145ba1aa2544cafb195c76f0f409091b60" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f34e0c1caaa462fd840ec6b768946ea1e7842620d94fe29d5b847138f521269" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "thread_local" version = "1.0.1" @@ -4540,11 +4507,11 @@ name = "timer" version = "0.2.0" dependencies = [ "beacon_chain", - "futures", + "futures 0.3.5", "parking_lot 0.10.2", "slog", "slot_clock", - "tokio", + "tokio 0.2.20", "types", ] @@ -4570,7 +4537,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2" dependencies = [ - "crunchy 0.2.2", + "crunchy", ] [[package]] @@ -4579,7 +4546,7 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" dependencies = [ - "crunchy 0.2.2", + "crunchy", ] [[package]] @@ -4598,8 +4565,8 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "mio", "num_cpus", "tokio-codec", @@ -4616,15 +4583,39 @@ dependencies = [ "tokio-uds 0.2.6", ] +[[package]] +name = "tokio" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05c1d570eb1a36f0345a5ce9c6c6e665b70b73d11236912c0b477616aeec47b1" +dependencies = [ + "bytes 0.5.4", + "fnv", + "futures-core", + "iovec", + "lazy_static", + "libc", + "memchr", + "mio", + "mio-named-pipes", + "mio-uds", + "num_cpus", + "pin-project-lite", + "signal-hook-registry", + "slab 0.4.2", + "tokio-macros", + 
"winapi 0.3.8", +] + [[package]] name = "tokio-buf" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" dependencies = [ - "bytes", + "bytes 0.4.12", "either", - "futures", + "futures 0.1.29", ] [[package]] @@ -4633,8 +4624,8 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "tokio-io", ] @@ -4644,13 +4635,13 @@ version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aeeffbbb94209023feaef3c196a41cbcdafa06b4a6f893f68779bb5e53796f71" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "iovec", "log 0.4.8", "mio", "scoped-tls 0.1.2", - "tokio", + "tokio 0.1.22", "tokio-executor", "tokio-io", "tokio-reactor", @@ -4663,22 +4654,10 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" dependencies = [ - "futures", + "futures 0.1.29", "tokio-executor", ] -[[package]] -name = "tokio-dns-unofficial" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82c65483db54eb91b4ef3a9389a3364558590faf30ce473141707c0e16fda975" -dependencies = [ - "futures", - "futures-cpupool", - "lazy_static", - "tokio", -] - [[package]] name = "tokio-executor" version = "0.1.10" @@ -4686,7 +4665,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" dependencies = [ "crossbeam-utils", - "futures", + "futures 0.1.29", ] [[package]] @@ -4695,7 +4674,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" dependencies = [ - "futures", + "futures 0.1.29", "tokio-io", "tokio-threadpool", ] @@ -4706,21 +4685,30 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "log 0.4.8", ] [[package]] name = "tokio-io-timeout" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135ce81f15cfd7982fac684f9057a1299eebeb79e98a8a709969b9aa51123129" +checksum = "9390a43272c8a6ac912ed1d1e2b6abeafd5047e05530a2fa304deee041a06215" dependencies = [ - "bytes", - "futures", - "tokio-io", - "tokio-timer 0.2.13", + "bytes 0.5.4", + "tokio 0.2.20", +] + +[[package]] +name = "tokio-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -4730,7 +4718,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" dependencies = [ "crossbeam-utils", - "futures", + "futures 0.1.29", "lazy_static", "log 0.4.8", "mio", @@ -4742,20 +4730,6 @@ dependencies = [ "tokio-sync", ] -[[package]] -name = "tokio-rustls" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d7cf08f990090abd6c6a73cab46fed62f85e8aef8b99e4b918a9f4a637f0676" -dependencies = [ - "bytes", - "futures", - "iovec", - "rustls", - "tokio-io", - "webpki", -] - [[package]] name = "tokio-sync" version = "0.1.8" @@ -4763,7 +4737,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" dependencies = [ "fnv", - "futures", + 
"futures 0.1.29", ] [[package]] @@ -4772,8 +4746,8 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "iovec", "mio", "tokio-io", @@ -4789,7 +4763,7 @@ dependencies = [ "crossbeam-deque", "crossbeam-queue", "crossbeam-utils", - "futures", + "futures 0.1.29", "lazy_static", "log 0.4.8", "num_cpus", @@ -4803,7 +4777,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6131e780037787ff1b3f8aad9da83bca02438b72277850dd6ad0d455e0e20efc" dependencies = [ - "futures", + "futures 0.1.29", "slab 0.3.0", ] @@ -4814,7 +4788,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" dependencies = [ "crossbeam-utils", - "futures", + "futures 0.1.29", "slab 0.4.2", "tokio-executor", ] @@ -4825,19 +4799,29 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "354b8cd83825b3c20217a9dc174d6a0c67441a2fae5c41bcb1ea6679f6ae0f7c" dependencies = [ - "futures", + "futures 0.1.29", "native-tls", "tokio-io", ] +[[package]] +name = "tokio-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" +dependencies = [ + "native-tls", + "tokio 0.2.20", +] + [[package]] name = "tokio-udp" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "log 0.4.8", "mio", "tokio-codec", @@ -4851,8 +4835,8 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"65ae5d255ce739e8537221ed2942e0445f4b3b813daebac1c0050ddaaa3587f9" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "iovec", "libc", "log 0.3.9", @@ -4868,8 +4852,8 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5076db410d6fdc6523df7595447629099a1fdc47b3d9f896220780fa48faf798" dependencies = [ - "bytes", - "futures", + "bytes 0.4.12", + "futures 0.1.29", "iovec", "libc", "log 0.4.8", @@ -4880,6 +4864,21 @@ dependencies = [ "tokio-reactor", ] +[[package]] +name = "tokio-util" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +dependencies = [ + "bytes 0.5.4", + "futures-core", + "futures-io", + "futures-sink", + "log 0.4.8", + "pin-project-lite", + "tokio 0.2.20", +] + [[package]] name = "toml" version = "0.5.6" @@ -4890,22 +4889,28 @@ dependencies = [ ] [[package]] -name = "trackable" -version = "0.2.23" +name = "tower-service" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11475c3c53b075360eac9794965822cb053996046545f91cf61d90e00b72efa5" +checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" + +[[package]] +name = "trackable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30fb6e13d129dd92c501458f64d56c708e3685e3fd307e878ec5f934c5c5bdb0" dependencies = [ "trackable_derive", ] [[package]] name = "trackable_derive" -version = "0.1.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edcf0b9b2caa5f4804ef77aeee1b929629853d806117c48258f402b69737e65c" +checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" dependencies = [ - "quote 1.0.4", - "syn 1.0.19", + "quote", + "syn", ] [[package]] @@ -4932,8 +4937,8 @@ dependencies = [ name = "tree_hash_derive" version = "0.2.0" dependencies = [ - 
"quote 0.6.13", - "syn 0.15.44", + "quote", + "syn", ] [[package]] @@ -4942,15 +4947,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" -[[package]] -name = "try_from" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "283d3b89e1368717881a9d51dad843cc435380d8109c9e47d38780a324698d8b" -dependencies = [ - "cfg-if", -] - [[package]] name = "twofish" version = "0.2.0" @@ -4958,7 +4954,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712d261e83e727c8e2dbb75dacac67c36e35db36a958ee504f2164fc052434e1" dependencies = [ "block-cipher-trait", - "byteorder 1.3.4", + "byteorder", "opaque-debug", ] @@ -4986,19 +4982,19 @@ dependencies = [ "criterion", "derivative", "dirs", - "env_logger 0.7.1", + "env_logger", "eth2_hashing", "eth2_interop_keypairs", "eth2_ssz", "eth2_ssz_derive", "eth2_ssz_types", "ethereum-types", - "hex 0.3.2", + "hex 0.4.2", "int_to_bytes", "log 0.4.8", "merkle_proof", "rand 0.7.3", - "rand_xorshift 0.2.0", + "rand_xorshift", "rayon", "safe_arith", "serde", @@ -5020,8 +5016,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "173cd16430c206dc1a430af8a89a0e9c076cf15cb42b4aedb10e8cc8fee73681" dependencies = [ "arbitrary", - "byteorder 1.3.4", - "crunchy 0.2.2", + "byteorder", + "crunchy", "rustc-hex", "static_assertions", ] @@ -5062,18 +5058,18 @@ dependencies = [ "smallvec 1.4.0", ] +[[package]] +name = "unicode-segmentation" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" + [[package]] name = "unicode-width" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" -[[package]] -name = "unicode-xid" -version = "0.1.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" - [[package]] name = "unicode-xid" version = "0.2.0" @@ -5082,12 +5078,21 @@ checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" [[package]] name = "unsigned-varint" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f0023a96687fe169081e8adce3f65e3874426b7886e9234d490af2dc077959" +version = "0.3.3" +source = "git+https://github.com/sigp/unsigned-varint?branch=latest-codecs#76fc423494e59f1ec4c8948bd0d3ae3c09851909" dependencies = [ - "bytes", - "tokio-codec", + "bytes 0.5.4", + "tokio-util", +] + +[[package]] +name = "unsigned-varint" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f67332660eb59a6f1eb24ff1220c9e8d01738a8503c6002e30bcfe4bd9f2b4a9" +dependencies = [ + "bytes 0.5.4", + "futures_codec", ] [[package]] @@ -5118,15 +5123,6 @@ dependencies = [ "percent-encoding 2.1.0", ] -[[package]] -name = "uuid" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90dbc611eb48397705a6b0f6e917da23ae517e4d127123d2cf7674206627d32a" -dependencies = [ - "rand 0.6.5", -] - [[package]] name = "uuid" version = "0.8.1" @@ -5153,11 +5149,11 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "exit-future", - "futures", - "hex 0.3.2", + "futures 0.3.5", + "hex 0.4.2", "libc", "logging", - "parking_lot 0.7.1", + "parking_lot 0.10.2", "rayon", "remote_beacon_node", "rest_types", @@ -5169,8 +5165,7 @@ dependencies = [ "slog-term", "slot_clock", "tempdir", - "tokio", - "tokio-timer 0.2.13", + "tokio 0.2.20", "tree_hash", "types", "web3", @@ -5230,7 +5225,17 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ - "futures", + "futures 0.1.29", + "log 
0.4.8", + "try-lock", +] + +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ "log 0.4.8", "try-lock", ] @@ -5248,6 +5253,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c7d40d09cdbf0f4895ae58cf57d92e1e57a9dd8ed2e8390514b54a47cc5551" dependencies = [ "cfg-if", + "serde", + "serde_json", "wasm-bindgen-macro", ] @@ -5260,25 +5267,12 @@ dependencies = [ "bumpalo", "lazy_static", "log 0.4.8", - "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "proc-macro2", + "quote", + "syn", "wasm-bindgen-shared", ] -[[package]] -name = "wasm-bindgen-futures" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83420b37346c311b9ed822af41ec2e82839bfe99867ec6c54e2da43b7538771c" -dependencies = [ - "cfg-if", - "futures", - "js-sys", - "wasm-bindgen", - "web-sys", -] - [[package]] name = "wasm-bindgen-futures" version = "0.4.12" @@ -5297,7 +5291,7 @@ version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" dependencies = [ - "quote 1.0.4", + "quote", "wasm-bindgen-macro-support", ] @@ -5307,9 +5301,9 @@ version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "proc-macro2", + "quote", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5330,7 +5324,7 @@ dependencies = [ "js-sys", "scoped-tls 1.0.0", "wasm-bindgen", - "wasm-bindgen-futures 0.4.12", + "wasm-bindgen-futures", "wasm-bindgen-test-macro", ] @@ -5340,21 +5334,23 @@ version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c358c8d2507c1bae25efa069e62ea907aa28700b25c8c33dafb0b15ba4603627" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", + "proc-macro2", + "quote", ] [[package]] name = "wasm-timer" -version = "0.1.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa3e01d234bb71760e685cfafa5e2c96f8ad877c161a721646356651069e26ac" +checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" dependencies = [ - "futures", + "futures 0.3.5", "js-sys", + "parking_lot 0.9.0", + "pin-utils", "send_wrapper", - "tokio-timer 0.2.13", "wasm-bindgen", + "wasm-bindgen-futures", "web-sys", ] @@ -5378,9 +5374,9 @@ dependencies = [ "derive_more", "ethabi", "ethereum-types", - "futures", + "futures 0.1.29", "hyper 0.12.35", - "hyper-tls", + "hyper-tls 0.3.2", "jsonrpc-core", "log 0.4.8", "native-tls", @@ -5397,7 +5393,7 @@ dependencies = [ "tokio-uds 0.1.7", "url 2.1.1", "websocket", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -5419,6 +5415,15 @@ dependencies = [ "webpki", ] +[[package]] +name = "webpki-roots" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8eff4b7516a57307f9349c64bf34caa34b940b66fed4b2fb3136cb7386e5739" +dependencies = [ + "webpki", +] + [[package]] name = "websocket" version = "0.21.1" @@ -5427,16 +5432,16 @@ checksum = "8c9faed2bff8af2ea6b9f8b917d3d00b467583f6781fe3def174a9e33c879703" dependencies = [ "base64 0.9.3", "bitflags 0.9.1", - "byteorder 1.3.4", - "bytes", - "futures", + "byteorder", + "bytes 0.4.12", + "futures 0.1.29", "hyper 0.10.16", "native-tls", "rand 0.5.6", "sha1", "tokio-core", "tokio-io", - "tokio-tls", + "tokio-tls 0.2.1", "unicase 1.4.2", "url 1.7.2", ] @@ -5445,16 +5450,25 @@ dependencies = [ name = "websocket_server" version = "0.2.0" dependencies = [ - "futures", + "futures 0.3.5", "serde", "serde_derive", "serde_json", "slog", - "tokio", + "tokio 0.2.20", "types", "ws", ] +[[package]] +name = "which" +version = "3.1.1" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +dependencies = [ + "libc", +] + [[package]] name = "winapi" version = "0.2.8" @@ -5513,8 +5527,8 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c51a2c47b5798ccc774ffb93ff536aec7c4275d722fd9c740c83cdd1af1f2d94" dependencies = [ - "byteorder 1.3.4", - "bytes", + "byteorder", + "bytes 0.4.12", "httparse", "log 0.4.8", "mio", @@ -5537,13 +5551,13 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee1585dc1484373cbc1cee7aafda26634665cf449436fd6e24bfd1fad230538" +checksum = "637ff90c9540fa3073bb577e65033069e4bae7c79d49d74aa3ffdf5342a53217" dependencies = [ - "clear_on_drop", - "curve25519-dalek 1.2.3", - "rand_core 0.3.1", + "curve25519-dalek", + "rand_core 0.5.1", + "zeroize", ] [[package]] @@ -5557,28 +5571,16 @@ dependencies = [ [[package]] name = "yamux" -version = "0.2.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2758f29014c1cb7a6e74c1b1160ac8c8203be342d35b73462fc6a13cc6385423" +checksum = "84300bb493cc878f3638b981c62b4632ec1a5c52daaa3036651e8c106d3b55ea" dependencies = [ - "bytes", - "futures", + "futures 0.3.5", "log 0.4.8", "nohash-hasher", - "parking_lot 0.9.0", - "quick-error", + "parking_lot 0.10.2", "rand 0.7.3", - "tokio-codec", - "tokio-io", -] - -[[package]] -name = "zeroize" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e68403b858b6af538b11614e62dfe9ab2facba9f13a0cafb974855cfb495ec95" -dependencies = [ - "zeroize_derive 0.1.0", + "static_assertions", ] [[package]] @@ -5587,18 +5589,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" 
dependencies = [ - "zeroize_derive 1.0.0", -] - -[[package]] -name = "zeroize_derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3f07490820219949839d0027b965ffdd659d75be9220c00798762e36c6cd281" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", + "zeroize_derive", ] [[package]] @@ -5607,8 +5598,8 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ - "proc-macro2 1.0.12", - "quote 1.0.4", - "syn 1.0.19", + "proc-macro2", + "quote", + "syn", "synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index 70cbc15414..028a8fa06e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ members = [ "eth2/utils/eth2_wallet", "eth2/utils/logging", "eth2/utils/eth2_hashing", - "eth2/utils/hashmap_delay", + "eth2/utils/hashset_delay", "eth2/utils/lighthouse_metrics", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 9588767faf..21a92d5363 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -5,26 +5,27 @@ authors = ["Paul Hauner ", "Luke Anderson () -> App<'a, 'b> { ) } -pub fn cli_run(matches: &ArgMatches, mut env: Environment) -> Result<(), String> { +pub fn cli_run( + matches: &ArgMatches<'_>, + mut env: Environment, +) -> Result<(), String> { let spec = env.core_context().eth2_config.spec; let log = env.core_context().log; @@ -138,12 +137,13 @@ pub fn cli_run(matches: &ArgMatches, mut env: Environment) -> Res let tx_hash_log = log.clone(); env.runtime() - .block_on( + .block_on(async { ValidatorDirectoryBuilder::default() .spec(spec.clone()) .custom_deposit_amount(deposit_gwei) .thread_random_keypairs() .submit_eth1_deposit(web3.clone(), from_address, deposit_contract) + .await .map(move |(builder, tx_hash)| { info!( tx_hash_log, @@ -152,8 +152,8 @@ pub fn 
cli_run(matches: &ArgMatches, mut env: Environment) -> Res "index" => format!("{}/{}", i + 1, n), ); builder - }), - )? + }) + })? .create_directory(validator_dir.clone())? .write_keypair_files()? .write_eth1_data_file()? @@ -183,73 +183,59 @@ fn existing_validator_count(validator_dir: &PathBuf) -> Result { } /// Run a poll on the `eth_syncing` endpoint, blocking until the node is synced. -fn poll_until_synced(web3: Web3, log: Logger) -> impl Future + Send +async fn poll_until_synced(web3: Web3, log: Logger) -> Result<(), String> where T: Transport + Send + 'static, ::Out: Send, { - loop_fn((web3.clone(), log.clone()), move |(web3, log)| { - web3.clone() + loop { + let sync_state = web3 + .clone() .eth() .syncing() - .map_err(|e| format!("Unable to read syncing state from eth1 node: {:?}", e)) - .and_then::<_, Box + Send>>(move |sync_state| { - match sync_state { - SyncState::Syncing(SyncInfo { - current_block, - highest_block, - .. - }) => { - info!( - log, - "Waiting for eth1 node to sync"; - "est_highest_block" => format!("{}", highest_block), - "current_block" => format!("{}", current_block), - ); + .compat() + .await + .map_err(|e| format!("Unable to read syncing state from eth1 node: {:?}", e))?; + match sync_state { + SyncState::Syncing(SyncInfo { + current_block, + highest_block, + .. 
+ }) => { + info!( + log, + "Waiting for eth1 node to sync"; + "est_highest_block" => format!("{}", highest_block), + "current_block" => format!("{}", current_block), + ); - Box::new( - Delay::new(Instant::now() + SYNCING_STATE_RETRY_DELAY) - .map_err(|e| format!("Failed to trigger delay: {:?}", e)) - .and_then(|_| future::ok(Loop::Continue((web3, log)))), - ) - } - SyncState::NotSyncing => Box::new( - web3.clone() - .eth() - .block_number() - .map_err(|e| { - format!("Unable to read block number from eth1 node: {:?}", e) - }) - .and_then::<_, Box + Send>>( - |block_number| { - if block_number > 0.into() { - info!( - log, - "Eth1 node is synced"; - "head_block" => format!("{}", block_number), - ); - Box::new(future::ok(Loop::Break((web3, log)))) - } else { - Box::new( - Delay::new(Instant::now() + SYNCING_STATE_RETRY_DELAY) - .map_err(|e| { - format!("Failed to trigger delay: {:?}", e) - }) - .and_then(|_| { - info!( - log, - "Waiting for eth1 node to sync"; - "current_block" => 0, - ); - future::ok(Loop::Continue((web3, log))) - }), - ) - } - }, - ), - ), + delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await; + } + SyncState::NotSyncing => { + let block_number = web3 + .clone() + .eth() + .block_number() + .compat() + .await + .map_err(|e| format!("Unable to read block number from eth1 node: {:?}", e))?; + if block_number > 0.into() { + info!( + log, + "Eth1 node is synced"; + "head_block" => format!("{}", block_number), + ); + break; + } else { + delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await; + info!( + log, + "Waiting for eth1 node to sync"; + "current_block" => 0, + ); } - }) - }) - .map(|_| ()) + } + } + } + Ok(()) } diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index 4f3c80ec76..4ed498d12e 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -5,7 +5,8 @@ use clap::ArgMatches; use deposit_contract::DEPOSIT_GAS; use environment::{Environment, RuntimeContext}; use 
eth2_testnet_config::Eth2TestnetConfig; -use futures::{future, Future, IntoFuture, Stream}; +use futures::compat::Future01CompatExt; +use futures::{FutureExt, StreamExt}; use rayon::prelude::*; use slog::{error, info, Logger}; use std::fs; @@ -23,7 +24,7 @@ use web3::{ pub use cli::cli_app; /// Run the account manager, returning an error if the operation did not succeed. -pub fn run(matches: &ArgMatches, mut env: Environment) -> Result<(), String> { +pub fn run(matches: &ArgMatches<'_>, mut env: Environment) -> Result<(), String> { let context = env.core_context(); let log = context.log.clone(); @@ -292,7 +293,7 @@ fn make_validators( /// /// Returns success as soon as the eth1 endpoint accepts the transaction (i.e., does not wait for /// transaction success/revert). -fn deposit_validators( +async fn deposit_validators( context: RuntimeContext, eth1_endpoint: String, deposit_contract: Address, @@ -300,156 +301,154 @@ fn deposit_validators( account_index: usize, deposit_value: u64, password: Option, -) -> impl Future { +) -> Result<(), ()> { let log_1 = context.log.clone(); let log_2 = context.log.clone(); - Http::new(ð1_endpoint) - .map_err(move |e| { - error!( - log_1, - "Failed to start web3 HTTP transport"; - "error" => format!("{:?}", e) + let (event_loop, transport) = Http::new(ð1_endpoint).map_err(move |e| { + error!( + log_1, + "Failed to start web3 HTTP transport"; + "error" => format!("{:?}", e) + ) + })?; + /* + * Loop through the validator directories and submit the deposits. + */ + let web3 = Web3::new(transport); + + futures::stream::iter(validators) + .for_each(|validator| async { + let web3 = web3.clone(); + let log = log_2.clone(); + let password = password.clone(); + + let _ = deposit_validator( + web3, + deposit_contract, + validator, + deposit_value, + account_index, + password, + log, ) + .await; }) - .into_future() - /* - * Loop through the validator directories and submit the deposits. 
- */ - .and_then(move |(event_loop, transport)| { - let web3 = Web3::new(transport); - - futures::stream::iter_ok(validators) - .for_each(move |validator| { - let web3 = web3.clone(); - let log = log_2.clone(); - let password = password.clone(); - - deposit_validator( - web3, - deposit_contract, - &validator, - deposit_value, - account_index, - password, - log, - ) - }) - .map(|_| event_loop) - }) - // Web3 gives errors if the event loop is dropped whilst performing requests. + .map(|_| event_loop) + // // Web3 gives errors if the event loop is dropped whilst performing requests. .map(drop) + .await; + + Ok(()) } /// For the given `ValidatorDirectory`, submit a deposit transaction to the `web3` node. /// /// Returns success as soon as the eth1 endpoint accepts the transaction (i.e., does not wait for /// transaction success/revert). -fn deposit_validator( +async fn deposit_validator( web3: Web3, deposit_contract: Address, - validator: &ValidatorDirectory, + validator: ValidatorDirectory, deposit_amount: u64, account_index: usize, password_opt: Option, log: Logger, -) -> impl Future { - validator +) -> Result<(), ()> { + let voting_keypair = validator .voting_keypair .clone() - .ok_or_else(|| error!(log, "Validator does not have voting keypair")) - .and_then(|voting_keypair| { - validator - .deposit_data - .clone() - .ok_or_else(|| error!(log, "Validator does not have deposit data")) - .map(|deposit_data| (voting_keypair, deposit_data)) - }) - .into_future() - .and_then(move |(voting_keypair, deposit_data)| { - let pubkey_1 = voting_keypair.pk.clone(); - let pubkey_2 = voting_keypair.pk; + .ok_or_else(|| error!(log, "Validator does not have voting keypair"))?; - let web3_1 = web3.clone(); - let web3_2 = web3.clone(); + let deposit_data = validator + .deposit_data + .clone() + .ok_or_else(|| error!(log, "Validator does not have deposit data"))?; - let log_1 = log.clone(); - let log_2 = log.clone(); + let pubkey_1 = voting_keypair.pk.clone(); + let pubkey_2 = 
voting_keypair.pk; - web3.eth() - .accounts() - .map_err(|e| format!("Failed to get accounts: {:?}", e)) - .and_then(move |accounts| { - accounts - .get(account_index) - .cloned() - .ok_or_else(|| "Insufficient accounts for deposit".to_string()) - }) - /* - * If a password was supplied, unlock the account. - */ - .and_then(move |from_address| { - let future: Box + Send> = - if let Some(password) = password_opt { - // Unlock for only a single transaction. - let duration = None; + let log_1 = log.clone(); + let log_2 = log.clone(); - let future = web3_1 - .personal() - .unlock_account(from_address, &password, duration) - .then(move |result| match result { - Ok(true) => Ok(from_address), - Ok(false) => { - Err("Eth1 node refused to unlock account. Check password." - .to_string()) - } - Err(e) => Err(format!("Eth1 unlock request failed: {:?}", e)), - }); + // TODO: creating a future to extract the Error type + // check if there's a better way + let future = async move { + let accounts = web3 + .eth() + .accounts() + .compat() + .await + .map_err(|e| format!("Failed to get accounts: {:?}", e))?; - Box::new(future) - } else { - Box::new(future::ok(from_address)) - }; + let from_address = accounts + .get(account_index) + .cloned() + .ok_or_else(|| "Insufficient accounts for deposit".to_string())?; - future - }) - /* - * Submit the deposit transaction. - */ - .and_then(move |from| { - let tx_request = TransactionRequest { - from, - to: Some(deposit_contract), - gas: Some(U256::from(DEPOSIT_GAS)), - gas_price: None, - value: Some(from_gwei(deposit_amount)), - data: Some(deposit_data.into()), - nonce: None, - condition: None, - }; + /* + * If a password was supplied, unlock the account. + */ + let from = if let Some(password) = password_opt { + // Unlock for only a single transaction. 
+ let duration = None; - web3_2 - .eth() - .send_transaction(tx_request) - .map_err(|e| format!("Failed to call deposit fn: {:?}", e)) - }) - .map(move |tx| { - info!( - log_1, - "Validator deposit successful"; - "eth1_tx_hash" => format!("{:?}", tx), - "validator_voting_pubkey" => format!("{:?}", pubkey_1) + let result = web3 + .personal() + .unlock_account(from_address, &password, duration) + .compat() + .await; + match result { + Ok(true) => from_address, + Ok(false) => { + return Err::<(), String>( + "Eth1 node refused to unlock account. Check password.".to_string(), ) - }) - .map_err(move |e| { - error!( - log_2, - "Validator deposit_failed"; - "error" => e, - "validator_voting_pubkey" => format!("{:?}", pubkey_2) - ) - }) - }) + } + Err(e) => return Err::<(), String>(format!("Eth1 unlock request failed: {:?}", e)), + } + } else { + from_address + }; + + /* + * Submit the deposit transaction. + */ + let tx_request = TransactionRequest { + from, + to: Some(deposit_contract), + gas: Some(U256::from(DEPOSIT_GAS)), + gas_price: None, + value: Some(from_gwei(deposit_amount)), + data: Some(deposit_data.into()), + nonce: None, + condition: None, + }; + + let tx = web3 + .eth() + .send_transaction(tx_request) + .compat() + .await + .map_err(|e| format!("Failed to call deposit fn: {:?}", e))?; + info!( + log_1, + "Validator deposit successful"; + "eth1_tx_hash" => format!("{:?}", tx), + "validator_voting_pubkey" => format!("{:?}", pubkey_1) + ); + Ok(()) + }; + + future.await.map_err(move |e| { + error!( + log_2, + "Validator deposit_failed"; + "error" => e, + "validator_voting_pubkey" => format!("{:?}", pubkey_2) + ); + })?; + Ok(()) } /// Converts gwei to wei. 
diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 28fef848a8..048d3caf53 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -22,23 +22,22 @@ store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.33.0" -rand = "0.7.2" +rand = "0.7.3" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } -slog-term = "2.4.2" -slog-async = "2.3.0" -ctrlc = { version = "3.1.3", features = ["termination"] } -tokio = "0.1.22" -tokio-timer = "0.2.12" -exit-future = "0.1.4" +slog-term = "2.5.0" +slog-async = "2.5.0" +ctrlc = { version = "3.1.4", features = ["termination"] } +tokio = {version = "0.2.20", features = ["time"] } +exit-future = "0.2.0" env_logger = "0.7.1" dirs = "2.0.2" logging = { path = "../eth2/utils/logging" } -futures = "0.1.29" +futures = "0.3.5" environment = { path = "../lighthouse/environment" } genesis = { path = "genesis" } eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" } eth2-libp2p = { path = "./eth2-libp2p" } -eth2_ssz = { path = "../eth2/utils/ssz" } -toml = "0.5.4" -serde = "1.0.102" +eth2_ssz = "0.1.2" +toml = "0.5.6" +serde = "1.0.110" clap_utils = { path = "../eth2/utils/clap_utils" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 4611e773b5..28303540dc 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -5,24 +5,26 @@ authors = ["Paul Hauner ", "Age Manning BeaconChain { // Note: supplying some `state_root` when it is known would be a cheap and easy // optimization. 
match per_slot_processing(&mut state, skip_state_root, &self.spec) { - Ok(()) => (), + Ok(_) => (), Err(e) => { warn!( self.log, @@ -863,7 +863,14 @@ impl BeaconChain { &self, attestation: Attestation, ) -> Result, AttestationError> { - VerifiedUnaggregatedAttestation::verify(attestation, self) + metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS); + let _timer = + metrics::start_timer(&metrics::UNAGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES); + + VerifiedUnaggregatedAttestation::verify(attestation, self).map(|v| { + metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); + v + }) } /// Accepts some `SignedAggregateAndProof` from the network and attempts to verify it, @@ -872,7 +879,14 @@ impl BeaconChain { &self, signed_aggregate: SignedAggregateAndProof, ) -> Result, AttestationError> { - VerifiedAggregatedAttestation::verify(signed_aggregate, self) + metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_PROCESSING_REQUESTS); + let _timer = + metrics::start_timer(&metrics::AGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES); + + VerifiedAggregatedAttestation::verify(signed_aggregate, self).map(|v| { + metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); + v + }) } /// Accepts some attestation-type object and attempts to verify it in the context of fork @@ -887,6 +901,8 @@ impl BeaconChain { &self, unverified_attestation: &'a impl IntoForkChoiceVerifiedAttestation<'a, T>, ) -> Result, AttestationError> { + let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_APPLY_TO_FORK_CHOICE); + let verified = unverified_attestation.into_fork_choice_verified_attestation(self)?; let indexed_attestation = verified.indexed_attestation(); self.fork_choice @@ -907,6 +923,8 @@ impl BeaconChain { &self, unaggregated_attestation: VerifiedUnaggregatedAttestation, ) -> Result, AttestationError> { + let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_APPLY_TO_AGG_POOL); + let 
attestation = unaggregated_attestation.attestation(); match self.naive_aggregation_pool.insert(attestation) { @@ -950,6 +968,8 @@ impl BeaconChain { &self, signed_aggregate: VerifiedAggregatedAttestation, ) -> Result, AttestationError> { + let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_APPLY_TO_OP_POOL); + // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. if self.eth1_chain.is_some() { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 63ca502f10..cc929e530b 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -54,10 +54,12 @@ use slot_clock::SlotClock; use ssz::Encode; use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, - per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, - SlotProcessingError, + per_block_processing, + per_epoch_processing::EpochProcessingSummary, + per_slot_processing, BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, }; use std::borrow::Cow; +use std::convert::TryFrom; use std::fs; use std::io::Write; use store::{Error as DBError, StateBatch}; @@ -238,7 +240,7 @@ pub fn signature_verify_chain_segment( /// the p2p network. pub struct GossipVerifiedBlock { pub block: SignedBeaconBlock, - block_root: Hash256, + pub block_root: Hash256, parent: BeaconSnapshot, } @@ -556,6 +558,8 @@ impl FullyVerifiedBlock { }); } + let mut summaries = vec![]; + // Transition the parent state to the block slot. let mut state = parent.beacon_state; let distance = block.slot().as_u64().saturating_sub(state.slot.as_u64()); @@ -571,9 +575,12 @@ impl FullyVerifiedBlock { state_root }; - per_slot_processing(&mut state, Some(state_root), &chain.spec)?; + per_slot_processing(&mut state, Some(state_root), &chain.spec)? 
+ .map(|summary| summaries.push(summary)); } + expose_participation_metrics(&summaries); + metrics::stop_timer(catchup_timer); /* @@ -891,6 +898,45 @@ fn get_signature_verifier<'a, E: EthSpec>( ) } +fn expose_participation_metrics(summaries: &[EpochProcessingSummary]) { + if !cfg!(feature = "participation_metrics") { + return; + } + + for summary in summaries { + let b = &summary.total_balances; + + metrics::maybe_set_float_gauge( + &metrics::PARTICIPATION_PREV_EPOCH_ATTESTER, + participation_ratio(b.previous_epoch_attesters(), b.previous_epoch()), + ); + + metrics::maybe_set_float_gauge( + &metrics::PARTICIPATION_PREV_EPOCH_TARGET_ATTESTER, + participation_ratio(b.previous_epoch_target_attesters(), b.previous_epoch()), + ); + + metrics::maybe_set_float_gauge( + &metrics::PARTICIPATION_PREV_EPOCH_HEAD_ATTESTER, + participation_ratio(b.previous_epoch_head_attesters(), b.previous_epoch()), + ); + } +} + +fn participation_ratio(section: u64, total: u64) -> Option { + // Reduce the precision to help ensure we fit inside a u32. 
+ const PRECISION: u64 = 100_000_000; + + let section: f64 = u32::try_from(section / PRECISION).ok()?.into(); + let total: f64 = u32::try_from(total / PRECISION).ok()?.into(); + + if total > 0_f64 { + Some(section / total) + } else { + None + } +} + fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { if WRITE_BLOCK_PROCESSING_SSZ { let root = state.tree_hash_root(); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 66e125ddd3..5f4434fc59 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,7 +1,6 @@ use crate::metrics; use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2_hashing::hash; -use futures::Future; use slog::{debug, error, trace, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -286,11 +285,10 @@ impl> CachingEth1Backend { } /// Starts the routine which connects to the external eth1 node and updates the caches. - pub fn start( - &self, - exit: tokio::sync::oneshot::Receiver<()>, - ) -> impl Future { - self.core.auto_update(exit) + pub fn start(&self, exit: tokio::sync::oneshot::Receiver<()>) { + // don't need to spawn as a task is being spawned in auto_update + // TODO: check if this is correct + HttpService::auto_update(self.core.clone(), exit); } /// Instantiates `self` from an existing service. diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index c6e3afebcf..d10a7efa66 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,6 +1,7 @@ use crate::{BeaconChain, BeaconChainTypes}; pub use lighthouse_metrics::*; -use types::{BeaconState, Epoch, Hash256, Slot}; +use slot_clock::SlotClock; +use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; lazy_static! { /* @@ -79,25 +80,81 @@ lazy_static! 
{ "Number of attestations in a block" ); + /* + * Unaggregated Attestation Verification + */ + pub static ref UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( + "beacon_unaggregated_attestation_processing_requests_total", + "Count of all unaggregated attestations submitted for processing" + ); + pub static ref UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "beacon_unaggregated_attestation_processing_successes_total", + "Number of unaggregated attestations verified for gossip" + ); + pub static ref UNAGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_unaggregated_attestation_gossip_verification_seconds", + "Full runtime of aggregated attestation gossip verification" + ); + + /* + * Aggregated Attestation Verification + */ + pub static ref AGGREGATED_ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( + "beacon_aggregated_attestation_processing_requests_total", + "Count of all aggregated attestations submitted for processing" + ); + pub static ref AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "beacon_aggregated_attestation_processing_successes_total", + "Number of aggregated attestations verified for gossip" + ); + pub static ref AGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_aggregated_attestation_gossip_verification_seconds", + "Full runtime of aggregated attestation gossip verification" + ); + + /* + * General Attestation Processing + */ + pub static ref ATTESTATION_PROCESSING_APPLY_TO_FORK_CHOICE: Result = try_create_histogram( + "beacon_attestation_processing_apply_to_fork_choice", + "Time spent applying an attestation to fork choice" + ); + pub static ref ATTESTATION_PROCESSING_APPLY_TO_AGG_POOL: Result = try_create_histogram( + "beacon_attestation_processing_apply_to_agg_pool", + "Time spent applying an attestation to the naive aggregation pool" + ); + pub static ref 
ATTESTATION_PROCESSING_AGG_POOL_MAPS_WRITE_LOCK: Result = try_create_histogram( + "beacon_attestation_processing_agg_pool_maps_write_lock", + "Time spent waiting for the maps write lock when adding to the agg poll" + ); + pub static ref ATTESTATION_PROCESSING_AGG_POOL_PRUNE: Result = try_create_histogram( + "beacon_attestation_processing_agg_pool_prune", + "Time spent for the agg pool to prune" + ); + pub static ref ATTESTATION_PROCESSING_AGG_POOL_INSERT: Result = try_create_histogram( + "beacon_attestation_processing_agg_pool_insert", + "Time spent for the outer pool.insert() function of agg pool" + ); + pub static ref ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT: Result = try_create_histogram( + "beacon_attestation_processing_agg_pool_core_insert", + "Time spent for the core map.insert() function of agg pool" + ); + pub static ref ATTESTATION_PROCESSING_AGG_POOL_AGGREGATION: Result = try_create_histogram( + "beacon_attestation_processing_agg_pool_aggregation", + "Time spent doing signature aggregation when adding to the agg poll" + ); + pub static ref ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP: Result = try_create_histogram( + "beacon_attestation_processing_agg_pool_create_map", + "Time spent for creating a map for a new slot" + ); + pub static ref ATTESTATION_PROCESSING_APPLY_TO_OP_POOL: Result = try_create_histogram( + "beacon_attestation_processing_apply_to_op_pool", + "Time spent applying an attestation to the block inclusion pool" + ); + /* * Attestation Processing */ - pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_attestation_processing_requests_total", - "Count of all attestations submitted for processing" - ); - pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_attestation_processing_successes_total", - "total_attestation_processing_successes" - ); - pub static ref ATTESTATION_PROCESSING_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_seconds", - 
"Full runtime of attestation processing" - ); - pub static ref ATTESTATION_PROCESSING_INITIAL_VALIDATION_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_initial_validation_seconds", - "Time spent on the initial_validation of attestation processing" - ); pub static ref ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES: Result = try_create_histogram( "beacon_attestation_processing_shuffling_cache_wait_seconds", "Time spent on waiting for the shuffling cache lock during attestation processing" @@ -251,6 +308,34 @@ lazy_static! { try_create_int_gauge("beacon_op_pool_proposer_slashings_total", "Count of proposer slashings in the op pool"); pub static ref OP_POOL_NUM_VOLUNTARY_EXITS: Result = try_create_int_gauge("beacon_op_pool_voluntary_exits_total", "Count of voluntary exits in the op pool"); + + /* + * Participation Metrics + */ + pub static ref PARTICIPATION_PREV_EPOCH_ATTESTER: Result = try_create_float_gauge( + "beacon_participation_prev_epoch_attester", + "Ratio of attesting balances to total balances" + ); + pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTER: Result = try_create_float_gauge( + "beacon_participation_prev_epoch_target_attester", + "Ratio of target-attesting balances to total balances" + ); + pub static ref PARTICIPATION_PREV_EPOCH_HEAD_ATTESTER: Result = try_create_float_gauge( + "beacon_participation_prev_epoch_head_attester", + "Ratio of head-attesting balances to total balances" + ); + + /* + * Attestation Observation Metrics + */ + pub static ref ATTN_OBSERVATION_PREV_EPOCH_ATTESTERS: Result = try_create_int_gauge( + "beacon_attn_observation_epoch_attesters", + "Count of attesters that have been seen by the beacon chain in the previous epoch" + ); + pub static ref ATTN_OBSERVATION_PREV_EPOCH_AGGREGATORS: Result = try_create_int_gauge( + "beacon_attn_observation_epoch_aggregators", + "Count of aggregators that have been seen by the beacon chain in the previous epoch" + ); } /// Scrape the `beacon_chain` for metrics 
that are not constantly updated (e.g., the present slot, @@ -260,6 +345,10 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { scrape_head_state::(&head.beacon_state, head.beacon_state_root) } + if let Some(slot) = beacon_chain.slot_clock.now() { + scrape_attestation_observation(slot, beacon_chain); + } + set_gauge_by_usize( &OP_POOL_NUM_ATTESTATIONS, beacon_chain.op_pool.num_attestations(), @@ -332,6 +421,24 @@ fn scrape_head_state(state: &BeaconState, state set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index); } +fn scrape_attestation_observation(slot_now: Slot, chain: &BeaconChain) { + let prev_epoch = slot_now.epoch(T::EthSpec::slots_per_epoch()) - 1; + + if let Some(count) = chain + .observed_attesters + .observed_validator_count(prev_epoch) + { + set_gauge_by_usize(&ATTN_OBSERVATION_PREV_EPOCH_ATTESTERS, count); + } + + if let Some(count) = chain + .observed_aggregators + .observed_validator_count(prev_epoch) + { + set_gauge_by_usize(&ATTN_OBSERVATION_PREV_EPOCH_AGGREGATORS, count); + } +} + fn set_gauge_by_slot(gauge: &Result, value: Slot) { set_gauge(gauge, value.as_u64() as i64); } diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 0996843076..4a3e042ac7 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -1,3 +1,4 @@ +use crate::metrics; use parking_lot::RwLock; use std::collections::HashMap; use types::{Attestation, AttestationData, EthSpec, Slot}; @@ -68,6 +69,8 @@ impl AggregatedAttestationMap { /// /// The given attestation (`a`) must only have one signature. 
pub fn insert(&mut self, a: &Attestation) -> Result { + let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT); + let set_bits = a .aggregation_bits .iter() @@ -93,6 +96,8 @@ impl AggregatedAttestationMap { { Ok(InsertOutcome::SignatureAlreadyKnown { committee_index }) } else { + let _timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_AGGREGATION); existing_attestation.aggregate(a); Ok(InsertOutcome::SignatureAggregated { committee_index }) } @@ -164,8 +169,9 @@ impl NaiveAggregationPool { /// The pool may be pruned if the given `attestation.data` has a slot higher than any /// previously seen. pub fn insert(&self, attestation: &Attestation) -> Result { + let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_INSERT); let slot = attestation.data.slot; - let lowest_permissible_slot = *self.lowest_permissible_slot.read(); + let lowest_permissible_slot: Slot = *self.lowest_permissible_slot.read(); // Reject any attestations that are too old. if slot < lowest_permissible_slot { @@ -175,11 +181,15 @@ impl NaiveAggregationPool { }); } + let lock_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_MAPS_WRITE_LOCK); let mut maps = self.maps.write(); + drop(lock_timer); let outcome = if let Some(map) = maps.get_mut(&slot) { map.insert(attestation) } else { + let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP); // To avoid re-allocations, try and determine a rough initial capacity for the new item // by obtaining the mean size of all items in earlier epoch. let (count, sum) = maps @@ -219,8 +229,19 @@ impl NaiveAggregationPool { /// Removes any attestations with a slot lower than `current_slot` and bars any future /// attestations with a slot lower than `current_slot - SLOTS_RETAINED`. 
pub fn prune(&self, current_slot: Slot) { + let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_PRUNE); + // Taking advantage of saturating subtraction on `Slot`. let lowest_permissible_slot = current_slot - Slot::from(SLOTS_RETAINED); + + // No need to prune if the lowest permissible slot has not changed and the queue length is + // less than the maximum + if *self.lowest_permissible_slot.read() == lowest_permissible_slot + && self.maps.read().len() <= SLOTS_RETAINED + { + return; + } + *self.lowest_permissible_slot.write() = lowest_permissible_slot; let mut maps = self.maps.write(); diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index 8be39853ff..bc428fd5d1 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -36,9 +36,12 @@ pub trait Item { /// The default capacity for self. Used when we can't guess a reasonable size. fn default_capacity() -> usize; - /// Returns the number of validator indices stored in `self`. + /// Returns the allocated size of `self`, measured by validator indices. fn len(&self) -> usize; + /// Returns the number of validators that have been observed by `self`. + fn validator_count(&self) -> usize; + /// Store `validator_index` in `self`. fn insert(&mut self, validator_index: usize) -> bool; @@ -67,6 +70,10 @@ impl Item for EpochBitfield { self.bitfield.len() } + fn validator_count(&self) -> usize { + self.bitfield.iter().filter(|bit| **bit).count() + } + fn insert(&mut self, validator_index: usize) -> bool { self.bitfield .get_mut(validator_index) @@ -116,6 +123,10 @@ impl Item for EpochHashSet { self.set.len() } + fn validator_count(&self) -> usize { + self.set.len() + } + /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was /// already in the set. 
fn insert(&mut self, validator_index: usize) -> bool { @@ -219,6 +230,15 @@ impl AutoPruningContainer { Ok(exists) } + /// Returns the number of validators that have been observed at the given `epoch`. Returns + /// `None` if `self` does not have a cache for that epoch. + pub fn observed_validator_count(&self, epoch: Epoch) -> Option { + self.items + .read() + .get(&epoch) + .map(|item| item.validator_count()) + } + fn sanitize_request(&self, a: &Attestation, validator_index: usize) -> Result<(), Error> { if validator_index > E::ValidatorRegistryLimit::to_usize() { return Err(Error::ValidatorIndexTooHigh(validator_index)); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index a6a2421616..a7d8aaa467 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Age Manning "] edition = "2018" [dev-dependencies] -sloggers = "0.3.4" -toml = "^0.5" +sloggers = "1.0.0" +toml = "0.5.6" [dependencies] beacon_chain = { path = "../beacon_chain" } @@ -15,27 +15,27 @@ network = { path = "../network" } timer = { path = "../timer" } eth2-libp2p = { path = "../eth2-libp2p" } rest_api = { path = "../rest_api" } -parking_lot = "0.9.0" +parking_lot = "0.10.2" websocket_server = { path = "../websocket_server" } -prometheus = "0.7.0" +prometheus = "0.8.0" types = { path = "../../eth2/types" } tree_hash = "0.1.0" eth2_config = { path = "../../eth2/utils/eth2_config" } slot_clock = { path = "../../eth2/utils/slot_clock" } -serde = "1.0.102" -serde_derive = "1.0.102" -error-chain = "0.12.1" +serde = "1.0.110" +serde_derive = "1.0.110" +error-chain = "0.12.2" serde_yaml = "0.8.11" slog = { version = "2.5.2", features = ["max_level_trace"] } -slog-async = "2.3.0" -tokio = "0.1.22" +slog-async = "2.5.0" +tokio = "0.2.20" dirs = "2.0.2" -futures = "0.1.29" -reqwest = "0.9.22" -url = "2.1.0" +futures = "0.3.5" +reqwest = "0.10.4" +url = "2.1.1" eth1 = { path = "../eth1" } genesis = { path = "../genesis" } environment = { 
path = "../../lighthouse/environment" } -eth2_ssz = { path = "../../eth2/utils/ssz" } +eth2_ssz = "0.1.2" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index ace8a91012..8e0699bfe5 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -13,7 +13,6 @@ use environment::RuntimeContext; use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth2_config::Eth2Config; use eth2_libp2p::NetworkGlobals; -use futures::{future, Future, IntoFuture}; use genesis::{interop_genesis_state, Eth1GenesisService}; use network::{NetworkConfig, NetworkMessage, NetworkService}; use slog::info; @@ -109,11 +108,11 @@ where /// Initializes the `BeaconChainBuilder`. The `build_beacon_chain` method will need to be /// called later in order to actually instantiate the `BeaconChain`. - pub fn beacon_chain_builder( + pub async fn beacon_chain_builder( mut self, client_genesis: ClientGenesis, config: ClientConfig, - ) -> impl Future { + ) -> Result { let store = self.store.clone(); let store_migrator = self.store_migrator.take(); let chain_spec = self.chain_spec.clone(); @@ -122,123 +121,94 @@ where let data_dir = config.data_dir.clone(); let disabled_forks = config.disabled_forks.clone(); - future::ok(()) - .and_then(move |()| { - let store = store - .ok_or_else(|| "beacon_chain_start_method requires a store".to_string())?; - let store_migrator = store_migrator.ok_or_else(|| { - "beacon_chain_start_method requires a store migrator".to_string() - })?; - let context = runtime_context - .ok_or_else(|| { - "beacon_chain_start_method requires a runtime context".to_string() - })? 
- .service_context("beacon".into()); - let spec = chain_spec - .ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?; + let store = + store.ok_or_else(|| "beacon_chain_start_method requires a store".to_string())?; + let store_migrator = store_migrator + .ok_or_else(|| "beacon_chain_start_method requires a store migrator".to_string())?; + let context = runtime_context + .ok_or_else(|| "beacon_chain_start_method requires a runtime context".to_string())? + .service_context("beacon".into()); + let spec = chain_spec + .ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?; - let builder = BeaconChainBuilder::new(eth_spec_instance) - .logger(context.log.clone()) - .store(store) - .store_migrator(store_migrator) - .data_dir(data_dir) - .custom_spec(spec.clone()) - .disabled_forks(disabled_forks); + let builder = BeaconChainBuilder::new(eth_spec_instance) + .logger(context.log.clone()) + .store(store) + .store_migrator(store_migrator) + .data_dir(data_dir) + .custom_spec(spec.clone()) + .disabled_forks(disabled_forks); - Ok((builder, spec, context)) - }) - .and_then(move |(builder, spec, context)| { - let chain_exists = builder - .store_contains_beacon_chain() - .unwrap_or_else(|_| false); + let chain_exists = builder + .store_contains_beacon_chain() + .unwrap_or_else(|_| false); - // If the client is expect to resume but there's no beacon chain in the database, - // use the `DepositContract` method. This scenario is quite common when the client - // is shutdown before finding genesis via eth1. - // - // Alternatively, if there's a beacon chain in the database then always resume - // using it. - let client_genesis = if client_genesis == ClientGenesis::FromStore && !chain_exists - { - info!(context.log, "Defaulting to deposit contract genesis"); + // If the client is expect to resume but there's no beacon chain in the database, + // use the `DepositContract` method. 
This scenario is quite common when the client + // is shutdown before finding genesis via eth1. + // + // Alternatively, if there's a beacon chain in the database then always resume + // using it. + let client_genesis = if client_genesis == ClientGenesis::FromStore && !chain_exists { + info!(context.log, "Defaulting to deposit contract genesis"); - ClientGenesis::DepositContract - } else if chain_exists { - ClientGenesis::FromStore - } else { - client_genesis - }; + ClientGenesis::DepositContract + } else if chain_exists { + ClientGenesis::FromStore + } else { + client_genesis + }; - let genesis_state_future: Box + Send> = - match client_genesis { - ClientGenesis::Interop { - validator_count, - genesis_time, - } => { - let keypairs = generate_deterministic_keypairs(validator_count); - let result = interop_genesis_state(&keypairs, genesis_time, &spec); + let (beacon_chain_builder, eth1_service_option) = match client_genesis { + ClientGenesis::Interop { + validator_count, + genesis_time, + } => { + let keypairs = generate_deterministic_keypairs(validator_count); + let genesis_state = interop_genesis_state(&keypairs, genesis_time, &spec)?; + builder.genesis_state(genesis_state).map(|v| (v, None))? + } + ClientGenesis::SszBytes { + genesis_state_bytes, + } => { + info!( + context.log, + "Starting from known genesis state"; + ); - let future = result - .and_then(move |genesis_state| builder.genesis_state(genesis_state)) - .into_future() - .map(|v| (v, None)); + let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes) + .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; - Box::new(future) - } - ClientGenesis::SszBytes { - genesis_state_bytes, - } => { - info!( - context.log, - "Starting from known genesis state"; - ); + builder.genesis_state(genesis_state).map(|v| (v, None))? 
+ } + ClientGenesis::DepositContract => { + info!( + context.log, + "Waiting for eth2 genesis from eth1"; + "eth1_endpoint" => &config.eth1.endpoint, + "contract_deploy_block" => config.eth1.deposit_contract_deploy_block, + "deposit_contract" => &config.eth1.deposit_contract_address + ); - let result = BeaconState::from_ssz_bytes(&genesis_state_bytes) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e)); + let genesis_service = Eth1GenesisService::new(config.eth1, context.log.clone()); - let future = result - .and_then(move |genesis_state| builder.genesis_state(genesis_state)) - .into_future() - .map(|v| (v, None)); + let genesis_state = genesis_service + .wait_for_genesis_state( + Duration::from_millis(ETH1_GENESIS_UPDATE_INTERVAL_MILLIS), + context.eth2_config().spec.clone(), + ) + .await?; - Box::new(future) - } - ClientGenesis::DepositContract => { - info!( - context.log, - "Waiting for eth2 genesis from eth1"; - "eth1_endpoint" => &config.eth1.endpoint, - "contract_deploy_block" => config.eth1.deposit_contract_deploy_block, - "deposit_contract" => &config.eth1.deposit_contract_address - ); + builder + .genesis_state(genesis_state) + .map(|v| (v, Some(genesis_service.into_core_service())))? 
+ } + ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?, + }; - let genesis_service = - Eth1GenesisService::new(config.eth1, context.log.clone()); - - let future = genesis_service - .wait_for_genesis_state( - Duration::from_millis(ETH1_GENESIS_UPDATE_INTERVAL_MILLIS), - context.eth2_config().spec.clone(), - ) - .and_then(move |genesis_state| builder.genesis_state(genesis_state)) - .map(|v| (v, Some(genesis_service.into_core_service()))); - - Box::new(future) - } - ClientGenesis::FromStore => { - let future = builder.resume_from_db().into_future().map(|v| (v, None)); - - Box::new(future) - } - }; - - genesis_state_future - }) - .map(move |(beacon_chain_builder, eth1_service_option)| { - self.eth1_service = eth1_service_option; - self.beacon_chain_builder = Some(beacon_chain_builder); - self - }) + self.eth1_service = eth1_service_option; + self.beacon_chain_builder = Some(beacon_chain_builder); + Ok(self) } /// Immediately starts the networking stack. @@ -251,10 +221,10 @@ where .runtime_context .as_ref() .ok_or_else(|| "network requires a runtime_context")? - .service_context("network".into()); + .clone(); let (network_globals, network_send, network_exit) = - NetworkService::start(beacon_chain, config, &context.executor, context.log) + NetworkService::start(beacon_chain, config, &context.runtime_handle, context.log) .map_err(|e| format!("Failed to start network: {:?}", e))?; self.network_globals = Some(network_globals); @@ -281,13 +251,10 @@ where .ok_or_else(|| "node timer requires a chain spec".to_string())? 
.milliseconds_per_slot; - let timer_exit = timer::spawn( - &context.executor, - beacon_chain, - milliseconds_per_slot, - context.log, - ) - .map_err(|e| format!("Unable to start node timer: {}", e))?; + let timer_exit = context + .runtime_handle + .enter(|| timer::spawn(beacon_chain, milliseconds_per_slot)) + .map_err(|e| format!("Unable to start node timer: {}", e))?; self.exit_channels.push(timer_exit); @@ -323,21 +290,23 @@ where network_chan: network_send, }; - let (exit_channel, listening_addr) = rest_api::start_server( - &client_config.rest_api, - &context.executor, - beacon_chain, - network_info, - client_config - .create_db_path() - .map_err(|_| "unable to read data dir")?, - client_config - .create_freezer_db_path() - .map_err(|_| "unable to read freezer DB dir")?, - eth2_config.clone(), - context.log, - ) - .map_err(|e| format!("Failed to start HTTP API: {:?}", e))?; + let log = context.log.clone(); + let (exit_channel, listening_addr) = context.runtime_handle.enter(|| { + rest_api::start_server( + &client_config.rest_api, + beacon_chain, + network_info, + client_config + .create_db_path() + .map_err(|_| "unable to read data dir")?, + client_config + .create_freezer_db_path() + .map_err(|_| "unable to read freezer DB dir")?, + eth2_config.clone(), + log, + ) + .map_err(|e| format!("Failed to start HTTP API: {:?}", e)) + })?; self.exit_channels.push(exit_channel); self.http_listen_addr = Some(listening_addr); @@ -366,13 +335,17 @@ where .ok_or_else(|| "slot_notifier requires a chain spec".to_string())? 
.milliseconds_per_slot; - let exit_channel = spawn_notifier( - context, - beacon_chain, - network_globals, - milliseconds_per_slot, - ) - .map_err(|e| format!("Unable to start slot notifier: {}", e))?; + let exit_channel = context + .runtime_handle + .enter(|| { + spawn_notifier( + beacon_chain, + network_globals, + milliseconds_per_slot, + context.log.clone(), + ) + }) + .map_err(|e| format!("Unable to start slot notifier: {}", e))?; self.exit_channels.push(exit_channel); @@ -468,8 +441,9 @@ where Option<_>, Option<_>, ) = if config.enabled { - let (sender, exit, listening_addr) = - websocket_server::start_server(&config, &context.executor, &context.log)?; + let (sender, exit, listening_addr) = context + .runtime_handle + .enter(|| websocket_server::start_server(&config, &context.log))?; (sender, Some(exit), Some(listening_addr)) } else { (WebSocketSender::dummy(), None, None) @@ -688,7 +662,7 @@ where }; // Starts the service that connects to an eth1 node and periodically updates caches. - context.executor.spawn(backend.start(exit)); + context.runtime_handle.enter(|| backend.start(exit)); self.beacon_chain_builder = Some(beacon_chain_builder.eth1_backend(Some(backend))); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1bb649b99e..aa13536537 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,14 +1,12 @@ use crate::metrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use environment::RuntimeContext; use eth2_libp2p::NetworkGlobals; -use futures::{Future, Stream}; +use futures::prelude::*; use parking_lot::Mutex; use slog::{debug, error, info, warn}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::{Duration, Instant}; -use tokio::timer::Interval; use types::{EthSpec, Slot}; /// Create a warning log whenever the peer count is at or below this value. 
@@ -27,15 +25,11 @@ const SPEEDO_OBSERVATIONS: usize = 4; /// Spawns a notifier service which periodically logs information about the node. pub fn spawn_notifier( - context: RuntimeContext, beacon_chain: Arc>, network: Arc>, milliseconds_per_slot: u64, + log: slog::Logger, ) -> Result, String> { - let log_1 = context.log.clone(); - let log_2 = context.log.clone(); - let log_3 = context.log.clone(); - let slot_duration = Duration::from_millis(milliseconds_per_slot); let duration_to_next_slot = beacon_chain .slot_clock @@ -43,29 +37,26 @@ pub fn spawn_notifier( .ok_or_else(|| "slot_notifier unable to determine time to next slot")?; // Run this half way through each slot. - let start_instant = Instant::now() + duration_to_next_slot + (slot_duration / 2); + let start_instant = tokio::time::Instant::now() + duration_to_next_slot + (slot_duration / 2); // Run this each slot. let interval_duration = slot_duration; let speedo = Mutex::new(Speedo::default()); + let mut interval = tokio::time::interval_at(start_instant, interval_duration); - let interval_future = Interval::new(start_instant, interval_duration) - .map_err( - move |e| error!(log_1, "Slot notifier timer failed"; "error" => format!("{:?}", e)), - ) - .for_each(move |_| { - let log = log_2.clone(); - + let interval_future = async move { + while let Some(_) = interval.next().await { let connected_peer_count = network.connected_peers(); let sync_state = network.sync_state(); - let head_info = beacon_chain.head_info() - .map_err(|e| error!( + let head_info = beacon_chain.head_info().map_err(|e| { + error!( log, "Failed to get beacon chain head info"; "error" => format!("{:?}", e) - ))?; + ) + })?; let head_slot = head_info.slot; let current_slot = beacon_chain.slot().map_err(|e| { @@ -83,7 +74,10 @@ pub fn spawn_notifier( let mut speedo = speedo.lock(); speedo.observe(head_slot, Instant::now()); - metrics::set_gauge(&metrics::SYNC_SLOTS_PER_SECOND, speedo.slots_per_second().unwrap_or_else(|| 0_f64) as i64); + 
metrics::set_gauge( + &metrics::SYNC_SLOTS_PER_SECOND, + speedo.slots_per_second().unwrap_or_else(|| 0_f64) as i64, + ); // The next two lines take advantage of saturating subtraction on `Slot`. let head_distance = current_slot - head_slot; @@ -101,10 +95,9 @@ pub fn spawn_notifier( "head_block" => format!("{}", head_root), "head_slot" => head_slot, "current_slot" => current_slot, - "sync_state" =>format!("{}", sync_state) + "sync_state" =>format!("{}", sync_state) ); - // Log if we are syncing if sync_state.is_syncing() { let distance = format!( @@ -122,9 +115,13 @@ pub fn spawn_notifier( ); } else { if sync_state.is_synced() { - let block_info = if current_slot > head_slot { format!(" … empty") } else { format!("{}", head_root) }; + let block_info = if current_slot > head_slot { + format!(" … empty") + } else { + format!("{}", head_root) + }; info!( - log_2, + log, "Synced"; "peers" => peer_count_pretty(connected_peer_count), "finalized_root" => format!("{}", finalized_root), @@ -135,7 +132,7 @@ pub fn spawn_notifier( ); } else { info!( - log_2, + log, "Searching for peers"; "peers" => peer_count_pretty(connected_peer_count), "finalized_root" => format!("{}", finalized_root), @@ -145,25 +142,14 @@ pub fn spawn_notifier( ); } } - Ok(()) - }) - .then(move |result| { - match result { - Ok(()) => Ok(()), - Err(e) => { - error!( - log_3, - "Notifier failed to notify"; - "error" => format!("{:?}", e) - ); - Ok(()) - } } }); + } + Ok::<(), ()>(()) + }; let (exit_signal, exit) = tokio::sync::oneshot::channel(); - context - .executor - .spawn(interval_future.select(exit).map(|_| ()).map_err(|_| ())); + // run the notifier on the current executor + tokio::spawn(futures::future::select(Box::pin(interval_future), exit)); Ok(exit_signal) } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index a4bb724315..e4085e2429 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -7,25 +7,26 @@ edition = "2018" [dev-dependencies] 
eth1_test_rig = { path = "../../tests/eth1_test_rig" } environment = { path = "../../lighthouse/environment" } -toml = "^0.5" +toml = "0.5.6" web3 = "0.10.0" +sloggers = "1.0.0" [dependencies] -reqwest = "0.9" -futures = "0.1.25" -serde_json = "1.0" -serde = { version = "1.0", features = ["derive"] } -hex = "0.3" +reqwest = "0.10.4" +futures = { version = "0.3.5", features = ["compat"] } +serde_json = "1.0.52" +serde = { version = "1.0.110", features = ["derive"] } +hex = "0.4.2" types = { path = "../../eth2/types"} merkle_proof = { path = "../../eth2/utils/merkle_proof"} -eth2_ssz = { path = "../../eth2/utils/ssz"} +eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" -tree_hash = { path = "../../eth2/utils/tree_hash"} -eth2_hashing = { path = "../../eth2/utils/eth2_hashing"} -parking_lot = "0.7" -slog = "^2.2.3" -tokio = "0.1.22" +tree_hash = "0.1.0" +eth2_hashing = "0.1.0" +parking_lot = "0.10.2" +slog = "2.5.2" +tokio = { version = "0.2.20", features = ["full"] } state_processing = { path = "../../eth2/state_processing" } -libflate = "0.1" +libflate = "1.0.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics"} lazy_static = "1.4.0" diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index 130195245f..c8b5961a76 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -10,8 +10,8 @@ //! //! There is no ABI parsing here, all function signatures and topics are hard-coded as constants. -use futures::{Future, Stream}; -use reqwest::{header::CONTENT_TYPE, r#async::ClientBuilder, StatusCode}; +use futures::future::TryFutureExt; +use reqwest::{header::CONTENT_TYPE, ClientBuilder, StatusCode}; use serde_json::{json, Value}; use std::ops::Range; use std::time::Duration; @@ -40,80 +40,73 @@ pub struct Block { /// Returns the current block number. /// /// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. 
-pub fn get_block_number( - endpoint: &str, - timeout: Duration, -) -> impl Future { - send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout) - .and_then(|response_body| { - hex_to_u64_be( - response_result(&response_body)? - .ok_or_else(|| "No result field was returned for block number".to_string())? - .as_str() - .ok_or_else(|| "Data was not string")?, - ) - }) - .map_err(|e| format!("Failed to get block number: {}", e)) +pub async fn get_block_number(endpoint: &str, timeout: Duration) -> Result { + let response_body = send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout).await?; + hex_to_u64_be( + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for block number".to_string())? + .as_str() + .ok_or_else(|| "Data was not string")?, + ) + .map_err(|e| format!("Failed to get block number: {}", e)) } /// Gets a block hash by block number. /// /// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub fn get_block( +pub async fn get_block( endpoint: &str, block_number: u64, timeout: Duration, -) -> impl Future { +) -> Result { let params = json!([ format!("0x{:x}", block_number), false // do not return full tx objects. ]); - send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout) - .and_then(|response_body| { - let hash = hex_to_bytes( - response_result(&response_body)? - .ok_or_else(|| "No result field was returned for block".to_string())? - .get("hash") - .ok_or_else(|| "No hash for block")? - .as_str() - .ok_or_else(|| "Block hash was not string")?, - )?; - let hash = if hash.len() == 32 { - Ok(Hash256::from_slice(&hash)) - } else { - Err(format!("Block has was not 32 bytes: {:?}", hash)) - }?; + let response_body = send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout).await?; + let hash = hex_to_bytes( + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for block".to_string())? + .get("hash") + .ok_or_else(|| "No hash for block")? 
+ .as_str() + .ok_or_else(|| "Block hash was not string")?, + )?; + let hash = if hash.len() == 32 { + Ok(Hash256::from_slice(&hash)) + } else { + Err(format!("Block has was not 32 bytes: {:?}", hash)) + }?; - let timestamp = hex_to_u64_be( - response_result(&response_body)? - .ok_or_else(|| "No result field was returned for timestamp".to_string())? - .get("timestamp") - .ok_or_else(|| "No timestamp for block")? - .as_str() - .ok_or_else(|| "Block timestamp was not string")?, - )?; + let timestamp = hex_to_u64_be( + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for timestamp".to_string())? + .get("timestamp") + .ok_or_else(|| "No timestamp for block")? + .as_str() + .ok_or_else(|| "Block timestamp was not string")?, + )?; - let number = hex_to_u64_be( - response_result(&response_body)? - .ok_or_else(|| "No result field was returned for number".to_string())? - .get("number") - .ok_or_else(|| "No number for block")? - .as_str() - .ok_or_else(|| "Block number was not string")?, - )?; + let number = hex_to_u64_be( + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for number".to_string())? + .get("number") + .ok_or_else(|| "No number for block")? + .as_str() + .ok_or_else(|| "Block number was not string")?, + )?; - if number <= usize::max_value() as u64 { - Ok(Block { - hash, - timestamp, - number, - }) - } else { - Err(format!("Block number {} is larger than a usize", number)) - } + if number <= usize::max_value() as u64 { + Ok(Block { + hash, + timestamp, + number, }) - .map_err(|e| format!("Failed to get block number: {}", e)) + } else { + Err(format!("Block number {} is larger than a usize", number)) + } + .map_err(|e| format!("Failed to get block number: {}", e)) } /// Returns the value of the `get_deposit_count()` call at the given `address` for the given @@ -122,20 +115,21 @@ pub fn get_block( /// Assumes that the `address` has the same ABI as the eth2 deposit contract. 
/// /// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub fn get_deposit_count( +pub async fn get_deposit_count( endpoint: &str, address: &str, block_number: u64, timeout: Duration, -) -> impl Future, Error = String> { - call( +) -> Result, String> { + let result = call( endpoint, address, DEPOSIT_COUNT_FN_SIGNATURE, block_number, timeout, ) - .and_then(|result| match result { + .await?; + match result { None => Err("Deposit root response was none".to_string()), Some(bytes) => { if bytes.is_empty() { @@ -151,7 +145,7 @@ pub fn get_deposit_count( )) } } - }) + } } /// Returns the value of the `get_hash_tree_root()` call at the given `block_number`. @@ -159,20 +153,21 @@ pub fn get_deposit_count( /// Assumes that the `address` has the same ABI as the eth2 deposit contract. /// /// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub fn get_deposit_root( +pub async fn get_deposit_root( endpoint: &str, address: &str, block_number: u64, timeout: Duration, -) -> impl Future, Error = String> { - call( +) -> Result, String> { + let result = call( endpoint, address, DEPOSIT_ROOT_FN_SIGNATURE, block_number, timeout, ) - .and_then(|result| match result { + .await?; + match result { None => Err("Deposit root response was none".to_string()), Some(bytes) => { if bytes.is_empty() { @@ -186,7 +181,7 @@ pub fn get_deposit_root( )) } } - }) + } } /// Performs a instant, no-transaction call to the contract `address` with the given `0x`-prefixed @@ -195,13 +190,13 @@ pub fn get_deposit_root( /// Returns bytes, if any. /// /// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -fn call( +async fn call( endpoint: &str, address: &str, hex_data: &str, block_number: u64, timeout: Duration, -) -> impl Future>, Error = String> { +) -> Result>, String> { let params = json! 
([ { "to": address, @@ -210,19 +205,18 @@ fn call( format!("0x{:x}", block_number) ]); - send_rpc_request(endpoint, "eth_call", params, timeout).and_then(|response_body| { - match response_result(&response_body)? { - None => Ok(None), - Some(result) => { - let hex = result - .as_str() - .map(|s| s.to_string()) - .ok_or_else(|| "'result' value was not a string".to_string())?; + let response_body = send_rpc_request(endpoint, "eth_call", params, timeout).await?; + match response_result(&response_body)? { + None => Ok(None), + Some(result) => { + let hex = result + .as_str() + .map(|s| s.to_string()) + .ok_or_else(|| "'result' value was not a string".to_string())?; - Ok(Some(hex_to_bytes(&hex)?)) - } + Ok(Some(hex_to_bytes(&hex)?)) } - }) + } } /// A reduced set of fields from an Eth1 contract log. @@ -238,12 +232,12 @@ pub struct Log { /// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not. /// /// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub fn get_deposit_logs_in_range( +pub async fn get_deposit_logs_in_range( endpoint: &str, address: &str, block_height_range: Range, timeout: Duration, -) -> impl Future, Error = String> { +) -> Result, String> { let params = json! ([{ "address": address, "topics": [DEPOSIT_EVENT_TOPIC], @@ -251,46 +245,44 @@ pub fn get_deposit_logs_in_range( "toBlock": format!("0x{:x}", block_height_range.end), }]); - send_rpc_request(endpoint, "eth_getLogs", params, timeout) - .and_then(|response_body| { - response_result(&response_body)? - .ok_or_else(|| "No result field was returned for deposit logs".to_string())? - .as_array() - .cloned() - .ok_or_else(|| "'result' value was not an array".to_string())? - .into_iter() - .map(|value| { - let block_number = value - .get("blockNumber") - .ok_or_else(|| "No block number field in log")? 
- .as_str() - .ok_or_else(|| "Block number was not string")?; + let response_body = send_rpc_request(endpoint, "eth_getLogs", params, timeout).await?; + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for deposit logs".to_string())? + .as_array() + .cloned() + .ok_or_else(|| "'result' value was not an array".to_string())? + .into_iter() + .map(|value| { + let block_number = value + .get("blockNumber") + .ok_or_else(|| "No block number field in log")? + .as_str() + .ok_or_else(|| "Block number was not string")?; - let data = value - .get("data") - .ok_or_else(|| "No block number field in log")? - .as_str() - .ok_or_else(|| "Data was not string")?; + let data = value + .get("data") + .ok_or_else(|| "No block number field in log")? + .as_str() + .ok_or_else(|| "Data was not string")?; - Ok(Log { - block_number: hex_to_u64_be(&block_number)?, - data: hex_to_bytes(data)?, - }) - }) - .collect::, String>>() + Ok(Log { + block_number: hex_to_u64_be(&block_number)?, + data: hex_to_bytes(data)?, + }) }) + .collect::, String>>() .map_err(|e| format!("Failed to get logs in range: {}", e)) } /// Sends an RPC request to `endpoint`, using a POST with the given `body`. /// /// Tries to receive the response and parse the body as a `String`. -pub fn send_rpc_request( +pub async fn send_rpc_request( endpoint: &str, method: &str, params: Value, timeout: Duration, -) -> impl Future { +) -> Result { let body = json! ({ "jsonrpc": "2.0", "method": method, @@ -303,7 +295,7 @@ pub fn send_rpc_request( // // A better solution would be to create some struct that contains a built client and pass it // around (similar to the `web3` crate's `Transport` structs). 
- ClientBuilder::new() + let response = ClientBuilder::new() .timeout(timeout) .build() .expect("The builder should always build a client") @@ -312,43 +304,32 @@ pub fn send_rpc_request( .body(body) .send() .map_err(|e| format!("Request failed: {:?}", e)) - .and_then(|response| { - if response.status() != StatusCode::OK { - Err(format!( - "Response HTTP status was not 200 OK: {}.", - response.status() - )) - } else { - Ok(response) - } - }) - .and_then(|response| { - response - .headers() - .get(CONTENT_TYPE) - .ok_or_else(|| "No content-type header in response".to_string()) - .and_then(|encoding| { - encoding - .to_str() - .map(|s| s.to_string()) - .map_err(|e| format!("Failed to parse content-type header: {}", e)) - }) - .map(|encoding| (response, encoding)) - }) - .and_then(|(response, encoding)| { - response - .into_body() - .concat2() - .map(|chunk| chunk.iter().cloned().collect::>()) - .map_err(|e| format!("Failed to receive body: {:?}", e)) - .and_then(move |bytes| match encoding.as_str() { - "application/json" => Ok(bytes), - "application/json; charset=utf-8" => Ok(bytes), - other => Err(format!("Unsupported encoding: {}", other)), - }) - .map(|bytes| String::from_utf8_lossy(&bytes).into_owned()) - .map_err(|e| format!("Failed to receive body: {:?}", e)) + .await?; + if response.status() != StatusCode::OK { + return Err(format!( + "Response HTTP status was not 200 OK: {}.", + response.status() + )); + }; + let encoding = response + .headers() + .get(CONTENT_TYPE) + .ok_or_else(|| "No content-type header in response".to_string())? 
+ .to_str() + .map(|s| s.to_string()) + .map_err(|e| format!("Failed to parse content-type header: {}", e))?; + + response + .bytes() + .map_err(|e| format!("Failed to receive body: {:?}", e)) + .await + .and_then(move |bytes| match encoding.as_str() { + "application/json" => Ok(bytes), + "application/json; charset=utf-8" => Ok(bytes), + other => Err(format!("Unsupported encoding: {}", other)), }) + .map(|bytes| String::from_utf8_lossy(&bytes).into_owned()) + .map_err(|e| format!("Failed to receive body: {:?}", e)) } /// Accepts an entire HTTP body (as a string) and returns the `result` field, as a serde `Value`. diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 770dcb79c0..a33096b26b 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -2,21 +2,18 @@ use crate::metrics; use crate::{ block_cache::{BlockCache, Error as BlockCacheError, Eth1Block}, deposit_cache::Error as DepositCacheError, - http::{get_block, get_block_number, get_deposit_logs_in_range}, + http::{get_block, get_block_number, get_deposit_logs_in_range, Log}, inner::{DepositUpdater, Inner}, DepositLog, }; -use futures::{ - future::{loop_fn, Loop}, - stream, Future, Stream, -}; +use futures::{future::TryFutureExt, stream, stream::TryStreamExt, StreamExt}; use parking_lot::{RwLock, RwLockReadGuard}; use serde::{Deserialize, Serialize}; use slog::{debug, error, info, trace, Logger}; use std::ops::{Range, RangeInclusive}; use std::sync::Arc; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -use tokio::timer::Delay; +use std::time::{SystemTime, UNIX_EPOCH}; +use tokio::time::{interval_at, Duration, Instant}; const STANDARD_TIMEOUT_MILLIS: u64 = 15_000; @@ -241,63 +238,40 @@ impl Service { /// - Err(_) if there is an error. /// /// Emits logs for debugging and errors. 
- pub fn update( - &self, - ) -> impl Future - { - let log_a = self.log.clone(); - let log_b = self.log.clone(); - let inner_1 = self.inner.clone(); - let inner_2 = self.inner.clone(); + pub async fn update( + service: Self, + ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> { + let update_deposit_cache = async { + let outcome = Service::update_deposit_cache(service.clone()) + .await + .map_err(|e| format!("Failed to update eth1 cache: {:?}", e))?; - let deposit_future = self - .update_deposit_cache() - .map_err(|e| format!("Failed to update eth1 cache: {:?}", e)) - .then(move |result| { - match &result { - Ok(DepositCacheUpdateOutcome { logs_imported }) => trace!( - log_a, - "Updated eth1 deposit cache"; - "cached_deposits" => inner_1.deposit_cache.read().cache.len(), - "logs_imported" => logs_imported, - "last_processed_eth1_block" => inner_1.deposit_cache.read().last_processed_block, - ), - Err(e) => error!( - log_a, - "Failed to update eth1 deposit cache"; - "error" => e - ), - }; + trace!( + service.log, + "Updated eth1 deposit cache"; + "cached_deposits" => service.inner.deposit_cache.read().cache.len(), + "logs_imported" => outcome.logs_imported, + "last_processed_eth1_block" => service.inner.deposit_cache.read().last_processed_block, + ); + Ok(outcome) + }; - result - }); + let update_block_cache = async { + let outcome = Service::update_block_cache(service.clone()) + .await + .map_err(|e| format!("Failed to update eth1 cache: {:?}", e))?; - let block_future = self - .update_block_cache() - .map_err(|e| format!("Failed to update eth1 cache: {:?}", e)) - .then(move |result| { - match &result { - Ok(BlockCacheUpdateOutcome { - blocks_imported, - head_block_number, - }) => trace!( - log_b, - "Updated eth1 block cache"; - "cached_blocks" => inner_2.block_cache.read().len(), - "blocks_imported" => blocks_imported, - "head_block" => head_block_number, - ), - Err(e) => error!( - log_b, - "Failed to update eth1 block cache"; - "error" => e 
- ), - }; + trace!( + service.log, + "Updated eth1 block cache"; + "cached_blocks" => service.inner.block_cache.read().len(), + "blocks_imported" => outcome.blocks_imported, + "head_block" => outcome.head_block_number, + ); + Ok(outcome) + }; - result - }); - - deposit_future.join(block_future) + futures::try_join!(update_deposit_cache, update_block_cache) } /// A looping future that updates the cache, then waits `config.auto_update_interval` before @@ -309,56 +283,42 @@ impl Service { /// - Err(_) if there is an error. /// /// Emits logs for debugging and errors. - pub fn auto_update( - &self, - exit: tokio::sync::oneshot::Receiver<()>, - ) -> impl Future { - let service = self.clone(); - let log = self.log.clone(); - let update_interval = Duration::from_millis(self.config().auto_update_interval_millis); + pub fn auto_update(service: Self, exit: tokio::sync::oneshot::Receiver<()>) { + let update_interval = Duration::from_millis(service.config().auto_update_interval_millis); - let loop_future = loop_fn((), move |()| { - let service = service.clone(); - let log_a = log.clone(); - let log_b = log.clone(); + let mut interval = interval_at(Instant::now(), update_interval); - service - .update() - .then(move |update_result| { - match update_result { - Err(e) => error!( - log_a, - "Failed to update eth1 cache"; - "retry_millis" => update_interval.as_millis(), - "error" => e, - ), - Ok((deposit, block)) => debug!( - log_a, - "Updated eth1 cache"; - "retry_millis" => update_interval.as_millis(), - "blocks" => format!("{:?}", block), - "deposits" => format!("{:?}", deposit), - ), - }; + let update_future = async move { + while interval.next().await.is_some() { + Service::do_update(service.clone(), update_interval) + .await + .ok(); + } + }; - // Do not break the loop if there is an update failure. 
- Ok(()) - }) - .and_then(move |_| Delay::new(Instant::now() + update_interval)) - .then(move |timer_result| { - if let Err(e) = timer_result { - error!( - log_b, - "Failed to trigger eth1 cache update delay"; - "error" => format!("{:?}", e), - ); - } - // Do not break the loop if there is an timer failure. - Ok(Loop::Continue(())) - }) - }); + let future = futures::future::select(Box::pin(update_future), exit); - loop_future.select(exit).map(|_| ()).map_err(|_| ()) + tokio::task::spawn(future); + } + + async fn do_update(service: Self, update_interval: Duration) -> Result<(), ()> { + let update_result = Service::update(service.clone()).await; + match update_result { + Err(e) => error!( + service.log, + "Failed to update eth1 cache"; + "retry_millis" => update_interval.as_millis(), + "error" => e, + ), + Ok((deposit, block)) => debug!( + service.log, + "Updated eth1 cache"; + "retry_millis" => update_interval.as_millis(), + "blocks" => format!("{:?}", block), + "deposits" => format!("{:?}", deposit), + ), + }; + Ok(()) } /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured @@ -373,135 +333,126 @@ impl Service { /// - Err(_) if there is an error. /// /// Emits logs for debugging and errors. 
- pub fn update_deposit_cache( - &self, - ) -> impl Future { - let service_1 = self.clone(); - let service_2 = self.clone(); - let service_3 = self.clone(); - let blocks_per_log_query = self.config().blocks_per_log_query; - let max_log_requests_per_update = self + pub async fn update_deposit_cache(service: Self) -> Result { + let endpoint = service.config().endpoint.clone(); + let follow_distance = service.config().follow_distance; + let deposit_contract_address = service.config().deposit_contract_address.clone(); + + let blocks_per_log_query = service.config().blocks_per_log_query; + let max_log_requests_per_update = service .config() .max_log_requests_per_update .unwrap_or_else(usize::max_value); - let next_required_block = self + let next_required_block = service .deposits() .read() .last_processed_block .map(|n| n + 1) - .unwrap_or_else(|| self.config().deposit_contract_deploy_block); + .unwrap_or_else(|| service.config().deposit_contract_deploy_block); - get_new_block_numbers( - &self.config().endpoint, - next_required_block, - self.config().follow_distance, - ) - .map(move |range| { + let range = get_new_block_numbers(&endpoint, next_required_block, follow_distance).await?; + + let block_number_chunks = if let Some(range) = range { range - .map(|range| { - range - .collect::>() - .chunks(blocks_per_log_query) - .take(max_log_requests_per_update) - .map(|vec| { - let first = vec.first().cloned().unwrap_or_else(|| 0); - let last = vec.last().map(|n| n + 1).unwrap_or_else(|| 0); - first..last - }) - .collect::>>() + .collect::>() + .chunks(blocks_per_log_query) + .take(max_log_requests_per_update) + .map(|vec| { + let first = vec.first().cloned().unwrap_or_else(|| 0); + let last = vec.last().map(|n| n + 1).unwrap_or_else(|| 0); + first..last }) - .unwrap_or_else(|| vec![]) - }) - .and_then(move |block_number_chunks| { - stream::unfold( - block_number_chunks.into_iter(), - move |mut chunks| match chunks.next() { + .collect::>>() + } else { + Vec::new() + }; + + 
let logs: Vec<(Range, Vec)> = + stream::try_unfold(block_number_chunks.into_iter(), |mut chunks| async { + match chunks.next() { Some(chunk) => { let chunk_1 = chunk.clone(); - Some( - get_deposit_logs_in_range( - &service_1.config().endpoint, - &service_1.config().deposit_contract_address, - chunk, - Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), - ) - .map_err(Error::GetDepositLogsFailed) - .map(|logs| (chunk_1, logs)) - .map(|logs| (logs, chunks)), + match get_deposit_logs_in_range( + &endpoint, + &deposit_contract_address, + chunk, + Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), ) + .await + { + Ok(logs) => Ok(Some(((chunk_1, logs), chunks))), + Err(e) => Err(Error::GetDepositLogsFailed(e)), + } } - None => None, - }, - ) - .fold(0, move |mut sum, (block_range, log_chunk)| { - let mut cache = service_2.deposits().write(); - - log_chunk - .into_iter() - .map(|raw_log| { - DepositLog::from_log(&raw_log).map_err(|error| { - Error::FailedToParseDepositLog { - block_range: block_range.clone(), - error, - } - }) - }) - // Return early if any of the logs cannot be parsed. - // - // This costs an additional `collect`, however it enforces that no logs are - // imported if any one of them cannot be parsed. - .collect::, _>>()? - .into_iter() - .map(|deposit_log| { - cache - .cache - .insert_log(deposit_log) - .map_err(Error::FailedToInsertDeposit)?; - - sum += 1; - - Ok(()) - }) - // Returns if a deposit is unable to be added to the cache. - // - // If this error occurs, the cache will no longer be guaranteed to hold either - // none or all of the logs for each block (i.e., they may exist _some_ logs for - // a block, but not _all_ logs for that block). This scenario can cause the - // node to choose an invalid genesis state or propose an invalid block. 
- .collect::>()?; - - cache.last_processed_block = Some(block_range.end.saturating_sub(1)); - - metrics::set_gauge(&metrics::DEPOSIT_CACHE_LEN, cache.cache.len() as i64); - metrics::set_gauge( - &metrics::HIGHEST_PROCESSED_DEPOSIT_BLOCK, - cache.last_processed_block.unwrap_or_else(|| 0) as i64, - ); - - Ok(sum) - }) - .map(move |logs_imported| { - if logs_imported > 0 { - info!( - service_3.log, - "Imported deposit log(s)"; - "latest_block" => service_3.inner.deposit_cache.read().cache.latest_block_number(), - "total" => service_3.deposit_cache_len(), - "new" => logs_imported - ); - } else { - debug!( - service_3.log, - "No new deposits found"; - "latest_block" => service_3.inner.deposit_cache.read().cache.latest_block_number(), - "total_deposits" => service_3.deposit_cache_len(), - ); + None => Ok(None), } - - DepositCacheUpdateOutcome { logs_imported } }) - }) + .try_collect() + .await?; + + let mut logs_imported = 0; + for (block_range, log_chunk) in logs.iter() { + let mut cache = service.deposits().write(); + log_chunk + .into_iter() + .map(|raw_log| { + DepositLog::from_log(&raw_log).map_err(|error| Error::FailedToParseDepositLog { + block_range: block_range.clone(), + error, + }) + }) + // Return early if any of the logs cannot be parsed. + // + // This costs an additional `collect`, however it enforces that no logs are + // imported if any one of them cannot be parsed. + .collect::, _>>()? + .into_iter() + .map(|deposit_log| { + cache + .cache + .insert_log(deposit_log) + .map_err(Error::FailedToInsertDeposit)?; + + logs_imported += 1; + + Ok(()) + }) + // Returns if a deposit is unable to be added to the cache. + // + // If this error occurs, the cache will no longer be guaranteed to hold either + // none or all of the logs for each block (i.e., they may exist _some_ logs for + // a block, but not _all_ logs for that block). This scenario can cause the + // node to choose an invalid genesis state or propose an invalid block. 
+ .collect::>()?; + + cache.last_processed_block = Some(block_range.end.saturating_sub(1)); + + metrics::set_gauge(&metrics::DEPOSIT_CACHE_LEN, cache.cache.len() as i64); + metrics::set_gauge( + &metrics::HIGHEST_PROCESSED_DEPOSIT_BLOCK, + cache.last_processed_block.unwrap_or_else(|| 0) as i64, + ); + } + + if logs_imported > 0 { + info!( + service.log, + "Imported deposit log(s)"; + "latest_block" => service.inner.deposit_cache.read().cache.latest_block_number(), + "total" => service.deposit_cache_len(), + "new" => logs_imported + ); + } else { + debug!( + service.log, + "No new deposits found"; + "latest_block" => service.inner.deposit_cache.read().cache.latest_block_number(), + "total_deposits" => service.deposit_cache_len(), + ); + } + + Ok(DepositCacheUpdateOutcome { logs_imported }) } /// Contacts the remote eth1 node and attempts to import all blocks up to the configured @@ -515,218 +466,249 @@ impl Service { /// - Err(_) if there is an error. /// /// Emits logs for debugging and errors. 
- pub fn update_block_cache(&self) -> impl Future { - let cache_1 = self.inner.clone(); - let cache_2 = self.inner.clone(); - let cache_3 = self.inner.clone(); - let cache_4 = self.inner.clone(); - let cache_5 = self.inner.clone(); - let cache_6 = self.inner.clone(); - - let service_1 = self.clone(); - - let block_cache_truncation = self.config().block_cache_truncation; - let max_blocks_per_update = self + pub async fn update_block_cache(service: Self) -> Result { + let block_cache_truncation = service.config().block_cache_truncation; + let max_blocks_per_update = service .config() .max_blocks_per_update .unwrap_or_else(usize::max_value); - let next_required_block = cache_1 + let next_required_block = service + .inner .block_cache .read() .highest_block_number() .map(|n| n + 1) - .unwrap_or_else(|| self.config().lowest_cached_block_number); + .unwrap_or_else(|| service.config().lowest_cached_block_number); - get_new_block_numbers( - &self.config().endpoint, - next_required_block, - self.config().follow_distance, - ) + let endpoint = service.config().endpoint.clone(); + let follow_distance = service.config().follow_distance; + + let range = get_new_block_numbers(&endpoint, next_required_block, follow_distance).await?; // Map the range of required blocks into a Vec. // // If the required range is larger than the size of the cache, drop the exiting cache // because it's exipred and just download enough blocks to fill the cache. - .and_then(move |range| { - range - .map(|range| { - if range.start() > range.end() { - // Note: this check is not strictly necessary, however it remains to safe - // guard against any regression which may cause an underflow in a following - // subtraction operation. 
- Err(Error::Internal("Range was not increasing".into())) - } else { - let range_size = range.end() - range.start(); - let max_size = block_cache_truncation - .map(|n| n as u64) - .unwrap_or_else(u64::max_value); - if range_size > max_size { - // If the range of required blocks is larger than `max_size`, drop all - // existing blocks and download `max_size` count of blocks. - let first_block = range.end() - max_size; - (*cache_5.block_cache.write()) = BlockCache::default(); - Ok((first_block..=*range.end()).collect::>()) - } else { - Ok(range.collect::>()) + let required_block_numbers = if let Some(range) = range { + if range.start() > range.end() { + // Note: this check is not strictly necessary, however it remains to safe + // guard against any regression which may cause an underflow in a following + // subtraction operation. + return Err(Error::Internal("Range was not increasing".into())); + } else { + let range_size = range.end() - range.start(); + let max_size = block_cache_truncation + .map(|n| n as u64) + .unwrap_or_else(u64::max_value); + if range_size > max_size { + // If the range of required blocks is larger than `max_size`, drop all + // existing blocks and download `max_size` count of blocks. + let first_block = range.end() - max_size; + (*service.inner.block_cache.write()) = BlockCache::default(); + (first_block..=*range.end()).collect::>() + } else { + range.collect::>() + } + } + } else { + Vec::new() + }; + // Download the range of blocks and sequentially import them into the cache. + // Last processed block in deposit cache + let latest_in_cache = service + .inner + .deposit_cache + .read() + .last_processed_block + .unwrap_or(0); + + let required_block_numbers = required_block_numbers + .into_iter() + .filter(|x| *x <= latest_in_cache) + .take(max_blocks_per_update) + .collect::>(); + // Produce a stream from the list of required block numbers and return a future that + // consumes the it. 
+ + let eth1_blocks: Vec = stream::try_unfold( + required_block_numbers.into_iter(), + |mut block_numbers| async { + match block_numbers.next() { + Some(block_number) => { + match download_eth1_block(service.inner.clone(), block_number).await { + Ok(eth1_block) => Ok(Some((eth1_block, block_numbers))), + Err(e) => Err(e), } } - }) - .unwrap_or_else(|| Ok(vec![])) - }) - // Download the range of blocks and sequentially import them into the cache. - .and_then(move |required_block_numbers| { - // Last processed block in deposit cache - let latest_in_cache = cache_6 - .deposit_cache - .read() - .last_processed_block - .unwrap_or(0); + None => Ok(None), + } + }, + ) + .try_collect() + .await?; - let required_block_numbers = required_block_numbers - .into_iter() - .filter(|x| *x <= latest_in_cache) - .take(max_blocks_per_update) - .collect::>(); - // Produce a stream from the list of required block numbers and return a future that - // consumes the it. - stream::unfold( - required_block_numbers.into_iter(), - move |mut block_numbers| match block_numbers.next() { - Some(block_number) => Some( - download_eth1_block(cache_2.clone(), block_number) - .map(|v| (v, block_numbers)), - ), - None => None, - }, - ) - .fold(0, move |sum, eth1_block| { - cache_3 - .block_cache - .write() - .insert_root_or_child(eth1_block) - .map_err(Error::FailedToInsertEth1Block)?; - - metrics::set_gauge( - &metrics::BLOCK_CACHE_LEN, - cache_3.block_cache.read().len() as i64, - ); - metrics::set_gauge( - &metrics::LATEST_CACHED_BLOCK_TIMESTAMP, - cache_3 - .block_cache - .read() - .latest_block_timestamp() - .unwrap_or_else(|| 0) as i64, - ); - - Ok(sum + 1) - }) - }) - .and_then(move |blocks_imported| { - // Prune the block cache, preventing it from growing too large. 
- cache_4.prune_blocks(); + let mut blocks_imported = 0; + for eth1_block in eth1_blocks { + service + .inner + .block_cache + .write() + .insert_root_or_child(eth1_block) + .map_err(Error::FailedToInsertEth1Block)?; metrics::set_gauge( &metrics::BLOCK_CACHE_LEN, - cache_4.block_cache.read().len() as i64, + service.inner.block_cache.read().len() as i64, + ); + metrics::set_gauge( + &metrics::LATEST_CACHED_BLOCK_TIMESTAMP, + service + .inner + .block_cache + .read() + .latest_block_timestamp() + .unwrap_or_else(|| 0) as i64, ); - let block_cache = service_1.inner.block_cache.read(); - let latest_block_mins = block_cache - .latest_block_timestamp() - .and_then(|timestamp| { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .and_then(|now| now.checked_sub(Duration::from_secs(timestamp))) - }) - .map(|duration| format!("{} mins", duration.as_secs() / 60)) - .unwrap_or_else(|| "n/a".into()); + blocks_imported += 1; + } - if blocks_imported > 0 { - debug!( - service_1.log, - "Imported eth1 block(s)"; - "latest_block_age" => latest_block_mins, - "latest_block" => block_cache.highest_block_number(), - "total_cached_blocks" => block_cache.len(), - "new" => blocks_imported - ); - } else { - debug!( - service_1.log, - "No new eth1 blocks imported"; - "latest_block" => block_cache.highest_block_number(), - "cached_blocks" => block_cache.len(), - ); - } + // Prune the block cache, preventing it from growing too large. 
+ service.inner.prune_blocks(); - Ok(BlockCacheUpdateOutcome { - blocks_imported, - head_block_number: cache_4.block_cache.read().highest_block_number(), + metrics::set_gauge( + &metrics::BLOCK_CACHE_LEN, + service.inner.block_cache.read().len() as i64, + ); + + let block_cache = service.inner.block_cache.read(); + let latest_block_mins = block_cache + .latest_block_timestamp() + .and_then(|timestamp| { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .and_then(|now| now.checked_sub(Duration::from_secs(timestamp))) }) + .map(|duration| format!("{} mins", duration.as_secs() / 60)) + .unwrap_or_else(|| "n/a".into()); + + if blocks_imported > 0 { + info!( + service.log, + "Imported eth1 block(s)"; + "latest_block_age" => latest_block_mins, + "latest_block" => block_cache.highest_block_number(), + "total_cached_blocks" => block_cache.len(), + "new" => blocks_imported + ); + } else { + debug!( + service.log, + "No new eth1 blocks imported"; + "latest_block" => block_cache.highest_block_number(), + "cached_blocks" => block_cache.len(), + ); + } + + let block_cache = service.inner.block_cache.read(); + let latest_block_mins = block_cache + .latest_block_timestamp() + .and_then(|timestamp| { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .and_then(|now| now.checked_sub(Duration::from_secs(timestamp))) + }) + .map(|duration| format!("{} mins", duration.as_secs() / 60)) + .unwrap_or_else(|| "n/a".into()); + + if blocks_imported > 0 { + debug!( + service.log, + "Imported eth1 block(s)"; + "latest_block_age" => latest_block_mins, + "latest_block" => block_cache.highest_block_number(), + "total_cached_blocks" => block_cache.len(), + "new" => blocks_imported + ); + } else { + debug!( + service.log, + "No new eth1 blocks imported"; + "latest_block" => block_cache.highest_block_number(), + "cached_blocks" => block_cache.len(), + ); + } + + Ok(BlockCacheUpdateOutcome { + blocks_imported, + head_block_number: 
service.inner.block_cache.read().highest_block_number(), }) } } /// Determine the range of blocks that need to be downloaded, given the remotes best block and /// the locally stored best block. -fn get_new_block_numbers<'a>( +async fn get_new_block_numbers<'a>( endpoint: &str, next_required_block: u64, follow_distance: u64, -) -> impl Future>, Error = Error> + 'a { - get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) - .map_err(Error::GetBlockNumberFailed) - .and_then(move |remote_highest_block| { - let remote_follow_block = remote_highest_block.saturating_sub(follow_distance); +) -> Result>, Error> { + let remote_highest_block = + get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) + .map_err(Error::GetBlockNumberFailed) + .await?; + let remote_follow_block = remote_highest_block.saturating_sub(follow_distance); - if next_required_block <= remote_follow_block { - Ok(Some(next_required_block..=remote_follow_block)) - } else if next_required_block > remote_highest_block + 1 { - // If this is the case, the node must have gone "backwards" in terms of it's sync - // (i.e., it's head block is lower than it was before). - // - // We assume that the `follow_distance` should be sufficient to ensure this never - // happens, otherwise it is an error. - Err(Error::RemoteNotSynced { - next_required_block, - remote_highest_block, - follow_distance, - }) - } else { - // Return an empty range. - Ok(None) - } + if next_required_block <= remote_follow_block { + Ok(Some(next_required_block..=remote_follow_block)) + } else if next_required_block > remote_highest_block + 1 { + // If this is the case, the node must have gone "backwards" in terms of it's sync + // (i.e., it's head block is lower than it was before). + // + // We assume that the `follow_distance` should be sufficient to ensure this never + // happens, otherwise it is an error. 
+ Err(Error::RemoteNotSynced { + next_required_block, + remote_highest_block, + follow_distance, }) + } else { + // Return an empty range. + Ok(None) + } } /// Downloads the `(block, deposit_root, deposit_count)` tuple from an eth1 node for the given /// `block_number`. /// /// Performs three async calls to an Eth1 HTTP JSON RPC endpoint. -fn download_eth1_block<'a>( - cache: Arc, - block_number: u64, -) -> impl Future + 'a { +async fn download_eth1_block(cache: Arc, block_number: u64) -> Result { + let endpoint = cache.config.read().endpoint.clone(); + let deposit_root = cache .deposit_cache .read() .cache .get_deposit_root_from_cache(block_number); + let deposit_count = cache .deposit_cache .read() .cache .get_deposit_count_from_cache(block_number); + // Performs a `get_blockByNumber` call to an eth1 node. - get_block( - &cache.config.read().endpoint, + let http_block = get_block( + &endpoint, block_number, Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), ) .map_err(Error::BlockDownloadFailed) - .map(move |http_block| Eth1Block { + .await?; + + Ok(Eth1Block { hash: http_block.hash, number: http_block.number, timestamp: http_block.timestamp, diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 7945766680..76da63aa4a 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -4,17 +4,23 @@ use eth1::http::{get_deposit_count, get_deposit_logs_in_range, get_deposit_root, use eth1::{Config, Service}; use eth1::{DepositCache, DepositLog}; use eth1_test_rig::GanacheEth1Instance; -use futures::Future; +use futures::compat::Future01CompatExt; use merkle_proof::verify_merkle_proof; +use slog::Logger; +use sloggers::{null::NullLoggerBuilder, Build}; use std::ops::Range; use std::time::Duration; -use tokio::runtime::Runtime; use tree_hash::TreeHash; use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature}; use web3::{transports::Http, Web3}; const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 
32; +pub fn null_logger() -> Logger { + let log_builder = NullLoggerBuilder; + log_builder.build().expect("should build logger") +} + pub fn new_env() -> Environment { EnvironmentBuilder::minimal() // Use a single thread, so that when all tests are run in parallel they don't have so many @@ -47,76 +53,65 @@ fn random_deposit_data() -> DepositData { } /// Blocking operation to get the deposit logs from the `deposit_contract`. -fn blocking_deposit_logs( - runtime: &mut Runtime, - eth1: &GanacheEth1Instance, - range: Range, -) -> Vec { - runtime - .block_on(get_deposit_logs_in_range( - ð1.endpoint(), - ð1.deposit_contract.address(), - range, - timeout(), - )) - .expect("should get logs") +async fn blocking_deposit_logs(eth1: &GanacheEth1Instance, range: Range) -> Vec { + get_deposit_logs_in_range( + ð1.endpoint(), + ð1.deposit_contract.address(), + range, + timeout(), + ) + .await + .expect("should get logs") } /// Blocking operation to get the deposit root from the `deposit_contract`. -fn blocking_deposit_root( - runtime: &mut Runtime, - eth1: &GanacheEth1Instance, - block_number: u64, -) -> Option { - runtime - .block_on(get_deposit_root( - ð1.endpoint(), - ð1.deposit_contract.address(), - block_number, - timeout(), - )) - .expect("should get deposit root") +async fn blocking_deposit_root(eth1: &GanacheEth1Instance, block_number: u64) -> Option { + get_deposit_root( + ð1.endpoint(), + ð1.deposit_contract.address(), + block_number, + timeout(), + ) + .await + .expect("should get deposit root") } /// Blocking operation to get the deposit count from the `deposit_contract`. 
-fn blocking_deposit_count( - runtime: &mut Runtime, - eth1: &GanacheEth1Instance, - block_number: u64, -) -> Option { - runtime - .block_on(get_deposit_count( - ð1.endpoint(), - ð1.deposit_contract.address(), - block_number, - timeout(), - )) - .expect("should get deposit count") +async fn blocking_deposit_count(eth1: &GanacheEth1Instance, block_number: u64) -> Option { + get_deposit_count( + ð1.endpoint(), + ð1.deposit_contract.address(), + block_number, + timeout(), + ) + .await + .expect("should get deposit count") } -fn get_block_number(runtime: &mut Runtime, web3: &Web3) -> u64 { - runtime - .block_on(web3.eth().block_number().map(|v| v.as_u64())) +async fn get_block_number(web3: &Web3) -> u64 { + web3.eth() + .block_number() + .compat() + .await + .map(|v| v.as_u64()) .expect("should get block number") } mod eth1_cache { use super::*; - #[test] - fn simple_scenario() { - let mut env = new_env(); - let log = env.core_context().log; - let runtime = env.runtime(); + #[tokio::test] + async fn simple_scenario() { + let log = null_logger(); for follow_distance in 0..2 { - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let initial_block_number = get_block_number(runtime, &web3); + let initial_block_number = get_block_number(&web3).await; let service = Service::new( Config { @@ -145,20 +140,18 @@ mod eth1_cache { }; for _ in 0..blocks { - runtime - .block_on(eth1.ganache.evm_mine()) - .expect("should mine block"); + eth1.ganache.evm_mine().await.expect("should mine block"); } - runtime - .block_on(service.update_deposit_cache()) + Service::update_deposit_cache(service.clone()) + .await .expect("should update deposit cache"); - runtime - .block_on(service.update_block_cache()) + Service::update_block_cache(service.clone()) + .await .expect("should update block cache"); - runtime - 
.block_on(service.update_block_cache()) + Service::update_block_cache(service.clone()) + .await .expect("should update cache when nothing has changed"); assert_eq!( @@ -178,14 +171,13 @@ mod eth1_cache { } /// Tests the case where we attempt to download more blocks than will fit in the cache. - #[test] - fn big_skip() { - let mut env = new_env(); - let log = env.core_context().log; - let runtime = env.runtime(); - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + #[tokio::test] + async fn big_skip() { + let log = null_logger(); + + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); @@ -196,7 +188,7 @@ mod eth1_cache { Config { endpoint: eth1.endpoint(), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(runtime, &web3), + lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, block_cache_truncation: Some(cache_len), ..Config::default() @@ -207,16 +199,14 @@ mod eth1_cache { let blocks = cache_len * 2; for _ in 0..blocks { - runtime - .block_on(eth1.ganache.evm_mine()) - .expect("should mine block") + eth1.ganache.evm_mine().await.expect("should mine block") } - runtime - .block_on(service.update_deposit_cache()) + Service::update_deposit_cache(service.clone()) + .await .expect("should update deposit cache"); - runtime - .block_on(service.update_block_cache()) + Service::update_block_cache(service.clone()) + .await .expect("should update block cache"); assert_eq!( @@ -228,14 +218,12 @@ mod eth1_cache { /// Tests to ensure that the cache gets pruned when doing multiple downloads smaller than the /// cache size. 
- #[test] - fn pruning() { - let mut env = new_env(); - let log = env.core_context().log; - let runtime = env.runtime(); + #[tokio::test] + async fn pruning() { + let log = null_logger(); - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); @@ -246,7 +234,7 @@ mod eth1_cache { Config { endpoint: eth1.endpoint(), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(runtime, &web3), + lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, block_cache_truncation: Some(cache_len), ..Config::default() @@ -254,17 +242,15 @@ mod eth1_cache { log, ); - for _ in 0..4 { + for _ in 0..4u8 { for _ in 0..cache_len / 2 { - runtime - .block_on(eth1.ganache.evm_mine()) - .expect("should mine block") + eth1.ganache.evm_mine().await.expect("should mine block") } - runtime - .block_on(service.update_deposit_cache()) + Service::update_deposit_cache(service.clone()) + .await .expect("should update deposit cache"); - runtime - .block_on(service.update_block_cache()) + Service::update_block_cache(service.clone()) + .await .expect("should update block cache"); } @@ -275,16 +261,14 @@ mod eth1_cache { ); } - #[test] - fn double_update() { - let mut env = new_env(); - let log = env.core_context().log; - let runtime = env.runtime(); + #[tokio::test] + async fn double_update() { + let log = null_logger(); let n = 16; - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); @@ -293,7 +277,7 @@ mod eth1_cache { Config { endpoint: eth1.endpoint(), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(runtime, &web3), + lowest_cached_block_number: 
get_block_number(&web3).await, follow_distance: 0, ..Config::default() }, @@ -301,24 +285,18 @@ mod eth1_cache { ); for _ in 0..n { - runtime - .block_on(eth1.ganache.evm_mine()) - .expect("should mine block") + eth1.ganache.evm_mine().await.expect("should mine block") } - runtime - .block_on( - service - .update_deposit_cache() - .join(service.update_deposit_cache()), - ) - .expect("should perform two simultaneous updates of deposit cache"); - runtime - .block_on( - service - .update_block_cache() - .join(service.update_block_cache()), - ) - .expect("should perform two simultaneous updates of block cache"); + futures::try_join!( + Service::update_deposit_cache(service.clone()), + Service::update_deposit_cache(service.clone()) + ) + .expect("should perform two simultaneous updates of deposit cache"); + futures::try_join!( + Service::update_block_cache(service.clone()), + Service::update_block_cache(service.clone()) + ) + .expect("should perform two simultaneous updates of block cache"); assert!(service.block_cache_len() >= n, "should grow the cache"); } @@ -327,21 +305,19 @@ mod eth1_cache { mod deposit_tree { use super::*; - #[test] - fn updating() { - let mut env = new_env(); - let log = env.core_context().log; - let runtime = env.runtime(); + #[tokio::test] + async fn updating() { + let log = null_logger(); let n = 4; - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let start_block = get_block_number(runtime, &web3); + let start_block = get_block_number(&web3).await; let service = Service::new( Config { @@ -359,16 +335,17 @@ mod deposit_tree { for deposit in &deposits { deposit_contract - .deposit(runtime, deposit.clone()) + .deposit(deposit.clone()) + .await .expect("should perform a deposit"); } - runtime - .block_on(service.update_deposit_cache()) + 
Service::update_deposit_cache(service.clone()) + .await .expect("should perform update"); - runtime - .block_on(service.update_deposit_cache()) + Service::update_deposit_cache(service.clone()) + .await .expect("should perform update when nothing has changed"); let first = n * round; @@ -400,21 +377,19 @@ mod deposit_tree { } } - #[test] - fn double_update() { - let mut env = new_env(); - let log = env.core_context().log; - let runtime = env.runtime(); + #[tokio::test] + async fn double_update() { + let log = null_logger(); let n = 8; - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let start_block = get_block_number(runtime, &web3); + let start_block = get_block_number(&web3).await; let service = Service::new( Config { @@ -432,32 +407,28 @@ mod deposit_tree { for deposit in &deposits { deposit_contract - .deposit(runtime, deposit.clone()) + .deposit(deposit.clone()) + .await .expect("should perform a deposit"); } - runtime - .block_on( - service - .update_deposit_cache() - .join(service.update_deposit_cache()), - ) - .expect("should perform two updates concurrently"); + futures::try_join!( + Service::update_deposit_cache(service.clone()), + Service::update_deposit_cache(service.clone()) + ) + .expect("should perform two updates concurrently"); assert_eq!(service.deposit_cache_len(), n); } - #[test] - fn cache_consistency() { - let mut env = new_env(); - let runtime = env.runtime(); - + #[tokio::test] + async fn cache_consistency() { let n = 8; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); @@ -468,15 +439,18 @@ mod deposit_tree { // Perform deposits to 
the smart contract, recording it's state along the way. for deposit in &deposits { deposit_contract - .deposit(runtime, deposit.clone()) + .deposit(deposit.clone()) + .await .expect("should perform a deposit"); - let block_number = get_block_number(runtime, &web3); + let block_number = get_block_number(&web3).await; deposit_roots.push( - blocking_deposit_root(runtime, ð1, block_number) + blocking_deposit_root(ð1, block_number) + .await .expect("should get root if contract exists"), ); deposit_counts.push( - blocking_deposit_count(runtime, ð1, block_number) + blocking_deposit_count(ð1, block_number) + .await .expect("should get count if contract exists"), ); } @@ -484,8 +458,9 @@ mod deposit_tree { let mut tree = DepositCache::default(); // Pull all the deposit logs from the contract. - let block_number = get_block_number(runtime, &web3); - let logs: Vec<_> = blocking_deposit_logs(runtime, ð1, 0..block_number) + let block_number = get_block_number(&web3).await; + let logs: Vec<_> = blocking_deposit_logs(ð1, 0..block_number) + .await .iter() .map(|raw| DepositLog::from_log(raw).expect("should parse deposit log")) .inspect(|log| { @@ -546,64 +521,59 @@ mod deposit_tree { mod http { use super::*; - fn get_block(runtime: &mut Runtime, eth1: &GanacheEth1Instance, block_number: u64) -> Block { - runtime - .block_on(eth1::http::get_block( - ð1.endpoint(), - block_number, - timeout(), - )) + async fn get_block(eth1: &GanacheEth1Instance, block_number: u64) -> Block { + eth1::http::get_block(ð1.endpoint(), block_number, timeout()) + .await .expect("should get block number") } - #[test] - fn incrementing_deposits() { - let mut env = new_env(); - let runtime = env.runtime(); - - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + #[tokio::test] + async fn incrementing_deposits() { + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let block_number = 
get_block_number(runtime, &web3); - let logs = blocking_deposit_logs(runtime, ð1, 0..block_number); + let block_number = get_block_number(&web3).await; + let logs = blocking_deposit_logs(ð1, 0..block_number).await; assert_eq!(logs.len(), 0); - let mut old_root = blocking_deposit_root(runtime, ð1, block_number); - let mut old_block = get_block(runtime, ð1, block_number); + let mut old_root = blocking_deposit_root(ð1, block_number).await; + let mut old_block = get_block(ð1, block_number).await; let mut old_block_number = block_number; assert_eq!( - blocking_deposit_count(runtime, ð1, block_number), + blocking_deposit_count(ð1, block_number).await, Some(0), "should have deposit count zero" ); for i in 1..=8 { - runtime - .block_on(eth1.ganache.increase_time(1)) + eth1.ganache + .increase_time(1) + .await .expect("should be able to increase time on ganache"); deposit_contract - .deposit(runtime, random_deposit_data()) + .deposit(random_deposit_data()) + .await .expect("should perform a deposit"); // Check the logs. - let block_number = get_block_number(runtime, &web3); - let logs = blocking_deposit_logs(runtime, ð1, 0..block_number); + let block_number = get_block_number(&web3).await; + let logs = blocking_deposit_logs(ð1, 0..block_number).await; assert_eq!(logs.len(), i, "the number of logs should be as expected"); // Check the deposit count. assert_eq!( - blocking_deposit_count(runtime, ð1, block_number), + blocking_deposit_count(ð1, block_number).await, Some(i as u64), "should have a correct deposit count" ); // Check the deposit root. - let new_root = blocking_deposit_root(runtime, ð1, block_number); + let new_root = blocking_deposit_root(ð1, block_number).await; assert_ne!( new_root, old_root, "deposit root should change with each deposit" @@ -611,7 +581,7 @@ mod http { old_root = new_root; // Check the block hash. 
- let new_block = get_block(runtime, ð1, block_number); + let new_block = get_block(ð1, block_number).await; assert_ne!( new_block.hash, old_block.hash, "block hash should change with each deposit" @@ -647,19 +617,17 @@ mod fast { // Adds deposits into deposit cache and matches deposit_count and deposit_root // with the deposit count and root computed from the deposit cache. - #[test] - fn deposit_cache_query() { - let mut env = new_env(); - let log = env.core_context().log; - let runtime = env.runtime(); + #[tokio::test] + async fn deposit_cache_query() { + let log = null_logger(); - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let now = get_block_number(runtime, &web3); + let now = get_block_number(&web3).await; let service = Service::new( Config { endpoint: eth1.endpoint(), @@ -676,16 +644,15 @@ mod fast { let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { deposit_contract - .deposit(runtime, deposit.clone()) + .deposit(deposit.clone()) + .await .expect("should perform a deposit"); // Mine an extra block between deposits to test for corner cases - runtime - .block_on(eth1.ganache.evm_mine()) - .expect("should mine block"); + eth1.ganache.evm_mine().await.expect("should mine block"); } - runtime - .block_on(service.update_deposit_cache()) + Service::update_deposit_cache(service.clone()) + .await .expect("should perform update"); assert!( @@ -693,9 +660,9 @@ mod fast { "should have imported n deposits" ); - for block_num in 0..=get_block_number(runtime, &web3) { - let expected_deposit_count = blocking_deposit_count(runtime, ð1, block_num); - let expected_deposit_root = blocking_deposit_root(runtime, ð1, block_num); + for block_num in 0..=get_block_number(&web3).await { + let expected_deposit_count = blocking_deposit_count(ð1, block_num).await; + 
let expected_deposit_root = blocking_deposit_root(ð1, block_num).await; let deposit_count = service .deposits() @@ -721,19 +688,17 @@ mod fast { mod persist { use super::*; - #[test] - fn test_persist_caches() { - let mut env = new_env(); - let log = env.core_context().log; - let runtime = env.runtime(); + #[tokio::test] + async fn test_persist_caches() { + let log = null_logger(); - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) + let eth1 = GanacheEth1Instance::new() + .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let now = get_block_number(runtime, &web3); + let now = get_block_number(&web3).await; let config = Config { endpoint: eth1.endpoint(), deposit_contract_address: deposit_contract.address(), @@ -748,12 +713,13 @@ mod persist { let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { deposit_contract - .deposit(runtime, deposit.clone()) + .deposit(deposit.clone()) + .await .expect("should perform a deposit"); } - runtime - .block_on(service.update_deposit_cache()) + Service::update_deposit_cache(service.clone()) + .await .expect("should perform update"); assert!( @@ -763,8 +729,8 @@ mod persist { let deposit_count = service.deposit_cache_len(); - runtime - .block_on(service.update_block_cache()) + Service::update_block_cache(service.clone()) + .await .expect("should perform update"); assert!( diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index f0171a39a2..2eccb97aa1 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -5,38 +5,47 @@ authors = ["Age Manning "] edition = "2018" [dependencies] -hex = "0.3" -# rust-libp2p is presently being sourced from a Sigma Prime fork of the -# `libp2p/rust-libp2p` repository. 
-libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "71cf486b4d992862f5a05f9f4ef5e5c1631f4add" } +hex = "0.4.2" types = { path = "../../eth2/types" } -hashmap_delay = { path = "../../eth2/utils/hashmap_delay" } +hashset_delay = { path = "../../eth2/utils/hashset_delay" } eth2_ssz_types = { path = "../../eth2/utils/ssz_types" } -serde = { version = "1.0.102", features = ["derive"] } -serde_derive = "1.0.102" +serde = { version = "1.0.110", features = ["derive"] } +serde_derive = "1.0.110" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" slog = { version = "2.5.2", features = ["max_level_trace"] } version = { path = "../version" } -tokio = "0.1.22" -futures = "0.1.29" -error-chain = "0.12.1" +tokio = { version = "0.2.20", features = ["time"] } +futures = "0.3.5" +error-chain = "0.12.2" dirs = "2.0.2" fnv = "1.0.6" -unsigned-varint = "0.2.3" +unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "latest-codecs", features = ["codec"] } lazy_static = "1.4.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } -tokio-io-timeout = "0.3.1" -smallvec = "1.0.0" +smallvec = "1.4.0" lru = "0.4.3" -parking_lot = "0.9.0" -sha2 = "0.8.0" -base64 = "0.11.0" -snap = "1" +parking_lot = "0.10.2" +sha2 = "0.8.1" +base64 = "0.12.1" +snap = "1.0.0" void = "1.0.2" +tokio-io-timeout = "0.4.0" +tokio-util = { version = "0.3.1", features = ["codec", "compat"] } +# Patched for quick updates +discv5 = { git = "https://github.com/sigp/discv5", rev = "7b3bd40591b62b8c002ffdb85de008aa9f82e2e5" } +tiny-keccak = "2.0.2" +libp2p-tcp = { version = "0.18.0", default-features = false, features = ["tokio"] } + +[dependencies.libp2p] +version = "0.18.1" +default-features = false +features = ["websocket", "identify", "mplex", "yamux", "noise", "secio", "gossipsub", "dns"] + [dev-dependencies] +tokio = { version = "0.2.20", features = ["full"] } slog-stdlog = "4.0.0" -slog-term = "2.4.2" -slog-async = "2.3.0" -tempdir = "0.3" +slog-term = "2.5.0" 
+slog-async = "2.5.0" +tempdir = "0.3.7" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 7691669b23..03990a2ee9 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -3,20 +3,22 @@ use crate::peer_manager::{PeerManager, PeerManagerEvent}; use crate::rpc::*; use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::{error, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash}; +use discv5::Discv5Event; use futures::prelude::*; use libp2p::{ - core::{identity::Keypair, ConnectedPoint}, - discv5::Discv5Event, + core::identity::Keypair, gossipsub::{Gossipsub, GossipsubEvent, MessageId}, identify::{Identify, IdentifyEvent}, - swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, - tokio_io::{AsyncRead, AsyncWrite}, + swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}, NetworkBehaviour, PeerId, }; use lru::LruCache; -use slog::{crit, debug, o, warn}; -use std::marker::PhantomData; -use std::sync::Arc; +use slog::{crit, debug, o}; +use std::{ + marker::PhantomData, + sync::Arc, + task::{Context, Poll}, +}; use types::{EnrForkId, EthSpec, SubnetId}; const MAX_IDENTIFY_ADDRESSES: usize = 10; @@ -26,17 +28,17 @@ const MAX_IDENTIFY_ADDRESSES: usize = 10; /// behaviours. #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] -pub struct Behaviour { +pub struct Behaviour { /// The routing pub-sub mechanism for eth2. - gossipsub: Gossipsub, + gossipsub: Gossipsub, /// The Eth2 RPC specified in the wire-0 protocol. - eth2_rpc: RPC, + eth2_rpc: RPC, /// Keep regular connection to peers and disconnect if absent. // TODO: Using id for initial interop. This will be removed by mainnet. /// Provides IP addresses and peer information. - identify: Identify, + identify: Identify, /// Discovery behaviour. 
- discovery: Discovery, + discovery: Discovery, /// The peer manager that keeps track of peer's reputation and status. #[behaviour(ignore)] peer_manager: PeerManager, @@ -65,7 +67,7 @@ pub struct Behaviour { } /// Implements the combined behaviour for the libp2p service. -impl Behaviour { +impl Behaviour { pub fn new( local_key: &Keypair, net_conf: &NetworkConfig, @@ -114,12 +116,12 @@ impl Behaviour &Discovery { + pub fn discovery(&self) -> &Discovery { &self.discovery } /// Obtain a reference to the gossipsub protocol. - pub fn gs(&self) -> &Gossipsub { + pub fn gs(&self) -> &Gossipsub { &self.gossipsub } @@ -304,8 +306,10 @@ impl Behaviour id, "peer_id" => peer_id.to_string()); RPCEvent::Request(id, RPCRequest::Ping(ping)) } else { + debug!(self.log, "Sending Pong"; "request_id" => id, "peer_id" => peer_id.to_string()); RPCEvent::Response(id, RPCCodedResponse::Success(RPCResponse::Pong(ping))) }; self.send_rpc(peer_id, event); @@ -326,12 +330,50 @@ impl Behaviour &mut PeerManager { + &mut self.peer_manager + } + + /* Address in the new behaviour. Connections are now maintained at the swarm level. + /// Notifies the behaviour that a peer has connected. + pub fn notify_peer_connect(&mut self, peer_id: PeerId, endpoint: ConnectedPoint) { + match endpoint { + ConnectedPoint::Dialer { .. } => self.peer_manager.connect_outgoing(&peer_id), + ConnectedPoint::Listener { .. } => self.peer_manager.connect_ingoing(&peer_id), + }; + + // Find ENR info about a peer if possible. 
+ if let Some(enr) = self.discovery.enr_of_peer(&peer_id) { + let bitfield = match enr.bitfield::() { + Ok(v) => v, + Err(e) => { + warn!(self.log, "Peer has invalid ENR bitfield"; + "peer_id" => format!("{}", peer_id), + "error" => format!("{:?}", e)); + return; + } + }; + + // use this as a baseline, until we get the actual meta-data + let meta_data = MetaData { + seq_number: 0, + attnets: bitfield, + }; + // TODO: Shift to the peer manager + self.network_globals + .peers + .write() + .add_metadata(&peer_id, meta_data); + } + } + */ } // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour -impl - NetworkBehaviourEventProcess for Behaviour -{ +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: GossipsubEvent) { match event { GossipsubEvent::Message(propagation_source, id, gs_msg) => { @@ -358,7 +400,7 @@ impl debug!(self.log, "Could not decode gossipsub message"; "error" => format!("{}", e)) } Ok(msg) => { - crit!(self.log, "A duplicate gossipsub message was received"; "message_source" => format!("{}", gs_msg.source), "propagated_peer" => format!("{}",propagation_source), "message" => format!("{}", msg)); + debug!(self.log, "A duplicate gossipsub message was received"; "message_source" => format!("{}", gs_msg.source), "propagated_peer" => format!("{}",propagation_source), "message" => format!("{}", msg)); } } } @@ -372,112 +414,66 @@ impl } } -impl - NetworkBehaviourEventProcess> for Behaviour -{ - fn inject_event(&mut self, event: RPCMessage) { - match event { - // TODO: These are temporary methods to give access to injected behaviour - // events to the - // peer manager. After a behaviour re-write remove these: - RPCMessage::PeerConnectedHack(peer_id, connected_point) => { - match connected_point { - ConnectedPoint::Dialer { .. } => self.peer_manager.connect_outgoing(&peer_id), - ConnectedPoint::Listener { .. 
} => self.peer_manager.connect_ingoing(&peer_id), - }; - - // Find ENR info about a peer if possible. - if let Some(enr) = self.discovery.enr_of_peer(&peer_id) { - let bitfield = match enr.bitfield::() { - Ok(v) => v, - Err(e) => { - warn!(self.log, "Peer has invalid ENR bitfield"; - "peer_id" => format!("{}", peer_id), - "error" => format!("{:?}", e)); - return; - } - }; - - // use this as a baseline, until we get the actual meta-data - let meta_data = MetaData { - seq_number: 0, - attnets: bitfield, - }; - // TODO: Shift to the peer manager - self.network_globals - .peers - .write() - .add_metadata(&peer_id, meta_data); - } +impl NetworkBehaviourEventProcess> for Behaviour { + fn inject_event(&mut self, message: RPCMessage) { + let peer_id = message.peer_id; + // The METADATA and PING RPC responses are handled within the behaviour and not + // propagated + // TODO: Improve the RPC types to better handle this logic discrepancy + match message.event { + RPCEvent::Request(id, RPCRequest::Ping(ping)) => { + // inform the peer manager and send the response + self.peer_manager.ping_request(&peer_id, ping.data); + // send a ping response + self.send_ping(id, peer_id, false); } - RPCMessage::PeerDisconnectedHack(peer_id, _connected_point) => { - self.peer_manager.notify_disconnect(&peer_id) + RPCEvent::Request(id, RPCRequest::MetaData(_)) => { + // send the requested meta-data + self.send_meta_data_response(id, peer_id); } - - RPCMessage::PeerDialed(peer_id) => { - self.events.push(BehaviourEvent::PeerDialed(peer_id)) + RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::Pong(ping))) => { + self.peer_manager.pong_response(&peer_id, ping.data); } - RPCMessage::PeerDisconnected(peer_id) => { - self.events.push(BehaviourEvent::PeerDisconnected(peer_id)) + RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::MetaData(meta_data))) => { + self.peer_manager.meta_data_response(&peer_id, meta_data); } - RPCMessage::RPC(peer_id, rpc_event) => { - // The METADATA 
and PING RPC responses are handled within the behaviour and not - // propagated - // TODO: Improve the RPC types to better handle this logic discrepancy - match rpc_event { - RPCEvent::Request(id, RPCRequest::Ping(ping)) => { - // inform the peer manager and send the response - self.peer_manager.ping_request(&peer_id, ping.data); - // send a ping response - self.send_ping(id, peer_id, false); - } - RPCEvent::Request(id, RPCRequest::MetaData(_)) => { - // send the requested meta-data - self.send_meta_data_response(id, peer_id); - } - RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::Pong(ping))) => { - self.peer_manager.pong_response(&peer_id, ping.data); - } - RPCEvent::Response( - _, - RPCCodedResponse::Success(RPCResponse::MetaData(meta_data)), - ) => { - self.peer_manager.meta_data_response(&peer_id, meta_data); - } - RPCEvent::Request(_, RPCRequest::Status(_)) - | RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::Status(_))) => { - // inform the peer manager that we have received a status from a peer - self.peer_manager.peer_statusd(&peer_id); - // propagate the STATUS message upwards - self.events.push(BehaviourEvent::RPC(peer_id, rpc_event)); - } - RPCEvent::Error(_, protocol, ref err) => { - self.peer_manager.handle_rpc_error(&peer_id, protocol, err); - self.events.push(BehaviourEvent::RPC(peer_id, rpc_event)); - } - _ => { - // propagate all other RPC messages upwards - self.events.push(BehaviourEvent::RPC(peer_id, rpc_event)) - } - } + RPCEvent::Request(_, RPCRequest::Status(_)) + | RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::Status(_))) => { + // inform the peer manager that we have received a status from a peer + self.peer_manager.peer_statusd(&peer_id); + // propagate the STATUS message upwards + self.events + .push(BehaviourEvent::RPC(peer_id, message.event)); + } + RPCEvent::Error(_, protocol, ref err) => { + self.peer_manager.handle_rpc_error(&peer_id, protocol, err); + self.events + 
.push(BehaviourEvent::RPC(peer_id, message.event)); + } + _ => { + // propagate all other RPC messages upwards + self.events + .push(BehaviourEvent::RPC(peer_id, message.event)) } } } } -impl Behaviour { +impl Behaviour { /// Consumes the events list when polled. fn poll( &mut self, - ) -> Async>> { + cx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll>> { // check the peer manager for events loop { - match self.peer_manager.poll() { - Ok(Async::Ready(Some(event))) => match event { + match self.peer_manager.poll_next_unpin(cx) { + Poll::Ready(Some(event)) => match event { PeerManagerEvent::Status(peer_id) => { // it's time to status. We don't keep a beacon chain reference here, so we inform // the network to send a status to this peer - return Async::Ready(NetworkBehaviourAction::GenerateEvent( + return Poll::Ready(NetworkBehaviourAction::GenerateEvent( BehaviourEvent::StatusPeer(peer_id), )); } @@ -495,25 +491,20 @@ impl Behaviour break, - Ok(Async::Ready(None)) | Err(_) => { - crit!(self.log, "Error polling peer manager"); - break; - } + Poll::Pending => break, + Poll::Ready(None) => break, // peer manager ended } } if !self.events.is_empty() { - return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); } - Async::NotReady + Poll::Pending } } -impl NetworkBehaviourEventProcess - for Behaviour -{ +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: IdentifyEvent) { match event { IdentifyEvent::Received { @@ -545,9 +536,7 @@ impl NetworkBehaviourEventPr } } -impl NetworkBehaviourEventProcess - for Behaviour -{ +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, _event: Discv5Event) { // discv5 has no events to inject } @@ -558,11 +547,6 @@ impl NetworkBehaviourEventPr pub enum BehaviourEvent { /// A received RPC event and the peer that it was received from. 
RPC(PeerId, RPCEvent), - /// We have completed an initial connection to a new peer. - PeerDialed(PeerId), - /// A peer has disconnected. - PeerDisconnected(PeerId), - /// A gossipsub message has been received. PubsubMessage { /// The gossipsub message id. Used when propagating blocks after validation. id: MessageId, diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 7a5182baf1..369d0477e9 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -1,6 +1,6 @@ use crate::types::GossipKind; use crate::Enr; -use libp2p::discv5::{Discv5Config, Discv5ConfigBuilder}; +use discv5::{Discv5Config, Discv5ConfigBuilder}; use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId}; use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; diff --git a/beacon_node/eth2-libp2p/src/discovery/enr.rs b/beacon_node/eth2-libp2p/src/discovery/enr.rs index edd08bc9ac..3f3cbe12f7 100644 --- a/beacon_node/eth2-libp2p/src/discovery/enr.rs +++ b/beacon_node/eth2-libp2p/src/discovery/enr.rs @@ -1,15 +1,15 @@ //! Helper functions and an extension trait for Ethereum 2 ENRs. -pub use libp2p::{core::identity::Keypair, discv5::enr::CombinedKey}; +pub use discv5::enr::{self, CombinedKey, EnrBuilder}; +pub use libp2p::core::identity::Keypair; use super::ENR_FILENAME; use crate::types::{Enr, EnrBitfield}; +use crate::CombinedKeyExt; use crate::NetworkConfig; -use libp2p::discv5::enr::EnrBuilder; use slog::{debug, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; -use std::convert::TryInto; use std::fs::File; use std::io::prelude::*; use std::path::Path; @@ -62,10 +62,7 @@ pub fn build_or_load_enr( // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. 
- let enr_key: CombinedKey = local_key - .try_into() - .map_err(|_| "Invalid key type for ENR records")?; - + let enr_key = CombinedKey::from_libp2p(&local_key)?; let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; let enr_f = config.network_dir.join(ENR_FILENAME); diff --git a/beacon_node/eth2-libp2p/src/discovery/enr_ext.rs b/beacon_node/eth2-libp2p/src/discovery/enr_ext.rs new file mode 100644 index 0000000000..ba0af4d1e3 --- /dev/null +++ b/beacon_node/eth2-libp2p/src/discovery/enr_ext.rs @@ -0,0 +1,190 @@ +//! ENR extension trait to support libp2p integration. +use crate::{Enr, Multiaddr, PeerId}; +use discv5::enr::{CombinedKey, CombinedPublicKey}; +use libp2p::core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol}; +use tiny_keccak::{Hasher, Keccak}; + +/// Extend ENR for libp2p types. +pub trait EnrExt { + /// The libp2p `PeerId` for the record. + fn peer_id(&self) -> PeerId; + + /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`. + /// The vector remains empty if these fields are not defined. + fn multiaddr(&self) -> Vec; +} + +/// Extend ENR CombinedPublicKey for libp2p types. +pub trait CombinedKeyPublicExt { + /// Converts the publickey into a peer id, without consuming the key. + fn into_peer_id(&self) -> PeerId; +} + +/// Extend ENR CombinedKey for conversion to libp2p keys. +pub trait CombinedKeyExt { + /// Converts a libp2p key into an ENR combined key. + fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result; +} + +impl EnrExt for Enr { + /// The libp2p `PeerId` for the record. + fn peer_id(&self) -> PeerId { + self.public_key().into_peer_id() + } + + /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`. + /// The vector remains empty if these fields are not defined. + /// + /// Note: Only available with the `libp2p` feature flag. 
+ fn multiaddr(&self) -> Vec { + let mut multiaddrs: Vec = Vec::new(); + if let Some(ip) = self.ip() { + if let Some(udp) = self.udp() { + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Udp(udp)); + multiaddrs.push(multiaddr); + } + + if let Some(tcp) = self.tcp() { + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Tcp(tcp)); + multiaddrs.push(multiaddr); + } + } + if let Some(ip6) = self.ip6() { + if let Some(udp6) = self.udp6() { + let mut multiaddr: Multiaddr = ip6.into(); + multiaddr.push(Protocol::Udp(udp6)); + multiaddrs.push(multiaddr); + } + + if let Some(tcp6) = self.tcp6() { + let mut multiaddr: Multiaddr = ip6.into(); + multiaddr.push(Protocol::Tcp(tcp6)); + multiaddrs.push(multiaddr); + } + } + multiaddrs + } +} + +impl CombinedKeyPublicExt for CombinedPublicKey { + /// Converts the publickey into a peer id, without consuming the key. + /// + /// This is only available with the `libp2p` feature flag. + fn into_peer_id(&self) -> PeerId { + match self { + Self::Secp256k1(pk) => { + let pk_bytes = pk.serialize_compressed(); + let libp2p_pk = libp2p::core::PublicKey::Secp256k1( + libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes) + .expect("valid public key"), + ); + PeerId::from_public_key(libp2p_pk) + } + Self::Ed25519(pk) => { + let pk_bytes = pk.to_bytes(); + let libp2p_pk = libp2p::core::PublicKey::Ed25519( + libp2p::core::identity::ed25519::PublicKey::decode(&pk_bytes) + .expect("valid public key"), + ); + PeerId::from_public_key(libp2p_pk) + } + } + } +} + +impl CombinedKeyExt for CombinedKey { + fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result { + match key { + Keypair::Secp256k1(key) => { + let secret = discv5::enr::secp256k1::SecretKey::parse(&key.secret().to_bytes()) + .expect("libp2p key must be valid"); + Ok(CombinedKey::Secp256k1(secret)) + } + Keypair::Ed25519(key) => { + let ed_keypair = + discv5::enr::ed25519_dalek::SecretKey::from_bytes(&key.encode()[..32]) + 
.expect("libp2p key must be valid"); + Ok(CombinedKey::from(ed_keypair)) + } + _ => Err("ENR: Unsupported libp2p key type"), + } + } +} + +// helper function to convert a peer_id to a node_id. This is only possible for secp256k1/ed25519 libp2p +// peer_ids +pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result { + // A libp2p peer id byte representation should be 2 length bytes + 4 protobuf bytes + compressed pk bytes + // if generated from a PublicKey with Identity multihash. + let pk_bytes = &peer_id.as_bytes()[2..]; + + match PublicKey::from_protobuf_encoding(pk_bytes).map_err(|e| { + format!( + " Cannot parse libp2p public key public key from peer id: {}", + e + ) + })? { + PublicKey::Secp256k1(pk) => { + let uncompressed_key_bytes = &pk.encode_uncompressed()[1..]; + let mut output = [0_u8; 32]; + let mut hasher = Keccak::v256(); + hasher.update(&uncompressed_key_bytes); + hasher.finalize(&mut output); + return Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")); + } + PublicKey::Ed25519(pk) => { + let uncompressed_key_bytes = pk.encode(); + let mut output = [0_u8; 32]; + let mut hasher = Keccak::v256(); + hasher.update(&uncompressed_key_bytes); + hasher.finalize(&mut output); + return Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")); + } + _ => return Err("Unsupported public key".into()), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_secp256k1_peer_id_conversion() { + let sk_hex = "df94a73d528434ce2309abb19c16aedb535322797dbd59c157b1e04095900f48"; + let sk_bytes = hex::decode(sk_hex).unwrap(); + let secret_key = discv5::enr::secp256k1::SecretKey::parse_slice(&sk_bytes).unwrap(); + + let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap(); + let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into(); + let libp2p_kp = Keypair::Secp256k1(secp256k1_kp); + let peer_id = libp2p_kp.public().into_peer_id(); + + let enr = 
discv5::enr::EnrBuilder::new("v4") + .build(&secret_key) + .unwrap(); + let node_id = peer_id_to_node_id(&peer_id).unwrap(); + + assert_eq!(enr.node_id(), node_id); + } + + #[test] + fn test_ed25519_peer_conversion() { + let sk_hex = "4dea8a5072119927e9d243a7d953f2f4bc95b70f110978e2f9bc7a9000e4b261"; + let sk_bytes = hex::decode(sk_hex).unwrap(); + let secret = discv5::enr::ed25519_dalek::SecretKey::from_bytes(&sk_bytes).unwrap(); + let public = discv5::enr::ed25519_dalek::PublicKey::from(&secret); + let keypair = discv5::enr::ed25519_dalek::Keypair { public, secret }; + + let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap(); + let ed25519_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into(); + let libp2p_kp = Keypair::Ed25519(ed25519_kp); + let peer_id = libp2p_kp.public().into_peer_id(); + + let enr = discv5::enr::EnrBuilder::new("v4").build(&keypair).unwrap(); + let node_id = peer_id_to_node_id(&peer_id).unwrap(); + + assert_eq!(enr.node_id(), node_id); + } +} diff --git a/beacon_node/eth2-libp2p/src/discovery/mod.rs b/beacon_node/eth2-libp2p/src/discovery/mod.rs index 4544656471..7800848b90 100644 --- a/beacon_node/eth2-libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2-libp2p/src/discovery/mod.rs @@ -1,28 +1,35 @@ ///! This manages the discovery and management of peers. 
pub(crate) mod enr; +pub mod enr_ext; // Allow external use of the lighthouse ENR builder pub use enr::{build_enr, CombinedKey, Keypair}; +pub use enr_ext::{CombinedKeyExt, EnrExt}; use crate::metrics; use crate::{error, Enr, NetworkConfig, NetworkGlobals}; +use discv5::{enr::NodeId, Discv5, Discv5Event}; use enr::{Eth2Enr, BITFIELD_ENR_KEY, ETH2_ENR_KEY}; use futures::prelude::*; -use libp2p::core::{ConnectedPoint, Multiaddr, PeerId}; -use libp2p::discv5::enr::NodeId; -use libp2p::discv5::{Discv5, Discv5Event}; +use libp2p::core::{connection::ConnectionId, Multiaddr, PeerId}; use libp2p::multiaddr::Protocol; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; +use libp2p::swarm::{ + protocols_handler::DummyProtocolsHandler, DialPeerCondition, NetworkBehaviour, + NetworkBehaviourAction, PollParameters, ProtocolsHandler, +}; +use lru::LruCache; use slog::{crit, debug, info, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; -use std::collections::{HashSet, VecDeque}; -use std::net::SocketAddr; -use std::path::Path; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio::timer::Delay; +use std::{ + collections::{HashSet, VecDeque}, + net::SocketAddr, + path::Path, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; +use tokio::time::{delay_until, Delay, Instant}; use types::{EnrForkId, EthSpec, SubnetId}; /// Maximum seconds before searching for extra peers. @@ -36,10 +43,13 @@ const TARGET_SUBNET_PEERS: u64 = 3; /// Lighthouse discovery behaviour. This provides peer management and discovery using the Discv5 /// libp2p protocol. -pub struct Discovery { +pub struct Discovery { /// Events to be processed by the behaviour. events: VecDeque>, + /// A collection of seen live ENRs for quick lookup and to map peer-id's to ENRs. + cached_enrs: LruCache, + /// The currently banned peers. 
banned_peers: HashSet, @@ -62,7 +72,7 @@ pub struct Discovery { tcp_port: u16, /// The discovery behaviour used to discover new peers. - discovery: Discv5, + discovery: Discv5, /// A collection of network constants that can be read from other threads. network_globals: Arc>, @@ -71,7 +81,7 @@ pub struct Discovery { log: slog::Logger, } -impl Discovery { +impl Discovery { pub fn new( local_key: &Keypair, config: &NetworkConfig, @@ -91,9 +101,12 @@ impl Discovery { let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port); + // convert the keypair into an ENR key + let enr_key: CombinedKey = CombinedKey::from_libp2p(&local_key)?; + let mut discovery = Discv5::new( local_enr, - local_key.clone(), + enr_key, config.discv5_config.clone(), listen_socket, ) @@ -121,9 +134,10 @@ impl Discovery { Ok(Self { events: VecDeque::with_capacity(16), + cached_enrs: LruCache::new(50), banned_peers: HashSet::new(), max_peers: config.max_peers, - peer_discovery_delay: Delay::new(Instant::now()), + peer_discovery_delay: delay_until(Instant::now()), past_discovery_delay: INITIAL_SEARCH_DELAY, tcp_port: config.libp2p_port, discovery, @@ -147,6 +161,9 @@ impl Discovery { /// Add an ENR to the routing table of the discovery mechanism. pub fn add_enr(&mut self, enr: Enr) { + // add the enr to seen caches + self.cached_enrs.put(enr.peer_id(), enr.clone()); + let _ = self.discovery.add_enr(enr).map_err(|e| { warn!( self.log, @@ -174,7 +191,18 @@ impl Discovery { /// Returns the ENR of a known peer if it exists. 
pub fn enr_of_peer(&mut self, peer_id: &PeerId) -> Option { - self.discovery.enr_of_peer(peer_id) + // first search the local cache + if let Some(enr) = self.cached_enrs.get(peer_id) { + return Some(enr.clone()); + } + // not in the local cache, look in the routing table + if let Ok(_node_id) = enr_ext::peer_id_to_node_id(peer_id) { + // TODO: Need to update discv5 + // self.discovery.find_enr(&node_id) + return None; + } else { + return None; + } } /// Adds/Removes a subnet from the ENR Bitfield @@ -342,48 +370,58 @@ impl Discovery { } } -// Redirect all behaviour events to underlying discovery behaviour. -impl NetworkBehaviour for Discovery -where - TSubstream: AsyncRead + AsyncWrite, -{ - type ProtocolsHandler = as NetworkBehaviour>::ProtocolsHandler; - type OutEvent = as NetworkBehaviour>::OutEvent; +// Build a dummy Network behaviour around the discv5 server +impl NetworkBehaviour for Discovery { + type ProtocolsHandler = DummyProtocolsHandler; + type OutEvent = Discv5Event; fn new_handler(&mut self) -> Self::ProtocolsHandler { - NetworkBehaviour::new_handler(&mut self.discovery) + DummyProtocolsHandler::default() } fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - // Let discovery track possible known peers. - self.discovery.addresses_of_peer(peer_id) + if let Some(enr) = self.enr_of_peer(peer_id) { + // ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP + // port is removed, which is assumed to be associated with the discv5 protocol (and + // therefore irrelevant for other libp2p components). 
+ let mut out_list = enr.multiaddr(); + out_list.retain(|addr| { + addr.iter() + .find(|v| match v { + Protocol::Udp(_) => true, + _ => false, + }) + .is_none() + }); + + out_list + } else { + // PeerId is not known + Vec::new() + } } - fn inject_connected(&mut self, _peer_id: PeerId, _endpoint: ConnectedPoint) {} + // ignore libp2p connections/streams + fn inject_connected(&mut self, _: &PeerId) {} - fn inject_disconnected(&mut self, _peer_id: &PeerId, _endpoint: ConnectedPoint) {} + // ignore libp2p connections/streams + fn inject_disconnected(&mut self, _: &PeerId) {} - fn inject_replaced( + // no libp2p discv5 events - event originate from the session_service. + fn inject_event( &mut self, - _peer_id: PeerId, - _closed: ConnectedPoint, - _opened: ConnectedPoint, - ) { - // discv5 doesn't implement - } - - fn inject_node_event( - &mut self, - _peer_id: PeerId, + _: PeerId, + _: ConnectionId, _event: ::OutEvent, ) { - // discv5 doesn't implement + void::unreachable(_event) } fn poll( &mut self, - params: &mut impl PollParameters, - ) -> Async< + cx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll< NetworkBehaviourAction< ::InEvent, Self::OutEvent, @@ -391,8 +429,8 @@ where > { // search for peers if it is time loop { - match self.peer_discovery_delay.poll() { - Ok(Async::Ready(_)) => { + match self.peer_discovery_delay.poll_unpin(cx) { + Poll::Ready(_) => { if self.network_globals.connected_peers() < self.max_peers { self.find_peers(); } @@ -401,17 +439,14 @@ where Instant::now() + Duration::from_secs(MAX_TIME_BETWEEN_PEER_SEARCHES), ); } - Ok(Async::NotReady) => break, - Err(e) => { - warn!(self.log, "Discovery peer search failed"; "error" => format!("{:?}", e)); - } + Poll::Pending => break, } } // Poll discovery loop { - match self.discovery.poll(params) { - Async::Ready(NetworkBehaviourAction::GenerateEvent(event)) => { + match self.discovery.poll_next_unpin(cx) { + Poll::Ready(Some(event)) => { match event { Discv5Event::Discovered(_enr) => { // 
peers that get discovered during a query but are not contactable or @@ -434,7 +469,7 @@ where let enr = self.discovery.local_enr(); enr::save_enr_to_disk(Path::new(&self.enr_dir), enr, &self.log); - return Async::Ready(NetworkBehaviourAction::ReportObservedAddr { + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, }); } @@ -451,9 +486,12 @@ where self.peer_discovery_delay .reset(Instant::now() + Duration::from_secs(delay)); - for peer_id in closer_peers { - // if we need more peers, attempt a connection + for enr in closer_peers { + // cache known peers + let peer_id = enr.peer_id(); + self.cached_enrs.put(enr.peer_id(), enr); + // if we need more peers, attempt a connection if self.network_globals.connected_or_dialing_peers() < self.max_peers && !self @@ -463,10 +501,18 @@ where .is_connected_or_dialing(&peer_id) && !self.banned_peers.contains(&peer_id) { - debug!(self.log, "Connecting to discovered peer"; "peer_id"=> format!("{:?}", peer_id)); - self.network_globals.peers.write().dialing_peer(&peer_id); - self.events - .push_back(NetworkBehaviourAction::DialPeer { peer_id }); + // TODO: Debugging only + // NOTE: The peer manager will get updated by the global swarm. 
+ let connection_status = self + .network_globals + .peers + .read() + .connection_status(&peer_id); + debug!(self.log, "Connecting to discovered peer"; "peer_id"=> peer_id.to_string(), "status" => format!("{:?}", connection_status)); + self.events.push_back(NetworkBehaviourAction::DialPeer { + peer_id, + condition: DialPeerCondition::Disconnected, + }); } } } @@ -474,16 +520,16 @@ where } } // discv5 does not output any other NetworkBehaviourAction - Async::Ready(_) => {} - Async::NotReady => break, + Poll::Ready(_) => {} + Poll::Pending => break, } } // process any queued events if let Some(event) = self.events.pop_front() { - return Async::Ready(event); + return Poll::Ready(event); } - Async::NotReady + Poll::Pending } } diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 9230a4afb0..2c028eac6f 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -17,9 +17,10 @@ pub mod types; pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage}; pub use behaviour::BehaviourEvent; pub use config::Config as NetworkConfig; +pub use discovery::enr_ext::{CombinedKeyExt, EnrExt}; pub use libp2p::gossipsub::{MessageId, Topic, TopicHash}; +pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{multiaddr, Multiaddr}; -pub use libp2p::{PeerId, Swarm}; -pub use peer_manager::{PeerDB, PeerInfo, PeerSyncStatus, SyncInfo}; +pub use peer_manager::{client::Client, PeerDB, PeerInfo, PeerSyncStatus, SyncInfo}; pub use rpc::RPCEvent; -pub use service::{Service, NETWORK_KEY_FILENAME}; +pub use service::{Libp2pEvent, Service, NETWORK_KEY_FILENAME}; diff --git a/beacon_node/eth2-libp2p/src/peer_manager/client.rs b/beacon_node/eth2-libp2p/src/peer_manager/client.rs index 36a67325b5..3e8015cd0f 100644 --- a/beacon_node/eth2-libp2p/src/peer_manager/client.rs +++ b/beacon_node/eth2-libp2p/src/peer_manager/client.rs @@ -131,6 +131,18 @@ fn client_from_agent_version(agent_version: &str) 
-> (ClientKind, String, String let unknown = String::from("unknown"); (kind, unknown.clone(), unknown) } + Some("nim-libp2p") => { + let kind = ClientKind::Nimbus; + let mut version = String::from("unknown"); + let mut os_version = version.clone(); + if let Some(agent_version) = agent_split.next() { + version = agent_version.into(); + if let Some(agent_os_version) = agent_split.next() { + os_version = agent_os_version.into(); + } + } + (kind, version, os_version) + } _ => { let unknown = String::from("unknown"); (ClientKind::Unknown, unknown.clone(), unknown) diff --git a/beacon_node/eth2-libp2p/src/peer_manager/mod.rs b/beacon_node/eth2-libp2p/src/peer_manager/mod.rs index f281c8a41f..d3ca8954a0 100644 --- a/beacon_node/eth2-libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2-libp2p/src/peer_manager/mod.rs @@ -6,16 +6,18 @@ use crate::rpc::{MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::{NetworkGlobals, PeerId}; use futures::prelude::*; use futures::Stream; -use hashmap_delay::HashSetDelay; +use hashset_delay::HashSetDelay; use libp2p::identify::IdentifyInfo; use slog::{crit, debug, error, warn}; use smallvec::SmallVec; use std::convert::TryInto; +use std::pin::Pin; use std::sync::Arc; +use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use types::EthSpec; -mod client; +pub mod client; mod peer_info; mod peer_sync_status; mod peerdb; @@ -24,7 +26,7 @@ pub use peer_info::{PeerConnectionStatus::*, PeerInfo}; pub use peer_sync_status::{PeerSyncStatus, SyncInfo}; /// The minimum reputation before a peer is disconnected. // Most likely this needs tweaking. -const MIN_REP_BEFORE_BAN: Rep = 10; +const _MIN_REP_BEFORE_BAN: Rep = 10; /// The time in seconds between re-status's peers. const STATUS_INTERVAL: u64 = 300; /// The time in seconds between PING events. We do not send a ping if the other peer as PING'd us within @@ -42,7 +44,7 @@ pub struct PeerManager { /// A collection of peers awaiting to be Status'd. 
status_peers: HashSetDelay, /// Last updated moment. - last_updated: Instant, + _last_updated: Instant, /// The logger associated with the `PeerManager`. log: slog::Logger, } @@ -104,7 +106,7 @@ impl PeerManager { PeerManager { network_globals, events: SmallVec::new(), - last_updated: Instant::now(), + _last_updated: Instant::now(), ping_peers: HashSetDelay::new(Duration::from_secs(PING_INTERVAL)), status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)), log: log.clone(), @@ -123,7 +125,7 @@ impl PeerManager { debug!(self.log, "Received a ping request"; "peer_id" => peer_id.to_string(), "seq_no" => seq); self.ping_peers.insert(peer_id.clone()); - // if the sequence number is unknown send update the meta data of the peer. + // if the sequence number is unknown send an update the meta data of the peer. if let Some(meta_data) = &peer_info.meta_data { if meta_data.seq_number < seq { debug!(self.log, "Requesting new metadata from peer"; @@ -180,9 +182,7 @@ impl PeerManager { "peer_id" => peer_id.to_string(), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number); peer_info.meta_data = Some(meta_data); } else { - // TODO: isn't this malicious/random behaviour? What happens if the seq_number - // is the same but the contents differ? - warn!(self.log, "Received old metadata"; + debug!(self.log, "Received old metadata"; "peer_id" => peer_id.to_string(), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number); } } else { @@ -204,11 +204,8 @@ impl PeerManager { /// Updates the state of the peer as disconnected. 
pub fn notify_disconnect(&mut self, peer_id: &PeerId) { - self.update_reputations(); - { - let mut peerdb = self.network_globals.peers.write(); - peerdb.disconnect(peer_id); - } + //self.update_reputations(); + self.network_globals.peers.write().disconnect(peer_id); // remove the ping and status timer for the peer self.ping_peers.remove(peer_id); @@ -223,25 +220,31 @@ impl PeerManager { /// Sets a peer as connected as long as their reputation allows it /// Informs if the peer was accepted pub fn connect_ingoing(&mut self, peer_id: &PeerId) -> bool { - self.connect_peer(peer_id, false) + self.connect_peer(peer_id, ConnectingType::IngoingConnected) } /// Sets a peer as connected as long as their reputation allows it /// Informs if the peer was accepted pub fn connect_outgoing(&mut self, peer_id: &PeerId) -> bool { - self.connect_peer(peer_id, true) + self.connect_peer(peer_id, ConnectingType::OutgoingConnected) + } + + /// Updates the database informing that a peer is being dialed. + pub fn dialing_peer(&mut self, peer_id: &PeerId) -> bool { + self.connect_peer(peer_id, ConnectingType::Dialing) } /// Reports a peer for some action. /// /// If the peer doesn't exist, log a warning and insert defaults. pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction) { - self.update_reputations(); + //TODO: Check these. There are double disconnects for example + // self.update_reputations(); self.network_globals .peers .write() .add_reputation(peer_id, action.rep_change()); - self.update_reputations(); + // self.update_reputations(); } /// Updates `PeerInfo` with `identify` information. 
@@ -255,7 +258,14 @@ impl PeerManager { } pub fn handle_rpc_error(&mut self, peer_id: &PeerId, protocol: Protocol, err: &RPCError) { - debug!(self.log, "RPCError"; "protocol" => protocol.to_string(), "err" => err.to_string()); + let client = self + .network_globals + .peers + .read() + .peer_info(peer_id) + .map(|info| info.client.clone()) + .unwrap_or_default(); + debug!(self.log, "RPCError"; "protocol" => protocol.to_string(), "err" => err.to_string(), "client" => client.to_string()); // Map this error to a `PeerAction` (if any) let peer_action = match err { @@ -321,21 +331,23 @@ impl PeerManager { /// /// This informs if the peer was accepted in to the db or not. // TODO: Drop peers if over max_peer limit - fn connect_peer(&mut self, peer_id: &PeerId, outgoing: bool) -> bool { + fn connect_peer(&mut self, peer_id: &PeerId, connection: ConnectingType) -> bool { // TODO: remove after timed updates - self.update_reputations(); + //self.update_reputations(); { let mut peerdb = self.network_globals.peers.write(); if peerdb.connection_status(peer_id).map(|c| c.is_banned()) == Some(true) { // don't connect if the peer is banned - return false; + // TODO: Handle this case. If peer is banned this shouldn't be reached. It will put + // our connection/disconnection out of sync with libp2p + // return false; } - if outgoing { - peerdb.connect_outgoing(peer_id); - } else { - peerdb.connect_ingoing(peer_id); + match connection { + ConnectingType::Dialing => peerdb.dialing_peer(peer_id), + ConnectingType::IngoingConnected => peerdb.connect_outgoing(peer_id), + ConnectingType::OutgoingConnected => peerdb.connect_ingoing(peer_id), } } @@ -366,10 +378,10 @@ impl PeerManager { /// /// A banned(disconnected) peer that gets its rep above(below) MIN_REP_BEFORE_BAN is /// now considered a disconnected(banned) peer. 
- fn update_reputations(&mut self) { + fn _update_reputations(&mut self) { // avoid locking the peerdb too often // TODO: call this on a timer - if self.last_updated.elapsed().as_secs() < 30 { + if self._last_updated.elapsed().as_secs() < 30 { return; } @@ -382,7 +394,7 @@ impl PeerManager { /* Check how long have peers been in this state and update their reputations if needed */ let mut pdb = self.network_globals.peers.write(); - for (id, info) in pdb.peers_mut() { + for (id, info) in pdb._peers_mut() { // Update reputations match info.connection_status { Connected { .. } => { @@ -398,7 +410,7 @@ impl PeerManager { .as_secs() / 3600; let last_dc_hours = self - .last_updated + ._last_updated .checked_duration_since(since) .unwrap_or_else(|| Duration::from_secs(0)) .as_secs() @@ -423,12 +435,13 @@ impl PeerManager { // TODO: decide how to handle this } } + Unknown => {} //TODO: Handle this case } // Check if the peer gets banned or unbanned and if it should be disconnected - if info.reputation < MIN_REP_BEFORE_BAN && !info.connection_status.is_banned() { + if info.reputation < _MIN_REP_BEFORE_BAN && !info.connection_status.is_banned() { // This peer gets banned. 
Check if we should request disconnection ban_queue.push(id.clone()); - } else if info.reputation >= MIN_REP_BEFORE_BAN && info.connection_status.is_banned() { + } else if info.reputation >= _MIN_REP_BEFORE_BAN && info.connection_status.is_banned() { // This peer gets unbanned unban_queue.push(id.clone()); } @@ -444,57 +457,56 @@ impl PeerManager { pdb.disconnect(&id); } - self.last_updated = Instant::now(); + self._last_updated = Instant::now(); } } impl Stream for PeerManager { type Item = PeerManagerEvent; - type Error = (); - fn poll(&mut self) -> Poll, Self::Error> { + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // poll the timeouts for pings and status' - // TODO: Remove task notifies and temporary vecs for stable futures - // These exist to handle a bug in delayqueue - let mut peers_to_add = Vec::new(); - while let Async::Ready(Some(peer_id)) = self.ping_peers.poll().map_err(|e| { - error!(self.log, "Failed to check for peers to ping"; "error" => e.to_string()); - })? { - debug!(self.log, "Pinging peer"; "peer_id" => peer_id.to_string()); - // add the ping timer back - peers_to_add.push(peer_id.clone()); - self.events.push(PeerManagerEvent::Ping(peer_id)); + loop { + match self.ping_peers.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(peer_id))) => { + self.ping_peers.insert(peer_id.clone()); + self.events.push(PeerManagerEvent::Ping(peer_id)); + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for peers to ping"; "error" => format!("{}",e)) + } + Poll::Ready(None) | Poll::Pending => break, + } } - if !peers_to_add.is_empty() { - futures::task::current().notify(); - } - while let Some(peer) = peers_to_add.pop() { - self.ping_peers.insert(peer); - } - - while let Async::Ready(Some(peer_id)) = self.status_peers.poll().map_err(|e| { - error!(self.log, "Failed to check for peers to status"; "error" => e.to_string()); - })? 
{ - debug!(self.log, "Sending Status to peer"; "peer_id" => peer_id.to_string()); - // add the status timer back - peers_to_add.push(peer_id.clone()); - self.events.push(PeerManagerEvent::Status(peer_id)); - } - - if !peers_to_add.is_empty() { - futures::task::current().notify(); - } - while let Some(peer) = peers_to_add.pop() { - self.status_peers.insert(peer); + loop { + match self.status_peers.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(peer_id))) => { + self.status_peers.insert(peer_id.clone()); + self.events.push(PeerManagerEvent::Status(peer_id)) + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for peers to ping"; "error" => format!("{}",e)) + } + Poll::Ready(None) | Poll::Pending => break, + } } if !self.events.is_empty() { - return Ok(Async::Ready(Some(self.events.remove(0)))); + return Poll::Ready(Some(self.events.remove(0))); } else { self.events.shrink_to_fit(); } - Ok(Async::NotReady) + Poll::Pending } } + +enum ConnectingType { + /// We are in the process of dialing this peer. + Dialing, + /// A peer has dialed us. + IngoingConnected, + /// We have successfully dialed a peer. + OutgoingConnected, +} diff --git a/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs index 611766a160..4c97b2c081 100644 --- a/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs +++ b/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs @@ -100,6 +100,8 @@ pub enum PeerConnectionStatus { /// time since we last communicated with the peer. since: Instant, }, + /// The connection status has not been specified. + Unknown, } /// Serialization for http requests. 
@@ -127,15 +129,14 @@ impl Serialize for PeerConnectionStatus { s.serialize_field("since", &since.elapsed().as_secs())?; s.end() } + Unknown => serializer.serialize_unit_variant("", 4, "Unknown"), } } } impl Default for PeerConnectionStatus { fn default() -> Self { - PeerConnectionStatus::Dialing { - since: Instant::now(), - } + PeerConnectionStatus::Unknown } } @@ -177,7 +178,7 @@ impl PeerConnectionStatus { pub fn connect_ingoing(&mut self) { match self { Connected { n_in, .. } => *n_in += 1, - Disconnected { .. } | Banned { .. } | Dialing { .. } => { + Disconnected { .. } | Banned { .. } | Dialing { .. } | Unknown => { *self = Connected { n_in: 1, n_out: 0 } } } @@ -188,7 +189,7 @@ impl PeerConnectionStatus { pub fn connect_outgoing(&mut self) { match self { Connected { n_out, .. } => *n_out += 1, - Disconnected { .. } | Banned { .. } | Dialing { .. } => { + Disconnected { .. } | Banned { .. } | Dialing { .. } | Unknown => { *self = Connected { n_in: 0, n_out: 1 } } } diff --git a/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs index d5fa4bcf7f..69b7b12152 100644 --- a/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs +++ b/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs @@ -2,8 +2,8 @@ use super::peer_info::{PeerConnectionStatus, PeerInfo}; use super::peer_sync_status::PeerSyncStatus; use crate::rpc::methods::MetaData; use crate::PeerId; -use slog::{crit, warn}; -use std::collections::HashMap; +use slog::{crit, debug, warn}; +use std::collections::{hash_map::Entry, HashMap}; use std::time::Instant; use types::{EthSpec, SubnetId}; @@ -77,7 +77,7 @@ impl PeerDB { } /// Returns an iterator over all peers in the db. 
- pub(super) fn peers_mut(&mut self) -> impl Iterator)> { + pub(super) fn _peers_mut(&mut self) -> impl Iterator)> { self.peers.iter_mut() } @@ -228,11 +228,12 @@ impl PeerDB { let info = self.peers.entry(peer_id.clone()).or_default(); if info.connection_status.is_disconnected() { - self.n_dc -= 1; + self.n_dc = self.n_dc.saturating_sub(1); } info.connection_status = PeerConnectionStatus::Dialing { since: Instant::now(), }; + debug!(self.log, "Peer dialing in db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc); } /// Sets a peer as connected with an ingoing connection. @@ -240,9 +241,10 @@ impl PeerDB { let info = self.peers.entry(peer_id.clone()).or_default(); if info.connection_status.is_disconnected() { - self.n_dc -= 1; + self.n_dc = self.n_dc.saturating_sub(1); } info.connection_status.connect_ingoing(); + debug!(self.log, "Peer connected to db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc); } /// Sets a peer as connected with an outgoing connection. @@ -250,9 +252,10 @@ impl PeerDB { let info = self.peers.entry(peer_id.clone()).or_default(); if info.connection_status.is_disconnected() { - self.n_dc -= 1; + self.n_dc = self.n_dc.saturating_sub(1); } info.connection_status.connect_outgoing(); + debug!(self.log, "Peer connected to db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc); } /// Sets the peer as disconnected. 
A banned peer remains banned @@ -263,11 +266,11 @@ impl PeerDB { "peer_id" => peer_id.to_string()); PeerInfo::default() }); - if !info.connection_status.is_disconnected() && !info.connection_status.is_banned() { info.connection_status.disconnect(); self.n_dc += 1; } + debug!(self.log, "Peer disconnected from db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc); self.shrink_to_fit(); } @@ -284,7 +287,7 @@ impl PeerDB { .map(|(id, _)| id.clone()) .unwrap(); // should be safe since n_dc > MAX_DC_PEERS > 0 self.peers.remove(&to_drop); - self.n_dc -= 1; + self.n_dc = self.n_dc.saturating_sub(1); } } @@ -297,8 +300,9 @@ impl PeerDB { PeerInfo::default() }); if info.connection_status.is_disconnected() { - self.n_dc -= 1; + self.n_dc = self.n_dc.saturating_sub(1); } + debug!(self.log, "Peer banned"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc); info.connection_status.ban(); } @@ -334,11 +338,14 @@ impl PeerDB { /// upper (lower) bounds, it stays at the maximum (minimum) value. pub(super) fn add_reputation(&mut self, peer_id: &PeerId, change: RepChange) { let log_ref = &self.log; - let info = self.peers.entry(peer_id.clone()).or_insert_with(|| { - warn!(log_ref, "Adding to the reputation of an unknown peer"; - "peer_id" => peer_id.to_string()); - PeerInfo::default() - }); + let info = match self.peers.entry(peer_id.clone()) { + Entry::Vacant(_) => { + warn!(log_ref, "Peer is unknown, no reputation change made"; + "peer_id" => peer_id.to_string()); + return; + } + Entry::Occupied(e) => e.into_mut(), + }; info.reputation = if change.is_good { info.reputation.saturating_add(change.diff) diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs index 43f0f494c2..4bf277cdc5 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs @@ -4,10 +4,10 @@ use crate::rpc::{ErrorMessage, RPCCodedResponse, RPCRequest, RPCResponse}; use libp2p::bytes::BufMut; use 
libp2p::bytes::BytesMut; use std::marker::PhantomData; -use tokio::codec::{Decoder, Encoder}; +use tokio_util::codec::{Decoder, Encoder}; use types::EthSpec; -pub trait OutboundCodec: Encoder + Decoder { +pub trait OutboundCodec: Encoder + Decoder { type ErrorType; fn decode_error( @@ -21,7 +21,7 @@ pub trait OutboundCodec: Encoder + Decoder { pub struct BaseInboundCodec where - TCodec: Encoder + Decoder, + TCodec: Encoder> + Decoder, TSpec: EthSpec, { /// Inner codec for handling various encodings @@ -31,7 +31,7 @@ where impl BaseInboundCodec where - TCodec: Encoder + Decoder, + TCodec: Encoder> + Decoder, TSpec: EthSpec, { pub fn new(codec: TCodec) -> Self { @@ -46,7 +46,7 @@ where // This deals with Decoding RPC Responses from other peers and encoding our requests pub struct BaseOutboundCodec where - TOutboundCodec: OutboundCodec, + TOutboundCodec: OutboundCodec>, TSpec: EthSpec, { /// Inner codec for handling various encodings. @@ -59,7 +59,7 @@ where impl BaseOutboundCodec where TSpec: EthSpec, - TOutboundCodec: OutboundCodec, + TOutboundCodec: OutboundCodec>, { pub fn new(codec: TOutboundCodec) -> Self { BaseOutboundCodec { @@ -75,15 +75,18 @@ where /* Base Inbound Codec */ // This Encodes RPC Responses sent to external peers -impl Encoder for BaseInboundCodec +impl Encoder> for BaseInboundCodec where TSpec: EthSpec, - TCodec: Decoder + Encoder>, + TCodec: Decoder + Encoder>, { - type Item = RPCCodedResponse; - type Error = ::Error; + type Error = >>::Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode( + &mut self, + item: RPCCodedResponse, + dst: &mut BytesMut, + ) -> Result<(), Self::Error> { dst.clear(); dst.reserve(1); dst.put_u8( @@ -98,7 +101,7 @@ where impl Decoder for BaseInboundCodec where TSpec: EthSpec, - TCodec: Encoder + Decoder>, + TCodec: Encoder> + Decoder>, { type Item = RPCRequest; type Error = ::Error; @@ -111,15 +114,14 @@ where /* Base Outbound Codec */ // This Encodes RPC Requests 
sent to external peers -impl Encoder for BaseOutboundCodec +impl Encoder> for BaseOutboundCodec where TSpec: EthSpec, - TCodec: OutboundCodec + Encoder>, + TCodec: OutboundCodec> + Encoder>, { - type Item = RPCRequest; - type Error = ::Error; + type Error = >>::Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RPCRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { self.inner.encode(item, dst) } } @@ -128,7 +130,8 @@ where impl Decoder for BaseOutboundCodec where TSpec: EthSpec, - TCodec: OutboundCodec + Decoder>, + TCodec: OutboundCodec, ErrorType = ErrorMessage> + + Decoder>, { type Item = RPCCodedResponse; type Error = ::Error; @@ -168,3 +171,47 @@ where inner_result } } + +#[cfg(test)] +mod tests { + use super::super::ssz::*; + use super::super::ssz_snappy::*; + use super::*; + use crate::rpc::protocol::*; + + #[test] + fn test_decode_status_message() { + let message = hex::decode("ff060000734e615070590032000006e71e7b54989925efd6c9cbcb8ceb9b5f71216f5137282bf6a1e3b50f64e42d6c7fb347abe07eb0db8200000005029e2800").unwrap(); + let mut buf = BytesMut::new(); + buf.extend_from_slice(&message); + + type Spec = types::MainnetEthSpec; + + let snappy_protocol_id = + ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); + let ssz_protocol_id = ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZ); + + let mut snappy_outbound_codec = + SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576); + let mut ssz_outbound_codec = SSZOutboundCodec::::new(ssz_protocol_id, 1_048_576); + + // decode message just as snappy message + let snappy_decoded_message = snappy_outbound_codec.decode(&mut buf.clone()); + // decode message just a ssz message + let ssz_decoded_message = ssz_outbound_codec.decode(&mut buf.clone()); + + // build codecs for entire chunk + let mut snappy_base_outbound_codec = BaseOutboundCodec::new(snappy_outbound_codec); + let mut ssz_base_outbound_codec = 
BaseOutboundCodec::new(ssz_outbound_codec); + + // decode message as ssz snappy chunk + let snappy_decoded_chunk = snappy_base_outbound_codec.decode(&mut buf.clone()); + // decode message just a ssz chunk + let ssz_decoded_chunk = ssz_base_outbound_codec.decode(&mut buf.clone()); + + let _ = dbg!(snappy_decoded_message); + let _ = dbg!(ssz_decoded_message); + let _ = dbg!(snappy_decoded_chunk); + let _ = dbg!(ssz_decoded_chunk); + } +} diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/mod.rs b/beacon_node/eth2-libp2p/src/rpc/codec/mod.rs index 1fd97a78b3..c117f52feb 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/mod.rs @@ -8,7 +8,7 @@ use self::ssz_snappy::{SSZSnappyInboundCodec, SSZSnappyOutboundCodec}; use crate::rpc::protocol::RPCError; use crate::rpc::{RPCCodedResponse, RPCRequest}; use libp2p::bytes::BytesMut; -use tokio::codec::{Decoder, Encoder}; +use tokio_util::codec::{Decoder, Encoder}; use types::EthSpec; // Known types of codecs @@ -22,11 +22,10 @@ pub enum OutboundCodec { SSZ(BaseOutboundCodec, TSpec>), } -impl Encoder for InboundCodec { - type Item = RPCCodedResponse; +impl Encoder> for InboundCodec { type Error = RPCError; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RPCCodedResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { match self { InboundCodec::SSZ(codec) => codec.encode(item, dst), InboundCodec::SSZSnappy(codec) => codec.encode(item, dst), @@ -46,11 +45,10 @@ impl Decoder for InboundCodec { } } -impl Encoder for OutboundCodec { - type Item = RPCRequest; +impl Encoder> for OutboundCodec { type Error = RPCError; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RPCRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { match self { OutboundCodec::SSZ(codec) => codec.encode(item, dst), OutboundCodec::SSZSnappy(codec) => codec.encode(item, 
dst), diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index 37ea4eac55..0f763e997d 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -7,7 +7,7 @@ use crate::rpc::{ErrorMessage, RPCCodedResponse, RPCRequest, RPCResponse}; use libp2p::bytes::{BufMut, Bytes, BytesMut}; use ssz::{Decode, Encode}; use std::marker::PhantomData; -use tokio::codec::{Decoder, Encoder}; +use tokio_util::codec::{Decoder, Encoder}; use types::{EthSpec, SignedBeaconBlock}; use unsigned_varint::codec::UviBytes; @@ -19,7 +19,7 @@ pub struct SSZInboundCodec { phantom: PhantomData, } -impl SSZInboundCodec { +impl SSZInboundCodec { pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { let mut uvi_codec = UviBytes::default(); uvi_codec.set_max_len(max_packet_size); @@ -36,11 +36,14 @@ impl SSZInboundCodec { } // Encoder for inbound streams: Encodes RPC Responses sent to peers. -impl Encoder for SSZInboundCodec { - type Item = RPCCodedResponse; +impl Encoder> for SSZInboundCodec { type Error = RPCError; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode( + &mut self, + item: RPCCodedResponse, + dst: &mut BytesMut, + ) -> Result<(), Self::Error> { let bytes = match item { RPCCodedResponse::Success(resp) => match resp { RPCResponse::Status(res) => res.as_ssz_bytes(), @@ -145,11 +148,10 @@ impl SSZOutboundCodec { } // Encoder for outbound streams: Encodes RPC Requests to peers -impl Encoder for SSZOutboundCodec { - type Item = RPCRequest; +impl Encoder> for SSZOutboundCodec { type Error = RPCError; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RPCRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { RPCRequest::Status(req) => req.as_ssz_bytes(), RPCRequest::Goodbye(req) => req.as_ssz_bytes(), @@ -201,7 +203,7 @@ impl Decoder 
for SSZOutboundCodec { match self.inner.decode(src).map_err(RPCError::from) { Ok(Some(mut packet)) => { // take the bytes from the buffer - let raw_bytes = packet.take(); + let raw_bytes = packet.split(); match self.protocol.message_name { Protocol::Status => match self.protocol.version { @@ -239,7 +241,7 @@ impl Decoder for SSZOutboundCodec { } } -impl OutboundCodec for SSZOutboundCodec { +impl OutboundCodec> for SSZOutboundCodec { type ErrorType = ErrorMessage; fn decode_error(&mut self, src: &mut BytesMut) -> Result, RPCError> { diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs index c99d1f6fdd..6c6e09f4db 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs @@ -12,7 +12,7 @@ use std::io::Cursor; use std::io::ErrorKind; use std::io::{Read, Write}; use std::marker::PhantomData; -use tokio::codec::{Decoder, Encoder}; +use tokio_util::codec::{Decoder, Encoder}; use types::{EthSpec, SignedBeaconBlock}; use unsigned_varint::codec::Uvi; @@ -44,11 +44,14 @@ impl SSZSnappyInboundCodec { } // Encoder for inbound streams: Encodes RPC Responses sent to peers. 
-impl Encoder for SSZSnappyInboundCodec { - type Item = RPCCodedResponse; +impl Encoder> for SSZSnappyInboundCodec { type Error = RPCError; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode( + &mut self, + item: RPCCodedResponse, + dst: &mut BytesMut, + ) -> Result<(), Self::Error> { let bytes = match item { RPCCodedResponse::Success(resp) => match resp { RPCResponse::Status(res) => res.as_ssz_bytes(), @@ -116,7 +119,7 @@ impl Decoder for SSZSnappyInboundCodec { // `n` is how many bytes the reader read in the compressed stream let n = reader.get_ref().position(); self.len = None; - src.split_to(n as usize); + let _read_bytes = src.split_to(n as usize); match self.protocol.message_name { Protocol::Status => match self.protocol.version { Version::V1 => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes( @@ -193,11 +196,10 @@ impl SSZSnappyOutboundCodec { } // Encoder for outbound streams: Encodes RPC Requests to peers -impl Encoder for SSZSnappyOutboundCodec { - type Item = RPCRequest; +impl Encoder> for SSZSnappyOutboundCodec { type Error = RPCError; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: RPCRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { RPCRequest::Status(req) => req.as_ssz_bytes(), RPCRequest::Goodbye(req) => req.as_ssz_bytes(), @@ -262,7 +264,7 @@ impl Decoder for SSZSnappyOutboundCodec { // `n` is how many bytes the reader read in the compressed stream let n = reader.get_ref().position(); self.len = None; - src.split_to(n as usize); + let _read_byts = src.split_to(n as usize); match self.protocol.message_name { Protocol::Status => match self.protocol.version { Version::V1 => Ok(Some(RPCResponse::Status( @@ -307,7 +309,7 @@ impl Decoder for SSZSnappyOutboundCodec { } } -impl OutboundCodec for SSZSnappyOutboundCodec { +impl OutboundCodec> for SSZSnappyOutboundCodec { type ErrorType = 
ErrorMessage; fn decode_error(&mut self, src: &mut BytesMut) -> Result, RPCError> { @@ -334,7 +336,7 @@ impl OutboundCodec for SSZSnappyOutboundCodec { // `n` is how many bytes the reader read in the compressed stream let n = reader.get_ref().position(); self.len = None; - src.split_to(n as usize); + let _read_bytes = src.split_to(n as usize); Ok(Some(ErrorMessage::from_ssz_bytes(&decoded_buffer)?)) } Err(e) => match e.kind() { diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index d8fa347abd..14ab6f9562 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -5,7 +5,6 @@ use super::methods::{ErrorMessage, RPCCodedResponse, RequestId, ResponseTerminat use super::protocol::{Protocol, RPCError, RPCProtocol, RPCRequest}; use super::RPCEvent; use crate::rpc::protocol::{InboundFramed, OutboundFramed}; -use core::marker::PhantomData; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::core::upgrade::{ @@ -14,15 +13,18 @@ use libp2p::core::upgrade::{ use libp2p::swarm::protocols_handler::{ KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, }; +use libp2p::swarm::NegotiatedSubstream; use slog::{crit, debug, error, trace, warn}; use smallvec::SmallVec; -use std::collections::hash_map::Entry; -use std::time::{Duration, Instant}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio::timer::{delay_queue, DelayQueue}; +use std::{ + collections::hash_map::Entry, + pin::Pin, + task::{Context, Poll}, + time::{Duration, Instant}, +}; +use tokio::time::{delay_queue, DelayQueue}; use types::EthSpec; -//TODO: Implement close() on the substream types to improve the poll code. //TODO: Implement check_timeout() on the substream types /// The time (in seconds) before a substream that is awaiting a response from the user times out. 
@@ -39,9 +41,8 @@ type InboundRequestId = RequestId; type OutboundRequestId = RequestId; /// Implementation of `ProtocolsHandler` for the RPC protocol. -pub struct RPCHandler +pub struct RPCHandler where - TSubstream: AsyncRead + AsyncWrite, TSpec: EthSpec, { /// The upgrade for inbound substreams. @@ -63,7 +64,7 @@ where inbound_substreams: FnvHashMap< InboundRequestId, ( - InboundSubstreamState, + InboundSubstreamState, Option, Protocol, ), @@ -74,14 +75,8 @@ where /// Map of outbound substreams that need to be driven to completion. The `RequestId` is /// maintained by the application sending the request. - outbound_substreams: FnvHashMap< - OutboundRequestId, - ( - OutboundSubstreamState, - delay_queue::Key, - Protocol, - ), - >, + outbound_substreams: + FnvHashMap, delay_queue::Key, Protocol)>, /// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout. outbound_substreams_delay: DelayQueue, @@ -107,21 +102,27 @@ where /// Logger for handling RPC streams log: slog::Logger, - - /// Marker to pin the generic stream. - _phantom: PhantomData, } -/// State of an outbound substream. Either waiting for a response, or in the process of sending. -pub enum InboundSubstreamState +pub enum InboundSubstreamState where - TSubstream: AsyncRead + AsyncWrite, TSpec: EthSpec, { - /// A response has been sent, pending writing and flush. + /// A response has been sent, pending writing. ResponsePendingSend { /// The substream used to send the response - substream: futures::sink::Send>, + substream: InboundFramed, + /// The message that is attempting to be sent. + message: RPCCodedResponse, + /// Whether a stream termination is requested. If true the stream will be closed after + /// this send. Otherwise it will transition to an idle state until a stream termination is + /// requested or a timeout is reached. + closing: bool, + }, + /// A response has been sent, pending flush. 
+ ResponsePendingFlush { + /// The substream used to send the response + substream: InboundFramed, /// Whether a stream termination is requested. If true the stream will be closed after /// this send. Otherwise it will transition to an idle state until a stream termination is /// requested or a timeout is reached. @@ -129,31 +130,31 @@ where }, /// The response stream is idle and awaiting input from the application to send more chunked /// responses. - ResponseIdle(InboundFramed), + ResponseIdle(InboundFramed), /// The substream is attempting to shutdown. - Closing(InboundFramed), + Closing(InboundFramed), /// Temporary state during processing Poisoned, } -pub enum OutboundSubstreamState { +/// State of an outbound substream. Either waiting for a response, or in the process of sending. +pub enum OutboundSubstreamState { /// A request has been sent, and we are awaiting a response. This future is driven in the /// handler because GOODBYE requests can be handled and responses dropped instantly. RequestPendingResponse { /// The framed negotiated substream. - substream: OutboundFramed, + substream: OutboundFramed, /// Keeps track of the actual request sent. request: RPCRequest, }, /// Closing an outbound substream> - Closing(OutboundFramed), + Closing(OutboundFramed), /// Temporary state during processing Poisoned, } -impl InboundSubstreamState +impl InboundSubstreamState where - TSubstream: AsyncRead + AsyncWrite, TSpec: EthSpec, { /// Moves the substream state to closing and informs the connected peer. 
The @@ -172,18 +173,37 @@ where RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange); match std::mem::replace(self, InboundSubstreamState::Poisoned) { - InboundSubstreamState::ResponsePendingSend { substream, closing } => { + // if we are busy awaiting a send/flush add the termination to the queue + InboundSubstreamState::ResponsePendingSend { + substream, + message, + closing, + } => { if !closing { outbound_queue.push(error); outbound_queue.push(stream_termination); } // if the stream is closing after the send, allow it to finish - *self = InboundSubstreamState::ResponsePendingSend { substream, closing } + *self = InboundSubstreamState::ResponsePendingSend { + substream, + message, + closing, + } + } + // if we are busy awaiting a send/flush add the termination to the queue + InboundSubstreamState::ResponsePendingFlush { substream, closing } => { + if !closing { + outbound_queue.push(error); + outbound_queue.push(stream_termination); + } + // if the stream is closing after the send, allow it to finish + *self = InboundSubstreamState::ResponsePendingFlush { substream, closing } } InboundSubstreamState::ResponseIdle(substream) => { *self = InboundSubstreamState::ResponsePendingSend { - substream: substream.send(error), + substream: substream, + message: error, closing: true, }; } @@ -198,9 +218,8 @@ where } } -impl RPCHandler +impl RPCHandler where - TSubstream: AsyncRead + AsyncWrite, TSpec: EthSpec, { pub fn new( @@ -225,7 +244,6 @@ where inactive_timeout, outbound_io_error_retries: 0, log: log.clone(), - _phantom: PhantomData, } } @@ -258,15 +276,13 @@ where } } -impl ProtocolsHandler for RPCHandler +impl ProtocolsHandler for RPCHandler where - TSubstream: AsyncRead + AsyncWrite, TSpec: EthSpec, { type InEvent = RPCEvent; type OutEvent = RPCEvent; - type Error = ProtocolsHandlerUpgrErr; - type Substream = TSubstream; + type Error = RPCError; type InboundProtocol = RPCProtocol; type OutboundProtocol = RPCRequest; type OutboundOpenInfo = 
(RequestId, RPCRequest); // Keep track of the id and the request @@ -277,14 +293,14 @@ where fn inject_fully_negotiated_inbound( &mut self, - out: as InboundUpgrade>::Output, + substream: >::Output, ) { // update the keep alive timeout if there are no more remaining outbound streams if let KeepAlive::Until(_) = self.keep_alive { self.keep_alive = KeepAlive::Until(Instant::now() + self.inactive_timeout); } - let (req, substream) = out; + let (req, substream) = substream; // drop the stream and return a 0 id for goodbye "requests" if let r @ RPCRequest::Goodbye(_) = req { self.events_out.push(RPCEvent::Request(0, r)); @@ -309,7 +325,7 @@ where fn inject_fully_negotiated_outbound( &mut self, - out: as OutboundUpgrade>::Output, + out: >::Output, request_info: Self::OutboundOpenInfo, ) { self.dial_negotiated -= 1; @@ -394,15 +410,18 @@ where // if it's a single rpc request or an error, close the stream after *substream_state = InboundSubstreamState::ResponsePendingSend { - substream: substream.send(response), + substream: substream, + message: response, closing: !res_is_multiple | res_is_error, // close if an error or we are not expecting more responses }; } } } - InboundSubstreamState::ResponsePendingSend { substream, closing } - if res_is_multiple => - { + InboundSubstreamState::ResponsePendingSend { + substream, + message, + closing, + } if res_is_multiple => { // the stream is in use, add the request to a pending queue self.queued_outbound_items .entry(rpc_id) @@ -411,6 +430,22 @@ where // return the state *substream_state = InboundSubstreamState::ResponsePendingSend { + substream, + message, + closing, + }; + } + InboundSubstreamState::ResponsePendingFlush { substream, closing } + if res_is_multiple => + { + // the stream is in use, add the request to a pending queue + self.queued_outbound_items + .entry(rpc_id) + .or_insert_with(Vec::new) + .push(response); + + // return the state + *substream_state = InboundSubstreamState::ResponsePendingFlush { substream, 
closing, }; @@ -419,8 +454,20 @@ where *substream_state = InboundSubstreamState::Closing(substream); debug!(self.log, "Response not sent. Stream is closing"; "response" => format!("{}",response)); } - InboundSubstreamState::ResponsePendingSend { substream, .. } => { + InboundSubstreamState::ResponsePendingSend { + substream, + message, + .. + } => { *substream_state = InboundSubstreamState::ResponsePendingSend { + substream, + message, + closing: true, + }; + error!(self.log, "Attempted sending multiple responses to a single response request"); + } + InboundSubstreamState::ResponsePendingFlush { substream, .. } => { + *substream_state = InboundSubstreamState::ResponsePendingFlush { substream, closing: true, }; @@ -433,7 +480,7 @@ where } } None => { - warn!(self.log, "Stream has expired. Response not sent"; "response" => format!("{}", response)); + warn!(self.log, "Stream has expired. Response not sent"; "response" => response.to_string(), "id" => rpc_id); } }; } @@ -446,7 +493,7 @@ where &mut self, request_info: Self::OutboundOpenInfo, error: ProtocolsHandlerUpgrErr< - >::Error, + >::Error, >, ) { let (id, req) = request_info; @@ -470,7 +517,7 @@ where ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select( NegotiationError::ProtocolError(e), )) => match e { - ProtocolError::IoError(io_err) => RPCError::IoError(io_err), + ProtocolError::IoError(io_err) => RPCError::IoError(io_err.to_string()), ProtocolError::InvalidProtocol => { RPCError::InternalError("Protocol was deemed invalid") } @@ -490,64 +537,82 @@ where fn poll( &mut self, + cx: &mut Context<'_>, ) -> Poll< - ProtocolsHandlerEvent, - Self::Error, + ProtocolsHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, > { if !self.pending_error.is_empty() { let (id, protocol, err) = self.pending_error.remove(0); - return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( - RPCEvent::Error(id, protocol, err), + return 
Poll::Ready(ProtocolsHandlerEvent::Custom(RPCEvent::Error( + id, protocol, err, ))); } // return any events that need to be reported if !self.events_out.is_empty() { - return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( - self.events_out.remove(0), - ))); + return Poll::Ready(ProtocolsHandlerEvent::Custom(self.events_out.remove(0))); } else { self.events_out.shrink_to_fit(); } // purge expired inbound substreams and send an error - while let Async::Ready(Some(stream_id)) = - self.inbound_substreams_delay.poll().map_err(|e| { - warn!(self.log, "Inbound substream poll failed"; "error" => format!("{:?}", e)); - ProtocolsHandlerUpgrErr::Timer - })? - { - let rpc_id = stream_id.get_ref(); + loop { + match self.inbound_substreams_delay.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(stream_id))) => { + // handle a stream timeout for various states + if let Some((substream_state, delay_key, _)) = + self.inbound_substreams.get_mut(stream_id.get_ref()) + { + // the delay has been removed + *delay_key = None; - // handle a stream timeout for various states - if let Some((substream_state, delay_key, _)) = self.inbound_substreams.get_mut(rpc_id) { - // the delay has been removed - *delay_key = None; - - let outbound_queue = self - .queued_outbound_items - .entry(*rpc_id) - .or_insert_with(Vec::new); - substream_state.close(outbound_queue); + let outbound_queue = self + .queued_outbound_items + .entry(stream_id.into_inner()) + .or_insert_with(Vec::new); + substream_state.close(outbound_queue); + } + } + Poll::Ready(Some(Err(e))) => { + warn!(self.log, "Inbound substream poll failed"; "error" => format!("{:?}", e)); + // drops the peer if we cannot read the delay queue + return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError( + "Could not poll inbound stream timer", + ))); + } + Poll::Pending | Poll::Ready(None) => break, } } // purge expired outbound substreams - if let Async::Ready(Some(stream_id)) = - self.outbound_substreams_delay.poll().map_err(|e| { - 
warn!(self.log, "Outbound substream poll failed"; "error" => format!("{:?}", e)); - ProtocolsHandlerUpgrErr::Timer - })? - { - if let Some((_id, _stream, protocol)) = - self.outbound_substreams.remove(stream_id.get_ref()) - { - // notify the user - return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( - RPCEvent::Error(*stream_id.get_ref(), protocol, RPCError::StreamTimeout), - ))); - } else { - crit!(self.log, "timed out substream not in the books"; "stream_id" => stream_id.get_ref()); + loop { + match self.outbound_substreams_delay.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(stream_id))) => { + if let Some((_id, _stream, protocol)) = + self.outbound_substreams.remove(stream_id.get_ref()) + { + // notify the user + return Poll::Ready(ProtocolsHandlerEvent::Custom(RPCEvent::Error( + *stream_id.get_ref(), + protocol, + RPCError::StreamTimeout, + ))); + } else { + crit!(self.log, "timed out substream not in the books"; "stream_id" => stream_id.get_ref()); + } + } + Poll::Ready(Some(Err(e))) => { + warn!(self.log, "Outbound substream poll failed"; "error" => format!("{:?}", e)); + return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError( + "Could not poll outbound stream timer", + ))); + } + Poll::Pending | Poll::Ready(None) => break, } } @@ -566,20 +631,75 @@ where ) { InboundSubstreamState::ResponsePendingSend { mut substream, + message, closing, } => { - match substream.poll() { - Ok(Async::Ready(raw_substream)) => { - // completed the send - - // close the stream if required + match Sink::poll_ready(Pin::new(&mut substream), cx) { + Poll::Ready(Ok(())) => { + // stream is ready to send data + match Sink::start_send(Pin::new(&mut substream), message) { + Ok(()) => { + // await flush + entry.get_mut().0 = + InboundSubstreamState::ResponsePendingFlush { + substream, + closing, + } + } + Err(e) => { + // error with sending in the codec + warn!(self.log, "Error sending RPC message"; "error" => e.to_string()); + // keep connection with the peer and return 
the + // stream to awaiting response if this message + // wasn't closing the stream + // TODO: Duplicate code + if closing { + entry.get_mut().0 = + InboundSubstreamState::Closing(substream) + } else { + // check for queued chunks and update the stream + entry.get_mut().0 = apply_queued_responses( + substream, + &mut self + .queued_outbound_items + .get_mut(&request_id), + &mut new_items_to_send, + ); + } + } + } + } + Poll::Ready(Err(e)) => { + error!(self.log, "Outbound substream error while sending RPC message: {:?}", e); + entry.remove(); + return Poll::Ready(ProtocolsHandlerEvent::Close(e)); + } + Poll::Pending => { + // the stream is not yet ready, continue waiting + entry.get_mut().0 = + InboundSubstreamState::ResponsePendingSend { + substream, + message, + closing, + }; + } + } + } + InboundSubstreamState::ResponsePendingFlush { + mut substream, + closing, + } => { + match Sink::poll_flush(Pin::new(&mut substream), cx) { + Poll::Ready(Ok(())) => { + // finished flushing + // TODO: Duplicate code if closing { entry.get_mut().0 = - InboundSubstreamState::Closing(raw_substream) + InboundSubstreamState::Closing(substream) } else { // check for queued chunks and update the stream entry.get_mut().0 = apply_queued_responses( - raw_substream, + substream, &mut self .queued_outbound_items .get_mut(&request_id), @@ -587,24 +707,34 @@ where ); } } - Ok(Async::NotReady) => { + Poll::Ready(Err(e)) => { + // error during flush + trace!(self.log, "Error sending flushing RPC message"; "error" => e.to_string()); + // we drop the stream on error and inform the user, remove + // any pending requests + // TODO: Duplicate code + if let Some(delay_key) = &entry.get().1 { + self.inbound_substreams_delay.remove(delay_key); + } + self.queued_outbound_items.remove(&request_id); + entry.remove(); + + if self.outbound_substreams.is_empty() + && self.inbound_substreams.is_empty() + { + self.keep_alive = KeepAlive::Until( + Instant::now() + self.inactive_timeout, + ); + } + } + 
Poll::Pending => { entry.get_mut().0 = - InboundSubstreamState::ResponsePendingSend { + InboundSubstreamState::ResponsePendingFlush { substream, closing, }; } - Err(e) => { - if let Some(delay_key) = &entry.get().1 { - self.inbound_substreams_delay.remove(delay_key); - } - let protocol = entry.get().2; - entry.remove_entry(); - return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( - RPCEvent::Error(0, protocol, e), - ))); - } - }; + } } InboundSubstreamState::ResponseIdle(substream) => { entry.get_mut().0 = apply_queued_responses( @@ -614,9 +744,8 @@ where ); } InboundSubstreamState::Closing(mut substream) => { - match substream.close() { - Ok(Async::Ready(())) | Err(_) => { - //trace!(self.log, "Inbound stream dropped"); + match Sink::poll_close(Pin::new(&mut substream), cx) { + Poll::Ready(Ok(())) => { if let Some(delay_key) = &entry.get().1 { self.inbound_substreams_delay.remove(delay_key); } @@ -631,7 +760,25 @@ where ); } } // drop the stream - Ok(Async::NotReady) => { + Poll::Ready(Err(e)) => { + error!(self.log, "Error closing inbound stream"; "error" => e.to_string()); + // drop the stream anyway + // TODO: Duplicate code + if let Some(delay_key) = &entry.get().1 { + self.inbound_substreams_delay.remove(delay_key); + } + self.queued_outbound_items.remove(&request_id); + entry.remove(); + + if self.outbound_substreams.is_empty() + && self.inbound_substreams.is_empty() + { + self.keep_alive = KeepAlive::Until( + Instant::now() + self.inactive_timeout, + ); + } + } + Poll::Pending => { entry.get_mut().0 = InboundSubstreamState::Closing(substream); } @@ -641,7 +788,7 @@ where crit!(self.log, "Poisoned outbound substream"); unreachable!("Coding Error: Inbound Substream is poisoned"); } - }; + } } Entry::Vacant(_) => unreachable!(), } @@ -659,8 +806,8 @@ where OutboundSubstreamState::RequestPendingResponse { mut substream, request, - } => match substream.poll() { - Ok(Async::Ready(Some(response))) => { + } => match substream.poll_next_unpin(cx) { + 
Poll::Ready(Some(Ok(response))) => { if request.multiple_responses() && !response.is_error() { entry.get_mut().0 = OutboundSubstreamState::RequestPendingResponse { @@ -678,11 +825,11 @@ where entry.get_mut().0 = OutboundSubstreamState::Closing(substream); } - return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( + return Poll::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Response(request_id, response), - ))); + )); } - Ok(Async::Ready(None)) => { + Poll::Ready(None) => { // stream closed // if we expected multiple streams send a stream termination, // else report the stream terminating only. @@ -694,59 +841,62 @@ where // notify the application error if request.multiple_responses() { // return an end of stream result - return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( + return Poll::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Response( request_id, RPCCodedResponse::StreamTermination( request.stream_termination(), ), ), - ))); + )); } // else we return an error, stream should not have closed early. 
- return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( + return Poll::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error( request_id, request.protocol(), RPCError::IncompleteStream, ), - ))); + )); } - Ok(Async::NotReady) => { + Poll::Pending => { entry.get_mut().0 = OutboundSubstreamState::RequestPendingResponse { substream, request, } } - Err(e) => { + Poll::Ready(Some(Err(e))) => { // drop the stream let delay_key = &entry.get().1; self.outbound_substreams_delay.remove(delay_key); let protocol = entry.get().2; entry.remove_entry(); - return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( + return Poll::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error(request_id, protocol, e), - ))); + )); } }, - OutboundSubstreamState::Closing(mut substream) => match substream.close() { - Ok(Async::Ready(())) | Err(_) => { - //trace!(self.log, "Outbound stream dropped"); - // drop the stream - let delay_key = &entry.get().1; - self.outbound_substreams_delay.remove(delay_key); - entry.remove_entry(); + OutboundSubstreamState::Closing(mut substream) => { + match Sink::poll_close(Pin::new(&mut substream), cx) { + // TODO: check if this is supposed to be a stream + Poll::Ready(_) => { + // drop the stream - including if there is an error + let delay_key = &entry.get().1; + self.outbound_substreams_delay.remove(delay_key); + entry.remove_entry(); - if self.outbound_substreams.is_empty() - && self.inbound_substreams.is_empty() - { - self.keep_alive = - KeepAlive::Until(Instant::now() + self.inactive_timeout); + if self.outbound_substreams.is_empty() + && self.inbound_substreams.is_empty() + { + self.keep_alive = KeepAlive::Until( + Instant::now() + self.inactive_timeout, + ); + } + } + Poll::Pending => { + entry.get_mut().0 = OutboundSubstreamState::Closing(substream); } } - Ok(Async::NotReady) => { - entry.get_mut().0 = OutboundSubstreamState::Closing(substream); - } - }, + } OutboundSubstreamState::Poisoned => { crit!(self.log, "Poisoned outbound substream"); 
unreachable!("Coding Error: Outbound substream is poisoned") @@ -762,23 +912,21 @@ where self.dial_negotiated += 1; let (id, req) = self.dial_queue.remove(0); self.dial_queue.shrink_to_fit(); - return Ok(Async::Ready( - ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(req.clone()), - info: (id, req), - }, - )); + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(req.clone()), + info: (id, req), + }); } - Ok(Async::NotReady) + Poll::Pending } } // Check for new items to send to the peer and update the underlying stream -fn apply_queued_responses( - raw_substream: InboundFramed, +fn apply_queued_responses( + substream: InboundFramed, queued_outbound_items: &mut Option<&mut Vec>>, new_items_to_send: &mut bool, -) -> InboundSubstreamState { +) -> InboundSubstreamState { match queued_outbound_items { Some(ref mut queue) if !queue.is_empty() => { *new_items_to_send = true; @@ -786,17 +934,18 @@ fn apply_queued_responses( match queue.remove(0) { RPCCodedResponse::StreamTermination(_) => { // close the stream if this is a stream termination - InboundSubstreamState::Closing(raw_substream) + InboundSubstreamState::Closing(substream) } chunk => InboundSubstreamState::ResponsePendingSend { - substream: raw_substream.send(chunk), + substream: substream, + message: chunk, closing: false, }, } } _ => { // no items queued set to idle - InboundSubstreamState::ResponseIdle(raw_substream) + InboundSubstreamState::ResponseIdle(substream) } } } diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index e4b5b67144..2227482c8d 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -164,7 +164,7 @@ pub enum RPCResponse { } /// Indicates which response is being terminated by a stream termination response. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub enum ResponseTermination { /// Blocks by range stream termination. BlocksByRange, @@ -175,7 +175,7 @@ pub enum ResponseTermination { /// The structured response containing a result/code indicating success or failure /// and the contents of the response -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum RPCCodedResponse { /// The response is a successful. Success(RPCResponse), @@ -194,7 +194,7 @@ pub enum RPCCodedResponse { } /// The code assigned to an erroneous `RPCResponse`. -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum RPCResponseErrorCode { InvalidRequest, ServerError, @@ -268,14 +268,14 @@ impl RPCCodedResponse { } } -#[derive(Encode, Decode, Debug)] +#[derive(Encode, Decode, Debug, Clone)] pub struct ErrorMessage { /// The UTF-8 encoded Error message string. pub error_message: Vec, } -impl ErrorMessage { - pub fn as_string(&self) -> String { +impl std::string::ToString for ErrorMessage { + fn to_string(&self) -> String { String::from_utf8(self.error_message.clone()).unwrap_or_else(|_| "".into()) } } diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index df87d8f89c..eb21bd3bef 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -4,12 +4,11 @@ //! direct peer-to-peer communication primarily for sending/receiving chain information for //! syncing. 
-use futures::prelude::*; use handler::RPCHandler; -use libp2p::core::ConnectedPoint; +use libp2p::core::{connection::ConnectionId, ConnectedPoint}; use libp2p::swarm::{ - protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, - SubstreamProtocol, + protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, + PollParameters, SubstreamProtocol, }; use libp2p::{Multiaddr, PeerId}; pub use methods::{ @@ -19,8 +18,8 @@ pub use methods::{ pub use protocol::{Protocol, RPCError, RPCProtocol, RPCRequest}; use slog::{debug, o}; use std::marker::PhantomData; +use std::task::{Context, Poll}; use std::time::Duration; -use tokio::io::{AsyncRead, AsyncWrite}; use types::EthSpec; pub(crate) mod codec; @@ -29,7 +28,7 @@ pub mod methods; mod protocol; /// The return type used in the behaviour and the resultant event from the protocols handler. -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum RPCEvent { /// An inbound/outbound request for RPC protocol. The first parameter is a sequential /// id which tracks an awaiting substream for the response. @@ -42,6 +41,14 @@ pub enum RPCEvent { Error(RequestId, Protocol, RPCError), } +/// Messages sent to the user from the RPC protocol. +pub struct RPCMessage { + /// The peer that sent the message. + pub peer_id: PeerId, + /// The message that was sent. + pub event: RPCEvent, +} + impl RPCEvent { pub fn id(&self) -> usize { match *self { @@ -68,21 +75,18 @@ impl std::fmt::Display for RPCEvent { /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. -pub struct RPC { +pub struct RPC { /// Queue of events to processed. events: Vec, RPCMessage>>, - /// Pins the generic substream. - marker: PhantomData, /// Slog logger for RPC behaviour. 
log: slog::Logger, } -impl RPC { +impl RPC { pub fn new(log: slog::Logger) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); RPC { events: Vec::new(), - marker: PhantomData, log, } } @@ -91,19 +95,19 @@ impl RPC { /// /// The peer must be connected for this to succeed. pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.events.push(NetworkBehaviourAction::SendEvent { + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id, + handler: NotifyHandler::Any, event: rpc_event, }); } } -impl NetworkBehaviour for RPC +impl NetworkBehaviour for RPC where - TSubstream: AsyncRead + AsyncWrite, TSpec: EthSpec, { - type ProtocolsHandler = RPCHandler; + type ProtocolsHandler = RPCHandler; type OutEvent = RPCMessage; fn new_handler(&mut self) -> Self::ProtocolsHandler { @@ -121,75 +125,64 @@ where Vec::new() } - fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) { - // TODO: Remove this on proper peer discovery - self.events.push(NetworkBehaviourAction::GenerateEvent( - RPCMessage::PeerConnectedHack(peer_id.clone(), connected_point.clone()), - )); - // if initialised the connection, report this upwards to send the HELLO request - if let ConnectedPoint::Dialer { .. 
} = connected_point { - self.events.push(NetworkBehaviourAction::GenerateEvent( - RPCMessage::PeerDialed(peer_id.clone()), - )); - } - + // Use connection established/closed instead of these currently + fn inject_connected(&mut self, peer_id: &PeerId) { // find the peer's meta-data debug!(self.log, "Requesting new peer's metadata"; "peer_id" => format!("{}",peer_id)); let rpc_event = RPCEvent::Request(RequestId::from(0usize), RPCRequest::MetaData(PhantomData)); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id, + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::Any, event: rpc_event, }); } - fn inject_disconnected(&mut self, peer_id: &PeerId, connected_point: ConnectedPoint) { - // TODO: Remove this on proper peer discovery - self.events.push(NetworkBehaviourAction::GenerateEvent( - RPCMessage::PeerDisconnectedHack(peer_id.clone(), connected_point.clone()), - )); + fn inject_disconnected(&mut self, _peer_id: &PeerId) {} - // inform the rpc handler that the peer has disconnected - self.events.push(NetworkBehaviourAction::GenerateEvent( - RPCMessage::PeerDisconnected(peer_id.clone()), - )); + fn inject_connection_established( + &mut self, + _peer_id: &PeerId, + _: &ConnectionId, + _connected_point: &ConnectedPoint, + ) { } - fn inject_node_event( + fn inject_connection_closed( + &mut self, + _peer_id: &PeerId, + _: &ConnectionId, + _connected_point: &ConnectedPoint, + ) { + } + + fn inject_event( &mut self, source: PeerId, + _: ConnectionId, event: ::OutEvent, ) { // send the event to the user self.events - .push(NetworkBehaviourAction::GenerateEvent(RPCMessage::RPC( - source, event, - ))); + .push(NetworkBehaviourAction::GenerateEvent(RPCMessage { + peer_id: source, + event, + })); } fn poll( &mut self, + _cx: &mut Context, _: &mut impl PollParameters, - ) -> Async< + ) -> Poll< NetworkBehaviourAction< ::InEvent, Self::OutEvent, >, > { if !self.events.is_empty() { - return 
Async::Ready(self.events.remove(0)); + return Poll::Ready(self.events.remove(0)); } - Async::NotReady + Poll::Pending } } - -/// Messages sent to the user from the RPC protocol. -pub enum RPCMessage { - RPC(PeerId, RPCEvent), - PeerDialed(PeerId), - PeerDisconnected(PeerId), - // TODO: This is a hack to give access to connections to peer manager. Remove this once - // behaviour is re-written - PeerConnectedHack(PeerId, ConnectedPoint), - PeerDisconnectedHack(PeerId, ConnectedPoint), -} diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index d0b313bcf6..808f695fa8 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -10,17 +10,19 @@ use crate::rpc::{ }, methods::ResponseTermination, }; -use futures::future::*; -use futures::{future, sink, stream, Sink, Stream}; -use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo}; +use futures::future::Ready; +use futures::prelude::*; +use futures::prelude::{AsyncRead, AsyncWrite}; +use libp2p::core::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo}; use std::io; use std::marker::PhantomData; +use std::pin::Pin; use std::time::Duration; -use tokio::codec::Framed; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio::timer::timeout; -use tokio::util::FutureExt; use tokio_io_timeout::TimeoutStream; +use tokio_util::{ + codec::Framed, + compat::{Compat, FuturesAsyncReadCompatExt}, +}; use types::EthSpec; /// The maximum bytes that can be sent across the RPC. 
@@ -171,45 +173,28 @@ impl ProtocolName for ProtocolId { pub type InboundOutput = (RPCRequest, InboundFramed); pub type InboundFramed = - Framed>, InboundCodec>; - -// Auxiliary types - -// The type of the socket timeout in the `InboundUpgrade` type `Future` -type TTimeout = - timeout::Timeout>>; -// The type of the socket timeout error in the `InboundUpgrade` type `Future` -type TTimeoutErr = timeout::Error<(RPCError, InboundFramed)>; -// `TimeoutErr` to `RPCError` mapping function -type FnMapErr = fn(TTimeoutErr) -> RPCError; - + Framed>, InboundCodec>; type FnAndThen = fn( - (Option>, InboundFramed), -) -> FutureResult, RPCError>; + ( + Option, RPCError>>, + InboundFramed, + ), +) -> Ready, RPCError>>; +type FnMapErr = fn(tokio::time::Elapsed) -> RPCError; impl InboundUpgrade for RPCProtocol where - TSocket: AsyncRead + AsyncWrite, + TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static, TSpec: EthSpec, { type Output = InboundOutput; type Error = RPCError; + type Future = Pin> + Send>>; - type Future = future::Either< - FutureResult, RPCError>, - future::AndThen< - future::MapErr, FnMapErr>, - FutureResult, RPCError>, - FnAndThen, - >, - >; - - fn upgrade_inbound( - self, - socket: upgrade::Negotiated, - protocol: ProtocolId, - ) -> Self::Future { + fn upgrade_inbound(self, socket: TSocket, protocol: ProtocolId) -> Self::Future { let protocol_name = protocol.message_name; + // convert the socket to tokio compatible socket + let socket = socket.compat(); let codec = match protocol.encoding { Encoding::SSZSnappy => { let ssz_snappy_codec = @@ -226,32 +211,23 @@ where let socket = Framed::new(timed_socket, codec); - match protocol_name { - // `MetaData` requests should be empty, return the stream + // MetaData requests should be empty, return the stream + Box::pin(match protocol_name { Protocol::MetaData => { - future::Either::A(future::ok((RPCRequest::MetaData(PhantomData), socket))) + future::Either::Left(future::ok((RPCRequest::MetaData(PhantomData), 
socket))) } - _ => future::Either::B({ - socket - .into_future() - .timeout(Duration::from_secs(REQUEST_TIMEOUT)) - .map_err({ - |err| { - if err.is_elapsed() { - RPCError::StreamTimeout - } else { - RPCError::InternalError("Stream timer failed") - } - } - } as FnMapErr) + + _ => future::Either::Right( + tokio::time::timeout(Duration::from_secs(REQUEST_TIMEOUT), socket.into_future()) + .map_err(RPCError::from as FnMapErr) .and_then({ |(req, stream)| match req { - Some(request) => future::ok((request, stream)), - None => future::err(RPCError::IncompleteStream), + Some(Ok(request)) => future::ok((request, stream)), + Some(Err(_)) | None => future::err(RPCError::IncompleteStream), } - } as FnAndThen) - }), - } + } as FnAndThen), + ), + }) } } @@ -371,23 +347,20 @@ impl RPCRequest { /* Outbound upgrades */ -pub type OutboundFramed = - Framed, OutboundCodec>; +pub type OutboundFramed = Framed, OutboundCodec>; impl OutboundUpgrade for RPCRequest where - TSpec: EthSpec, - TSocket: AsyncRead + AsyncWrite, + TSpec: EthSpec + Send + 'static, + TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Output = OutboundFramed; type Error = RPCError; - type Future = sink::Send>; + type Future = Pin> + Send>>; - fn upgrade_outbound( - self, - socket: upgrade::Negotiated, - protocol: Self::Info, - ) -> Self::Future { + fn upgrade_outbound(self, socket: TSocket, protocol: Self::Info) -> Self::Future { + // convert to a tokio compatible socket + let socket = socket.compat(); let codec = match protocol.encoding { Encoding::SSZSnappy => { let ssz_snappy_codec = @@ -400,18 +373,22 @@ where OutboundCodec::SSZ(ssz_codec) } }; - Framed::new(socket, codec).send(self) + + let mut socket = Framed::new(socket, codec); + + let future = async { socket.send(self).await.map(|_| socket) }; + Box::pin(future) } } /// Error in RPC Encoding/Decoding. -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum RPCError { /// Error when decoding the raw buffer from ssz. 
// NOTE: in the future a ssz::DecodeError should map to an InvalidData error SSZDecodeError(ssz::DecodeError), /// IO Error. - IoError(io::Error), + IoError(String), /// The peer returned a valid response but the response indicated an error. ErrorResponse(RPCResponseErrorCode), /// Timed out waiting for a response. @@ -434,10 +411,15 @@ impl From for RPCError { RPCError::SSZDecodeError(err) } } +impl From for RPCError { + fn from(_: tokio::time::Elapsed) -> Self { + RPCError::StreamTimeout + } +} impl From for RPCError { fn from(err: io::Error) -> Self { - RPCError::IoError(err) + RPCError::IoError(err.to_string()) } } @@ -463,7 +445,7 @@ impl std::error::Error for RPCError { match *self { // NOTE: this does have a source RPCError::SSZDecodeError(_) => None, - RPCError::IoError(ref err) => Some(err), + RPCError::IoError(_) => None, RPCError::StreamTimeout => None, RPCError::UnsupportedProtocol => None, RPCError::IncompleteStream => None, diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 9b5e4e473c..b3f0b64062 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -2,45 +2,76 @@ use crate::behaviour::{Behaviour, BehaviourEvent}; use crate::discovery::enr; use crate::multiaddr::Protocol; use crate::types::{error, GossipKind}; +use crate::EnrExt; use crate::{NetworkConfig, NetworkGlobals}; use futures::prelude::*; -use futures::Stream; use libp2p::core::{ identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, - nodes::Substream, transport::boxed::Boxed, upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, ConnectedPoint, }; -use libp2p::{core, noise, secio, swarm::NetworkBehaviour, PeerId, Swarm, Transport}; -use slog::{crit, debug, error, info, trace, warn}; +use libp2p::{ + core, noise, secio, + swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, + PeerId, Swarm, Transport, +}; +use slog::{crit, debug, error, info, o, trace, warn}; use std::fs::File; use 
std::io::prelude::*; use std::io::{Error, ErrorKind}; +use std::pin::Pin; use std::sync::Arc; use std::time::Duration; -use tokio::timer::DelayQueue; +use tokio::time::DelayQueue; use types::{EnrForkId, EthSpec}; -type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>; -type Libp2pBehaviour = Behaviour, TSpec>; - pub const NETWORK_KEY_FILENAME: &str = "key"; /// The time in milliseconds to wait before banning a peer. This allows for any Goodbye messages to be /// flushed and protocols to be negotiated. const BAN_PEER_WAIT_TIMEOUT: u64 = 200; +/// The maximum simultaneous libp2p connections per peer. +const MAX_CONNECTIONS_PER_PEER: usize = 1; + +/// The types of events than can be obtained from polling the libp2p service. +/// +/// This is a subset of the events that a libp2p swarm emits. +#[derive(Debug)] +pub enum Libp2pEvent { + /// A behaviour event + Behaviour(BehaviourEvent), + /// A new listening address has been established. + NewListenAddr(Multiaddr), + /// A peer has established at least one connection. + PeerConnected { + /// The peer that connected. + peer_id: PeerId, + /// Whether the peer was a dialer or listener. + endpoint: ConnectedPoint, + }, + /// A peer no longer has any connections, i.e is disconnected. + PeerDisconnected { + /// The peer the disconnected. + peer_id: PeerId, + /// Whether the peer was a dialer or a listener. + endpoint: ConnectedPoint, + }, +} /// The configuration and state of the libp2p components for the beacon node. pub struct Service { /// The libp2p Swarm handler. //TODO: Make this private - pub swarm: Swarm>, + pub swarm: Swarm>, /// This node's PeerId. pub local_peer_id: PeerId, + /// Used for managing the state of peers. + network_globals: Arc>, + /// A current list of peers to ban after a given timeout. 
peers_to_ban: DelayQueue, @@ -55,8 +86,9 @@ impl Service { pub fn new( config: &NetworkConfig, enr_fork_id: EnrForkId, - log: slog::Logger, + log: &slog::Logger, ) -> error::Result<(Arc>, Self)> { + let log = log.new(o!("service"=> "libp2p")); trace!(log, "Libp2p Service starting"); // initialise the node's ID @@ -84,10 +116,22 @@ impl Service { let mut swarm = { // Set up the transport - tcp/ws with noise/secio and mplex/yamux - let transport = build_transport(local_keypair.clone()); + let transport = build_transport(local_keypair.clone()) + .map_err(|e| format!("Failed to build transport: {:?}", e))?; // Lighthouse network behaviour let behaviour = Behaviour::new(&local_keypair, config, network_globals.clone(), &log)?; - Swarm::new(transport, behaviour, local_peer_id.clone()) + + // use the executor for libp2p + struct Executor(tokio::runtime::Handle); + impl libp2p::core::Executor for Executor { + fn exec(&self, f: Pin + Send>>) { + self.0.spawn(f); + } + } + SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) + .peer_connection_limit(MAX_CONNECTIONS_PER_PEER) + .executor(Box::new(Executor(tokio::runtime::Handle::current()))) + .build() }; // listen on the specified address @@ -131,19 +175,24 @@ impl Service { } // attempt to connect to any specified boot-nodes - for bootnode_enr in &config.boot_nodes { + let mut boot_nodes = config.boot_nodes.clone(); + boot_nodes.dedup(); + + for bootnode_enr in boot_nodes { for multiaddr in &bootnode_enr.multiaddr() { // ignore udp multiaddr if it exists let components = multiaddr.iter().collect::>(); if let Protocol::Udp(_) = components[1] { continue; } - // inform the peer manager that we are currently dialing this peer - network_globals + + if !network_globals .peers - .write() - .dialing_peer(&bootnode_enr.peer_id()); - dial_addr(multiaddr); + .read() + .is_connected_or_dialing(&bootnode_enr.peer_id()) + { + dial_addr(multiaddr); + } } } @@ -160,6 +209,7 @@ impl Service { let service = Service { local_peer_id, 
swarm, + network_globals: network_globals.clone(), peers_to_ban: DelayQueue::new(), peer_ban_timeout: DelayQueue::new(), log, @@ -177,76 +227,132 @@ impl Service { ); self.peer_ban_timeout.insert(peer_id, timeout); } -} -impl Stream for Service { - type Item = BehaviourEvent; - type Error = error::Error; - - fn poll(&mut self) -> Poll, Self::Error> { + pub async fn next_event(&mut self) -> Libp2pEvent { loop { - match self.swarm.poll() { - Ok(Async::Ready(Some(event))) => { - return Ok(Async::Ready(Some(event))); - } - Ok(Async::Ready(None)) => unreachable!("Swarm stream shouldn't end"), - Ok(Async::NotReady) => break, - _ => break, - } - } + tokio::select! { + event = self.swarm.next_event() => { + match event { + SwarmEvent::Behaviour(behaviour) => { + return Libp2pEvent::Behaviour(behaviour) + } + SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + num_established, + } => { + debug!(self.log, "Connection established"; "peer_id"=> peer_id.to_string(), "connections" => num_established.get()); + // if this is the first connection inform the network layer a new connection + // has been established and update the db + if num_established.get() == 1 { + // update the peerdb + match endpoint { + ConnectedPoint::Listener { .. } => { + self.swarm.peer_manager().connect_ingoing(&peer_id); + } + ConnectedPoint::Dialer { .. 
} => self + .network_globals + .peers + .write() + .connect_outgoing(&peer_id), + } + return Libp2pEvent::PeerConnected { peer_id, endpoint }; + } + } + SwarmEvent::ConnectionClosed { + peer_id, + cause, + endpoint, + num_established, + } => { + debug!(self.log, "Connection closed"; "peer_id"=> peer_id.to_string(), "cause" => cause.to_string(), "connections" => num_established); + if num_established == 0 { + // update the peer_db + self.swarm.peer_manager().notify_disconnect(&peer_id); + // the peer has disconnected + return Libp2pEvent::PeerDisconnected { + peer_id, + endpoint, + }; + } + } + SwarmEvent::NewListenAddr(multiaddr) => { + return Libp2pEvent::NewListenAddr(multiaddr) + } - // check if peers need to be banned - loop { - match self.peers_to_ban.poll() { - Ok(Async::Ready(Some(peer_id))) => { - let peer_id = peer_id.into_inner(); - Swarm::ban_peer_id(&mut self.swarm, peer_id.clone()); - // TODO: Correctly notify protocols of the disconnect - // TODO: Also remove peer from the DHT: https://github.com/sigp/lighthouse/issues/629 - let dummy_connected_point = ConnectedPoint::Dialer { - address: "/ip4/0.0.0.0" - .parse::() - .expect("valid multiaddr"), - }; - self.swarm - .inject_disconnected(&peer_id, dummy_connected_point); - // inform the behaviour that the peer has been banned - self.swarm.peer_banned(peer_id); - } - Ok(Async::NotReady) | Ok(Async::Ready(None)) => break, - Err(e) => { - warn!(self.log, "Peer banning queue failed"; "error" => format!("{:?}", e)); + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + } => { + debug!(self.log, "Incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string()) + } + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + } => { + debug!(self.log, "Failed incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string(), "error" => error.to_string()) + } + SwarmEvent::BannedPeer { + peer_id, + endpoint: _, + 
} => { + debug!(self.log, "Attempted to dial a banned peer"; "peer_id" => peer_id.to_string()) + } + SwarmEvent::UnreachableAddr { + peer_id, + address, + error, + attempts_remaining, + } => { + debug!(self.log, "Failed to dial address"; "peer_id" => peer_id.to_string(), "address" => address.to_string(), "error" => error.to_string(), "attempts_remaining" => attempts_remaining); + self.swarm.peer_manager().notify_disconnect(&peer_id); + } + SwarmEvent::UnknownPeerUnreachableAddr { address, error } => { + debug!(self.log, "Peer not known at dialed address"; "address" => address.to_string(), "error" => error.to_string()); + } + SwarmEvent::ExpiredListenAddr(multiaddr) => { + debug!(self.log, "Listen address expired"; "multiaddr" => multiaddr.to_string()) + } + SwarmEvent::ListenerClosed { addresses, reason } => { + debug!(self.log, "Listener closed"; "addresses" => format!("{:?}", addresses), "reason" => format!("{:?}", reason)) + } + SwarmEvent::ListenerError { error } => { + debug!(self.log, "Listener error"; "error" => format!("{:?}", error.to_string())) + } + SwarmEvent::Dialing(peer_id) => { + debug!(self.log, "Dialing peer"; "peer" => peer_id.to_string()); + self.swarm.peer_manager().dialing_peer(&peer_id); + } } } - } - - // un-ban peer if it's timeout has expired - loop { - match self.peer_ban_timeout.poll() { - Ok(Async::Ready(Some(peer_id))) => { - let peer_id = peer_id.into_inner(); - debug!(self.log, "Peer has been unbanned"; "peer" => format!("{:?}", peer_id)); - self.swarm.peer_unbanned(&peer_id); - Swarm::unban_peer_id(&mut self.swarm, peer_id); - } - Ok(Async::NotReady) | Ok(Async::Ready(None)) => break, - Err(e) => { - warn!(self.log, "Peer banning timeout queue failed"; "error" => format!("{:?}", e)); - } + Some(Ok(peer_to_ban)) = self.peers_to_ban.next() => { + let peer_id = peer_to_ban.into_inner(); + Swarm::ban_peer_id(&mut self.swarm, peer_id.clone()); + // TODO: Correctly notify protocols of the disconnect + // TODO: Also remove peer from the 
DHT: https://github.com/sigp/lighthouse/issues/629 + self.swarm.inject_disconnected(&peer_id); + // inform the behaviour that the peer has been banned + self.swarm.peer_banned(peer_id); + } + Some(Ok(peer_to_unban)) = self.peer_ban_timeout.next() => { + debug!(self.log, "Peer has been unbanned"; "peer" => format!("{:?}", peer_to_unban)); + let unban_peer = peer_to_unban.into_inner(); + self.swarm.peer_unbanned(&unban_peer); + Swarm::unban_peer_id(&mut self.swarm, unban_peer); + } } } - - Ok(Async::NotReady) } } /// The implementation supports TCP/IP, WebSockets over TCP/IP, noise/secio as the encryption layer, and /// mplex or yamux as the multiplexing layer. -fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox), Error> { - // TODO: The Wire protocol currently doesn't specify encryption and this will need to be customised - // in the future. - let transport = libp2p::tcp::TcpConfig::new().nodelay(true); - let transport = libp2p::dns::DnsConfig::new(transport); +fn build_transport( + local_private_key: Keypair, +) -> Result, Error> { + let transport = libp2p_tcp::TokioTcpConfig::new().nodelay(true); + let transport = libp2p::dns::DnsConfig::new(transport)?; #[cfg(feature = "libp2p-websocket")] let transport = { let trans_clone = transport.clone(); @@ -260,7 +366,7 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox) secio::SecioConfig::new(local_private_key), ); core::upgrade::apply(stream, upgrade, endpoint, core::upgrade::Version::V1).and_then( - move |out| { + |out| async move { match out { // Noise was negotiated core::either::EitherOutput::First((remote_id, out)) => { @@ -288,12 +394,12 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox) .map_outbound(move |muxer| (peer_id2, muxer)); core::upgrade::apply(stream, upgrade, endpoint, core::upgrade::Version::V1) - .map(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer))) + .map_ok(|(id, muxer)| (id, 
core::muxing::StreamMuxerBox::new(muxer))) }) .timeout(Duration::from_secs(20)) .map_err(|err| Error::new(ErrorKind::Other, err)) .boxed(); - transport + Ok(transport) } fn keypair_from_hex(hex_bytes: &str) -> error::Result { diff --git a/beacon_node/eth2-libp2p/src/types/globals.rs b/beacon_node/eth2-libp2p/src/types/globals.rs index 3912bf86c3..60ae12a8c5 100644 --- a/beacon_node/eth2-libp2p/src/types/globals.rs +++ b/beacon_node/eth2-libp2p/src/types/globals.rs @@ -2,6 +2,7 @@ use crate::peer_manager::PeerDB; use crate::rpc::methods::MetaData; use crate::types::SyncState; +use crate::EnrExt; use crate::{discovery::enr::Eth2Enr, Enr, GossipTopic, Multiaddr, PeerId}; use parking_lot::RwLock; use std::collections::HashSet; diff --git a/beacon_node/eth2-libp2p/src/types/mod.rs b/beacon_node/eth2-libp2p/src/types/mod.rs index 94d24bad6e..8f9b07fd33 100644 --- a/beacon_node/eth2-libp2p/src/types/mod.rs +++ b/beacon_node/eth2-libp2p/src/types/mod.rs @@ -9,7 +9,7 @@ use types::{BitVector, EthSpec}; #[allow(type_alias_bounds)] pub type EnrBitfield = BitVector; -pub type Enr = libp2p::discv5::enr::Enr; +pub type Enr = discv5::enr::Enr; pub use globals::NetworkGlobals; pub use pubsub::PubsubMessage; diff --git a/beacon_node/eth2-libp2p/tests/common/mod.rs b/beacon_node/eth2-libp2p/tests/common/mod.rs index 45168c2e6d..99857cb1a3 100644 --- a/beacon_node/eth2-libp2p/tests/common/mod.rs +++ b/beacon_node/eth2-libp2p/tests/common/mod.rs @@ -1,8 +1,9 @@ #![cfg(test)] use eth2_libp2p::Enr; +use eth2_libp2p::EnrExt; use eth2_libp2p::Multiaddr; -use eth2_libp2p::NetworkConfig; use eth2_libp2p::Service as LibP2PService; +use eth2_libp2p::{Libp2pEvent, NetworkConfig}; use slog::{debug, error, o, Drain}; use std::net::{TcpListener, UdpSocket}; use std::time::Duration; @@ -85,7 +86,7 @@ pub fn build_libp2p_instance( let port = unused_port("tcp").unwrap(); let config = build_config(port, boot_nodes, secret_key); // launch libp2p service - LibP2PService::new(&config, 
EnrForkId::default(), log.clone()) + LibP2PService::new(&config, EnrForkId::default(), &log) .expect("should build libp2p instance") .1 } @@ -93,7 +94,6 @@ pub fn build_libp2p_instance( #[allow(dead_code)] pub fn get_enr(node: &LibP2PService) -> Enr { let enr = node.swarm.discovery().local_enr().clone(); - dbg!(enr.multiaddr()); enr } @@ -121,19 +121,46 @@ pub fn build_full_mesh(log: slog::Logger, n: usize) -> Vec> { nodes } -// Constructs a pair of nodes with seperate loggers. The sender dials the receiver. +// Constructs a pair of nodes with separate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. #[allow(dead_code)] -pub fn build_node_pair(log: &slog::Logger) -> (LibP2PService, LibP2PService) { +pub async fn build_node_pair(log: &slog::Logger) -> (LibP2PService, LibP2PService) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); let mut sender = build_libp2p_instance(vec![], None, sender_log); - let receiver = build_libp2p_instance(vec![], None, receiver_log); + let mut receiver = build_libp2p_instance(vec![], None, receiver_log); let receiver_multiaddr = receiver.swarm.discovery().local_enr().clone().multiaddr()[1].clone(); - match libp2p::Swarm::dial_addr(&mut sender.swarm, receiver_multiaddr) { - Ok(()) => debug!(log, "Sender dialed receiver"), + + // let the two nodes set up listeners + let sender_fut = async { + loop { + if let Libp2pEvent::NewListenAddr(_) = sender.next_event().await { + return; + } + } + }; + let receiver_fut = async { + loop { + if let Libp2pEvent::NewListenAddr(_) = receiver.next_event().await { + return; + } + } + }; + + let joined = futures::future::join(sender_fut, receiver_fut); + + // wait for either both nodes to listen or a timeout + tokio::select! 
{ + _ = tokio::time::delay_for(Duration::from_millis(500)) => {} + _ = joined => {} + } + + match libp2p::Swarm::dial_addr(&mut sender.swarm, receiver_multiaddr.clone()) { + Ok(()) => { + debug!(log, "Sender dialed receiver"; "address" => format!("{:?}", receiver_multiaddr)) + } Err(_) => error!(log, "Dialing failed"), }; (sender, receiver) diff --git a/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs b/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs index aac5387444..3fcd9b7014 100644 --- a/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs +++ b/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs @@ -2,7 +2,6 @@ use crate::types::GossipEncoding; use ::types::{BeaconBlock, EthSpec, MinimalEthSpec, Signature, SignedBeaconBlock}; use eth2_libp2p::*; -use futures::prelude::*; use slog::{debug, Level}; type E = MinimalEthSpec; @@ -19,8 +18,8 @@ mod common; // // node1 <-> node2 <-> node3 ..... <-> node(n-1) <-> node(n) -#[test] -fn test_gossipsub_forward() { +#[tokio::test] +async fn test_gossipsub_forward() { // set up the logging. 
The level and enabled or not let log = common::build_log(Level::Info, false); @@ -41,55 +40,64 @@ fn test_gossipsub_forward() { .clone() .into(); let mut subscribed_count = 0; - tokio::run(futures::future::poll_fn(move || -> Result<_, ()> { + let fut = async move { for node in nodes.iter_mut() { loop { - match node.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::PubsubMessage { - topics, - message, - source, - id, - })) => { - assert_eq!(topics.len(), 1); - // Assert topic is the published topic - assert_eq!( - topics.first().unwrap(), - &TopicHash::from_raw(publishing_topic.clone()) - ); - // Assert message received is the correct one - assert_eq!(message, pubsub_message.clone()); - received_count += 1; - // Since `propagate_message` is false, need to propagate manually - node.swarm.propagate_message(&source, id); - // Test should succeed if all nodes except the publisher receive the message - if received_count == num_nodes - 1 { - debug!(log.clone(), "Received message at {} nodes", num_nodes - 1); - return Ok(Async::Ready(())); - } - } - Async::Ready(Some(BehaviourEvent::PeerSubscribed(_, topic))) => { - // Publish on beacon block topic - if topic == TopicHash::from_raw(publishing_topic.clone()) { - subscribed_count += 1; - // Every node except the corner nodes are connected to 2 nodes. 
- if subscribed_count == (num_nodes * 2) - 2 { - node.swarm.publish(vec![pubsub_message.clone()]); + match node.next_event().await { + Libp2pEvent::Behaviour(b) => match b { + BehaviourEvent::PubsubMessage { + topics, + message, + source, + id, + } => { + assert_eq!(topics.len(), 1); + // Assert topic is the published topic + assert_eq!( + topics.first().unwrap(), + &TopicHash::from_raw(publishing_topic.clone()) + ); + // Assert message received is the correct one + assert_eq!(message, pubsub_message.clone()); + received_count += 1; + // Since `propagate_message` is false, need to propagate manually + node.swarm.propagate_message(&source, id); + // Test should succeed if all nodes except the publisher receive the message + if received_count == num_nodes - 1 { + debug!(log.clone(), "Received message at {} nodes", num_nodes - 1); + return; } } - } + BehaviourEvent::PeerSubscribed(_, topic) => { + // Publish on beacon block topic + if topic == TopicHash::from_raw(publishing_topic.clone()) { + subscribed_count += 1; + // Every node except the corner nodes are connected to 2 nodes. + if subscribed_count == (num_nodes * 2) - 2 { + node.swarm.publish(vec![pubsub_message.clone()]); + } + } + } + _ => break, + }, _ => break, } } } - Ok(Async::NotReady) - })) + }; + + tokio::select! { + _ = fut => {} + _ = tokio::time::delay_for(tokio::time::Duration::from_millis(800)) => { + panic!("Future timed out"); + } + } } // Test publishing of a message with a full mesh for the topic // Not very useful but this is the bare minimum functionality. -#[test] -fn test_gossipsub_full_mesh_publish() { +#[tokio::test] +async fn test_gossipsub_full_mesh_publish() { // set up the logging. 
The level and enabled or not let log = common::build_log(Level::Debug, false); @@ -115,11 +123,13 @@ fn test_gossipsub_full_mesh_publish() { .into(); let mut subscribed_count = 0; let mut received_count = 0; - tokio::run(futures::future::poll_fn(move || -> Result<_, ()> { + let fut = async move { for node in nodes.iter_mut() { - while let Async::Ready(Some(BehaviourEvent::PubsubMessage { - topics, message, .. - })) = node.poll().unwrap() + while let Libp2pEvent::Behaviour(BehaviourEvent::PubsubMessage { + topics, + message, + .. + }) = node.next_event().await { assert_eq!(topics.len(), 1); // Assert topic is the published topic @@ -131,12 +141,12 @@ fn test_gossipsub_full_mesh_publish() { assert_eq!(message, pubsub_message.clone()); received_count += 1; if received_count == num_nodes - 1 { - return Ok(Async::Ready(())); + return; } } } - while let Async::Ready(Some(BehaviourEvent::PeerSubscribed(_, topic))) = - publishing_node.poll().unwrap() + while let Libp2pEvent::Behaviour(BehaviourEvent::PeerSubscribed(_, topic)) = + publishing_node.next_event().await { // Publish on beacon block topic if topic == TopicHash::from_raw(publishing_topic.clone()) { @@ -146,6 +156,11 @@ fn test_gossipsub_full_mesh_publish() { } } } - Ok(Async::NotReady) - })) + }; + tokio::select! 
{ + _ = fut => {} + _ = tokio::time::delay_for(tokio::time::Duration::from_millis(800)) => { + panic!("Future timed out"); + } + } } diff --git a/beacon_node/eth2-libp2p/tests/noise.rs b/beacon_node/eth2-libp2p/tests/noise.rs index 236150b632..ba12c7346d 100644 --- a/beacon_node/eth2-libp2p/tests/noise.rs +++ b/beacon_node/eth2-libp2p/tests/noise.rs @@ -1,39 +1,39 @@ #![cfg(test)] -use crate::behaviour::{Behaviour, BehaviourEvent}; +use crate::behaviour::Behaviour; use crate::multiaddr::Protocol; use ::types::{EnrForkId, MinimalEthSpec}; -use eth2_libp2p::discovery::build_enr; +use eth2_libp2p::discovery::{build_enr, CombinedKey, CombinedKeyExt}; use eth2_libp2p::*; use futures::prelude::*; use libp2p::core::identity::Keypair; use libp2p::{ core, - core::{muxing::StreamMuxerBox, nodes::Substream, transport::boxed::Boxed}, - secio, PeerId, Swarm, Transport, + core::{muxing::StreamMuxerBox, transport::boxed::Boxed}, + secio, + swarm::{SwarmBuilder, SwarmEvent}, + PeerId, Swarm, Transport, }; use slog::{crit, debug, info, Level}; -use std::convert::TryInto; use std::io::{Error, ErrorKind}; -use std::sync::atomic::{AtomicBool, Ordering::Relaxed}; +use std::pin::Pin; use std::sync::Arc; use std::time::Duration; -use tokio::prelude::*; type TSpec = MinimalEthSpec; mod common; -type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>; -type Libp2pBehaviour = Behaviour, TSpec>; +type Libp2pBehaviour = Behaviour; /// Build and return a eth2_libp2p Swarm with only secio support. 
fn build_secio_swarm( config: &NetworkConfig, log: slog::Logger, -) -> error::Result> { +) -> error::Result> { let local_keypair = Keypair::generate_secp256k1(); let local_peer_id = PeerId::from(local_keypair.public()); - let enr_key: libp2p::discv5::enr::CombinedKey = local_keypair.clone().try_into().unwrap(); + let enr_key = CombinedKey::from_libp2p(&local_keypair).unwrap(); + let enr = build_enr::(&enr_key, config, EnrForkId::default()).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( enr, @@ -47,7 +47,16 @@ fn build_secio_swarm( let transport = build_secio_transport(local_keypair.clone()); // Lighthouse network behaviour let behaviour = Behaviour::new(&local_keypair, config, network_globals.clone(), &log)?; - Swarm::new(transport, behaviour, local_peer_id.clone()) + // requires a tokio runtime + struct Executor(tokio::runtime::Handle); + impl libp2p::core::Executor for Executor { + fn exec(&self, f: Pin + Send>>) { + self.0.spawn(f); + } + } + SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) + .executor(Box::new(Executor(tokio::runtime::Handle::current()))) + .build() }; // listen on the specified address @@ -101,7 +110,7 @@ fn build_secio_swarm( /// Build a simple TCP transport with secio, mplex/yamux. fn build_secio_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox), Error> { - let transport = libp2p::tcp::TcpConfig::new().nodelay(true); + let transport = libp2p_tcp::TokioTcpConfig::new().nodelay(true); transport .upgrade(core::upgrade::Version::V1) .authenticate(secio::SecioConfig::new(local_private_key)) @@ -117,8 +126,8 @@ fn build_secio_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMux } /// Test if the encryption falls back to secio if noise isn't available -#[test] -fn test_secio_noise_fallback() { +#[tokio::test] +async fn test_secio_noise_fallback() { // set up the logging. 
The level and enabled logging or not let log_level = Level::Trace; let enable_logging = false; @@ -127,7 +136,7 @@ fn test_secio_noise_fallback() { let port = common::unused_port("tcp").unwrap(); let noisy_config = common::build_config(port, vec![], None); - let mut noisy_node = Service::new(&noisy_config, EnrForkId::default(), log.clone()) + let mut noisy_node = Service::new(&noisy_config, EnrForkId::default(), &log) .expect("should build a libp2p instance") .1; @@ -142,40 +151,31 @@ fn test_secio_noise_fallback() { let secio_log = log.clone(); - let noisy_future = future::poll_fn(move || -> Poll { + let noisy_future = async { loop { - match noisy_node.poll().unwrap() { - _ => return Ok(Async::NotReady), - } + noisy_node.next_event().await; } - }); + }; - let secio_future = future::poll_fn(move || -> Poll { + let secio_future = async { loop { - match secio_swarm.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { + match secio_swarm.next_event().await { + SwarmEvent::ConnectionEstablished { peer_id, .. } => { // secio node negotiated a secio transport with // the noise compatible node info!(secio_log, "Connected to peer {}", peer_id); - return Ok(Async::Ready(true)); + return; } - _ => return Ok(Async::NotReady), + _ => {} // Ignore all other events } } - }); + }; - // execute the futures and check the result - let test_result = Arc::new(AtomicBool::new(false)); - let error_result = test_result.clone(); - let thread_result = test_result.clone(); - tokio::run( - noisy_future - .select(secio_future) - .timeout(Duration::from_millis(1000)) - .map_err(move |_| error_result.store(false, Relaxed)) - .map(move |result| { - thread_result.store(result.0, Relaxed); - }), - ); - assert!(test_result.load(Relaxed)); + tokio::select! 
{ + _ = noisy_future => {} + _ = secio_future => {} + _ = tokio::time::delay_for(Duration::from_millis(800)) => { + panic!("Future timed out"); + } + } } diff --git a/beacon_node/eth2-libp2p/tests/rpc_tests.rs b/beacon_node/eth2-libp2p/tests/rpc_tests.rs index 6f2a00bbb8..db74e75b85 100644 --- a/beacon_node/eth2-libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2-libp2p/tests/rpc_tests.rs @@ -1,12 +1,10 @@ #![cfg(test)] use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::*; -use eth2_libp2p::{BehaviourEvent, RPCEvent}; -use slog::{warn, Level}; -use std::sync::atomic::{AtomicBool, Ordering::Relaxed}; -use std::sync::{Arc, Mutex}; +use eth2_libp2p::{BehaviourEvent, Libp2pEvent, RPCEvent}; +use slog::{debug, warn, Level}; use std::time::Duration; -use tokio::prelude::*; +use tokio::time::delay_for; use types::{ BeaconBlock, Epoch, EthSpec, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, }; @@ -15,17 +13,17 @@ mod common; type E = MinimalEthSpec; -#[test] +#[tokio::test] // Tests the STATUS RPC message -fn test_status_rpc() { +async fn test_status_rpc() { // set up the logging. 
The level and enabled logging or not - let log_level = Level::Trace; + let log_level = Level::Debug; let enable_logging = false; let log = common::build_log(log_level, enable_logging); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log); + let (mut sender, mut receiver) = common::build_node_pair(&log).await; // Dummy STATUS RPC message let rpc_request = RPCRequest::Status(StatusMessage { @@ -45,92 +43,80 @@ fn test_status_rpc() { head_slot: Slot::new(1), }); - let sender_request = rpc_request.clone(); - let sender_log = log.clone(); - let sender_response = rpc_response.clone(); - // build the sender future - let sender_future = future::poll_fn(move || -> Poll { + let sender_future = async { loop { - match sender.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { + match sender.next_event().await { + Libp2pEvent::PeerConnected { peer_id, .. } => { // Send a STATUS message - warn!(sender_log, "Sending RPC"); + debug!(log, "Sending RPC"); sender .swarm - .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone())); + .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); } - Async::Ready(Some(BehaviourEvent::RPC(_, event))) => match event { + Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { // Should receive the RPC response RPCEvent::Response(id, response @ RPCCodedResponse::Success(_)) => { - if id == 1 { - warn!(sender_log, "Sender Received"); + if id == 10 { + debug!(log, "Sender Received"); let response = { match response { RPCCodedResponse::Success(r) => r, _ => unreachable!(), } }; - assert_eq!(response, sender_response.clone()); - - warn!(sender_log, "Sender Completed"); - return Ok(Async::Ready(true)); + assert_eq!(response, rpc_response.clone()); + debug!(log, "Sender Completed"); + return; } } - e => panic!("Received invalid RPC message {}", e), + _ => {} // Ignore other RPC messages }, - Async::Ready(Some(_)) => (), - Async::Ready(None) | Async::NotReady => 
return Ok(Async::NotReady), - }; - } - }); - - // build the receiver future - let receiver_future = future::poll_fn(move || -> Poll { - loop { - match receiver.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::RPC(peer_id, event))) => match event { - // Should receive sent RPC request - RPCEvent::Request(id, request) => { - if request == rpc_request { - // send the response - warn!(log, "Receiver Received"); - receiver.swarm.send_rpc( - peer_id, - RPCEvent::Response( - id, - RPCCodedResponse::Success(rpc_response.clone()), - ), - ); - } - } - e => panic!("Received invalid RPC message {}", e), - }, - Async::Ready(Some(_)) => (), - Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), + _ => {} } } - }); + }; - // execute the futures and check the result - let test_result = Arc::new(AtomicBool::new(false)); - let error_result = test_result.clone(); - let thread_result = test_result.clone(); - tokio::run( - sender_future - .select(receiver_future) - .timeout(Duration::from_millis(1000)) - .map_err(move |_| error_result.store(false, Relaxed)) - .map(move |result| { - thread_result.store(result.0, Relaxed); - }), - ); - assert!(test_result.load(Relaxed)); + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => { + match event { + // Should receive sent RPC request + RPCEvent::Request(id, request) => { + if request == rpc_request { + // send the response + debug!(log, "Receiver Received"); + receiver.swarm.send_rpc( + peer_id, + RPCEvent::Response( + id, + RPCCodedResponse::Success(rpc_response.clone()), + ), + ); + } + } + _ => {} // Ignore other RPC requests + } + } + _ => {} // Ignore other events + } + } + }; + + tokio::select! 
{ + _ = sender_future => {} + _ = receiver_future => {} + _ = delay_for(Duration::from_millis(800)) => { + panic!("Future timed out"); + } + } } -#[test] +#[tokio::test] // Tests a streamed BlocksByRange RPC Message -fn test_blocks_by_range_chunked_rpc() { +async fn test_blocks_by_range_chunked_rpc() { // set up the logging. The level and enabled logging or not let log_level = Level::Trace; let enable_logging = false; @@ -140,7 +126,7 @@ fn test_blocks_by_range_chunked_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log); + let (mut sender, mut receiver) = common::build_node_pair(&log).await; // BlocksByRange Request let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest { @@ -158,116 +144,100 @@ fn test_blocks_by_range_chunked_rpc() { }; let rpc_response = RPCResponse::BlocksByRange(Box::new(empty_signed)); - let sender_request = rpc_request.clone(); - let sender_log = log.clone(); - let sender_response = rpc_response.clone(); - // keep count of the number of messages received - let messages_received = Arc::new(Mutex::new(0)); + let mut messages_received = 0; // build the sender future - let sender_future = future::poll_fn(move || -> Poll { + let sender_future = async { loop { - match sender.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { - // Send a BlocksByRange request - warn!(sender_log, "Sender sending RPC request"); + match sender.next_event().await { + Libp2pEvent::PeerConnected { peer_id, .. 
} => { + // Send a STATUS message + debug!(log, "Sending RPC"); sender .swarm - .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone())); + .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); } - Async::Ready(Some(BehaviourEvent::RPC(_, event))) => match event { + Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { // Should receive the RPC response RPCEvent::Response(id, response) => { - if id == 1 { - warn!(sender_log, "Sender received a response"); + if id == 10 { + warn!(log, "Sender received a response"); match response { RPCCodedResponse::Success(res) => { - assert_eq!(res, sender_response.clone()); - *messages_received.lock().unwrap() += 1; - warn!(sender_log, "Chunk received"); + assert_eq!(res, rpc_response.clone()); + messages_received += 1; + warn!(log, "Chunk received"); } - RPCCodedResponse::StreamTermination( - ResponseTermination::BlocksByRange, - ) => { + RPCCodedResponse::StreamTermination(_) => { // should be exactly 10 messages before terminating - assert_eq!( - *messages_received.lock().unwrap(), - messages_to_send - ); + assert_eq!(messages_received, messages_to_send); // end the test - return Ok(Async::Ready(true)); + return; } _ => panic!("Invalid RPC received"), } } } - _ => panic!("Received invalid RPC message"), + _ => {} // Ignore other RPC messages }, - Async::Ready(Some(_)) => {} - Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), - }; + _ => {} // Ignore other behaviour events + } } - }); + }; // build the receiver future - let receiver_future = future::poll_fn(move || -> Poll { + let receiver_future = async { loop { - match receiver.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::RPC(peer_id, event))) => match event { - // Should receive the sent RPC request - RPCEvent::Request(id, request) => { - if request == rpc_request { - // send the response - warn!(log, "Receiver got request"); + match receiver.next_event().await { + 
Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => { + match event { + // Should receive sent RPC request + RPCEvent::Request(id, request) => { + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); - for _ in 1..=messages_to_send { + for _ in 1..=messages_to_send { + receiver.swarm.send_rpc( + peer_id.clone(), + RPCEvent::Response( + id, + RPCCodedResponse::Success(rpc_response.clone()), + ), + ); + } + // send the stream termination receiver.swarm.send_rpc( - peer_id.clone(), + peer_id, RPCEvent::Response( id, - RPCCodedResponse::Success(rpc_response.clone()), + RPCCodedResponse::StreamTermination( + ResponseTermination::BlocksByRange, + ), ), ); } - // send the stream termination - receiver.swarm.send_rpc( - peer_id, - RPCEvent::Response( - id, - RPCCodedResponse::StreamTermination( - ResponseTermination::BlocksByRange, - ), - ), - ); } + _ => {} // Ignore other events } - _ => panic!("Received invalid RPC message"), - }, - Async::Ready(Some(_)) => (), - Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), + } + _ => {} // Ignore other events } } - }); + }; - // execute the futures and check the result - let test_result = Arc::new(AtomicBool::new(false)); - let error_result = test_result.clone(); - let thread_result = test_result.clone(); - tokio::run( - sender_future - .select(receiver_future) - .timeout(Duration::from_millis(1000)) - .map_err(move |_| error_result.store(false, Relaxed)) - .map(move |result| { - thread_result.store(result.0, Relaxed); - }), - ); - assert!(test_result.load(Relaxed)); + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = delay_for(Duration::from_millis(800)) => { + panic!("Future timed out"); + } + } } -#[test] +#[tokio::test] // Tests an empty response to a BlocksByRange RPC Message -fn test_blocks_by_range_single_empty_rpc() { +async fn test_blocks_by_range_single_empty_rpc() { // set up the logging. 
The level and enabled logging or not let log_level = Level::Trace; let enable_logging = false; @@ -275,7 +245,7 @@ fn test_blocks_by_range_single_empty_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log); + let (mut sender, mut receiver) = common::build_node_pair(&log).await; // BlocksByRange Request let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest { @@ -293,116 +263,106 @@ fn test_blocks_by_range_single_empty_rpc() { }; let rpc_response = RPCResponse::BlocksByRange(Box::new(empty_signed)); - let sender_request = rpc_request.clone(); - let sender_log = log.clone(); - let sender_response = rpc_response.clone(); + let messages_to_send = 1; // keep count of the number of messages received - let messages_received = Arc::new(Mutex::new(0)); + let mut messages_received = 0; // build the sender future - let sender_future = future::poll_fn(move || -> Poll { + let sender_future = async { loop { - match sender.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { - // Send a BlocksByRange request - warn!(sender_log, "Sender sending RPC request"); + match sender.next_event().await { + Libp2pEvent::PeerConnected { peer_id, .. 
} => { + // Send a STATUS message + debug!(log, "Sending RPC"); sender .swarm - .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone())); + .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); } - Async::Ready(Some(BehaviourEvent::RPC(_, event))) => match event { + Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { // Should receive the RPC response RPCEvent::Response(id, response) => { - if id == 1 { - warn!(sender_log, "Sender received a response"); + if id == 10 { + warn!(log, "Sender received a response"); match response { RPCCodedResponse::Success(res) => { - assert_eq!(res, sender_response.clone()); - *messages_received.lock().unwrap() += 1; - warn!(sender_log, "Chunk received"); + assert_eq!(res, rpc_response.clone()); + messages_received += 1; + warn!(log, "Chunk received"); } - RPCCodedResponse::StreamTermination( - ResponseTermination::BlocksByRange, - ) => { - // should be exactly 1 messages before terminating - assert_eq!(*messages_received.lock().unwrap(), 1); + RPCCodedResponse::StreamTermination(_) => { + // should be exactly 10 messages before terminating + assert_eq!(messages_received, messages_to_send); // end the test - return Ok(Async::Ready(true)); + return; } _ => panic!("Invalid RPC received"), } } } - m => panic!("Received invalid RPC message: {}", m), + _ => {} // Ignore other RPC messages }, - Async::Ready(Some(_)) => {} - Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), - }; - } - }); - - // build the receiver future - let receiver_future = future::poll_fn(move || -> Poll { - loop { - match receiver.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::RPC(peer_id, event))) => match event { - // Should receive the sent RPC request - RPCEvent::Request(id, request) => { - if request == rpc_request { - // send the response - warn!(log, "Receiver got request"); - - receiver.swarm.send_rpc( - peer_id.clone(), - RPCEvent::Response( - id, - RPCCodedResponse::Success(rpc_response.clone()), 
- ), - ); - // send the stream termination - receiver.swarm.send_rpc( - peer_id, - RPCEvent::Response( - id, - RPCCodedResponse::StreamTermination( - ResponseTermination::BlocksByRange, - ), - ), - ); - } - } - _ => panic!("Received invalid RPC message"), - }, - Async::Ready(Some(_)) => (), - Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), + _ => {} // Ignore other behaviour events } } - }); + }; - // execute the futures and check the result - let test_result = Arc::new(AtomicBool::new(false)); - let error_result = test_result.clone(); - let thread_result = test_result.clone(); - tokio::run( - sender_future - .select(receiver_future) - .timeout(Duration::from_millis(1000)) - .map_err(move |_| error_result.store(false, Relaxed)) - .map(move |result| { - thread_result.store(result.0, Relaxed); - }), - ); - assert!(test_result.load(Relaxed)); + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => { + match event { + // Should receive sent RPC request + RPCEvent::Request(id, request) => { + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); + + for _ in 1..=messages_to_send { + receiver.swarm.send_rpc( + peer_id.clone(), + RPCEvent::Response( + id, + RPCCodedResponse::Success(rpc_response.clone()), + ), + ); + } + // send the stream termination + receiver.swarm.send_rpc( + peer_id, + RPCEvent::Response( + id, + RPCCodedResponse::StreamTermination( + ResponseTermination::BlocksByRange, + ), + ), + ); + } + } + _ => {} // Ignore other events + } + } + _ => {} // Ignore other events + } + } + }; + tokio::select! 
{ + _ = sender_future => {} + _ = receiver_future => {} + _ = delay_for(Duration::from_millis(800)) => { + panic!("Future timed out"); + } + } } -#[test] +#[tokio::test] // Tests a streamed, chunked BlocksByRoot RPC Message // The size of the reponse is a full `BeaconBlock` // which is greater than the Snappy frame size. Hence, this test // serves to test the snappy framing format as well. -fn test_blocks_by_root_chunked_rpc() { +async fn test_blocks_by_root_chunked_rpc() { // set up the logging. The level and enabled logging or not - let log_level = Level::Trace; + let log_level = Level::Debug; let enable_logging = false; let messages_to_send = 3; @@ -411,7 +371,7 @@ fn test_blocks_by_root_chunked_rpc() { let spec = E::default_spec(); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log); + let (mut sender, mut receiver) = common::build_node_pair(&log).await; // BlocksByRoot Request let rpc_request = RPCRequest::BlocksByRoot(BlocksByRootRequest { @@ -426,112 +386,101 @@ fn test_blocks_by_root_chunked_rpc() { }; let rpc_response = RPCResponse::BlocksByRoot(Box::new(signed_full_block)); - let sender_request = rpc_request.clone(); - let sender_log = log.clone(); - let sender_response = rpc_response.clone(); - // keep count of the number of messages received - let messages_received = Arc::new(Mutex::new(0)); + let mut messages_received = 0; // build the sender future - let sender_future = future::poll_fn(move || -> Poll { + let sender_future = async { loop { - match sender.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { - // Send a BlocksByRoot request - warn!(sender_log, "Sender sending RPC request"); + match sender.next_event().await { + Libp2pEvent::PeerConnected { peer_id, .. 
} => { + // Send a STATUS message + debug!(log, "Sending RPC"); sender .swarm - .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone())); + .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); } - Async::Ready(Some(BehaviourEvent::RPC(_, event))) => match event { + Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { // Should receive the RPC response RPCEvent::Response(id, response) => { - warn!(sender_log, "Sender received a response"); - assert_eq!(id, 1); - match response { - RPCCodedResponse::Success(res) => { - assert_eq!(res, sender_response.clone()); - *messages_received.lock().unwrap() += 1; - warn!(sender_log, "Chunk received"); + if id == 10 { + debug!(log, "Sender received a response"); + match response { + RPCCodedResponse::Success(res) => { + assert_eq!(res, rpc_response.clone()); + messages_received += 1; + debug!(log, "Chunk received"); + } + RPCCodedResponse::StreamTermination(_) => { + // should be exactly messages_to_send + assert_eq!(messages_received, messages_to_send); + // end the test + return; + } + _ => {} // Ignore other RPC messages } - RPCCodedResponse::StreamTermination( - ResponseTermination::BlocksByRoot, - ) => { - // should be exactly 10 messages before terminating - assert_eq!(*messages_received.lock().unwrap(), messages_to_send); - // end the test - return Ok(Async::Ready(true)); - } - m => panic!("Invalid RPC received: {}", m), } } - m => panic!("Received invalid RPC message: {}", m), + _ => {} // Ignore other RPC messages }, - Async::Ready(Some(_)) => {} - Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), - }; - } - }); - - // build the receiver future - let receiver_future = future::poll_fn(move || -> Poll { - loop { - match receiver.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::RPC(peer_id, event))) => match event { - // Should receive the sent RPC request - RPCEvent::Request(id, request) => { - if request == rpc_request { - // send the response - warn!(log, 
"Receiver got request"); - - for _ in 1..=messages_to_send { - receiver.swarm.send_rpc( - peer_id.clone(), - RPCEvent::Response( - id, - RPCCodedResponse::Success(rpc_response.clone()), - ), - ); - } - // send the stream termination - receiver.swarm.send_rpc( - peer_id, - RPCEvent::Response( - id, - RPCCodedResponse::StreamTermination( - ResponseTermination::BlocksByRange, - ), - ), - ); - } - } - _ => panic!("Received invalid RPC message"), - }, - Async::Ready(Some(_)) => (), - Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), + _ => {} // Ignore other behaviour events } } - }); + }; - // execute the futures and check the result - let test_result = Arc::new(AtomicBool::new(false)); - let error_result = test_result.clone(); - let thread_result = test_result.clone(); - tokio::run( - sender_future - .select(receiver_future) - .timeout(Duration::from_millis(1000)) - .map_err(move |_| error_result.store(false, Relaxed)) - .map(move |result| { - thread_result.store(result.0, Relaxed); - }), - ); - assert!(test_result.load(Relaxed)); + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => { + match event { + // Should receive sent RPC request + RPCEvent::Request(id, request) => { + if request == rpc_request { + // send the response + debug!(log, "Receiver got request"); + + for _ in 1..=messages_to_send { + receiver.swarm.send_rpc( + peer_id.clone(), + RPCEvent::Response( + id, + RPCCodedResponse::Success(rpc_response.clone()), + ), + ); + debug!(log, "Sending message"); + } + // send the stream termination + receiver.swarm.send_rpc( + peer_id, + RPCEvent::Response( + id, + RPCCodedResponse::StreamTermination( + ResponseTermination::BlocksByRange, + ), + ), + ); + debug!(log, "Send stream term"); + } + } + _ => {} // Ignore other events + } + } + _ => {} // Ignore other events + } + } + }; + tokio::select! 
{ + _ = sender_future => {} + _ = receiver_future => {} + _ = delay_for(Duration::from_millis(1000)) => { + panic!("Future timed out"); + } + } } -#[test] +#[tokio::test] // Tests a Goodbye RPC message -fn test_goodbye_rpc() { +async fn test_goodbye_rpc() { // set up the logging. The level and enabled logging or not let log_level = Level::Trace; let enable_logging = false; @@ -539,65 +488,54 @@ fn test_goodbye_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log); + let (mut sender, mut receiver) = common::build_node_pair(&log).await; // Goodbye Request let rpc_request = RPCRequest::Goodbye(GoodbyeReason::ClientShutdown); - let sender_request = rpc_request.clone(); - let sender_log = log.clone(); - // build the sender future - let sender_future = future::poll_fn(move || -> Poll { + let sender_future = async { loop { - match sender.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { - // Send a Goodbye request - warn!(sender_log, "Sender sending RPC request"); + match sender.next_event().await { + Libp2pEvent::PeerConnected { peer_id, .. } => { + // Send a STATUS message + debug!(log, "Sending RPC"); sender .swarm - .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone())); + .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); } - Async::Ready(Some(_)) => {} - Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), - }; - } - }); - - // build the receiver future - let receiver_future = future::poll_fn(move || -> Poll { - loop { - match receiver.poll().unwrap() { - Async::Ready(Some(BehaviourEvent::RPC(_, event))) => match event { - // Should receive the sent RPC request - RPCEvent::Request(id, request) => { - if request == rpc_request { - assert_eq!(id, 0); - assert_eq!(rpc_request.clone(), request); - // receives the goodbye. 
Nothing left to do - return Ok(Async::Ready(true)); - } - } - _ => panic!("Received invalid RPC message"), - }, - Async::Ready(Some(_)) => (), - Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), + _ => {} // Ignore other RPC messages } } - }); + }; - // execute the futures and check the result - let test_result = Arc::new(AtomicBool::new(false)); - let error_result = test_result.clone(); - let thread_result = test_result.clone(); - tokio::run( - sender_future - .select(receiver_future) - .timeout(Duration::from_millis(1000)) - .map_err(move |_| error_result.store(false, Relaxed)) - .map(move |result| { - thread_result.store(result.0, Relaxed); - }), - ); - assert!(test_result.load(Relaxed)); + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::RPC(_peer_id, event)) => { + match event { + // Should receive sent RPC request + RPCEvent::Request(id, request) => { + if request == rpc_request { + assert_eq!(id, 0); + assert_eq!(rpc_request.clone(), request); // receives the goodbye. Nothing left to do + return; + } + } + _ => {} // Ignore other events + } + } + _ => {} // Ignore other events + } + } + }; + + tokio::select! 
{ + _ = sender_future => {} + _ = receiver_future => {} + _ = delay_for(Duration::from_millis(1000)) => { + panic!("Future timed out"); + } + } } diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index a5723ee6e1..d23804454b 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -6,23 +6,22 @@ edition = "2018" [dev-dependencies] eth1_test_rig = { path = "../../tests/eth1_test_rig" } -futures = "0.1.25" [dependencies] -futures = "0.1.25" +futures = "0.3.5" types = { path = "../../eth2/types"} environment = { path = "../../lighthouse/environment"} eth1 = { path = "../eth1"} -rayon = "1.0" +rayon = "1.3.0" state_processing = { path = "../../eth2/state_processing" } merkle_proof = { path = "../../eth2/utils/merkle_proof" } -eth2_ssz = "0.1" -eth2_hashing = { path = "../../eth2/utils/eth2_hashing" } -tree_hash = "0.1" -tokio = "0.1.22" -parking_lot = "0.7" -slog = "^2.2.3" -exit-future = "0.1.4" -serde = "1.0" -serde_derive = "1.0" +eth2_ssz = "0.1.2" +eth2_hashing = "0.1.0" +tree_hash = "0.1.0" +tokio = { version = "0.2.20", features = ["full"] } +parking_lot = "0.10.2" +slog = "2.5.2" +exit-future = "0.2.0" +serde = "1.0.110" +serde_derive = "1.0.110" int_to_bytes = { path = "../../eth2/utils/int_to_bytes" } diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index ffefb58b23..8d4f82fafd 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -2,11 +2,6 @@ pub use crate::{common::genesis_deposits, interop::interop_genesis_state}; pub use eth1::Config as Eth1Config; use eth1::{DepositLog, Eth1Block, Service}; -use futures::{ - future, - future::{loop_fn, Loop}, - Future, -}; use parking_lot::Mutex; use slog::{debug, error, info, trace, Logger}; use state_processing::{ @@ -14,8 +9,8 @@ use state_processing::{ per_block_processing::process_deposit, process_activations, }; use std::sync::Arc; -use 
std::time::{Duration, Instant}; -use tokio::timer::Delay; +use std::time::Duration; +use tokio::time::delay_for; use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256}; /// Provides a service that connects to some Eth1 HTTP JSON-RPC endpoint and maintains a cache of eth1 @@ -87,117 +82,83 @@ impl Eth1GenesisService { /// /// - `Ok(state)` once the canonical eth2 genesis state has been discovered. /// - `Err(e)` if there is some internal error during updates. - pub fn wait_for_genesis_state( + pub async fn wait_for_genesis_state( &self, update_interval: Duration, spec: ChainSpec, - ) -> impl Future, Error = String> { + ) -> Result, String> { let service = self.clone(); + let log = service.core.log.clone(); + let min_genesis_active_validator_count = spec.min_genesis_active_validator_count; + let min_genesis_time = spec.min_genesis_time; + loop { + // **WARNING** `delay_for` panics on error + delay_for(update_interval).await; + let update_result = Service::update_deposit_cache(self.core.clone()) + .await + .map_err(|e| format!("{:?}", e)); - loop_fn::<(ChainSpec, Option>), _, _, _>( - (spec, None), - move |(spec, state)| { - let service_1 = service.clone(); - let service_2 = service.clone(); - let service_3 = service.clone(); - let service_4 = service.clone(); - let log = service.core.log.clone(); - let min_genesis_active_validator_count = spec.min_genesis_active_validator_count; - let min_genesis_time = spec.min_genesis_time; + if let Err(e) = update_result { + error!( + log, + "Failed to update eth1 deposit cache"; + "error" => e + ) + } - Delay::new(Instant::now() + update_interval) - .map_err(|e| format!("Delay between genesis deposit checks failed: {:?}", e)) - .and_then(move |()| { - service_1 - .core - .update_deposit_cache() - .map_err(|e| format!("{:?}", e)) - }) - .then(move |update_result| { - if let Err(e) = update_result { - error!( - log, - "Failed to update eth1 deposit cache"; - "error" => e - ) - } + // Do not exit the loop if there 
is an error whilst updating. + // Only enable the `sync_blocks` flag if there are enough deposits to feasibly + // trigger genesis. + // + // Note: genesis is triggered by the _active_ validator count, not just the + // deposit count, so it's possible that block downloads are started too early. + // This is just wasteful, not erroneous. + let mut sync_blocks = self.sync_blocks.lock(); - // Do not exit the loop if there is an error whilst updating. - Ok(()) - }) - // Only enable the `sync_blocks` flag if there are enough deposits to feasibly - // trigger genesis. - // - // Note: genesis is triggered by the _active_ validator count, not just the - // deposit count, so it's possible that block downloads are started too early. - // This is just wasteful, not erroneous. - .and_then(move |()| { - let mut sync_blocks = service_2.sync_blocks.lock(); + if !(*sync_blocks) { + if let Some(viable_eth1_block) = + self.first_viable_eth1_block(min_genesis_active_validator_count as usize) + { + info!( + log, + "Minimum genesis deposit count met"; + "deposit_count" => min_genesis_active_validator_count, + "block_number" => viable_eth1_block, + ); + self.core.set_lowest_cached_block(viable_eth1_block); + *sync_blocks = true + } + } - if !(*sync_blocks) { - if let Some(viable_eth1_block) = service_2.first_viable_eth1_block( - min_genesis_active_validator_count as usize, - ) { - info!( - service_2.core.log, - "Minimum genesis deposit count met"; - "deposit_count" => min_genesis_active_validator_count, - "block_number" => viable_eth1_block, - ); - service_2.core.set_lowest_cached_block(viable_eth1_block); - *sync_blocks = true - } - } - - Ok(*sync_blocks) - }) - .and_then(move |should_update_block_cache| { - let maybe_update_future: Box + Send> = - if should_update_block_cache { - Box::new(service_3.core.update_block_cache().then( - move |update_result| { - if let Err(e) = update_result { - error!( - service_3.core.log, - "Failed to update eth1 block cache"; - "error" => 
format!("{:?}", e) - ); - } - - // Do not exit the loop if there is an error whilst updating. - Ok(()) - }, - )) - } else { - Box::new(future::ok(())) - }; - - maybe_update_future - }) - .and_then(move |()| { - if let Some(genesis_state) = service_4 - .scan_new_blocks::(&spec) - .map_err(|e| format!("Failed to scan for new blocks: {}", e))? - { - Ok(Loop::Break((spec, genesis_state))) - } else { - debug!( - service_4.core.log, - "No eth1 genesis block found"; - "latest_block_timestamp" => service_4.core.latest_block_timestamp(), - "min_genesis_time" => min_genesis_time, - "min_validator_count" => min_genesis_active_validator_count, - "cached_blocks" => service_4.core.block_cache_len(), - "cached_deposits" => service_4.core.deposit_cache_len(), - "cache_head" => service_4.highest_known_block(), - ); - - Ok(Loop::Continue((spec, state))) - } - }) - }, - ) - .map(|(_spec, state)| state) + let should_update_block_cache = *sync_blocks; + if should_update_block_cache { + let update_result = Service::update_block_cache(self.core.clone()).await; + if let Err(e) = update_result { + error!( + log, + "Failed to update eth1 block cache"; + "error" => format!("{:?}", e) + ); + } + }; + if let Some(genesis_state) = self + .scan_new_blocks::(&spec) + .map_err(|e| format!("Failed to scan for new blocks: {}", e))? + { + break Ok(genesis_state); + } else { + debug!( + log, + "No eth1 genesis block found"; + "latest_block_timestamp" => self.core.latest_block_timestamp(), + "min_genesis_time" => min_genesis_time, + "min_validator_count" => min_genesis_active_validator_count, + "cached_blocks" => self.core.block_cache_len(), + "cached_deposits" => self.core.deposit_cache_len(), + "cache_head" => self.highest_known_block(), + ); + } + } } /// Processes any new blocks that have appeared since this function was last run. 
diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index bf9a8ce897..7e2131f511 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -5,7 +5,7 @@ #![cfg(test)] use environment::{Environment, EnvironmentBuilder}; use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance}; -use futures::Future; +use futures::compat::Future01CompatExt; use genesis::{Eth1Config, Eth1GenesisService}; use state_processing::is_valid_genesis_state; use std::time::Duration; @@ -24,81 +24,85 @@ pub fn new_env() -> Environment { #[test] fn basic() { let mut env = new_env(); - let log = env.core_context().log; + let log = env.core_context().log.clone(); let mut spec = env.eth2_config().spec.clone(); - let runtime = env.runtime(); - let eth1 = runtime - .block_on(GanacheEth1Instance::new()) - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + env.runtime().block_on(async { + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); - let now = runtime - .block_on(web3.eth().block_number().map(|v| v.as_u64())) - .expect("should get block number"); + let now = web3 + .eth() + .block_number() + .compat() + .await + .map(|v| v.as_u64()) + .expect("should get block number"); - let service = Eth1GenesisService::new( - Eth1Config { - endpoint: eth1.endpoint(), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: now, - lowest_cached_block_number: now, - follow_distance: 0, - block_cache_truncation: None, - ..Eth1Config::default() - }, - log, - ); + let service = Eth1GenesisService::new( + Eth1Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: now, + lowest_cached_block_number: now, + follow_distance: 0, + block_cache_truncation: None, + 
..Eth1Config::default() + }, + log, + ); - // NOTE: this test is sensitive to the response speed of the external web3 server. If - // you're experiencing failures, try increasing the update_interval. - let update_interval = Duration::from_millis(500); + // NOTE: this test is sensitive to the response speed of the external web3 server. If + // you're experiencing failures, try increasing the update_interval. + let update_interval = Duration::from_millis(500); - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = 8; + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 8; - let deposits = (0..spec.min_genesis_active_validator_count + 2) - .map(|i| { - deposit_contract.deposit_helper::( - generate_deterministic_keypair(i as usize), - Hash256::from_low_u64_le(i), - 32_000_000_000, - ) - }) - .map(|deposit| DelayThenDeposit { - delay: Duration::from_secs(0), - deposit, - }) - .collect::>(); + let deposits = (0..spec.min_genesis_active_validator_count + 2) + .map(|i| { + deposit_contract.deposit_helper::( + generate_deterministic_keypair(i as usize), + Hash256::from_low_u64_le(i), + 32_000_000_000, + ) + }) + .map(|deposit| DelayThenDeposit { + delay: Duration::from_secs(0), + deposit, + }) + .collect::>(); - let deposit_future = deposit_contract.deposit_multiple(deposits); + let deposit_future = deposit_contract.deposit_multiple(deposits); - let wait_future = - service.wait_for_genesis_state::(update_interval, spec.clone()); + let wait_future = + service.wait_for_genesis_state::(update_interval, spec.clone()); - let state = runtime - .block_on(deposit_future.join(wait_future)) - .map(|(_, state)| state) - .expect("should finish waiting for genesis"); + let state = futures::try_join!(deposit_future, wait_future) + .map(|(_, state)| state) + .expect("should finish waiting for genesis"); - // Note: using ganache these deposits are 1-per-block, therefore we know there should only be - // the minimum number of validators. 
- assert_eq!( - state.validators.len(), - spec.min_genesis_active_validator_count as usize, - "should have expected validator count" - ); + // Note: using ganache these deposits are 1-per-block, therefore we know there should only be + // the minimum number of validators. + assert_eq!( + state.validators.len(), + spec.min_genesis_active_validator_count as usize, + "should have expected validator count" + ); - assert!(state.genesis_time > 0, "should have some genesis time"); + assert!(state.genesis_time > 0, "should have some genesis time"); - assert!( - is_valid_genesis_state(&state, &spec), - "should be valid genesis state" - ); + assert!( + is_valid_genesis_state(&state, &spec), + "should be valid genesis state" + ); - assert!( - is_valid_genesis_state(&state, &spec), - "should be valid genesis state" - ); + assert!( + is_valid_genesis_state(&state, &spec), + "should be valid genesis state" + ); + }); } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index d6cfd270c2..3857840f84 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,32 +5,32 @@ authors = ["Age Manning "] edition = "2018" [dev-dependencies] -sloggers = "0.3.4" +sloggers = "1.0.0" genesis = { path = "../genesis" } -tempdir = "0.3" lazy_static = "1.4.0" +matches = "0.1.8" +tempfile = "3.1.0" [dependencies] beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } eth2-libp2p = { path = "../eth2-libp2p" } -hashmap_delay = { path = "../../eth2/utils/hashmap_delay" } +hashset_delay = { path = "../../eth2/utils/hashset_delay" } rest_types = { path = "../../eth2/utils/rest_types" } types = { path = "../../eth2/types" } slot_clock = { path = "../../eth2/utils/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } -hex = "0.3" +hex = "0.4.2" eth2_ssz = "0.1.2" tree_hash = "0.1.0" -futures = "0.1.29" -error-chain = "0.12.1" -tokio = "0.1.22" -parking_lot = "0.9.0" -smallvec = "1.0.0" +futures = "0.3.5" 
+error-chain = "0.12.2" +tokio = { version = "0.2.20", features = ["full"] } +parking_lot = "0.10.2" +smallvec = "1.4.0" # TODO: Remove rand crate for mainnet -rand = "0.7.2" +rand = "0.7.3" fnv = "1.0.6" -rlp = "0.4.3" -tokio-timer = "0.2.12" -matches = "0.1.8" -tempfile = "3.1.0" \ No newline at end of file +rlp = "0.4.5" +lazy_static = "1.4.0" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/attestation_service/mod.rs index a1d19abe6a..90f67629f3 100644 --- a/beacon_node/network/src/attestation_service/mod.rs +++ b/beacon_node/network/src/attestation_service/mod.rs @@ -5,16 +5,20 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::{types::GossipKind, MessageId, NetworkGlobals, PeerId}; use futures::prelude::*; -use hashmap_delay::HashSetDelay; +use hashset_delay::HashSetDelay; use rand::seq::SliceRandom; use rest_types::ValidatorSubscription; use slog::{crit, debug, error, o, warn}; use slot_clock::SlotClock; use std::collections::VecDeque; +use std::pin::Pin; use std::sync::Arc; +use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use types::{Attestation, EthSpec, Slot, SubnetId}; +mod tests; + /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the /// slot is less than this number, skip the peer discovery process. 
const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 1; @@ -564,7 +568,7 @@ impl AttestationService { return Ok(()); } - let subscribed_subnets = self.random_subnets.keys_vec(); + let subscribed_subnets = self.random_subnets.keys().cloned().collect::>(); let to_remove_subnets = subscribed_subnets.choose_multiple( &mut rand::thread_rng(), random_subnets_per_validator as usize, @@ -576,10 +580,10 @@ impl AttestationService { for subnet_id in to_remove_subnets { // If a subscription is queued for two slots in the future, it's associated unsubscription // will unsubscribe from the expired subnet. - // If there is no subscription for this subnet,slot it is safe to add one, without + // If there is no unsubscription for this subnet,slot it is safe to add one, without // unsubscribing early from a required subnet let subnet = ExactSubnet { - subnet_id: **subnet_id, + subnet_id: *subnet_id, slot: current_slot + 2, }; if self.subscriptions.get(&subnet).is_none() { @@ -597,11 +601,11 @@ impl AttestationService { self.unsubscriptions .insert_at(subnet, unsubscription_duration); } - // as the long lasting subnet subscription is being removed, remove the subnet_id from // the ENR bitfield self.events - .push_back(AttServiceMessage::EnrRemove(**subnet_id)); + .push_back(AttServiceMessage::EnrRemove(*subnet_id)); + self.random_subnets.remove(subnet_id); } Ok(()) } @@ -609,648 +613,64 @@ impl AttestationService { impl Stream for AttestationService { type Item = AttServiceMessage; - type Error = (); - fn poll(&mut self) -> Poll, Self::Error> { + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // process any peer discovery events - while let Async::Ready(Some(exact_subnet)) = - self.discover_peers.poll().map_err(|e| { - error!(self.log, "Failed to check for peer discovery requests"; "error"=> format!("{}", e)); - })? 
- { - self.handle_discover_peers(exact_subnet); - } + match self.discover_peers.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(exact_subnet))) => self.handle_discover_peers(exact_subnet), + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for peer discovery requests"; "error"=> format!("{}", e)); + } + Poll::Ready(None) | Poll::Pending => {} + } // process any subscription events - while let Async::Ready(Some(exact_subnet)) = self.subscriptions.poll().map_err(|e| { - error!(self.log, "Failed to check for subnet subscription times"; "error"=> format!("{}", e)); - })? - { - self.handle_subscriptions(exact_subnet); - } + match self.subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(exact_subnet))) => self.handle_subscriptions(exact_subnet), + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for subnet subscription times"; "error"=> format!("{}", e)); + } + Poll::Ready(None) | Poll::Pending => {} + } // process any un-subscription events - while let Async::Ready(Some(exact_subnet)) = self.unsubscriptions.poll().map_err(|e| { - error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> format!("{}", e)); - })? - { - self.handle_unsubscriptions(exact_subnet); - } + match self.unsubscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet), + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> format!("{}", e)); + } + Poll::Ready(None) | Poll::Pending => {} + } // process any random subnet expiries - while let Async::Ready(Some(subnet)) = self.random_subnets.poll().map_err(|e| { - error!(self.log, "Failed to check for random subnet cycles"; "error"=> format!("{}", e)); - })? 
- { - self.handle_random_subnet_expiry(subnet); - } + match self.random_subnets.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(subnet))) => self.handle_random_subnet_expiry(subnet), + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for random subnet cycles"; "error"=> format!("{}", e)); + } + Poll::Ready(None) | Poll::Pending => {} + } // process any known validator expiries - while let Async::Ready(Some(_validator_index)) = self.known_validators.poll().map_err(|e| { - error!(self.log, "Failed to check for random subnet cycles"; "error"=> format!("{}", e)); - })? - { - let _ = self.handle_known_validator_expiry(); - } + match self.known_validators.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(_validator_index))) => { + let _ = self.handle_known_validator_expiry(); + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for random subnet cycles"; "error"=> format!("{}", e)); + } + Poll::Ready(None) | Poll::Pending => {} + } // poll to remove entries on expiration, no need to act on expiration events - let _ = self.aggregate_validators_on_subnet.poll().map_err(|e| { error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> format!("{}", e)); }); + if let Poll::Ready(Some(Err(e))) = self.aggregate_validators_on_subnet.poll_next_unpin(cx) { + error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> format!("{}", e)); + } // process any generated events if let Some(event) = self.events.pop_front() { - return Ok(Async::Ready(Some(event))); + return Poll::Ready(Some(event)); } - Ok(Async::NotReady) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use beacon_chain::builder::{BeaconChainBuilder, Witness}; - use beacon_chain::eth1_chain::CachingEth1Backend; - use beacon_chain::events::NullEventHandler; - use beacon_chain::migrate::NullMigrator; - use eth2_libp2p::discovery::{build_enr, Keypair}; - use eth2_libp2p::{discovery::CombinedKey, NetworkConfig, NetworkGlobals}; - 
use futures::Stream; - use genesis::{generate_deterministic_keypairs, interop_genesis_state}; - use lazy_static::lazy_static; - use matches::assert_matches; - use slog::Logger; - use sloggers::{null::NullLoggerBuilder, Build}; - use slot_clock::{SlotClock, SystemTimeSlotClock}; - use std::convert::TryInto; - use std::sync::atomic::{AtomicBool, Ordering::Relaxed}; - use std::time::SystemTime; - use store::MemoryStore; - use tempfile::tempdir; - use tokio::prelude::*; - use types::{CommitteeIndex, EnrForkId, EthSpec, MinimalEthSpec}; - - const SLOT_DURATION_MILLIS: u64 = 200; - - type TestBeaconChainType = Witness< - MemoryStore, - NullMigrator, - SystemTimeSlotClock, - CachingEth1Backend>, - MinimalEthSpec, - NullEventHandler, - >; - - pub struct TestBeaconChain { - chain: Arc>, - } - - impl TestBeaconChain { - pub fn new_with_system_clock() -> Self { - let data_dir = tempdir().expect("should create temporary data_dir"); - let spec = MinimalEthSpec::default_spec(); - - let keypairs = generate_deterministic_keypairs(1); - - let log = get_logger(); - let chain = Arc::new( - BeaconChainBuilder::new(MinimalEthSpec) - .logger(log.clone()) - .custom_spec(spec.clone()) - .store(Arc::new(MemoryStore::open())) - .store_migrator(NullMigrator) - .data_dir(data_dir.path().to_path_buf()) - .genesis_state( - interop_genesis_state::(&keypairs, 0, &spec) - .expect("should generate interop state"), - ) - .expect("should build state using recent genesis") - .dummy_eth1_backend() - .expect("should build dummy backend") - .null_event_handler() - .slot_clock(SystemTimeSlotClock::new( - Slot::new(0), - Duration::from_secs(recent_genesis_time()), - Duration::from_millis(SLOT_DURATION_MILLIS), - )) - .reduced_tree_fork_choice() - .expect("should add fork choice to builder") - .build() - .expect("should build"), - ); - Self { chain } - } - } - - pub fn recent_genesis_time() -> u64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs() - } - - fn 
get_logger() -> Logger { - NullLoggerBuilder.build().expect("logger should build") - } - - lazy_static! { - static ref CHAIN: TestBeaconChain = { TestBeaconChain::new_with_system_clock() }; - } - - fn get_attestation_service() -> AttestationService { - let log = get_logger(); - - let beacon_chain = CHAIN.chain.clone(); - - let config = NetworkConfig::default(); - let enr_key: CombinedKey = Keypair::generate_secp256k1().try_into().unwrap(); - let enr = build_enr::(&enr_key, &config, EnrForkId::default()).unwrap(); - - let network_globals: NetworkGlobals = NetworkGlobals::new(enr, 0, 0, &log); - AttestationService::new(beacon_chain, Arc::new(network_globals), &log) - } - - fn get_subscription( - validator_index: u64, - attestation_committee_index: CommitteeIndex, - slot: Slot, - ) -> ValidatorSubscription { - let is_aggregator = true; - ValidatorSubscription { - validator_index, - attestation_committee_index, - slot, - is_aggregator, - } - } - - fn _get_subscriptions(validator_count: u64, slot: Slot) -> Vec { - let mut subscriptions: Vec = Vec::new(); - for validator_index in 0..validator_count { - let is_aggregator = true; - subscriptions.push(ValidatorSubscription { - validator_index, - attestation_committee_index: validator_index, - slot, - is_aggregator, - }); - } - subscriptions - } - - // gets a number of events from the subscription service, or returns none if it times out after a number - // of slots - fn get_events>( - stream: S, - no_events: u64, - no_slots_before_timeout: u32, - ) -> impl Future, Error = ()> { - stream - .take(no_events) - .collect() - .timeout(Duration::from_millis(SLOT_DURATION_MILLIS) * no_slots_before_timeout) - .map_err(|_| ()) - } - - #[test] - fn subscribe_current_slot() { - // subscription config - let validator_index = 1; - let committee_index = 1; - let subscription_slot = 0; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service 
- .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = vec![get_subscription( - validator_index, - committee_index, - current_slot + Slot::new(subscription_slot), - )]; - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - // not enough time for peer discovery, just subscribe - let expected = vec![AttServiceMessage::Subscribe(SubnetId::new(validator_index))]; - - let test_result = Arc::new(AtomicBool::new(false)); - let thread_result = test_result.clone(); - tokio::run( - get_events(attestation_service, 4, 1) - .map(move |events| { - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_any2), - AttServiceMessage::Subscribe(_any1), - AttServiceMessage::EnrAdd(_any3) - ] - ); - assert_eq!(expected[..], events[3..]); - // test completed successfully - thread_result.store(true, Relaxed); - }) - // this doesn't need to be here, but helps with debugging - .map_err(|_| panic!("Did not receive desired events in the given time frame")), - ); - assert!(test_result.load(Relaxed)) - } - - #[test] - fn subscribe_current_slot_wait_for_unsubscribe() { - // subscription config - let validator_index = 1; - let committee_index = 1; - let subscription_slot = 0; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = vec![get_subscription( - validator_index, - committee_index, - current_slot + Slot::new(subscription_slot), - )]; - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - // not enough time for peer discovery, just subscribe, unsubscribe - let expected = vec![ - AttServiceMessage::Subscribe(SubnetId::new(validator_index)), - AttServiceMessage::Unsubscribe(SubnetId::new(validator_index)), - ]; 
- - let test_result = Arc::new(AtomicBool::new(false)); - let thread_result = test_result.clone(); - tokio::run( - get_events(attestation_service, 5, 2) - .map(move |events| { - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_any2), - AttServiceMessage::Subscribe(_any1), - AttServiceMessage::EnrAdd(_any3) - ] - ); - assert_eq!(expected[..], events[3..]); - // test completed successfully - thread_result.store(true, Relaxed); - }) - // this doesn't need to be here, but helps with debugging - .map_err(|_| panic!("Did not receive desired events in the given time frame")), - ); - assert!(test_result.load(Relaxed)) - } - - #[test] - fn subscribe_five_slots_ahead() { - // subscription config - let validator_index = 1; - let committee_index = 1; - let subscription_slot = 5; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = vec![get_subscription( - validator_index, - committee_index, - current_slot + Slot::new(subscription_slot), - )]; - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - // just discover peers, don't subscribe yet - let expected = vec![AttServiceMessage::DiscoverPeers(SubnetId::new( - validator_index, - ))]; - - let test_result = Arc::new(AtomicBool::new(false)); - let thread_result = test_result.clone(); - tokio::run( - get_events(attestation_service, 4, 1) - .map(move |events| { - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_any1), - AttServiceMessage::Subscribe(_any2), - AttServiceMessage::EnrAdd(_any3) - ] - ); - assert_eq!(expected[..], events[3..]); - // test completed successfully - thread_result.store(true, Relaxed); - }) - // this doesn't need to be here, but helps with debugging - .map_err(|_| panic!("Did not receive desired events 
in the given time frame")), - ); - assert!(test_result.load(Relaxed)) - } - - #[test] - fn subscribe_five_slots_ahead_wait_five_slots() { - // subscription config - let validator_index = 1; - let committee_index = 1; - let subscription_slot = 5; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = vec![get_subscription( - validator_index, - committee_index, - current_slot + Slot::new(subscription_slot), - )]; - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - // we should discover peers, wait, then subscribe - let expected = vec![ - AttServiceMessage::DiscoverPeers(SubnetId::new(validator_index)), - AttServiceMessage::Subscribe(SubnetId::new(validator_index)), - ]; - - let test_result = Arc::new(AtomicBool::new(false)); - let thread_result = test_result.clone(); - tokio::run( - get_events(attestation_service, 5, 5) - .map(move |events| { - //dbg!(&events); - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_any1), - AttServiceMessage::Subscribe(_any2), - AttServiceMessage::EnrAdd(_any3) - ] - ); - assert_eq!(expected[..], events[3..]); - // test completed successfully - thread_result.store(true, Relaxed); - }) - // this doesn't need to be here, but helps with debugging - .map_err(|_| panic!("Did not receive desired events in the given time frame")), - ); - assert!(test_result.load(Relaxed)) - } - - #[test] - fn subscribe_ten_slots_ahead() { - // subscription config - let validator_index = 1; - let committee_index = 1; - let subscription_slot = 10; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); 
- - let subscriptions = vec![get_subscription( - validator_index, - committee_index, - current_slot + Slot::new(subscription_slot), - )]; - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - // ten slots ahead is before our target peer discover time, so expect no messages - let expected: Vec = vec![]; - - let test_result = Arc::new(AtomicBool::new(false)); - let thread_result = test_result.clone(); - tokio::run( - get_events(attestation_service, 3, 1) - .map(move |events| { - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_any1), - AttServiceMessage::Subscribe(_any2), - AttServiceMessage::EnrAdd(_any3) - ] - ); - assert_eq!(expected[..], events[3..]); - // test completed successfully - thread_result.store(true, Relaxed); - }) - // this doesn't need to be here, but helps with debugging - .map_err(|_| panic!("Did not receive desired events in the given time frame")), - ); - assert!(test_result.load(Relaxed)) - } - - #[test] - fn subscribe_ten_slots_ahead_wait_five_slots() { - // subscription config - let validator_index = 1; - let committee_index = 1; - let subscription_slot = 10; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = vec![get_subscription( - validator_index, - committee_index, - current_slot + Slot::new(subscription_slot), - )]; - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - // expect discover peers because we will enter TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD range - let expected: Vec = vec![AttServiceMessage::DiscoverPeers( - SubnetId::new(validator_index), - )]; - - let test_result = Arc::new(AtomicBool::new(false)); - let thread_result = test_result.clone(); - tokio::run( - 
get_events(attestation_service, 4, 5) - .map(move |events| { - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_any1), - AttServiceMessage::Subscribe(_any2), - AttServiceMessage::EnrAdd(_any3) - ] - ); - assert_eq!(expected[..], events[3..]); - // test completed successfully - thread_result.store(true, Relaxed); - }) - // this doesn't need to be here, but helps with debugging - .map_err(|_| panic!("Did not receive desired events in the given time frame")), - ); - assert!(test_result.load(Relaxed)) - } - - #[test] - fn subscribe_all_random_subnets() { - // subscribe 10 slots ahead so we do not produce any exact subnet messages - let subscription_slot = 10; - let subscription_count = 64; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = - _get_subscriptions(subscription_count, current_slot + subscription_slot); - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - let test_result = Arc::new(AtomicBool::new(false)); - let thread_result = test_result.clone(); - tokio::run( - get_events(attestation_service, 192, 3) - .map(move |events| { - let mut discover_peer_count = 0; - let mut subscribe_count = 0; - let mut enr_add_count = 0; - let mut unexpected_msg_count = 0; - - for event in events { - match event { - AttServiceMessage::DiscoverPeers(_any_subnet) => { - discover_peer_count = discover_peer_count + 1 - } - AttServiceMessage::Subscribe(_any_subnet) => { - subscribe_count = subscribe_count + 1 - } - AttServiceMessage::EnrAdd(_any_subnet) => { - enr_add_count = enr_add_count + 1 - } - _ => unexpected_msg_count = unexpected_msg_count + 1, - } - } - - assert_eq!(discover_peer_count, 64); - assert_eq!(subscribe_count, 64); - assert_eq!(enr_add_count, 64); - 
assert_eq!(unexpected_msg_count, 0); - // test completed successfully - thread_result.store(true, Relaxed); - }) - // this doesn't need to be here, but helps with debugging - .map_err(|_| panic!("Did not receive desired events in the given time frame")), - ); - assert!(test_result.load(Relaxed)) - } - - #[test] - fn subscribe_all_random_subnets_plus_one() { - // subscribe 10 slots ahead so we do not produce any exact subnet messages - let subscription_slot = 10; - // the 65th subscription should result in no more messages than the previous scenario - let subscription_count = 65; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = - _get_subscriptions(subscription_count, current_slot + subscription_slot); - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - let test_result = Arc::new(AtomicBool::new(false)); - let thread_result = test_result.clone(); - tokio::run( - get_events(attestation_service, 192, 3) - .map(move |events| { - let mut discover_peer_count = 0; - let mut subscribe_count = 0; - let mut enr_add_count = 0; - let mut unexpected_msg_count = 0; - - for event in events { - match event { - AttServiceMessage::DiscoverPeers(_any_subnet) => { - discover_peer_count = discover_peer_count + 1 - } - AttServiceMessage::Subscribe(_any_subnet) => { - subscribe_count = subscribe_count + 1 - } - AttServiceMessage::EnrAdd(_any_subnet) => { - enr_add_count = enr_add_count + 1 - } - _ => unexpected_msg_count = unexpected_msg_count + 1, - } - } - - assert_eq!(discover_peer_count, 64); - assert_eq!(subscribe_count, 64); - assert_eq!(enr_add_count, 64); - assert_eq!(unexpected_msg_count, 0); - // test completed successfully - thread_result.store(true, Relaxed); - }) - // this doesn't need to be here, but helps 
with debugging - .map_err(|_| panic!("Did not receive desired events in the given time frame")), - ); - assert!(test_result.load(Relaxed)) + Poll::Pending } } diff --git a/beacon_node/network/src/attestation_service/tests.rs b/beacon_node/network/src/attestation_service/tests.rs new file mode 100644 index 0000000000..42c52fb329 --- /dev/null +++ b/beacon_node/network/src/attestation_service/tests.rs @@ -0,0 +1,508 @@ +#[cfg(test)] +mod tests { + use super::super::*; + use beacon_chain::{ + builder::{BeaconChainBuilder, Witness}, + eth1_chain::CachingEth1Backend, + events::NullEventHandler, + migrate::NullMigrator, + }; + use eth2_libp2p::discovery::{build_enr, Keypair}; + use eth2_libp2p::{discovery::CombinedKey, CombinedKeyExt, NetworkConfig, NetworkGlobals}; + use futures::Stream; + use genesis::{generate_deterministic_keypairs, interop_genesis_state}; + use lazy_static::lazy_static; + use matches::assert_matches; + use slog::Logger; + use sloggers::{null::NullLoggerBuilder, Build}; + use slot_clock::{SlotClock, SystemTimeSlotClock}; + use std::time::SystemTime; + use store::MemoryStore; + use tempfile::tempdir; + use tokio::time::Duration; + use types::{CommitteeIndex, EnrForkId, EthSpec, MinimalEthSpec}; + + const SLOT_DURATION_MILLIS: u64 = 2000; + + type TestBeaconChainType = Witness< + MemoryStore, + NullMigrator, + SystemTimeSlotClock, + CachingEth1Backend>, + MinimalEthSpec, + NullEventHandler, + >; + + pub struct TestBeaconChain { + chain: Arc>, + } + + impl TestBeaconChain { + pub fn new_with_system_clock() -> Self { + let data_dir = tempdir().expect("should create temporary data_dir"); + let spec = MinimalEthSpec::default_spec(); + + let keypairs = generate_deterministic_keypairs(1); + + let log = get_logger(); + let chain = Arc::new( + BeaconChainBuilder::new(MinimalEthSpec) + .logger(log.clone()) + .custom_spec(spec.clone()) + .store(Arc::new(MemoryStore::open())) + .store_migrator(NullMigrator) + .data_dir(data_dir.path().to_path_buf()) + 
.genesis_state( + interop_genesis_state::(&keypairs, 0, &spec) + .expect("should generate interop state"), + ) + .expect("should build state using recent genesis") + .dummy_eth1_backend() + .expect("should build dummy backend") + .null_event_handler() + .slot_clock(SystemTimeSlotClock::new( + Slot::new(0), + Duration::from_secs(recent_genesis_time()), + Duration::from_millis(SLOT_DURATION_MILLIS), + )) + .reduced_tree_fork_choice() + .expect("should add fork choice to builder") + .build() + .expect("should build"), + ); + Self { chain } + } + } + + pub fn recent_genesis_time() -> u64 { + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs() + } + + fn get_logger() -> Logger { + NullLoggerBuilder.build().expect("logger should build") + } + + lazy_static! { + static ref CHAIN: TestBeaconChain = { TestBeaconChain::new_with_system_clock() }; + } + + fn get_attestation_service() -> AttestationService { + let log = get_logger(); + + let beacon_chain = CHAIN.chain.clone(); + + let config = NetworkConfig::default(); + let enr_key = CombinedKey::from_libp2p(&Keypair::generate_secp256k1()).unwrap(); + let enr = build_enr::(&enr_key, &config, EnrForkId::default()).unwrap(); + + let network_globals: NetworkGlobals = NetworkGlobals::new(enr, 0, 0, &log); + AttestationService::new(beacon_chain, Arc::new(network_globals), &log) + } + + fn get_subscription( + validator_index: u64, + attestation_committee_index: CommitteeIndex, + slot: Slot, + ) -> ValidatorSubscription { + let is_aggregator = true; + ValidatorSubscription { + validator_index, + attestation_committee_index, + slot, + is_aggregator, + } + } + + fn _get_subscriptions(validator_count: u64, slot: Slot) -> Vec { + let mut subscriptions: Vec = Vec::new(); + for validator_index in 0..validator_count { + let is_aggregator = true; + subscriptions.push(ValidatorSubscription { + validator_index, + attestation_committee_index: validator_index, + slot, + is_aggregator, + }); + } + subscriptions + 
} + + // gets a number of events from the subscription service, or returns none if it times out after a number + // of slots + async fn get_events + Unpin>( + mut stream: S, + no_events: usize, + no_slots_before_timeout: u32, + ) -> Vec { + let mut events = Vec::new(); + + let collect_stream_fut = async { + loop { + if let Some(result) = stream.next().await { + events.push(result); + if events.len() == no_events { + return; + } + } + } + }; + + tokio::select! { + _ = collect_stream_fut => {return events} + _ = tokio::time::delay_for( + Duration::from_millis(SLOT_DURATION_MILLIS) * no_slots_before_timeout, + ) => { return events; } + } + } + + #[tokio::test] + async fn subscribe_current_slot() { + // subscription config + let validator_index = 1; + let committee_index = 1; + let subscription_slot = 0; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = vec![get_subscription( + validator_index, + committee_index, + current_slot + Slot::new(subscription_slot), + )]; + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + // not enough time for peer discovery, just subscribe + let expected = vec![AttServiceMessage::Subscribe(SubnetId::new(validator_index))]; + + let events = get_events(attestation_service, 4, 1).await; + assert_matches!( + events[..3], + [ + AttServiceMessage::DiscoverPeers(_any2), + AttServiceMessage::Subscribe(_any1), + AttServiceMessage::EnrAdd(_any3) + ] + ); + assert_eq!(expected[..], events[3..]); + } + + #[tokio::test] + async fn subscribe_current_slot_wait_for_unsubscribe() { + // subscription config + let validator_index = 1; + let committee_index = 1; + let subscription_slot = 0; + + // create the attestation service and subscriptions + let mut attestation_service = 
get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = vec![get_subscription( + validator_index, + committee_index, + current_slot + Slot::new(subscription_slot), + )]; + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + // not enough time for peer discovery, just subscribe, unsubscribe + let expected = vec![ + AttServiceMessage::Subscribe(SubnetId::new(validator_index)), + AttServiceMessage::Unsubscribe(SubnetId::new(validator_index)), + ]; + + let events = get_events(attestation_service, 5, 2).await; + assert_matches!( + events[..3], + [ + AttServiceMessage::DiscoverPeers(_any2), + AttServiceMessage::Subscribe(_any1), + AttServiceMessage::EnrAdd(_any3) + ] + ); + assert_eq!(expected[..], events[3..]); + } + + #[tokio::test] + async fn subscribe_five_slots_ahead() { + // subscription config + let validator_index = 1; + let committee_index = 1; + let subscription_slot = 5; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = vec![get_subscription( + validator_index, + committee_index, + current_slot + Slot::new(subscription_slot), + )]; + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + // just discover peers, don't subscribe yet + let expected = vec![AttServiceMessage::DiscoverPeers(SubnetId::new( + validator_index, + ))]; + + let events = get_events(attestation_service, 4, 1).await; + assert_matches!( + events[..3], + [ + AttServiceMessage::DiscoverPeers(_any1), + AttServiceMessage::Subscribe(_any2), + AttServiceMessage::EnrAdd(_any3) + ] + ); + assert_eq!(expected[..], events[3..]); + } + + #[tokio::test] + async fn 
subscribe_five_slots_ahead_wait_five_slots() { + // subscription config + let validator_index = 1; + let committee_index = 1; + let subscription_slot = 5; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = vec![get_subscription( + validator_index, + committee_index, + current_slot + Slot::new(subscription_slot), + )]; + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + // we should discover peers, wait, then subscribe + let expected = vec![ + AttServiceMessage::DiscoverPeers(SubnetId::new(validator_index)), + AttServiceMessage::Subscribe(SubnetId::new(validator_index)), + ]; + + let events = get_events(attestation_service, 5, 5).await; + assert_matches!( + events[..3], + [ + AttServiceMessage::DiscoverPeers(_any1), + AttServiceMessage::Subscribe(_any2), + AttServiceMessage::EnrAdd(_any3) + ] + ); + assert_eq!(expected[..], events[3..]); + } + + #[tokio::test] + async fn subscribe_7_slots_ahead() { + // subscription config + let validator_index = 1; + let committee_index = 1; + let subscription_slot = 7; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = vec![get_subscription( + validator_index, + committee_index, + current_slot + Slot::new(subscription_slot), + )]; + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + // ten slots ahead is before our target peer discover time, so expect no messages + let expected: Vec = vec![]; + + let events = get_events(attestation_service, 3, 1).await; + assert_matches!( + events[..3], + [ + 
AttServiceMessage::DiscoverPeers(_any1), + AttServiceMessage::Subscribe(_any2), + AttServiceMessage::EnrAdd(_any3) + ] + ); + assert_eq!(expected[..], events[3..]); + } + + #[tokio::test] + async fn subscribe_ten_slots_ahead_wait_five_slots() { + // subscription config + let validator_index = 1; + let committee_index = 1; + let subscription_slot = 10; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = vec![get_subscription( + validator_index, + committee_index, + current_slot + Slot::new(subscription_slot), + )]; + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + // expect discover peers because we will enter TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD range + let expected: Vec = vec![AttServiceMessage::DiscoverPeers( + SubnetId::new(validator_index), + )]; + + let events = get_events(attestation_service, 4, 5).await; + assert_matches!( + events[..3], + [ + AttServiceMessage::DiscoverPeers(_any1), + AttServiceMessage::Subscribe(_any2), + AttServiceMessage::EnrAdd(_any3) + ] + ); + assert_eq!(expected[..], events[3..]); + } + + #[tokio::test] + async fn subscribe_all_random_subnets() { + // subscribe 10 slots ahead so we do not produce any exact subnet messages + let subscription_slot = 10; + let subscription_count = 64; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = + _get_subscriptions(subscription_count, current_slot + subscription_slot); + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + let events = get_events(attestation_service, 
192, 3).await; + let mut discover_peer_count = 0; + let mut subscribe_count = 0; + let mut enr_add_count = 0; + let mut unexpected_msg_count = 0; + + for event in events { + match event { + AttServiceMessage::DiscoverPeers(_any_subnet) => { + discover_peer_count = discover_peer_count + 1 + } + AttServiceMessage::Subscribe(_any_subnet) => subscribe_count = subscribe_count + 1, + AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1, + _ => unexpected_msg_count = unexpected_msg_count + 1, + } + } + + assert_eq!(discover_peer_count, 64); + assert_eq!(subscribe_count, 64); + assert_eq!(enr_add_count, 64); + assert_eq!(unexpected_msg_count, 0); + // test completed successfully + } + + #[tokio::test] + async fn subscribe_all_random_subnets_plus_one() { + // subscribe 10 slots ahead so we do not produce any exact subnet messages + let subscription_slot = 10; + // the 65th subscription should result in no more messages than the previous scenario + let subscription_count = 65; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = + _get_subscriptions(subscription_count, current_slot + subscription_slot); + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + let events = get_events(attestation_service, 192, 3).await; + let mut discover_peer_count = 0; + let mut subscribe_count = 0; + let mut enr_add_count = 0; + let mut unexpected_msg_count = 0; + + for event in events { + match event { + AttServiceMessage::DiscoverPeers(_any_subnet) => { + discover_peer_count = discover_peer_count + 1 + } + AttServiceMessage::Subscribe(_any_subnet) => subscribe_count = subscribe_count + 1, + AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1, + _ => unexpected_msg_count = 
unexpected_msg_count + 1, + } + } + + assert_eq!(discover_peer_count, 64); + assert_eq!(subscribe_count, 64); + assert_eq!(enr_add_count, 64); + assert_eq!(unexpected_msg_count, 0); + } +} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 040ade2b22..f7cc8051c8 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,8 +1,12 @@ +#[macro_use] +extern crate lazy_static; + /// This crate provides the network server for Lighthouse. pub mod error; pub mod service; mod attestation_service; +mod metrics; mod persisted_dht; mod router; mod sync; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs new file mode 100644 index 0000000000..bc45546623 --- /dev/null +++ b/beacon_node/network/src/metrics.rs @@ -0,0 +1,39 @@ +pub use lighthouse_metrics::*; + +lazy_static! { + /* + * Gossip Rx + */ + pub static ref GOSSIP_BLOCKS_RX: Result = try_create_int_counter( + "network_gossip_blocks_rx_total", + "Count of gossip blocks received" + ); + pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( + "network_gossip_unaggregated_attestations_rx_total", + "Count of gossip unaggregated attestations received" + ); + pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_IGNORED: Result = try_create_int_counter( + "network_gossip_unaggregated_attestations_ignored_total", + "Count of gossip unaggregated attestations ignored by attestation service" + ); + pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( + "network_gossip_aggregated_attestations_rx_total", + "Count of gossip aggregated attestations received" + ); + + /* + * Gossip Tx + */ + pub static ref GOSSIP_BLOCKS_TX: Result = try_create_int_counter( + "network_gossip_blocks_tx_total", + "Count of gossip blocks transmitted" + ); + pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( + "network_gossip_unaggregated_attestations_tx_total", + "Count of gossip 
unaggregated attestations transmitted" + ); + pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( + "network_gossip_aggregated_attestations_tx_total", + "Count of gossip aggregated attestations transmitted" + ); +} diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index d6a222e20c..900c1825bf 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -16,10 +16,9 @@ use eth2_libp2p::{ }, MessageId, NetworkGlobals, PeerId, PubsubMessage, RPCEvent, }; -use futures::future::Future; -use futures::stream::Stream; +use futures::prelude::*; use processor::Processor; -use slog::{debug, o, trace, warn}; +use slog::{debug, info, o, trace, warn}; use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; @@ -60,7 +59,7 @@ impl Router { beacon_chain: Arc>, network_globals: Arc>, network_send: mpsc::UnboundedSender>, - executor: &tokio::runtime::TaskExecutor, + runtime_handle: &tokio::runtime::Handle, log: slog::Logger, ) -> error::Result>> { let message_handler_log = log.new(o!("service"=> "router")); @@ -70,7 +69,7 @@ impl Router { // Initialise a message instance, which itself spawns the syncing thread. let processor = Processor::new( - executor, + runtime_handle, beacon_chain, network_globals, network_send.clone(), @@ -85,13 +84,12 @@ impl Router { }; // spawn handler task and move the message handler instance into the spawned thread - executor.spawn( + runtime_handle.spawn(async move { handler_recv - .for_each(move |msg| Ok(handler.handle_message(msg))) - .map_err(move |_| { - debug!(log, "Network message handler terminated."); - }), - ); + .for_each(move |msg| future::ready(handler.handle_message(msg))) + .await; + debug!(log, "Network message handler terminated."); + }); Ok(handler_send) } @@ -172,7 +170,7 @@ impl Router { // an error could have occurred. 
match error_response { RPCCodedResponse::InvalidRequest(error) => { - warn!(self.log, "Peer indicated invalid request"; "peer_id" => format!("{:?}", peer_id), "error" => error.as_string()); + warn!(self.log, "RPC Invalid Request"; "peer_id" => peer_id.to_string(), "request_id" => request_id, "error" => error.to_string()); self.handle_rpc_error( peer_id, request_id, @@ -180,7 +178,7 @@ impl Router { ); } RPCCodedResponse::ServerError(error) => { - warn!(self.log, "Peer internal server error"; "peer_id" => format!("{:?}", peer_id), "error" => error.as_string()); + warn!(self.log, "RPC Server Error"; "peer_id" => peer_id.to_string(), "request_id" => request_id, "error" => error.to_string()); self.handle_rpc_error( peer_id, request_id, @@ -188,7 +186,7 @@ impl Router { ); } RPCCodedResponse::Unknown(error) => { - warn!(self.log, "Unknown peer error"; "peer" => format!("{:?}", peer_id), "error" => error.as_string()); + warn!(self.log, "RPC Unknown Error"; "peer_id" => peer_id.to_string(), "request_id" => request_id, "error" => error.to_string()); self.handle_rpc_error( peer_id, request_id, @@ -278,6 +276,7 @@ impl Router { PubsubMessage::BeaconBlock(block) => { match self.processor.should_forward_block(&peer_id, block) { Ok(verified_block) => { + info!(self.log, "New block received"; "slot" => verified_block.block.slot(), "hash" => verified_block.block_root.to_string()); self.propagate_message(id, peer_id.clone()); self.processor.on_block_gossip(peer_id, verified_block); } @@ -313,7 +312,7 @@ impl Router { /// Informs the network service that the message should be forwarded to other peers. 
fn propagate_message(&mut self, message_id: MessageId, propagation_source: PeerId) { self.network_send - .try_send(NetworkMessage::Propagate { + .send(NetworkMessage::Propagate { propagation_source, message_id, }) diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 94fc5fcb04..cf791e5579 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -44,7 +44,7 @@ pub struct Processor { impl Processor { /// Instantiate a `Processor` instance pub fn new( - executor: &tokio::runtime::TaskExecutor, + runtime_handle: &tokio::runtime::Handle, beacon_chain: Arc>, network_globals: Arc>, network_send: mpsc::UnboundedSender>, @@ -54,7 +54,7 @@ impl Processor { // spawn the sync thread let (sync_send, _sync_exit) = crate::sync::manager::spawn( - executor, + runtime_handle, beacon_chain.clone(), network_globals, network_send.clone(), @@ -71,7 +71,7 @@ impl Processor { } fn send_to_sync(&mut self, message: SyncMessage) { - self.sync_send.try_send(message).unwrap_or_else(|_| { + self.sync_send.send(message).unwrap_or_else(|_| { warn!( self.log, "Could not send message to the sync service"; @@ -485,10 +485,9 @@ impl Processor { ) -> Result, BlockError> { let result = self.chain.verify_block_for_gossip(*block.clone()); - if let Err(BlockError::ParentUnknown(block_hash)) = result { + if let Err(BlockError::ParentUnknown(_)) = result { // if we don't know the parent, start a parent lookup // TODO: Modify the return to avoid the block clone. - debug!(self.log, "Unknown block received. 
Starting a parent lookup"; "block_slot" => block.message.slot, "block_hash" => format!("{}", block_hash)); self.send_to_sync(SyncMessage::UnknownBlock(peer_id.clone(), block)); } result @@ -929,7 +928,7 @@ impl HandlerNetworkContext { ); self.send_rpc_request(peer_id.clone(), RPCRequest::Goodbye(reason)); self.network_send - .try_send(NetworkMessage::Disconnect { peer_id }) + .send(NetworkMessage::Disconnect { peer_id }) .unwrap_or_else(|_| { warn!( self.log, @@ -970,7 +969,7 @@ impl HandlerNetworkContext { fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { self.network_send - .try_send(NetworkMessage::RPC(peer_id, rpc_event)) + .send(NetworkMessage::RPC(peer_id, rpc_event)) .unwrap_or_else(|_| { warn!( self.log, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 5eff3654e8..ff9063ea50 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,23 +1,22 @@ -use crate::error; use crate::persisted_dht::{load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; use crate::{ attestation_service::{AttServiceMessage, AttestationService}, NetworkConfig, }; +use crate::{error, metrics}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::Service as LibP2PService; -use eth2_libp2p::{rpc::RPCRequest, BehaviourEvent, Enr, MessageId, NetworkGlobals, PeerId, Swarm}; -use eth2_libp2p::{PubsubMessage, RPCEvent}; +use eth2_libp2p::{rpc::RPCRequest, BehaviourEvent, Enr, MessageId, NetworkGlobals, PeerId}; +use eth2_libp2p::{Libp2pEvent, PubsubMessage, RPCEvent}; use futures::prelude::*; -use futures::Stream; use rest_types::ValidatorSubscription; -use slog::{debug, error, info, trace}; +use slog::{debug, error, info, o, trace}; use std::sync::Arc; -use std::time::{Duration, Instant}; -use tokio::runtime::TaskExecutor; +use std::time::Duration; +use tokio::runtime::Handle; use tokio::sync::{mpsc, oneshot}; -use tokio::timer::Delay; +use tokio::time::Delay; use 
types::EthSpec; mod tests; @@ -42,8 +41,6 @@ pub struct NetworkService { store: Arc, /// A collection of global variables, accessible outside of the network service. network_globals: Arc>, - /// An initial delay to update variables after the libp2p service has started. - initial_delay: Delay, /// A delay that expires when a new fork takes place. next_fork_update: Option, /// The logger for the network service. @@ -56,7 +53,7 @@ impl NetworkService { pub fn start( beacon_chain: Arc>, config: &NetworkConfig, - executor: &TaskExecutor, + runtime_handle: &Handle, network_log: slog::Logger, ) -> error::Result<( Arc>, @@ -78,16 +75,12 @@ impl NetworkService { // launch libp2p service let (network_globals, mut libp2p) = - LibP2PService::new(config, enr_fork_id, network_log.clone())?; + runtime_handle.enter(|| LibP2PService::new(config, enr_fork_id, &network_log))?; for enr in load_dht::(store.clone()) { libp2p.swarm.add_enr(enr); } - // A delay used to initialise code after the network has started - // This is currently used to obtain the listening addresses from the libp2p service. 
- let initial_delay = Delay::new(Instant::now() + Duration::from_secs(1)); - // launch derived network services // router task @@ -95,7 +88,7 @@ impl NetworkService { beacon_chain.clone(), network_globals.clone(), network_send.clone(), - executor, + runtime_handle, network_log.clone(), )?; @@ -104,6 +97,7 @@ impl NetworkService { AttestationService::new(beacon_chain.clone(), network_globals.clone(), &network_log); // create the network service and spawn the task + let network_log = network_log.new(o!("service"=> "network")); let network_service = NetworkService { beacon_chain, libp2p, @@ -112,13 +106,12 @@ impl NetworkService { router_send, store, network_globals: network_globals.clone(), - initial_delay, next_fork_update, log: network_log, propagation_percentage, }; - let network_exit = spawn_service(network_service, &executor)?; + let network_exit = runtime_handle.enter(|| spawn_service(network_service))?; Ok((network_globals, network_send, network_exit)) } @@ -126,248 +119,249 @@ impl NetworkService { fn spawn_service( mut service: NetworkService, - executor: &TaskExecutor, ) -> error::Result> { let (network_exit, mut exit_rx) = tokio::sync::oneshot::channel(); // spawn on the current executor - executor.spawn( - futures::future::poll_fn(move || -> Result<_, ()> { - - let log = &service.log; - - // handles any logic which requires an initial delay - if !service.initial_delay.is_elapsed() { - if let Ok(Async::Ready(_)) = service.initial_delay.poll() { - let multi_addrs = Swarm::listeners(&service.libp2p.swarm).cloned().collect(); - *service.network_globals.listen_multiaddrs.write() = multi_addrs; - } - } - - // perform termination tasks when the network is being shutdown - if let Ok(Async::Ready(_)) | Err(_) = exit_rx.poll() { + tokio::spawn(async move { + loop { + // build the futures to check simultaneously + tokio::select! 
{ + // handle network shutdown + _ = (&mut exit_rx) => { // network thread is terminating let enrs: Vec = service.libp2p.swarm.enr_entries().cloned().collect(); debug!( - log, + service.log, "Persisting DHT to store"; "Number of peers" => format!("{}", enrs.len()), ); match persist_dht::(service.store.clone(), enrs) { Err(e) => error!( - log, + service.log, "Failed to persist DHT on drop"; "error" => format!("{:?}", e) ), Ok(_) => info!( - log, + service.log, "Saved DHT state"; ), } - info!(log.clone(), "Network service shutdown"); - return Ok(Async::Ready(())); - } - - // processes the network channel before processing the libp2p swarm - loop { - // poll the network channel - match service.network_recv.poll() { - Ok(Async::Ready(Some(message))) => match message { - NetworkMessage::RPC(peer_id, rpc_event) => { - trace!(log, "Sending RPC"; "rpc" => format!("{}", rpc_event)); - service.libp2p.swarm.send_rpc(peer_id, rpc_event); - } - NetworkMessage::Propagate { - propagation_source, - message_id, - } => { - // TODO: Remove this for mainnet - // randomly prevents propagation - let mut should_send = true; - if let Some(percentage) = service.propagation_percentage { - // not exact percentage but close enough - let rand = rand::random::() % 100; - if rand > percentage { - // don't propagate - should_send = false; + info!(service.log, "Network service shutdown"); + return; + } + // handle a message sent to the network + Some(message) = service.network_recv.recv() => { + match message { + NetworkMessage::RPC(peer_id, rpc_event) => { + trace!(service.log, "Sending RPC"; "rpc" => format!("{}", rpc_event)); + service.libp2p.swarm.send_rpc(peer_id, rpc_event); + } + NetworkMessage::Propagate { + propagation_source, + message_id, + } => { + // TODO: Remove this for mainnet + // randomly prevents propagation + let mut should_send = true; + if let Some(percentage) = service.propagation_percentage { + // not exact percentage but close enough + let rand = rand::random::() % 100; + 
if rand > percentage { + // don't propagate + should_send = false; + } + } + if !should_send { + info!(service.log, "Random filter did not propagate message"); + } else { + trace!(service.log, "Propagating gossipsub message"; + "propagation_peer" => format!("{:?}", propagation_source), + "message_id" => message_id.to_string(), + ); + service + .libp2p + .swarm + .propagate_message(&propagation_source, message_id); } } - if !should_send { - info!(log, "Random filter did not propagate message"); - } else { - trace!(log, "Propagating gossipsub message"; - "propagation_peer" => format!("{:?}", propagation_source), - "message_id" => message_id.to_string(), - ); - service.libp2p - .swarm - .propagate_message(&propagation_source, message_id); - } - } - NetworkMessage::Publish { messages } => { - // TODO: Remove this for mainnet - // randomly prevents propagation - let mut should_send = true; - if let Some(percentage) = service.propagation_percentage { - // not exact percentage but close enough - let rand = rand::random::() % 100; - if rand > percentage { - // don't propagate - should_send = false; + NetworkMessage::Publish { messages } => { + // TODO: Remove this for mainnet + // randomly prevents propagation + let mut should_send = true; + if let Some(percentage) = service.propagation_percentage { + // not exact percentage but close enough + let rand = rand::random::() % 100; + if rand > percentage { + // don't propagate + should_send = false; + } } - } - if !should_send { - info!(log, "Random filter did not publish messages"); - } else { - let mut topic_kinds = Vec::new(); - for message in &messages { + if !should_send { + info!(service.log, "Random filter did not publish messages"); + } else { + let mut topic_kinds = Vec::new(); + for message in &messages { if !topic_kinds.contains(&message.kind()) { topic_kinds.push(message.kind()); } } - debug!(log, "Sending pubsub messages"; "count" => messages.len(), "topics" => format!("{:?}", topic_kinds)); - 
service.libp2p.swarm.publish(messages); - } - } - NetworkMessage::Disconnect { peer_id } => { - service.libp2p.disconnect_and_ban_peer( - peer_id, - std::time::Duration::from_secs(BAN_PEER_TIMEOUT), - ); - } - NetworkMessage::Subscribe { subscriptions } => - { - // the result is dropped as it used solely for ergonomics - let _ = service.attestation_service.validator_subscriptions(subscriptions); - } - }, - Ok(Async::NotReady) => break, - Ok(Async::Ready(None)) => { - debug!(log, "Network channel closed"); - return Err(()); - } - Err(e) => { - debug!(log, "Network channel error"; "error" => format!("{}", e)); - return Err(()); - } - } - } - - // process any attestation service events - // NOTE: This must come after the network message processing as that may trigger events in - // the attestation service. - while let Ok(Async::Ready(Some(attestation_service_message))) = service.attestation_service.poll() { - match attestation_service_message { - // TODO: Implement - AttServiceMessage::Subscribe(subnet_id) => { - service.libp2p.swarm.subscribe_to_subnet(subnet_id); - }, - AttServiceMessage::Unsubscribe(subnet_id) => { - service.libp2p.swarm.subscribe_to_subnet(subnet_id); - }, - AttServiceMessage::EnrAdd(subnet_id) => { - service.libp2p.swarm.update_enr_subnet(subnet_id, true); - }, - AttServiceMessage::EnrRemove(subnet_id) => { - service.libp2p.swarm.update_enr_subnet(subnet_id, false); - }, - AttServiceMessage::DiscoverPeers(subnet_id) => { - service.libp2p.swarm.peers_request(subnet_id); - }, - } - } - - let mut peers_to_ban = Vec::new(); - // poll the swarm - loop { - match service.libp2p.poll() { - Ok(Async::Ready(Some(event))) => match event { - BehaviourEvent::RPC(peer_id, rpc_event) => { - // if we received a Goodbye message, drop and ban the peer - if let RPCEvent::Request(_, RPCRequest::Goodbye(_)) = rpc_event { - peers_to_ban.push(peer_id.clone()); - }; - service.router_send - .try_send(RouterMessage::RPC(peer_id, rpc_event)) - .map_err(|_| { debug!(log, 
"Failed to send RPC to router");} )?; - } - BehaviourEvent::PeerDialed(peer_id) => { - debug!(log, "Peer Dialed"; "peer_id" => format!("{}", peer_id)); - service.router_send - .try_send(RouterMessage::PeerDialed(peer_id)) - .map_err(|_| { debug!(log, "Failed to send peer dialed to router");})?; - } - BehaviourEvent::PeerDisconnected(peer_id) => { - debug!(log, "Peer Disconnected"; "peer_id" => format!("{}", peer_id)); - service.router_send - .try_send(RouterMessage::PeerDisconnected(peer_id)) - .map_err(|_| { debug!(log, "Failed to send peer disconnect to router");})?; - } - BehaviourEvent::StatusPeer(peer_id) => { - service.router_send - .try_send(RouterMessage::StatusPeer(peer_id)) - .map_err(|_| { debug!(log, "Failed to send re-status peer to router");})?; - } - BehaviourEvent::PubsubMessage { - id, - source, - message, - .. - } => { - - match message { - // attestation information gets processed in the attestation service - PubsubMessage::Attestation(ref subnet_and_attestation) => { - let subnet = &subnet_and_attestation.0; - let attestation = &subnet_and_attestation.1; - // checks if we have an aggregator for the slot. 
If so, we process - // the attestation - if service.attestation_service.should_process_attestation(&id, &source, subnet, attestation) { - service.router_send - .try_send(RouterMessage::PubsubMessage(id, source, message)) - .map_err(|_| { debug!(log, "Failed to send pubsub message to router");})?; - } - } - _ => { - // all else is sent to the router - service.router_send - .try_send(RouterMessage::PubsubMessage(id, source, message)) - .map_err(|_| { debug!(log, "Failed to send pubsub message to router");})?; + debug!( + service.log, + "Sending pubsub messages"; + "count" => messages.len(), + "topics" => format!("{:?}", topic_kinds) + ); + expose_publish_metrics(&messages); + service.libp2p.swarm.publish(messages); } } - } - BehaviourEvent::PeerSubscribed(_, _) => {} - }, - Ok(Async::Ready(None)) => unreachable!("Stream never ends"), - Ok(Async::NotReady) => break, - Err(_) => break, + NetworkMessage::Disconnect { peer_id } => { + service.libp2p.disconnect_and_ban_peer( + peer_id, + std::time::Duration::from_secs(BAN_PEER_TIMEOUT), + ); + } + NetworkMessage::Subscribe { subscriptions } => { + // the result is dropped as it used solely for ergonomics + let _ = service + .attestation_service + .validator_subscriptions(subscriptions); + } + } + } + // process any attestation service events + Some(attestation_service_message) = service.attestation_service.next() => { + match attestation_service_message { + // TODO: Implement + AttServiceMessage::Subscribe(subnet_id) => { + service.libp2p.swarm.subscribe_to_subnet(subnet_id); + } + AttServiceMessage::Unsubscribe(subnet_id) => { + service.libp2p.swarm.subscribe_to_subnet(subnet_id); + } + AttServiceMessage::EnrAdd(subnet_id) => { + service.libp2p.swarm.update_enr_subnet(subnet_id, true); + } + AttServiceMessage::EnrRemove(subnet_id) => { + service.libp2p.swarm.update_enr_subnet(subnet_id, false); + } + AttServiceMessage::DiscoverPeers(subnet_id) => { + service.libp2p.swarm.peers_request(subnet_id); + } + } + } + 
libp2p_event = service.libp2p.next_event() => { + // poll the swarm + match libp2p_event { + Libp2pEvent::Behaviour(event) => match event { + BehaviourEvent::RPC(peer_id, rpc_event) => { + // if we received a Goodbye message, drop and ban the peer + if let RPCEvent::Request(_, RPCRequest::Goodbye(_)) = rpc_event { + //peers_to_ban.push(peer_id.clone()); + service.libp2p.disconnect_and_ban_peer( + peer_id.clone(), + std::time::Duration::from_secs(BAN_PEER_TIMEOUT), + ); + }; + let _ = service + .router_send + .send(RouterMessage::RPC(peer_id, rpc_event)) + .map_err(|_| { + debug!(service.log, "Failed to send RPC to router"); + }); + } + BehaviourEvent::StatusPeer(peer_id) => { + let _ = service + .router_send + .send(RouterMessage::StatusPeer(peer_id)) + .map_err(|_| { + debug!(service.log, "Failed to send re-status peer to router"); + }); + } + BehaviourEvent::PubsubMessage { + id, + source, + message, + .. + } => { + // Update prometheus metrics. + expose_receive_metrics(&message); + match message { + // attestation information gets processed in the attestation service + PubsubMessage::Attestation(ref subnet_and_attestation) => { + let subnet = &subnet_and_attestation.0; + let attestation = &subnet_and_attestation.1; + // checks if we have an aggregator for the slot. 
If so, we process + // the attestation + if service.attestation_service.should_process_attestation( + &id, + &source, + subnet, + attestation, + ) { + let _ = service + .router_send + .send(RouterMessage::PubsubMessage(id, source, message)) + .map_err(|_| { + debug!(service.log, "Failed to send pubsub message to router"); + }); + } else { + metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_IGNORED) + } + } + _ => { + // all else is sent to the router + let _ = service + .router_send + .send(RouterMessage::PubsubMessage(id, source, message)) + .map_err(|_| { + debug!(service.log, "Failed to send pubsub message to router"); + }); + } + } + } + BehaviourEvent::PeerSubscribed(_, _) => {}, + } + Libp2pEvent::NewListenAddr(multiaddr) => { + service.network_globals.listen_multiaddrs.write().push(multiaddr); + } + Libp2pEvent::PeerConnected{ peer_id, endpoint,} => { + debug!(service.log, "Peer Connected"; "peer_id" => peer_id.to_string(), "endpoint" => format!("{:?}", endpoint)); + if let eth2_libp2p::ConnectedPoint::Dialer { .. 
} = endpoint { + let _ = service + .router_send + .send(RouterMessage::PeerDialed(peer_id)) + .map_err(|_| { + debug!(service.log, "Failed to send peer dialed to router"); }); + } + } + Libp2pEvent::PeerDisconnected{ peer_id, endpoint,} => { + debug!(service.log, "Peer Disconnected"; "peer_id" => peer_id.to_string(), "endpoint" => format!("{:?}", endpoint)); + let _ = service + .router_send + .send(RouterMessage::PeerDisconnected(peer_id)) + .map_err(|_| { + debug!(service.log, "Failed to send peer disconnect to router"); + }); + } + } + } } - } - // ban and disconnect any peers that sent Goodbye requests - while let Some(peer_id) = peers_to_ban.pop() { - service.libp2p.disconnect_and_ban_peer( - peer_id.clone(), - std::time::Duration::from_secs(BAN_PEER_TIMEOUT), - ); - } - - // if we have just forked, update inform the libp2p layer - if let Some(mut update_fork_delay) = service.next_fork_update.take() { - if !update_fork_delay.is_elapsed() { - if let Ok(Async::Ready(_)) = update_fork_delay.poll() { - service.libp2p.swarm.update_fork_version(service.beacon_chain.enr_fork_id()); - service.next_fork_update = next_fork_delay(&service.beacon_chain); + if let Some(delay) = &service.next_fork_update { + if delay.is_elapsed() { + service + .libp2p + .swarm + .update_fork_version(service.beacon_chain.enr_fork_id()); + service.next_fork_update = next_fork_delay(&service.beacon_chain); } } } - - Ok(Async::NotReady) - }) - - ); + }); Ok(network_exit) } @@ -376,11 +370,11 @@ fn spawn_service( /// If there is no scheduled fork, `None` is returned. fn next_fork_delay( beacon_chain: &BeaconChain, -) -> Option { +) -> Option { beacon_chain.duration_to_next_fork().map(|until_fork| { // Add a short time-out to start within the new fork period. 
let delay = Duration::from_millis(200); - tokio::timer::Delay::new(Instant::now() + until_fork + delay) + tokio::time::delay_until(tokio::time::Instant::now() + until_fork + delay) }) } @@ -403,3 +397,33 @@ pub enum NetworkMessage { /// Disconnect and bans a peer id. Disconnect { peer_id: PeerId }, } + +/// Inspects the `messages` that were being sent to the network and updates Prometheus metrics. +fn expose_publish_metrics(messages: &[PubsubMessage]) { + for message in messages { + match message { + PubsubMessage::BeaconBlock(_) => metrics::inc_counter(&metrics::GOSSIP_BLOCKS_TX), + PubsubMessage::Attestation(_) => { + metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_TX) + } + PubsubMessage::AggregateAndProofAttestation(_) => { + metrics::inc_counter(&metrics::GOSSIP_AGGREGATED_ATTESTATIONS_TX) + } + _ => {} + } + } +} + +/// Inspects a `message` received from the network and updates Prometheus metrics. +fn expose_receive_metrics(message: &PubsubMessage) { + match message { + PubsubMessage::BeaconBlock(_) => metrics::inc_counter(&metrics::GOSSIP_BLOCKS_RX), + PubsubMessage::Attestation(_) => { + metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_RX) + } + PubsubMessage::AggregateAndProofAttestation(_) => { + metrics::inc_counter(&metrics::GOSSIP_AGGREGATED_ATTESTATIONS_RX) + } + _ => {} + } +} diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 90a6170dbd..a33bd9eeb3 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -5,7 +5,6 @@ mod tests { use crate::{NetworkConfig, NetworkService}; use beacon_chain::test_utils::BeaconChainHarness; use eth2_libp2p::Enr; - use futures::{Future, IntoFuture}; use slog::Logger; use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; @@ -33,21 +32,20 @@ mod tests { let enrs = vec![enr1, enr2]; let runtime = Runtime::new().unwrap(); - let executor = runtime.executor(); + let handle = 
runtime.handle().clone(); let mut config = NetworkConfig::default(); config.libp2p_port = 21212; config.discovery_port = 21212; config.boot_nodes = enrs.clone(); - runtime - .block_on_all( - // Create a new network service which implicitly gets dropped at the - // end of the block. - NetworkService::start(beacon_chain.clone(), &config, &executor, log.clone()) - .into_future() - .and_then(move |(_globals, _service, _exit)| Ok(())), - ) - .unwrap(); + runtime.spawn(async move { + // Create a new network service which implicitly gets dropped at the + // end of the block. + + let _ = + NetworkService::start(beacon_chain.clone(), &config, &handle, log.clone()).unwrap(); + }); + runtime.shutdown_timeout(tokio::time::Duration::from_millis(300)); // Load the persisted dht from the store let persisted_enrs = load_dht(store); diff --git a/beacon_node/network/src/sync/block_processor.rs b/beacon_node/network/src/sync/block_processor.rs index 8c53869e40..3284a38a3e 100644 --- a/beacon_node/network/src/sync/block_processor.rs +++ b/beacon_node/network/src/sync/block_processor.rs @@ -34,26 +34,38 @@ pub fn spawn_block_processor( chain: Weak>, process_id: ProcessId, downloaded_blocks: Vec>, - mut sync_send: mpsc::UnboundedSender>, + sync_send: mpsc::UnboundedSender>, log: slog::Logger, ) { std::thread::spawn(move || { match process_id { // this a request from the range sync ProcessId::RangeBatchId(chain_id, batch_id) => { - debug!(log, "Processing batch"; "id" => *batch_id, "blocks" => downloaded_blocks.len()); + let len = downloaded_blocks.len(); + let start_slot = if len > 0 { + downloaded_blocks[0].message.slot.as_u64() + } else { + 0 + }; + let end_slot = if len > 0 { + downloaded_blocks[len - 1].message.slot.as_u64() + } else { + 0 + }; + + debug!(log, "Processing batch"; "id" => *batch_id, "blocks" => downloaded_blocks.len(), "start_slot" => start_slot, "end_slot" => end_slot); let result = match process_blocks(chain, downloaded_blocks.iter(), &log) { (_, Ok(_)) => { - 
debug!(log, "Batch processed"; "id" => *batch_id ); + debug!(log, "Batch processed"; "id" => *batch_id , "start_slot" => start_slot, "end_slot" => end_slot); BatchProcessResult::Success } (imported_blocks, Err(e)) if imported_blocks > 0 => { - debug!(log, "Batch processing failed but imported some blocks"; + warn!(log, "Batch processing failed but imported some blocks"; "id" => *batch_id, "error" => e, "imported_blocks"=> imported_blocks); BatchProcessResult::Partial } (_, Err(e)) => { - debug!(log, "Batch processing failed"; "id" => *batch_id, "error" => e); + warn!(log, "Batch processing failed"; "id" => *batch_id, "error" => e); BatchProcessResult::Failed } }; @@ -64,7 +76,7 @@ pub fn spawn_block_processor( downloaded_blocks, result, }; - sync_send.try_send(msg).unwrap_or_else(|_| { + sync_send.send(msg).unwrap_or_else(|_| { debug!( log, "Block processor could not inform range sync result. Likely shutting down." @@ -84,7 +96,7 @@ pub fn spawn_block_processor( (_, Err(e)) => { warn!(log, "Parent lookup failed"; "last_peer_id" => format!("{}", peer_id), "error" => e); sync_send - .try_send(SyncMessage::ParentLookupFailed(peer_id)) + .send(SyncMessage::ParentLookupFailed(peer_id)) .unwrap_or_else(|_| { // on failure, inform to downvote the peer debug!( diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 47fa65d881..ce8e26b3ee 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -43,7 +43,6 @@ use eth2_libp2p::rpc::{methods::*, RequestId}; use eth2_libp2p::types::NetworkGlobals; use eth2_libp2p::PeerId; use fnv::FnvHashMap; -use futures::prelude::*; use slog::{crit, debug, error, info, trace, warn, Logger}; use smallvec::SmallVec; use std::boxed::Box; @@ -182,7 +181,7 @@ impl SingleBlockRequest { /// chain. This allows the chain to be /// dropped during the syncing process which will gracefully end the `SyncManager`. 
pub fn spawn( - executor: &tokio::runtime::TaskExecutor, + runtime_handle: &tokio::runtime::Handle, beacon_chain: Arc>, network_globals: Arc>, network_send: mpsc::UnboundedSender>, @@ -197,14 +196,14 @@ pub fn spawn( let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); // create an instance of the SyncManager - let sync_manager = SyncManager { + let mut sync_manager = SyncManager { range_sync: RangeSync::new( beacon_chain.clone(), network_globals.clone(), sync_send.clone(), log.clone(), ), - network: SyncNetworkContext::new(network_send, log.clone()), + network: SyncNetworkContext::new(network_send, network_globals.clone(), log.clone()), chain: beacon_chain, network_globals, input_channel: sync_recv, @@ -216,14 +215,10 @@ pub fn spawn( // spawn the sync manager thread debug!(log, "Sync Manager started"); - executor.spawn( - sync_manager - .select(exit_rx.then(|_| Ok(()))) - .then(move |_| { - info!(log.clone(), "Sync Manager shutdown"); - Ok(()) - }), - ); + runtime_handle.spawn(async move { + futures::future::select(Box::pin(sync_manager.main()), exit_rx).await; + info!(log.clone(), "Sync Manager shutdown"); + }); (sync_send, sync_exit) } @@ -470,6 +465,8 @@ impl SyncManager { } } + debug!(self.log, "Unknown block received. Starting a parent lookup"; "block_slot" => block.message.slot, "block_hash" => format!("{}", block.canonical_root())); + let parent_request = ParentRequests { downloaded_blocks: vec![block], failed_attempts: 0, @@ -730,17 +727,13 @@ impl SyncManager { self.parent_queue.push(parent_request); } } -} -impl Future for SyncManager { - type Item = (); - type Error = String; - - fn poll(&mut self) -> Result, Self::Error> { + /// The main driving future for the sync manager. 
+ async fn main(&mut self) { // process any inbound messages loop { - match self.input_channel.poll() { - Ok(Async::Ready(Some(message))) => match message { + if let Some(sync_message) = self.input_channel.recv().await { + match sync_message { SyncMessage::AddPeer(peer_id, info) => { self.add_peer(peer_id, info); } @@ -792,17 +785,8 @@ impl Future for SyncManager { SyncMessage::ParentLookupFailed(peer_id) => { self.network.downvote_peer(peer_id); } - }, - Ok(Async::NotReady) => break, - Ok(Async::Ready(None)) => { - return Err("Sync manager channel closed".into()); - } - Err(e) => { - return Err(format!("Sync Manager channel error: {:?}", e)); } } } - - Ok(Async::NotReady) } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index b28164b6d5..a5813ff967 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -6,7 +6,7 @@ use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RequestId}; -use eth2_libp2p::PeerId; +use eth2_libp2p::{Client, NetworkGlobals, PeerId}; use slog::{debug, trace, warn}; use std::sync::Arc; use tokio::sync::mpsc; @@ -18,20 +18,39 @@ pub struct SyncNetworkContext { /// The network channel to relay messages to the Network service. network_send: mpsc::UnboundedSender>, + /// Access to the network global vars. + network_globals: Arc>, + + /// A sequential ID for all RPC requests. request_id: RequestId, /// Logger for the `SyncNetworkContext`. 
log: slog::Logger, } impl SyncNetworkContext { - pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { + pub fn new( + network_send: mpsc::UnboundedSender>, + network_globals: Arc>, + log: slog::Logger, + ) -> Self { Self { network_send, + network_globals, request_id: 1, log, } } + /// Returns the Client type of the peer if known + pub fn client_type(&self, peer_id: &PeerId) -> Client { + self.network_globals + .peers + .read() + .peer_info(peer_id) + .map(|info| info.client.clone()) + .unwrap_or_default() + } + pub fn status_peer( &mut self, chain: Arc>, @@ -104,7 +123,7 @@ impl SyncNetworkContext { // ignore the error if the channel send fails let _ = self.send_rpc_request(peer_id.clone(), RPCRequest::Goodbye(reason)); self.network_send - .try_send(NetworkMessage::Disconnect { peer_id }) + .send(NetworkMessage::Disconnect { peer_id }) .unwrap_or_else(|_| { warn!( self.log, @@ -130,7 +149,7 @@ impl SyncNetworkContext { rpc_event: RPCEvent, ) -> Result<(), &'static str> { self.network_send - .try_send(NetworkMessage::RPC(peer_id, rpc_event)) + .send(NetworkMessage::RPC(peer_id, rpc_event)) .map_err(|_| { debug!( self.log, diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 424c3a7e88..3a98adcac5 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -31,6 +31,7 @@ const BATCH_BUFFER_SIZE: u8 = 5; /// be downvoted. const INVALID_BATCH_LOOKUP_ATTEMPTS: u8 = 3; +#[derive(PartialEq)] /// A return type for functions that act on a `Chain` which informs the caller whether the chain /// has been completed and should be removed or to be kept if further processing is /// required. 
@@ -380,8 +381,8 @@ impl SyncingChain { } } BatchProcessResult::Failed => { - warn!(self.log, "Batch processing failed"; - "chain_id" => self.id,"id" => *batch.id, "peer" => format!("{}", batch.current_peer)); + debug!(self.log, "Batch processing failed"; + "chain_id" => self.id,"id" => *batch.id, "peer" => batch.current_peer.to_string(), "client" => network.client_type(&batch.current_peer).to_string()); // The batch processing failed // This could be because this batch is invalid, or a previous invalidated batch // is invalid. We need to find out which and downvote the peer that has sent us diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index becfd7df24..0c0e15419f 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -369,7 +369,19 @@ impl ChainCollection { .find_map(|(index, chain)| Some((index, func(chain)?))) } - /// Runs a function on all finalized chains. + /// Given a chain iterator, runs a given function on each chain and return all `Some` results. + fn request_function_all<'a, F, I, U>(chain: I, mut func: F) -> Vec<(usize, U)> + where + I: Iterator>, + F: FnMut(&'a mut SyncingChain) -> Option, + { + chain + .enumerate() + .filter_map(|(index, chain)| Some((index, func(chain)?))) + .collect() + } + + /// Runs a function on finalized chains until we get the first `Some` result from `F`. pub fn finalized_request(&mut self, func: F) -> Option<(usize, U)> where F: FnMut(&mut SyncingChain) -> Option, @@ -377,7 +389,7 @@ impl ChainCollection { ChainCollection::request_function(self.finalized_chains.iter_mut(), func) } - /// Runs a function on all head chains. + /// Runs a function on head chains until we get the first `Some` result from `F`. 
pub fn head_request(&mut self, func: F) -> Option<(usize, U)> where F: FnMut(&mut SyncingChain) -> Option, @@ -385,7 +397,7 @@ impl ChainCollection { ChainCollection::request_function(self.head_chains.iter_mut(), func) } - /// Runs a function on all finalized and head chains. + /// Runs a function on finalized and head chains until we get the first `Some` result from `F`. pub fn head_finalized_request(&mut self, func: F) -> Option<(usize, U)> where F: FnMut(&mut SyncingChain) -> Option, @@ -398,6 +410,19 @@ impl ChainCollection { ) } + /// Runs a function on all finalized and head chains and collects all `Some` results from `F`. + pub fn head_finalized_request_all(&mut self, func: F) -> Vec<(usize, U)> + where + F: FnMut(&mut SyncingChain) -> Option, + { + ChainCollection::request_function_all( + self.finalized_chains + .iter_mut() + .chain(self.head_chains.iter_mut()), + func, + ) + } + /// Removes any outdated finalized or head chains. /// /// This removes chains with no peers, or chains whose start block slot is less than our current diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 59c789f819..ca827082e1 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -355,7 +355,7 @@ impl RangeSync { peer_id: &PeerId, ) { // if the peer is in the awaiting head mapping, remove it - self.awaiting_head_peers.remove(&peer_id); + self.awaiting_head_peers.remove(peer_id); // remove the peer from any peer pool self.remove_peer(network, peer_id); @@ -370,26 +370,26 @@ impl RangeSync { /// for this peer. If so we mark the batch as failed. The batch may then hit it's maximum /// retries. In this case, we need to remove the chain and re-status all the peers. 
fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { - if let Some((index, ProcessingResult::RemoveChain)) = - self.chains.head_finalized_request(|chain| { - if chain.peer_pool.remove(peer_id) { - // this chain contained the peer - while let Some(batch) = chain.pending_batches.remove_batch_by_peer(peer_id) { - if let ProcessingResult::RemoveChain = chain.failed_batch(network, batch) { - // a single batch failed, remove the chain - return Some(ProcessingResult::RemoveChain); - } + for (index, result) in self.chains.head_finalized_request_all(|chain| { + if chain.peer_pool.remove(peer_id) { + // this chain contained the peer + while let Some(batch) = chain.pending_batches.remove_batch_by_peer(peer_id) { + if let ProcessingResult::RemoveChain = chain.failed_batch(network, batch) { + // a single batch failed, remove the chain + return Some(ProcessingResult::RemoveChain); } - // peer removed from chain, no batch failed - Some(ProcessingResult::KeepChain) - } else { - None } - }) - { - // the chain needed to be removed - debug!(self.log, "Chain being removed due to failed batch"); - self.chains.remove_chain(network, index); + // peer removed from chain, no batch failed + Some(ProcessingResult::KeepChain) + } else { + None + } + }) { + if result == ProcessingResult::RemoveChain { + // the chain needed to be removed + debug!(self.log, "Chain being removed due to failed batch"); + self.chains.remove_chain(network, index); + } } } diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index c754ba4814..5a6ec50387 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -13,31 +13,31 @@ network = { path = "../network" } eth2-libp2p = { path = "../eth2-libp2p" } store = { path = "../store" } version = { path = "../version" } -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -serde_yaml = "0.8" -slog = "2.5" -slog-term = "2.4" -slog-async = "2.3" -eth2_ssz = { path = 
"../../eth2/utils/ssz" } -eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" } +serde = { version = "1.0.110", features = ["derive"] } +serde_json = "1.0.52" +serde_yaml = "0.8.11" +slog = "2.5.2" +slog-term = "2.5.0" +slog-async = "2.5.0" +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } -http = "0.1" -hyper = "0.12" -tokio = "0.1.22" -url = "2.1" -lazy_static = "1.3.0" +http = "0.2.1" +hyper = "0.13.5" +tokio = { version = "0.2", features = ["sync"] } +url = "2.1.1" +lazy_static = "1.4.0" eth2_config = { path = "../../eth2/utils/eth2_config" } lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } slot_clock = { path = "../../eth2/utils/slot_clock" } -hex = "0.3" -parking_lot = "0.9" -futures = "0.1.29" +hex = "0.4.2" +parking_lot = "0.10.2" +futures = "0.3.5" operation_pool = { path = "../../eth2/operation_pool" } rayon = "1.3.0" [dev-dependencies] remote_beacon_node = { path = "../../eth2/utils/remote_beacon_node" } node_test_rig = { path = "../../tests/node_test_rig" } -tree_hash = { path = "../../eth2/utils/tree_hash" } +tree_hash = "0.1.0" diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 0df3673eed..7d41535b30 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -1,9 +1,8 @@ use crate::helpers::*; use crate::response_builder::ResponseBuilder; use crate::validator::get_state_for_epoch; -use crate::{ApiError, ApiResult, BoxFut, UrlQuery}; +use crate::{ApiError, ApiResult, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; -use futures::{Future, Stream}; use hyper::{Body, Request}; use rest_types::{ BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, @@ -216,23 +215,22 @@ pub fn get_active_validators( /// /// This method allows for a basically unbounded list of `pubkeys`, where as the `get_validators` /// 
request is limited by the max number of pubkeys you can fit in a URL. -pub fn post_validators( +pub async fn post_validators( req: Request, beacon_chain: Arc>, -) -> BoxFut { +) -> ApiResult { let response_builder = ResponseBuilder::new(&req); - let future = req - .into_body() - .concat2() - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) - .and_then(|chunks| { - serde_json::from_slice::(&chunks).map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorRequest: {:?}", - e - )) - }) + let body = req.into_body(); + let chunks = hyper::body::to_bytes(body) + .await + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + serde_json::from_slice::(&chunks) + .map_err(|e| { + ApiError::BadRequest(format!( + "Unable to parse JSON into ValidatorRequest: {:?}", + e + )) }) .and_then(|bulk_request| { validator_responses_by_pubkey( @@ -241,9 +239,7 @@ pub fn post_validators( bulk_request.pubkeys, ) }) - .and_then(|validators| response_builder?.body(&validators)); - - Box::new(future) + .and_then(|validators| response_builder?.body(&validators)) } /// Returns either the state given by `state_root_opt`, or the canonical head state if it is @@ -449,23 +445,23 @@ pub fn get_genesis_validators_root( ResponseBuilder::new(&req)?.body(&beacon_chain.head_info()?.genesis_validators_root) } -pub fn proposer_slashing( +pub async fn proposer_slashing( req: Request, beacon_chain: Arc>, -) -> BoxFut { +) -> ApiResult { let response_builder = ResponseBuilder::new(&req); - let future = req - .into_body() - .concat2() - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) - .and_then(|chunks| { - serde_json::from_slice::(&chunks).map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ProposerSlashing: {:?}", - e - )) - }) + let body = req.into_body(); + let chunks = hyper::body::to_bytes(body) + .await + .map_err(|e| 
ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + + serde_json::from_slice::(&chunks) + .map_err(|e| { + ApiError::BadRequest(format!( + "Unable to parse JSON into ProposerSlashing: {:?}", + e + )) }) .and_then(move |proposer_slashing| { let spec = &beacon_chain.spec; @@ -481,33 +477,31 @@ pub fn proposer_slashing( )) }) } else { - Err(ApiError::BadRequest( + return Err(ApiError::BadRequest( "Cannot insert proposer slashing on node without Eth1 connection.".to_string(), - )) + )); } }) - .and_then(|_| response_builder?.body(&true)); - - Box::new(future) + .and_then(|_| response_builder?.body(&true)) } -pub fn attester_slashing( +pub async fn attester_slashing( req: Request, beacon_chain: Arc>, -) -> BoxFut { +) -> ApiResult { let response_builder = ResponseBuilder::new(&req); - let future = req - .into_body() - .concat2() - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) - .and_then(|chunks| { - serde_json::from_slice::>(&chunks).map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into AttesterSlashing: {:?}", - e - )) - }) + let body = req.into_body(); + let chunks = hyper::body::to_bytes(body) + .await + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + + serde_json::from_slice::>(&chunks) + .map_err(|e| { + ApiError::BadRequest(format!( + "Unable to parse JSON into AttesterSlashing: {:?}", + e + )) }) .and_then(move |attester_slashing| { let spec = &beacon_chain.spec; @@ -528,7 +522,5 @@ pub fn attester_slashing( )) } }) - .and_then(|_| response_builder?.body(&true)); - - Box::new(future) + .and_then(|_| response_builder?.body(&true)) } diff --git a/beacon_node/rest_api/src/consensus.rs b/beacon_node/rest_api/src/consensus.rs index 64b5a5df32..a006b379f7 100644 --- a/beacon_node/rest_api/src/consensus.rs +++ b/beacon_node/rest_api/src/consensus.rs @@ -1,8 +1,7 @@ use crate::helpers::*; use crate::response_builder::ResponseBuilder; -use 
crate::{ApiError, ApiResult, BoxFut, UrlQuery}; +use crate::{ApiError, ApiResult, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use futures::{Future, Stream}; use hyper::{Body, Request}; use rest_types::{IndividualVotesRequest, IndividualVotesResponse}; use serde::{Deserialize, Serialize}; @@ -71,23 +70,23 @@ pub fn get_vote_count( ResponseBuilder::new(&req)?.body(&report) } -pub fn post_individual_votes( +pub async fn post_individual_votes( req: Request, beacon_chain: Arc>, -) -> BoxFut { +) -> ApiResult { let response_builder = ResponseBuilder::new(&req); - let future = req - .into_body() - .concat2() - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) - .and_then(|chunks| { - serde_json::from_slice::(&chunks).map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorDutiesRequest: {:?}", - e - )) - }) + let body = req.into_body(); + let chunks = hyper::body::to_bytes(body) + .await + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + + serde_json::from_slice::(&chunks) + .map_err(|e| { + ApiError::BadRequest(format!( + "Unable to parse JSON into ValidatorDutiesRequest: {:?}", + e + )) }) .and_then(move |body| { let epoch = body.epoch; @@ -136,7 +135,5 @@ pub fn post_individual_votes( }) .collect::, _>>() }) - .and_then(|votes| response_builder?.body_no_ssz(&votes)); - - Box::new(future) + .and_then(|votes| response_builder?.body_no_ssz(&votes)) } diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index 913fa8bd6d..0897e62fef 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -1,4 +1,3 @@ -use crate::BoxFut; use hyper::{Body, Response, StatusCode}; use std::error::Error as StdError; @@ -42,12 +41,6 @@ impl Into> for ApiError { } } -impl Into for ApiError { - fn into(self) -> BoxFut { - Box::new(futures::future::err(self)) - } -} - impl From for ApiError { fn from(e: store::Error) 
-> ApiError { ApiError::ServerError(format!("Database error: {:?}", e)) diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 171a10d246..b07bb97d58 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -229,14 +229,14 @@ pub fn implementation_pending_response(_req: Request) -> ApiResult { } pub fn publish_beacon_block_to_network( - mut chan: NetworkChannel, + chan: NetworkChannel, block: SignedBeaconBlock, ) -> Result<(), ApiError> { // send the block via SSZ encoding let messages = vec![PubsubMessage::BeaconBlock(Box::new(block))]; // Publish the block to the p2p network via gossipsub. - if let Err(e) = chan.try_send(NetworkMessage::Publish { messages }) { + if let Err(e) = chan.send(NetworkMessage::Publish { messages }) { return Err(ApiError::ServerError(format!( "Unable to send new block to network: {:?}", e diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 2702f38c90..c05cb66ab4 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -26,23 +26,21 @@ pub use config::ApiEncodingFormat; use error::{ApiError, ApiResult}; use eth2_config::Eth2Config; use eth2_libp2p::NetworkGlobals; -use hyper::rt::Future; +use futures::future::TryFutureExt; use hyper::server::conn::AddrStream; use hyper::service::{make_service_fn, service_fn}; -use hyper::{Body, Request, Response, Server}; +use hyper::{Body, Request, Server}; use slog::{info, warn}; use std::net::SocketAddr; use std::ops::Deref; use std::path::PathBuf; use std::sync::Arc; -use tokio::runtime::TaskExecutor; use tokio::sync::{mpsc, oneshot}; use url_query::UrlQuery; pub use crate::helpers::parse_pubkey_bytes; pub use config::Config; -pub type BoxFut = Box, Error = ApiError> + Send>; pub type NetworkChannel = mpsc::UnboundedSender>; pub struct NetworkInfo { @@ -54,7 +52,6 @@ pub struct NetworkInfo { #[allow(clippy::too_many_arguments)] pub fn start_server( config: &Config, - 
executor: &TaskExecutor, beacon_chain: Arc>, network_info: NetworkInfo, db_path: PathBuf, @@ -75,18 +72,20 @@ pub fn start_server( let db_path = db_path.clone(); let freezer_db_path = freezer_db_path.clone(); - service_fn(move |req: Request| { - router::route( - req, - beacon_chain.clone(), - network_globals.clone(), - network_channel.clone(), - eth2_config.clone(), - log.clone(), - db_path.clone(), - freezer_db_path.clone(), - ) - }) + async move { + Ok::<_, hyper::Error>(service_fn(move |req: Request| { + router::route( + req, + beacon_chain.clone(), + network_globals.clone(), + network_channel.clone(), + eth2_config.clone(), + log.clone(), + db_path.clone(), + freezer_db_path.clone(), + ) + })) + } }); let bind_addr = (config.listen_address, config.port).into(); @@ -99,16 +98,19 @@ pub fn start_server( let actual_listen_addr = server.local_addr(); // Build a channel to kill the HTTP server. - let (exit_signal, exit) = oneshot::channel(); + let (exit_signal, exit) = oneshot::channel::<()>(); let inner_log = log.clone(); - let server_exit = exit.and_then(move |_| { + let server_exit = async move { + let _ = exit.await; info!(inner_log, "HTTP service shutdown"); - Ok(()) - }); + }; + // Configure the `hyper` server to gracefully shutdown when the shutdown channel is triggered. let inner_log = log.clone(); let server_future = server - .with_graceful_shutdown(server_exit) + .with_graceful_shutdown(async { + server_exit.await; + }) .map_err(move |e| { warn!( inner_log, @@ -123,7 +125,7 @@ pub fn start_server( "port" => actual_listen_addr.port(), ); - executor.spawn(server_future); + tokio::spawn(server_future); Ok((exit_signal, actual_listen_addr)) } diff --git a/beacon_node/rest_api/src/macros.rs b/beacon_node/rest_api/src/macros.rs index e95cfb8aed..f43224e5db 100644 --- a/beacon_node/rest_api/src/macros.rs +++ b/beacon_node/rest_api/src/macros.rs @@ -2,9 +2,7 @@ macro_rules! 
try_future { ($expr:expr) => { match $expr { core::result::Result::Ok(val) => val, - core::result::Result::Err(err) => { - return Box::new(futures::future::err(std::convert::From::from(err))) - } + core::result::Result::Err(err) => return Err(std::convert::From::from(err)), } }; ($expr:expr,) => { diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs index 0c8752a113..2377167915 100644 --- a/beacon_node/rest_api/src/response_builder.rs +++ b/beacon_node/rest_api/src/response_builder.rs @@ -1,6 +1,6 @@ use super::{ApiError, ApiResult}; use crate::config::ApiEncodingFormat; -use http::header; +use hyper::header; use hyper::{Body, Request, Response, StatusCode}; use serde::Serialize; use ssz::Encode; diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs index 1c86e8ebc7..6c7924cce3 100644 --- a/beacon_node/rest_api/src/router.rs +++ b/beacon_node/rest_api/src/router.rs @@ -1,11 +1,10 @@ use crate::{ advanced, beacon, consensus, error::ApiError, helpers, lighthouse, metrics, network, node, - spec, validator, BoxFut, NetworkChannel, + spec, validator, NetworkChannel, }; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_config::Eth2Config; use eth2_libp2p::NetworkGlobals; -use futures::{Future, IntoFuture}; use hyper::{Body, Error, Method, Request, Response}; use slog::debug; use std::path::PathBuf; @@ -13,17 +12,9 @@ use std::sync::Arc; use std::time::Instant; use types::Slot; -fn into_boxfut(item: F) -> BoxFut -where - F: IntoFuture, Error = ApiError>, - F::Future: Send, -{ - Box::new(item.into_future()) -} - // Allowing more than 7 arguments. 
#[allow(clippy::too_many_arguments)] -pub fn route( +pub async fn route( req: Request, beacon_chain: Arc>, network_globals: Arc>, @@ -32,7 +23,7 @@ pub fn route( local_log: slog::Logger, db_path: PathBuf, freezer_db_path: PathBuf, -) -> impl Future, Error = Error> { +) -> Result, Error> { metrics::inc_counter(&metrics::REQUEST_COUNT); let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); let received_instant = Instant::now(); @@ -40,222 +31,179 @@ pub fn route( let path = req.uri().path().to_string(); let log = local_log.clone(); - let request_result: Box, Error = _> + Send> = - match (req.method(), path.as_ref()) { - // Methods for Client - (&Method::GET, "/node/version") => into_boxfut(node::get_version(req)), - (&Method::GET, "/node/syncing") => { - // inform the current slot, or set to 0 - let current_slot = beacon_chain - .head_info() - .map(|info| info.slot) - .unwrap_or_else(|_| Slot::from(0u64)); + let request_result = match (req.method(), path.as_ref()) { + // Methods for Client + (&Method::GET, "/node/version") => node::get_version(req), + (&Method::GET, "/node/syncing") => { + // inform the current slot, or set to 0 + let current_slot = beacon_chain + .head_info() + .map(|info| info.slot) + .unwrap_or_else(|_| Slot::from(0u64)); - into_boxfut(node::syncing::( - req, - network_globals, - current_slot, - )) - } + node::syncing::(req, network_globals, current_slot) + } - // Methods for Network - (&Method::GET, "/network/enr") => { - into_boxfut(network::get_enr::(req, network_globals)) - } - (&Method::GET, "/network/peer_count") => { - into_boxfut(network::get_peer_count::(req, network_globals)) - } - (&Method::GET, "/network/peer_id") => { - into_boxfut(network::get_peer_id::(req, network_globals)) - } - (&Method::GET, "/network/peers") => { - into_boxfut(network::get_peer_list::(req, network_globals)) - } - (&Method::GET, "/network/listen_port") => { - into_boxfut(network::get_listen_port::(req, network_globals)) - } - (&Method::GET, 
"/network/listen_addresses") => { - into_boxfut(network::get_listen_addresses::(req, network_globals)) - } + // Methods for Network + (&Method::GET, "/network/enr") => network::get_enr::(req, network_globals), + (&Method::GET, "/network/peer_count") => network::get_peer_count::(req, network_globals), + (&Method::GET, "/network/peer_id") => network::get_peer_id::(req, network_globals), + (&Method::GET, "/network/peers") => network::get_peer_list::(req, network_globals), + (&Method::GET, "/network/listen_port") => { + network::get_listen_port::(req, network_globals) + } + (&Method::GET, "/network/listen_addresses") => { + network::get_listen_addresses::(req, network_globals) + } - // Methods for Beacon Node - (&Method::GET, "/beacon/head") => into_boxfut(beacon::get_head::(req, beacon_chain)), - (&Method::GET, "/beacon/heads") => { - into_boxfut(beacon::get_heads::(req, beacon_chain)) - } - (&Method::GET, "/beacon/block") => { - into_boxfut(beacon::get_block::(req, beacon_chain)) - } - (&Method::GET, "/beacon/block_root") => { - into_boxfut(beacon::get_block_root::(req, beacon_chain)) - } - (&Method::GET, "/beacon/fork") => into_boxfut(beacon::get_fork::(req, beacon_chain)), - (&Method::GET, "/beacon/genesis_time") => { - into_boxfut(beacon::get_genesis_time::(req, beacon_chain)) - } - (&Method::GET, "/beacon/genesis_validators_root") => { - into_boxfut(beacon::get_genesis_validators_root::(req, beacon_chain)) - } - (&Method::GET, "/beacon/validators") => { - into_boxfut(beacon::get_validators::(req, beacon_chain)) - } - (&Method::POST, "/beacon/validators") => { - into_boxfut(beacon::post_validators::(req, beacon_chain)) - } - (&Method::GET, "/beacon/validators/all") => { - into_boxfut(beacon::get_all_validators::(req, beacon_chain)) - } - (&Method::GET, "/beacon/validators/active") => { - into_boxfut(beacon::get_active_validators::(req, beacon_chain)) - } - (&Method::GET, "/beacon/state") => { - into_boxfut(beacon::get_state::(req, beacon_chain)) - } - 
(&Method::GET, "/beacon/state_root") => { - into_boxfut(beacon::get_state_root::(req, beacon_chain)) - } - (&Method::GET, "/beacon/state/genesis") => { - into_boxfut(beacon::get_genesis_state::(req, beacon_chain)) - } - (&Method::GET, "/beacon/committees") => { - into_boxfut(beacon::get_committees::(req, beacon_chain)) - } - (&Method::POST, "/beacon/proposer_slashing") => { - into_boxfut(beacon::proposer_slashing::(req, beacon_chain)) - } - (&Method::POST, "/beacon/attester_slashing") => { - into_boxfut(beacon::attester_slashing::(req, beacon_chain)) - } + // Methods for Beacon Node + (&Method::GET, "/beacon/head") => beacon::get_head::(req, beacon_chain), + (&Method::GET, "/beacon/heads") => beacon::get_heads::(req, beacon_chain), + (&Method::GET, "/beacon/block") => beacon::get_block::(req, beacon_chain), + (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req, beacon_chain), + (&Method::GET, "/beacon/fork") => beacon::get_fork::(req, beacon_chain), + (&Method::GET, "/beacon/genesis_time") => beacon::get_genesis_time::(req, beacon_chain), + (&Method::GET, "/beacon/genesis_validators_root") => { + beacon::get_genesis_validators_root::(req, beacon_chain) + } + (&Method::GET, "/beacon/validators") => beacon::get_validators::(req, beacon_chain), + (&Method::POST, "/beacon/validators") => { + beacon::post_validators::(req, beacon_chain).await + } + (&Method::GET, "/beacon/validators/all") => { + beacon::get_all_validators::(req, beacon_chain) + } + (&Method::GET, "/beacon/validators/active") => { + beacon::get_active_validators::(req, beacon_chain) + } + (&Method::GET, "/beacon/state") => beacon::get_state::(req, beacon_chain), + (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req, beacon_chain), + (&Method::GET, "/beacon/state/genesis") => { + beacon::get_genesis_state::(req, beacon_chain) + } + (&Method::GET, "/beacon/committees") => beacon::get_committees::(req, beacon_chain), + (&Method::POST, "/beacon/proposer_slashing") => { + 
beacon::proposer_slashing::(req, beacon_chain).await + } + (&Method::POST, "/beacon/attester_slashing") => { + beacon::attester_slashing::(req, beacon_chain).await + } - // Methods for Validator - (&Method::POST, "/validator/duties") => { - let timer = - metrics::start_timer(&metrics::VALIDATOR_GET_DUTIES_REQUEST_RESPONSE_TIME); - let response = validator::post_validator_duties::(req, beacon_chain); - drop(timer); - into_boxfut(response) - } - (&Method::POST, "/validator/subscribe") => { - validator::post_validator_subscriptions::(req, network_channel) - } - (&Method::GET, "/validator/duties/all") => { - into_boxfut(validator::get_all_validator_duties::(req, beacon_chain)) - } - (&Method::GET, "/validator/duties/active") => into_boxfut( - validator::get_active_validator_duties::(req, beacon_chain), - ), - (&Method::GET, "/validator/block") => { - let timer = - metrics::start_timer(&metrics::VALIDATOR_GET_BLOCK_REQUEST_RESPONSE_TIME); - let response = validator::get_new_beacon_block::(req, beacon_chain, log); - drop(timer); - into_boxfut(response) - } - (&Method::POST, "/validator/block") => { - validator::publish_beacon_block::(req, beacon_chain, network_channel, log) - } - (&Method::GET, "/validator/attestation") => { - let timer = - metrics::start_timer(&metrics::VALIDATOR_GET_ATTESTATION_REQUEST_RESPONSE_TIME); - let response = validator::get_new_attestation::(req, beacon_chain); - drop(timer); - into_boxfut(response) - } - (&Method::GET, "/validator/aggregate_attestation") => { - into_boxfut(validator::get_aggregate_attestation::(req, beacon_chain)) - } - (&Method::POST, "/validator/attestations") => { - validator::publish_attestations::(req, beacon_chain, network_channel, log) - } - (&Method::POST, "/validator/aggregate_and_proofs") => { - validator::publish_aggregate_and_proofs::( - req, - beacon_chain, - network_channel, - log, - ) - } + // Methods for Validator + (&Method::POST, "/validator/duties") => { + let timer = 
metrics::start_timer(&metrics::VALIDATOR_GET_DUTIES_REQUEST_RESPONSE_TIME); + let response = validator::post_validator_duties::(req, beacon_chain); + drop(timer); + response.await + } + (&Method::POST, "/validator/subscribe") => { + validator::post_validator_subscriptions::(req, network_channel).await + } + (&Method::GET, "/validator/duties/all") => { + validator::get_all_validator_duties::(req, beacon_chain) + } + (&Method::GET, "/validator/duties/active") => { + validator::get_active_validator_duties::(req, beacon_chain) + } + (&Method::GET, "/validator/block") => { + let timer = metrics::start_timer(&metrics::VALIDATOR_GET_BLOCK_REQUEST_RESPONSE_TIME); + let response = validator::get_new_beacon_block::(req, beacon_chain, log); + drop(timer); + response + } + (&Method::POST, "/validator/block") => { + validator::publish_beacon_block::(req, beacon_chain, network_channel, log).await + } + (&Method::GET, "/validator/attestation") => { + let timer = + metrics::start_timer(&metrics::VALIDATOR_GET_ATTESTATION_REQUEST_RESPONSE_TIME); + let response = validator::get_new_attestation::(req, beacon_chain); + drop(timer); + response + } + (&Method::GET, "/validator/aggregate_attestation") => { + validator::get_aggregate_attestation::(req, beacon_chain) + } + (&Method::POST, "/validator/attestations") => { + validator::publish_attestations::(req, beacon_chain, network_channel, log).await + } + (&Method::POST, "/validator/aggregate_and_proofs") => { + validator::publish_aggregate_and_proofs::(req, beacon_chain, network_channel, log) + .await + } - // Methods for consensus - (&Method::GET, "/consensus/global_votes") => { - into_boxfut(consensus::get_vote_count::(req, beacon_chain)) - } - (&Method::POST, "/consensus/individual_votes") => { - consensus::post_individual_votes::(req, beacon_chain) - } + // Methods for consensus + (&Method::GET, "/consensus/global_votes") => { + consensus::get_vote_count::(req, beacon_chain) + } + (&Method::POST, "/consensus/individual_votes") => { 
+ consensus::post_individual_votes::(req, beacon_chain).await + } - // Methods for bootstrap and checking configuration - (&Method::GET, "/spec") => into_boxfut(spec::get_spec::(req, beacon_chain)), - (&Method::GET, "/spec/slots_per_epoch") => { - into_boxfut(spec::get_slots_per_epoch::(req)) - } - (&Method::GET, "/spec/deposit_contract") => { - into_boxfut(helpers::implementation_pending_response(req)) - } - (&Method::GET, "/spec/eth2_config") => { - into_boxfut(spec::get_eth2_config::(req, eth2_config)) - } + // Methods for bootstrap and checking configuration + (&Method::GET, "/spec") => spec::get_spec::(req, beacon_chain), + (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), + (&Method::GET, "/spec/deposit_contract") => helpers::implementation_pending_response(req), + (&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::(req, eth2_config), - // Methods for advanced parameters - (&Method::GET, "/advanced/fork_choice") => { - into_boxfut(advanced::get_fork_choice::(req, beacon_chain)) - } - (&Method::GET, "/advanced/operation_pool") => { - into_boxfut(advanced::get_operation_pool::(req, beacon_chain)) - } - (&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::( - req, - beacon_chain, - db_path, - freezer_db_path, - )), + // Methods for advanced parameters + (&Method::GET, "/advanced/fork_choice") => { + advanced::get_fork_choice::(req, beacon_chain) + } + (&Method::GET, "/advanced/operation_pool") => { + advanced::get_operation_pool::(req, beacon_chain) + } - // Lighthouse specific - (&Method::GET, "/lighthouse/syncing") => { - into_boxfut(lighthouse::syncing::(req, network_globals)) - } - (&Method::GET, "/lighthouse/peers") => { - into_boxfut(lighthouse::peers::(req, network_globals)) - } - (&Method::GET, "/lighthouse/connected_peers") => into_boxfut( - lighthouse::connected_peers::(req, network_globals), - ), - _ => Box::new(futures::future::err(ApiError::NotFound( - "Request path and/or method not 
found.".to_owned(), - ))), - }; + (&Method::GET, "/metrics") => { + metrics::get_prometheus::(req, beacon_chain, db_path, freezer_db_path) + } + + // Lighthouse specific + (&Method::GET, "/lighthouse/syncing") => { + lighthouse::syncing::(req, network_globals) + } + + (&Method::GET, "/lighthouse/peers") => { + lighthouse::peers::(req, network_globals) + } + + (&Method::GET, "/lighthouse/connected_peers") => { + lighthouse::connected_peers::(req, network_globals) + } + _ => Err(ApiError::NotFound( + "Request path and/or method not found.".to_owned(), + )), + }; // Map the Rust-friendly `Result` in to a http-friendly response. In effect, this ensures that // any `Err` returned from our response handlers becomes a valid http response to the client // (e.g., a response with a 404 or 500 status). - request_result.then(move |result| { - let duration = Instant::now().duration_since(received_instant); - match result { - Ok(response) => { - debug!( - local_log, - "HTTP API request successful"; - "path" => path, - "duration_ms" => duration.as_millis() - ); - metrics::inc_counter(&metrics::SUCCESS_COUNT); - metrics::stop_timer(timer); + let duration = Instant::now().duration_since(received_instant); + match request_result { + Ok(response) => { + debug!( + local_log, + "HTTP API request successful"; + "path" => path, + "duration_ms" => duration.as_millis() + ); + metrics::inc_counter(&metrics::SUCCESS_COUNT); + metrics::stop_timer(timer); - Ok(response) - } - Err(e) => { - let error_response = e.into(); - - debug!( - local_log, - "HTTP API request failure"; - "path" => path, - "duration_ms" => duration.as_millis() - ); - metrics::stop_timer(timer); - - Ok(error_response) - } + Ok(response) } - }) + Err(e) => { + let error_response = e.into(); + + debug!( + local_log, + "HTTP API request failure"; + "path" => path, + "duration_ms" => duration.as_millis() + ); + metrics::stop_timer(timer); + + Ok(error_response) + } + } } diff --git a/beacon_node/rest_api/src/validator.rs 
b/beacon_node/rest_api/src/validator.rs index 18bd628c0c..7656437ea7 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,13 +1,12 @@ use crate::helpers::{check_content_type_for_json, publish_beacon_block_to_network}; use crate::response_builder::ResponseBuilder; -use crate::{ApiError, ApiResult, BoxFut, NetworkChannel, UrlQuery}; +use crate::{ApiError, ApiResult, NetworkChannel, UrlQuery}; use beacon_chain::{ attestation_verification::Error as AttnError, BeaconChain, BeaconChainTypes, BlockError, StateSkipConfig, }; use bls::PublicKeyBytes; use eth2_libp2p::PubsubMessage; -use futures::{Future, Stream}; use hyper::{Body, Request}; use network::NetworkMessage; use rayon::prelude::*; @@ -23,23 +22,23 @@ use types::{ /// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This /// method allows for collecting bulk sets of validator duties without risking exceeding the max /// URL length with query pairs. -pub fn post_validator_duties( +pub async fn post_validator_duties( req: Request, beacon_chain: Arc>, -) -> BoxFut { +) -> ApiResult { let response_builder = ResponseBuilder::new(&req); - let future = req - .into_body() - .concat2() - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) - .and_then(|chunks| { - serde_json::from_slice::(&chunks).map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorDutiesRequest: {:?}", - e - )) - }) + let body = req.into_body(); + let chunks = hyper::body::to_bytes(body) + .await + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + + serde_json::from_slice::(&chunks) + .map_err(|e| { + ApiError::BadRequest(format!( + "Unable to parse JSON into ValidatorDutiesRequest: {:?}", + e + )) }) .and_then(|bulk_request| { return_validator_duties( @@ -48,45 +47,42 @@ pub fn post_validator_duties( bulk_request.pubkeys.into_iter().map(Into::into).collect(), ) }) - 
.and_then(|duties| response_builder?.body_no_ssz(&duties)); - - Box::new(future) + .and_then(|duties| response_builder?.body_no_ssz(&duties)) } /// HTTP Handler to retrieve subscriptions for a set of validators. This allows the node to /// organise peer discovery and topic subscription for known validators. -pub fn post_validator_subscriptions( +pub async fn post_validator_subscriptions( req: Request, - mut network_chan: NetworkChannel, -) -> BoxFut { + network_chan: NetworkChannel, +) -> ApiResult { try_future!(check_content_type_for_json(&req)); let response_builder = ResponseBuilder::new(&req); let body = req.into_body(); - Box::new( - body.concat2() - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) - .and_then(|chunks| { - serde_json::from_slice(&chunks).map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorSubscriptions: {:?}", + let chunks = hyper::body::to_bytes(body) + .await + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + + serde_json::from_slice(&chunks) + .map_err(|e| { + ApiError::BadRequest(format!( + "Unable to parse JSON into ValidatorSubscriptions: {:?}", + e + )) + }) + .and_then(move |subscriptions: Vec| { + network_chan + .send(NetworkMessage::Subscribe { subscriptions }) + .map_err(|e| { + ApiError::ServerError(format!( + "Unable to subscriptions to the network: {:?}", e )) - }) - }) - .and_then(move |subscriptions: Vec| { - network_chan - .try_send(NetworkMessage::Subscribe { subscriptions }) - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to subscriptions to the network: {:?}", - e - )) - })?; - Ok(()) - }) - .and_then(|_| response_builder?.body_no_ssz(&())), - ) + })?; + Ok(()) + }) + .and_then(|_| response_builder?.body_no_ssz(&())) } /// HTTP Handler to retrieve all validator duties for the given epoch. 
@@ -291,24 +287,23 @@ pub fn get_new_beacon_block( } /// HTTP Handler to publish a SignedBeaconBlock, which has been signed by a validator. -pub fn publish_beacon_block( +pub async fn publish_beacon_block( req: Request, beacon_chain: Arc>, network_chan: NetworkChannel, log: Logger, -) -> BoxFut { +) -> ApiResult { try_future!(check_content_type_for_json(&req)); let response_builder = ResponseBuilder::new(&req); let body = req.into_body(); - Box::new( - body.concat2() - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) - .and_then(|chunks| { - serde_json::from_slice(&chunks).map_err(|e| { + let chunks = hyper::body::to_bytes(body) + .await + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + + serde_json::from_slice(&chunks).map_err(|e| { ApiError::BadRequest(format!("Unable to parse JSON into SignedBeaconBlock: {:?}", e)) }) - }) .and_then(move |block: SignedBeaconBlock| { let slot = block.slot(); match beacon_chain.process_block(block.clone()) { @@ -382,7 +377,6 @@ pub fn publish_beacon_block( } }) .and_then(|_| response_builder?.body_no_ssz(&())) - ) } /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. @@ -424,59 +418,56 @@ pub fn get_aggregate_attestation( } /// HTTP Handler to publish a list of Attestations, which have been signed by a number of validators. 
-pub fn publish_attestations( +pub async fn publish_attestations( req: Request, beacon_chain: Arc>, network_chan: NetworkChannel, log: Logger, -) -> BoxFut { +) -> ApiResult { try_future!(check_content_type_for_json(&req)); let response_builder = ResponseBuilder::new(&req); - Box::new( - req.into_body() - .concat2() - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) - .map(|chunk| chunk.iter().cloned().collect::>()) - .and_then(|chunks| { - serde_json::from_slice(&chunks.as_slice()).map_err(|e| { - ApiError::BadRequest(format!( - "Unable to deserialize JSON into a list of attestations: {:?}", - e - )) + let body = req.into_body(); + let chunk = hyper::body::to_bytes(body) + .await + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + + let chunks = chunk.iter().cloned().collect::>(); + serde_json::from_slice(&chunks.as_slice()) + .map_err(|e| { + ApiError::BadRequest(format!( + "Unable to deserialize JSON into a list of attestations: {:?}", + e + )) + }) + // Process all of the aggregates _without_ exiting early if one fails. + .map(move |attestations: Vec>| { + attestations + .into_par_iter() + .enumerate() + .map(|(i, attestation)| { + process_unaggregated_attestation( + &beacon_chain, + network_chan.clone(), + attestation, + i, + &log, + ) }) - }) - // Process all of the aggregates _without_ exiting early if one fails. - .map(move |attestations: Vec>| { - attestations - .into_par_iter() - .enumerate() - .map(|(i, attestation)| { - process_unaggregated_attestation( - &beacon_chain, - network_chan.clone(), - attestation, - i, - &log, - ) - }) - .collect::>>() - }) - // Iterate through all the results and return on the first `Err`. - // - // Note: this will only provide info about the _first_ failure, not all failures. 
- .and_then(|processing_results| { - processing_results.into_iter().try_for_each(|result| result) - }) - .and_then(|_| response_builder?.body_no_ssz(&())), - ) + .collect::>>() + }) + // Iterate through all the results and return on the first `Err`. + // + // Note: this will only provide info about the _first_ failure, not all failures. + .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result)) + .and_then(|_| response_builder?.body_no_ssz(&())) } /// Processes an unaggregrated attestation that was included in a list of attestations with the /// index `i`. fn process_unaggregated_attestation( beacon_chain: &BeaconChain, - mut network_chan: NetworkChannel, + network_chan: NetworkChannel, attestation: Attestation, i: usize, log: &Logger, @@ -496,7 +487,7 @@ fn process_unaggregated_attestation( })?; // Publish the attestation to the network - if let Err(e) = network_chan.try_send(NetworkMessage::Publish { + if let Err(e) = network_chan.send(NetworkMessage::Publish { messages: vec![PubsubMessage::Attestation(Box::new(( attestation .subnet_id(&beacon_chain.spec) @@ -542,61 +533,56 @@ fn process_unaggregated_attestation( } /// HTTP Handler to publish an Attestation, which has been signed by a validator. 
-pub fn publish_aggregate_and_proofs( +pub async fn publish_aggregate_and_proofs( req: Request, beacon_chain: Arc>, network_chan: NetworkChannel, log: Logger, -) -> BoxFut { +) -> ApiResult { try_future!(check_content_type_for_json(&req)); let response_builder = ResponseBuilder::new(&req); - - Box::new( - req.into_body() - .concat2() - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) - .map(|chunk| chunk.iter().cloned().collect::>()) - .and_then(|chunks| { - serde_json::from_slice(&chunks.as_slice()).map_err(|e| { - ApiError::BadRequest(format!( - "Unable to deserialize JSON into a list of SignedAggregateAndProof: {:?}", - e - )) - }) - }) - // Process all of the aggregates _without_ exiting early if one fails. - .map( - move |signed_aggregates: Vec>| { - signed_aggregates - .into_par_iter() - .enumerate() - .map(|(i, signed_aggregate)| { - process_aggregated_attestation( - &beacon_chain, - network_chan.clone(), - signed_aggregate, - i, - &log, - ) - }) - .collect::>>() - }, - ) - // Iterate through all the results and return on the first `Err`. - // - // Note: this will only provide info about the _first_ failure, not all failures. - .and_then(|processing_results| { - processing_results.into_iter().try_for_each(|result| result) - }) - .and_then(|_| response_builder?.body_no_ssz(&())), - ) + let body = req.into_body(); + let chunk = hyper::body::to_bytes(body) + .await + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; + let chunks = chunk.iter().cloned().collect::>(); + serde_json::from_slice(&chunks.as_slice()) + .map_err(|e| { + ApiError::BadRequest(format!( + "Unable to deserialize JSON into a list of SignedAggregateAndProof: {:?}", + e + )) + }) + // Process all of the aggregates _without_ exiting early if one fails. 
+ .map( + move |signed_aggregates: Vec>| { + signed_aggregates + .into_par_iter() + .enumerate() + .map(|(i, signed_aggregate)| { + process_aggregated_attestation( + &beacon_chain, + network_chan.clone(), + signed_aggregate, + i, + &log, + ) + }) + .collect::>>() + }, + ) + // Iterate through all the results and return on the first `Err`. + // + // Note: this will only provide info about the _first_ failure, not all failures. + .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result)) + .and_then(|_| response_builder?.body_no_ssz(&())) } /// Processes an aggregrated attestation that was included in a list of attestations with the index /// `i`. fn process_aggregated_attestation( beacon_chain: &BeaconChain, - mut network_chan: NetworkChannel, + network_chan: NetworkChannel, signed_aggregate: SignedAggregateAndProof, i: usize, log: &Logger, @@ -643,7 +629,7 @@ fn process_aggregated_attestation( }; // Publish the attestation to the network - if let Err(e) = network_chan.try_send(NetworkMessage::Publish { + if let Err(e) = network_chan.send(NetworkMessage::Publish { messages: vec![PubsubMessage::AggregateAndProofAttestation(Box::new( signed_aggregate, ))], diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 20870a99a3..dd1381ae4f 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -18,7 +18,6 @@ use beacon_chain::{ use clap::ArgMatches; use config::get_config; use environment::RuntimeContext; -use futures::{Future, IntoFuture}; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; use types::EthSpec; @@ -51,27 +50,26 @@ impl ProductionBeaconNode { /// Identical to `start_from_client_config`, however the `client_config` is generated from the /// given `matches` and potentially configuration files on the local filesystem or other /// configurations hosted remotely. 
- pub fn new_from_cli<'a, 'b>( + pub async fn new_from_cli<'a, 'b>( context: RuntimeContext, matches: &ArgMatches<'b>, - ) -> impl Future + 'a { - get_config::( + ) -> Result { + let client_config = get_config::( &matches, &context.eth2_config.spec_constants, &context.eth2_config().spec, context.log.clone(), - ) - .into_future() - .and_then(move |client_config| Self::new(context, client_config)) + )?; + Self::new(context, client_config).await } /// Starts a new beacon node `Client` in the given `environment`. /// /// Client behaviour is defined by the given `client_config`. - pub fn new( + pub async fn new( context: RuntimeContext, mut client_config: ClientConfig, - ) -> impl Future { + ) -> Result { let http_eth2_config = context.eth2_config().clone(); let spec = context.eth2_config().spec.clone(); let client_config_1 = client_config.clone(); @@ -79,60 +77,56 @@ impl ProductionBeaconNode { let store_config = client_config.store.clone(); let log = context.log.clone(); - let db_path_res = client_config.create_db_path(); + let db_path = client_config.create_db_path()?; let freezer_db_path_res = client_config.create_freezer_db_path(); - db_path_res - .into_future() - .and_then(move |db_path| { - Ok(ClientBuilder::new(context.eth_spec_instance.clone()) - .runtime_context(context) - .chain_spec(spec) - .disk_store(&db_path, &freezer_db_path_res?, store_config)? - .background_migrator()?) - }) - .and_then(move |builder| builder.beacon_chain_builder(client_genesis, client_config_1)) - .and_then(move |builder| { - let builder = if client_config.sync_eth1_chain && !client_config.dummy_eth1_backend - { - info!( - log, - "Block production enabled"; - "endpoint" => &client_config.eth1.endpoint, - "method" => "json rpc via http" - ); - builder.caching_eth1_backend(client_config.eth1.clone())? - } else if client_config.dummy_eth1_backend { - warn!( - log, - "Block production impaired"; - "reason" => "dummy eth1 backend is enabled" - ); - builder.dummy_eth1_backend()? 
- } else { - info!( - log, - "Block production disabled"; - "reason" => "no eth1 backend configured" - ); - builder.no_eth1_backend()? - }; + let builder = ClientBuilder::new(context.eth_spec_instance.clone()) + .runtime_context(context) + .chain_spec(spec) + .disk_store(&db_path, &freezer_db_path_res?, store_config)? + .background_migrator()?; - let builder = builder - .system_time_slot_clock()? - .websocket_event_handler(client_config.websocket_server.clone())? - .build_beacon_chain()? - .network(&mut client_config.network)? - .notifier()?; + let builder = builder + .beacon_chain_builder(client_genesis, client_config_1) + .await?; + let builder = if client_config.sync_eth1_chain && !client_config.dummy_eth1_backend { + info!( + log, + "Block production enabled"; + "endpoint" => &client_config.eth1.endpoint, + "method" => "json rpc via http" + ); + builder.caching_eth1_backend(client_config.eth1.clone())? + } else if client_config.dummy_eth1_backend { + warn!( + log, + "Block production impaired"; + "reason" => "dummy eth1 backend is enabled" + ); + builder.dummy_eth1_backend()? + } else { + info!( + log, + "Block production disabled"; + "reason" => "no eth1 backend configured" + ); + builder.no_eth1_backend()? + }; - let builder = if client_config.rest_api.enabled { - builder.http_server(&client_config, &http_eth2_config)? - } else { - builder - }; + let builder = builder + .system_time_slot_clock()? + .websocket_event_handler(client_config.websocket_server.clone())? + .build_beacon_chain()? + .network(&mut client_config.network)? + .notifier()?; - Ok(Self(builder.build())) - }) + let builder = if client_config.rest_api.enabled { + builder.http_server(&client_config, &http_eth2_config)? 
+ } else { + builder + }; + + Ok(Self(builder.build())) } pub fn into_inner(self) -> ProductionClient { diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 36a34738a4..77388f4344 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -10,23 +10,23 @@ harness = false [dev-dependencies] tempfile = "3.1.0" -sloggers = "0.3.2" -criterion = "0.3.0" -rayon = "1.2.0" +sloggers = "1.0.0" +criterion = "0.3.2" +rayon = "1.3.0" [dependencies] db-key = "0.0.5" leveldb = "0.8.4" -parking_lot = "0.9.0" -itertools = "0.8" +parking_lot = "0.10.2" +itertools = "0.9.0" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" tree_hash = "0.1.0" types = { path = "../../eth2/types" } state_processing = { path = "../../eth2/state_processing" } -slog = "2.2.3" -serde = "1.0" -serde_derive = "1.0.102" +slog = "2.5.2" +serde = "1.0.110" +serde_derive = "1.0.110" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } lru = "0.4.3" diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index a0c132cbe1..c7b49ec855 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" beacon_chain = { path = "../beacon_chain" } types = { path = "../../eth2/types" } slot_clock = { path = "../../eth2/utils/slot_clock" } -tokio = "0.1.22" +tokio = { version = "0.2.20", features = ["full"] } slog = "2.5.2" -parking_lot = "0.10.0" -futures = "0.1.29" +parking_lot = "0.10.2" +futures = "0.3.5" diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index 20054d8540..26f8bb60ea 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -3,20 +3,20 @@ //! This service allows task execution on the beacon node for various functionality. 
use beacon_chain::{BeaconChain, BeaconChainTypes}; -use futures::{future, prelude::*}; -use slog::error; +use futures::future; +use futures::stream::StreamExt; use slot_clock::SlotClock; use std::sync::Arc; -use std::time::{Duration, Instant}; -use tokio::runtime::TaskExecutor; -use tokio::timer::Interval; +use std::time::Duration; +use tokio::time::{interval_at, Instant}; /// Spawns a timer service which periodically executes tasks for the beacon chain +/// TODO: We might not need a `Handle` to the runtime since this function should be +/// called from the context of a runtime and we can simply spawn using task::spawn. +/// Check for issues without the Handle. pub fn spawn( - executor: &TaskExecutor, beacon_chain: Arc>, milliseconds_per_slot: u64, - log: slog::Logger, ) -> Result, &'static str> { let (exit_signal, exit) = tokio::sync::oneshot::channel(); @@ -26,25 +26,15 @@ pub fn spawn( .duration_to_next_slot() .ok_or_else(|| "slot_notifier unable to determine time to next slot")?; - let timer_future = Interval::new(start_instant, Duration::from_millis(milliseconds_per_slot)) - .map_err(move |e| { - error!( - log, - "Beacon chain timer failed"; - "error" => format!("{:?}", e) - ) - }) + // Warning: `interval_at` panics if `milliseconds_per_slot` = 0. 
+ let timer_future = interval_at(start_instant, Duration::from_millis(milliseconds_per_slot)) .for_each(move |_| { beacon_chain.per_slot_task(); - future::ok(()) + future::ready(()) }); - executor.spawn( - exit.map_err(|_| ()) - .select(timer_future) - .map(|_| ()) - .map_err(|_| ()), - ); + let future = futures::future::select(timer_future, exit); + tokio::spawn(future); Ok(exit_signal) } diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml index 0ed8d628cc..2b32156769 100644 --- a/beacon_node/websocket_server/Cargo.toml +++ b/beacon_node/websocket_server/Cargo.toml @@ -7,11 +7,11 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -futures = "0.1.29" -serde = "1.0.102" -serde_derive = "1.0.102" -serde_json = "1.0.41" +futures = "0.3.5" +serde = "1.0.110" +serde_derive = "1.0.110" +serde_json = "1.0.52" slog = "2.5.2" -tokio = "0.1.22" +tokio = { version = "0.2.20", features = ["full"] } types = { path = "../../eth2/types" } ws = "0.9.1" diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs index 01b48ab18b..7ffff1b89b 100644 --- a/beacon_node/websocket_server/src/lib.rs +++ b/beacon_node/websocket_server/src/lib.rs @@ -1,9 +1,6 @@ -use futures::Future; use slog::{debug, error, info, warn, Logger}; use std::marker::PhantomData; use std::net::SocketAddr; -use std::thread; -use tokio::runtime::TaskExecutor; use types::EthSpec; use ws::{Sender, WebSocket}; @@ -38,7 +35,6 @@ impl WebSocketSender { pub fn start_server( config: &Config, - executor: &TaskExecutor, log: &Logger, ) -> Result< ( @@ -76,30 +72,29 @@ pub fn start_server( let log_inner = log.clone(); let broadcaster_inner = server.broadcaster(); - let exit_future = exit - .and_then(move |_| { - if let Err(e) = broadcaster_inner.shutdown() { - warn!( - log_inner, - "Websocket server errored on shutdown"; - "error" => format!("{:?}", e) - ); - } else { 
- info!(log_inner, "Websocket server shutdown"); - } - Ok(()) - }) - .map_err(|_| ()); + let exit_future = async move { + let _ = exit.await; + if let Err(e) = broadcaster_inner.shutdown() { + warn!( + log_inner, + "Websocket server errored on shutdown"; + "error" => format!("{:?}", e) + ); + } else { + info!(log_inner, "Websocket server shutdown"); + } + }; - // Place a future on the executor that will shutdown the websocket server when the + // Place a future on the handle that will shutdown the websocket server when the // application exits. - executor.spawn(exit_future); + tokio::spawn(exit_future); exit_channel }; let log_inner = log.clone(); - let _handle = thread::spawn(move || match server.run() { + + let _ = std::thread::spawn(move || match server.run() { Ok(_) => { debug!( log_inner, diff --git a/eth2/operation_pool/Cargo.toml b/eth2/operation_pool/Cargo.toml index 7bfc86063a..b2eb5d7a0a 100644 --- a/eth2/operation_pool/Cargo.toml +++ b/eth2/operation_pool/Cargo.toml @@ -6,14 +6,14 @@ edition = "2018" [dependencies] int_to_bytes = { path = "../utils/int_to_bytes" } -parking_lot = "0.9.0" +parking_lot = "0.10.2" types = { path = "../types" } state_processing = { path = "../state_processing" } eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" -serde = "1.0.102" -serde_derive = "1.0.102" +serde = "1.0.110" +serde_derive = "1.0.110" store = { path = "../../beacon_node/store" } [dev-dependencies] -rand = "0.7.2" +rand = "0.7.3" diff --git a/eth2/proto_array_fork_choice/Cargo.toml b/eth2/proto_array_fork_choice/Cargo.toml index f17515acc3..7578c92f86 100644 --- a/eth2/proto_array_fork_choice/Cargo.toml +++ b/eth2/proto_array_fork_choice/Cargo.toml @@ -9,11 +9,11 @@ name = "proto_array_fork_choice" path = "src/bin.rs" [dependencies] -parking_lot = "0.9.0" +parking_lot = "0.10.2" types = { path = "../types" } -itertools = "0.8.1" +itertools = "0.9.0" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" -serde = "1.0.102" -serde_derive = "1.0.102" +serde = "1.0.110" 
+serde_derive = "1.0.110" serde_yaml = "0.8.11" diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index a13e25f123..c9bf9d4f4c 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -9,10 +9,10 @@ name = "benches" harness = false [dev-dependencies] -criterion = "0.3.0" +criterion = "0.3.2" env_logger = "0.7.1" -serde = "1.0.102" -serde_derive = "1.0.102" +serde = "1.0.110" +serde_derive = "1.0.110" lazy_static = "1.4.0" serde_yaml = "0.8.11" beacon_chain = { path = "../../beacon_node/beacon_chain" } @@ -21,20 +21,20 @@ store = { path = "../../beacon_node/store" } [dependencies] bls = { path = "../utils/bls" } -integer-sqrt = "0.1.2" -itertools = "0.8.1" +integer-sqrt = "0.1.3" +itertools = "0.9.0" eth2_ssz = "0.1.2" eth2_ssz_types = { path = "../utils/ssz_types" } merkle_proof = { path = "../utils/merkle_proof" } log = "0.4.8" safe_arith = { path = "../utils/safe_arith" } tree_hash = "0.1.0" -tree_hash_derive = "0.2" +tree_hash_derive = "0.2.0" types = { path = "../types" } -rayon = "1.2.0" -eth2_hashing = { path = "../utils/eth2_hashing" } +rayon = "1.3.0" +eth2_hashing = "0.1.0" int_to_bytes = { path = "../utils/int_to_bytes" } -arbitrary = { version = "0.4.3", features = ["derive"], optional = true } +arbitrary = { version = "0.4.4", features = ["derive"], optional = true } [features] fake_crypto = ["bls/fake_crypto"] diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 99ac78a772..be427a972f 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -15,6 +15,11 @@ pub use process_slashings::process_slashings; pub use registry_updates::process_registry_updates; pub use validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; +/// Provides a summary of validator participation during the epoch. 
+pub struct EpochProcessingSummary { + pub total_balances: TotalBalances, +} + /// Performs per-epoch processing on some BeaconState. /// /// Mutates the given `BeaconState`, returning early if an error is encountered. If an error is @@ -24,7 +29,7 @@ pub use validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; pub fn per_epoch_processing( state: &mut BeaconState, spec: &ChainSpec, -) -> Result<(), Error> { +) -> Result { // Ensure the committee caches are built. state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; @@ -58,7 +63,9 @@ pub fn per_epoch_processing( // Rotate the epoch caches to suit the epoch transition. state.advance_caches(); - Ok(()) + Ok(EpochProcessingSummary { + total_balances: validator_statuses.total_balances, + }) } /// Update the following fields on the `BeaconState`: diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index a98dc545bf..46c608275e 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -1,4 +1,4 @@ -use crate::*; +use crate::{per_epoch_processing::EpochProcessingSummary, *}; use types::*; #[derive(Debug, PartialEq)] @@ -18,16 +18,18 @@ pub fn per_slot_processing( state: &mut BeaconState, state_root: Option, spec: &ChainSpec, -) -> Result<(), Error> { +) -> Result, Error> { cache_state(state, state_root)?; + let mut summary = None; + if state.slot > spec.genesis_slot && (state.slot + 1) % T::slots_per_epoch() == 0 { - per_epoch_processing(state, spec)?; + summary = Some(per_epoch_processing(state, spec)?); } state.slot += 1; - Ok(()) + Ok(summary) } fn cache_state( diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index 31db5cfd86..a8467d5da9 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -13,19 +13,19 @@ bls = { path = "../utils/bls" } compare_fields = { path = "../utils/compare_fields" } 
compare_fields_derive = { path = "../utils/compare_fields_derive" } dirs = "2.0.2" -derivative = "1.0.3" +derivative = "2.1.1" eth2_interop_keypairs = { path = "../utils/eth2_interop_keypairs" } ethereum-types = "0.9.1" eth2_hashing = "0.1.0" -hex = "0.3" +hex = "0.4.2" int_to_bytes = { path = "../utils/int_to_bytes" } log = "0.4.8" merkle_proof = { path = "../utils/merkle_proof" } -rayon = "1.2.0" -rand = "0.7.2" +rayon = "1.3.0" +rand = "0.7.3" safe_arith = { path = "../utils/safe_arith" } -serde = "1.0.102" -serde_derive = "1.0.102" +serde = "1.0.110" +serde_derive = "1.0.110" slog = "2.5.2" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" @@ -33,17 +33,17 @@ eth2_ssz_types = { path = "../utils/ssz_types" } swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" } test_random_derive = { path = "../utils/test_random_derive" } tree_hash = "0.1.0" -tree_hash_derive = "0.2" +tree_hash_derive = "0.2.0" rand_xorshift = "0.2.0" cached_tree_hash = { path = "../utils/cached_tree_hash" } serde_yaml = "0.8.11" tempfile = "3.1.0" -arbitrary = { version = "0.4", features = ["derive"], optional = true } +arbitrary = { version = "0.4.4", features = ["derive"], optional = true } [dev-dependencies] env_logger = "0.7.1" -serde_json = "1.0.41" -criterion = "0.3.0" +serde_json = "1.0.52" +criterion = "0.3.2" [features] arbitrary-fuzz = [ diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 93df124921..ce9cbe02eb 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -7,15 +7,15 @@ edition = "2018" [dependencies] milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.0.1" } eth2_hashing = "0.1.0" -hex = "0.3" -rand = "0.7.2" -serde = "1.0.102" -serde_derive = "1.0.102" +hex = "0.4.2" +rand = "0.7.3" +serde = "1.0.110" +serde_derive = "1.0.110" serde_hex = { path = "../serde_hex" } eth2_ssz = "0.1.2" eth2_ssz_types = { path = "../ssz_types" } tree_hash = "0.1.0" -arbitrary = { version = "0.4", features = ["derive"], optional = 
true } +arbitrary = { version = "0.4.4", features = ["derive"], optional = true } [features] fake_crypto = [] diff --git a/eth2/utils/cached_tree_hash/Cargo.toml b/eth2/utils/cached_tree_hash/Cargo.toml index 5ac4d5e81d..1bb86f0c30 100644 --- a/eth2/utils/cached_tree_hash/Cargo.toml +++ b/eth2/utils/cached_tree_hash/Cargo.toml @@ -5,17 +5,17 @@ authors = ["Michael Sproul "] edition = "2018" [dependencies] -ethereum-types = "0.9" +ethereum-types = "0.9.1" eth2_ssz_types = { path = "../ssz_types" } -eth2_hashing = "0.1" +eth2_hashing = "0.1.0" eth2_ssz_derive = "0.1.0" eth2_ssz = "0.1.2" -tree_hash = "0.1" -smallvec = "1.2.0" +tree_hash = "0.1.0" +smallvec = "1.4.0" [dev-dependencies] -quickcheck = "0.9" -quickcheck_macros = "0.8" +quickcheck = "0.9.2" +quickcheck_macros = "0.9.1" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/eth2/utils/clap_utils/Cargo.toml b/eth2/utils/clap_utils/Cargo.toml index f1916c4ba4..b7c8cdfa47 100644 --- a/eth2/utils/clap_utils/Cargo.toml +++ b/eth2/utils/clap_utils/Cargo.toml @@ -8,8 +8,8 @@ edition = "2018" [dependencies] clap = "2.33.0" -hex = "0.3" -dirs = "2.0" +hex = "0.4.2" +dirs = "2.0.2" types = { path = "../../types" } eth2_testnet_config = { path = "../eth2_testnet_config" } -eth2_ssz = { path = "../ssz" } +eth2_ssz = "0.1.2" diff --git a/eth2/utils/compare_fields_derive/Cargo.toml b/eth2/utils/compare_fields_derive/Cargo.toml index 485b2708db..550615b146 100644 --- a/eth2/utils/compare_fields_derive/Cargo.toml +++ b/eth2/utils/compare_fields_derive/Cargo.toml @@ -8,5 +8,5 @@ edition = "2018" proc-macro = true [dependencies] -syn = "0.15" -quote = "0.6" +syn = "1.0.18" +quote = "1.0.4" diff --git a/eth2/utils/compare_fields_derive/src/lib.rs b/eth2/utils/compare_fields_derive/src/lib.rs index 15137efa37..a8f95a1bd9 100644 --- a/eth2/utils/compare_fields_derive/src/lib.rs +++ b/eth2/utils/compare_fields_derive/src/lib.rs @@ -8,7 +8,7 @@ use syn::{parse_macro_input, DeriveInput}; fn is_slice(field: &syn::Field) 
-> bool { field.attrs.iter().any(|attr| { attr.path.is_ident("compare_fields") - && attr.tts.to_string().replace(" ", "") == "(as_slice)" + && attr.tokens.to_string().replace(" ", "") == "(as_slice)" }) } diff --git a/eth2/utils/deposit_contract/Cargo.toml b/eth2/utils/deposit_contract/Cargo.toml index 0c3abacf55..14dafda2df 100644 --- a/eth2/utils/deposit_contract/Cargo.toml +++ b/eth2/utils/deposit_contract/Cargo.toml @@ -7,11 +7,11 @@ edition = "2018" build = "build.rs" [build-dependencies] -reqwest = "0.9.20" -serde_json = "1.0" +reqwest = { version = "0.10.4", features = ["blocking", "json"] } +serde_json = "1.0.52" [dependencies] types = { path = "../../types"} -eth2_ssz = { path = "../ssz"} -tree_hash = { path = "../tree_hash"} -ethabi = "12.0" +eth2_ssz = "0.1.2" +tree_hash = "0.1.0" +ethabi = "12.0.0" diff --git a/eth2/utils/deposit_contract/build.rs b/eth2/utils/deposit_contract/build.rs index 752b2e5bb8..096b3f7337 100644 --- a/eth2/utils/deposit_contract/build.rs +++ b/eth2/utils/deposit_contract/build.rs @@ -56,8 +56,8 @@ pub fn download_deposit_contract( if abi_file.exists() { // Nothing to do. 
} else { - match reqwest::get(url) { - Ok(mut response) => { + match reqwest::blocking::get(url) { + Ok(response) => { let mut abi_file = File::create(abi_file) .map_err(|e| format!("Failed to create local abi file: {:?}", e))?; let mut bytecode_file = File::create(bytecode_file) diff --git a/eth2/utils/eth2_config/Cargo.toml b/eth2/utils/eth2_config/Cargo.toml index c9af2fef3b..13be59963c 100644 --- a/eth2/utils/eth2_config/Cargo.toml +++ b/eth2/utils/eth2_config/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -serde = "1.0.102" -serde_derive = "1.0.102" -toml = "0.5.4" +serde = "1.0.110" +serde_derive = "1.0.110" +toml = "0.5.6" types = { path = "../../types" } diff --git a/eth2/utils/eth2_hashing/Cargo.toml b/eth2/utils/eth2_hashing/Cargo.toml index 3047a7a4df..2ffb37c9e4 100644 --- a/eth2/utils/eth2_hashing/Cargo.toml +++ b/eth2/utils/eth2_hashing/Cargo.toml @@ -13,13 +13,13 @@ lazy_static = { version = "1.4.0", optional = true } ring = "0.16.9" [target.'cfg(target_arch = "wasm32")'.dependencies] -sha2 = "0.8.0" +sha2 = "0.8.1" [dev-dependencies] -rustc-hex = "2.0.1" +rustc-hex = "2.1.0" [target.'cfg(target_arch = "wasm32")'.dev-dependencies] -wasm-bindgen-test = "0.3.2" +wasm-bindgen-test = "0.3.12" [features] default = ["zero_hash_cache"] diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index 30509a8f97..bc678f4e93 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -8,13 +8,13 @@ edition = "2018" [dependencies] lazy_static = "1.4.0" -num-bigint = "0.2.3" +num-bigint = "0.2.6" eth2_hashing = "0.1.0" -hex = "0.3" +hex = "0.4.2" milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.0.1" } serde_yaml = "0.8.11" -serde = "1.0.102" -serde_derive = "1.0.102" +serde = "1.0.110" +serde_derive = "1.0.110" [dev-dependencies] -base64 = "0.11.0" +base64 = "0.12.1" diff --git 
a/eth2/utils/eth2_keystore/Cargo.toml b/eth2/utils/eth2_keystore/Cargo.toml index 917a3f63ba..e93b272082 100644 --- a/eth2/utils/eth2_keystore/Cargo.toml +++ b/eth2/utils/eth2_keystore/Cargo.toml @@ -11,7 +11,7 @@ rand = "0.7.2" rust-crypto = "0.2.36" uuid = { version = "0.8", features = ["serde", "v4"] } zeroize = { version = "1.0.0", features = ["zeroize_derive"] } -serde = "1.0.102" +serde = "1.0.110" serde_repr = "0.1" hex = "0.3" bls = { path = "../bls" } diff --git a/eth2/utils/eth2_testnet_config/Cargo.toml b/eth2/utils/eth2_testnet_config/Cargo.toml index 3df6897a49..a0ccdc5d8f 100644 --- a/eth2/utils/eth2_testnet_config/Cargo.toml +++ b/eth2/utils/eth2_testnet_config/Cargo.toml @@ -7,15 +7,14 @@ edition = "2018" build = "build.rs" [build-dependencies] -reqwest = "0.9.20" +reqwest = { version = "0.10.4", features = ["blocking"] } [dev-dependencies] -tempdir = "0.3" -reqwest = "0.9.20" +tempdir = "0.3.7" [dependencies] -serde = "1.0" -serde_yaml = "0.8" +serde = "1.0.110" +serde_yaml = "0.8.11" types = { path = "../../types"} eth2-libp2p = { path = "../../../beacon_node/eth2-libp2p"} -eth2_ssz = { path = "../ssz"} +eth2_ssz = "0.1.2" diff --git a/eth2/utils/eth2_testnet_config/build.rs b/eth2/utils/eth2_testnet_config/build.rs index 72c7af12c9..6a01b00057 100644 --- a/eth2/utils/eth2_testnet_config/build.rs +++ b/eth2/utils/eth2_testnet_config/build.rs @@ -46,13 +46,18 @@ pub fn get_file(filename: &str) -> Result<(), String> { let mut file = File::create(path).map_err(|e| format!("Failed to create {}: {:?}", filename, e))?; - let mut response = reqwest::get(&url) + let request = reqwest::blocking::Client::builder() + .build() + .map_err(|_| "Could not build request client".to_string())? + .get(&url) + .timeout(std::time::Duration::from_secs(120)); + + let contents = request + .send() .map_err(|e| format!("Failed to download {}: {}", filename, e))? 
.error_for_status() - .map_err(|e| format!("Error downloading {}: {}", filename, e))?; - let mut contents: Vec = vec![]; - response - .copy_to(&mut contents) + .map_err(|e| format!("Error downloading {}: {}", filename, e))? + .bytes() .map_err(|e| format!("Failed to read {} response bytes: {}", filename, e))?; file.write(&contents) diff --git a/eth2/utils/eth2_wallet/Cargo.toml b/eth2/utils/eth2_wallet/Cargo.toml index 93f0cc8ffc..a3fa8a1d54 100644 --- a/eth2/utils/eth2_wallet/Cargo.toml +++ b/eth2/utils/eth2_wallet/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -serde = "1.0.102" +serde = "1.0.110" serde_json = "1.0.41" serde_repr = "0.1" uuid = { version = "0.8", features = ["serde", "v4"] } diff --git a/eth2/utils/hashmap_delay/Cargo.toml b/eth2/utils/hashmap_delay/Cargo.toml deleted file mode 100644 index 918a8568f4..0000000000 --- a/eth2/utils/hashmap_delay/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "hashmap_delay" -version = "0.2.0" -authors = ["Age Manning "] -edition = "2018" - -[dependencies] -tokio-timer = "0.2.12" -futures = "0.1.29" diff --git a/eth2/utils/hashmap_delay/src/hashmap_delay.rs b/eth2/utils/hashmap_delay/src/hashmap_delay.rs deleted file mode 100644 index ea4fa14574..0000000000 --- a/eth2/utils/hashmap_delay/src/hashmap_delay.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! A simple hashmap object coupled with a `delay_queue` which has entries that expire after a -//! fixed time. -//! -//! A `HashMapDelay` implements `Stream` which removes expired items from the map. - -/// The default delay for entries, in seconds. This is only used when `insert()` is used to add -/// entries. 
-const DEFAULT_DELAY: u64 = 30; - -use futures::prelude::*; -use std::collections::HashMap; -use std::time::Duration; -use tokio_timer::delay_queue::{self, DelayQueue}; - -pub struct HashMapDelay -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, -{ - /// The given entries. - entries: HashMap>, - /// A queue holding the timeouts of each entry. - expirations: DelayQueue, - /// The default expiration timeout of an entry. - default_entry_timeout: Duration, -} - -/// A wrapping around entries that adds the link to the entry's expiration, via a `delay_queue` key. -struct MapEntry { - /// The expiration key for the entry. - key: delay_queue::Key, - /// The actual entry. - value: V, -} - -impl Default for HashMapDelay -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, -{ - fn default() -> Self { - HashMapDelay::new(Duration::from_secs(DEFAULT_DELAY)) - } -} - -impl HashMapDelay -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, -{ - /// Creates a new instance of `HashMapDelay`. - pub fn new(default_entry_timeout: Duration) -> Self { - HashMapDelay { - entries: HashMap::new(), - expirations: DelayQueue::new(), - default_entry_timeout, - } - } - - /// Insert an entry into the mapping. Entries will expire after the `default_entry_timeout`. - pub fn insert(&mut self, key: K, value: V) { - self.insert_at(key, value, self.default_entry_timeout); - } - - /// Inserts an entry that will expire at a given instant. - pub fn insert_at(&mut self, key: K, value: V, entry_duration: Duration) { - let delay_key = self.expirations.insert(key.clone(), entry_duration); - let entry = MapEntry { - key: delay_key, - value, - }; - self.entries.insert(key, entry); - } - - /// Gets a reference to an entry if it exists. - /// - /// Returns None if the entry does not exist. - pub fn get(&self, key: &K) -> Option<&V> { - self.entries.get(key).map(|entry| &entry.value) - } - - /// Gets a mutable reference to an entry if it exists. 
- /// - /// Returns None if the entry does not exist. - pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { - self.entries.get_mut(key).map(|entry| &mut entry.value) - } - - /// Returns true if the key exists, false otherwise. - pub fn contains_key(&self, key: &K) -> bool { - self.entries.contains_key(key) - } - - /// Returns the length of the mapping. - pub fn len(&self) -> usize { - self.entries.len() - } - - /// Updates the timeout for a given key. Returns true if the key existed, false otherwise. - /// - /// Panics if the duration is too far in the future. - pub fn update_timeout(&mut self, key: &K, timeout: Duration) -> bool { - if let Some(entry) = self.entries.get(key) { - self.expirations.reset(&entry.key, timeout); - true - } else { - false - } - } - - /// Removes a key from the map returning the value associated with the key that was in the map. - /// - /// Return None if the key was not in the map. - pub fn remove(&mut self, key: &K) -> Option { - if let Some(entry) = self.entries.remove(key) { - self.expirations.remove(&entry.key); - return Some(entry.value); - } - return None; - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns false. - pub fn retain bool>(&mut self, mut f: F) { - let expiration = &mut self.expirations; - self.entries.retain(|key, entry| { - let result = f(key, &mut entry.value); - if !result { - expiration.remove(&entry.key); - } - result - }) - } - - /// Removes all entries from the map. 
- pub fn clear(&mut self) { - self.entries.clear(); - self.expirations.clear(); - } -} - -impl Stream for HashMapDelay -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, -{ - type Item = (K, V); - type Error = &'static str; - - fn poll(&mut self) -> Poll, Self::Error> { - match self.expirations.poll() { - Ok(Async::Ready(Some(key))) => { - let key = key.into_inner(); - match self.entries.remove(&key) { - Some(entry) => Ok(Async::Ready(Some((key, entry.value)))), - None => Err("Value no longer exists in expirations"), - } - } - Ok(Async::Ready(None)) => Ok(Async::Ready(None)), - Ok(Async::NotReady) => Ok(Async::NotReady), - Err(_) => Err("Error polling HashMapDelay"), - } - } -} diff --git a/eth2/utils/hashmap_delay/src/lib.rs b/eth2/utils/hashmap_delay/src/lib.rs deleted file mode 100644 index 140106b42e..0000000000 --- a/eth2/utils/hashmap_delay/src/lib.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! This crate provides two objects: -//! - `HashMapDelay` -//! - `HashSetDelay` -//! -//! # HashMapDelay -//! -//! This provides a `HashMap` coupled with a `DelayQueue`. Objects that are inserted into -//! the map are inserted with an expiry. `Stream` is implemented on the `HashMapDelay` -//! which return objects that have expired. These objects are removed from the mapping. -//! -//! # HashSetDelay -//! -//! This is similar to a `HashMapDelay` except the mapping maps to the expiry time. This -//! allows users to add objects and check their expiry deadlines before the `Stream` -//! consumes them. 
- -mod hashmap_delay; -mod hashset_delay; - -pub use crate::hashmap_delay::HashMapDelay; -pub use crate::hashset_delay::HashSetDelay; diff --git a/eth2/utils/hashset_delay/Cargo.toml b/eth2/utils/hashset_delay/Cargo.toml new file mode 100644 index 0000000000..6470479b04 --- /dev/null +++ b/eth2/utils/hashset_delay/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "hashset_delay" +version = "0.2.0" +authors = ["Age Manning "] +edition = "2018" + +[dependencies] +futures = "0.3.5" +tokio = { version = "0.2.20", features = ["time"] } + +[dev-dependencies] +tokio = { version = "0.2.20", features = ["time", "rt-threaded", "macros"] } diff --git a/eth2/utils/hashmap_delay/src/hashset_delay.rs b/eth2/utils/hashset_delay/src/hashset_delay.rs similarity index 70% rename from eth2/utils/hashmap_delay/src/hashset_delay.rs rename to eth2/utils/hashset_delay/src/hashset_delay.rs index bd93d6c8e7..77d4f610f5 100644 --- a/eth2/utils/hashmap_delay/src/hashset_delay.rs +++ b/eth2/utils/hashset_delay/src/hashset_delay.rs @@ -6,13 +6,17 @@ const DEFAULT_DELAY: u64 = 30; use futures::prelude::*; -use std::collections::HashMap; -use std::time::{Duration, Instant}; -use tokio_timer::delay_queue::{self, DelayQueue}; +use std::{ + collections::HashMap, + pin::Pin, + task::{Context, Poll}, + time::{Duration, Instant}, +}; +use tokio::time::delay_queue::{self, DelayQueue}; pub struct HashSetDelay where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, { /// The given entries. 
entries: HashMap, @@ -32,7 +36,7 @@ struct MapEntry { impl Default for HashSetDelay where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, { fn default() -> Self { HashSetDelay::new(Duration::from_secs(DEFAULT_DELAY)) @@ -41,7 +45,7 @@ where impl HashSetDelay where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, { /// Creates a new instance of `HashSetDelay`. pub fn new(default_entry_timeout: Duration) -> Self { @@ -134,30 +138,55 @@ where } /// Returns a vector of referencing all keys in the map. - pub fn keys_vec(&self) -> Vec<&K> { - self.entries.keys().collect() + pub fn keys(&self) -> impl Iterator { + self.entries.keys() } } impl Stream for HashSetDelay where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, { - type Item = K; - type Error = &'static str; + type Item = Result; - fn poll(&mut self) -> Poll, Self::Error> { - match self.expirations.poll() { - Ok(Async::Ready(Some(key))) => { - let key = key.into_inner(); - match self.entries.remove(&key) { - Some(_) => Ok(Async::Ready(Some(key))), - None => Err("Value no longer exists in expirations"), - } + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + match self.expirations.poll_expired(cx) { + Poll::Ready(Some(Ok(key))) => match self.entries.remove(key.get_ref()) { + Some(_) => Poll::Ready(Some(Ok(key.into_inner()))), + None => Poll::Ready(Some(Err("Value no longer exists in expirations".into()))), + }, + Poll::Ready(Some(Err(e))) => { + Poll::Ready(Some(Err(format!("delay queue error: {:?}", e)))) } - Ok(Async::Ready(None)) => Ok(Async::Ready(None)), - Ok(Async::NotReady) => Ok(Async::NotReady), - Err(_) => Err("Error polling HashSetDelay"), + Poll::Ready(None) => Poll::Ready(None), + Poll::Pending => Poll::Pending, } } } + +#[cfg(test)] + +mod tests { + use super::*; + 
+ #[tokio::test] + async fn should_not_panic() { + let key = 2u8; + + let mut map = HashSetDelay::default(); + + map.insert(key); + map.update_timeout(&key, Duration::from_secs(100)); + + let fut = |cx: &mut Context| { + let _ = map.poll_next_unpin(cx); + let _ = map.poll_next_unpin(cx); + Poll::Ready(()) + }; + + future::poll_fn(fut).await; + + map.insert(key); + map.update_timeout(&key, Duration::from_secs(100)); + } +} diff --git a/eth2/utils/hashset_delay/src/lib.rs b/eth2/utils/hashset_delay/src/lib.rs new file mode 100644 index 0000000000..175ad72cfa --- /dev/null +++ b/eth2/utils/hashset_delay/src/lib.rs @@ -0,0 +1,12 @@ +//! This crate provides a single type (its counter-part HashMapDelay has been removed as it +//! currently is not in use in lighthouse): +//! - `HashSetDelay` +//! +//! # HashSetDelay +//! +//! This is similar to a `HashMapDelay` except the mapping maps to the expiry time. This +//! allows users to add objects and check their expiry deadlines before the `Stream` +//! consumes them. 
+ +mod hashset_delay; +pub use crate::hashset_delay::HashSetDelay; diff --git a/eth2/utils/int_to_bytes/Cargo.toml b/eth2/utils/int_to_bytes/Cargo.toml index c24f657c67..87839ccaa9 100644 --- a/eth2/utils/int_to_bytes/Cargo.toml +++ b/eth2/utils/int_to_bytes/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -bytes = "0.4.12" +bytes = "0.5.4" [dev-dependencies] yaml-rust = "0.4.3" -hex = "0.3" +hex = "0.4.2" diff --git a/eth2/utils/lighthouse_metrics/Cargo.toml b/eth2/utils/lighthouse_metrics/Cargo.toml index ed4b492533..1e804c8d3b 100644 --- a/eth2/utils/lighthouse_metrics/Cargo.toml +++ b/eth2/utils/lighthouse_metrics/Cargo.toml @@ -8,4 +8,4 @@ edition = "2018" [dependencies] lazy_static = "1.4.0" -prometheus = "0.7.0" +prometheus = "0.8.0" diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs index c7d312a4f4..2874ddc503 100644 --- a/eth2/utils/lighthouse_metrics/src/lib.rs +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -56,7 +56,7 @@ use prometheus::{HistogramOpts, HistogramTimer, Opts}; -pub use prometheus::{Encoder, Histogram, IntCounter, IntGauge, Result, TextEncoder}; +pub use prometheus::{Encoder, Gauge, Histogram, IntCounter, IntGauge, Result, TextEncoder}; /// Collect all the metrics for reporting. pub fn gather() -> Vec { @@ -81,6 +81,15 @@ pub fn try_create_int_gauge(name: &str, help: &str) -> Result { Ok(gauge) } +/// Attempts to crate a `Gauge`, returning `Err` if the registry does not accept the counter +/// (potentially due to naming conflict). +pub fn try_create_float_gauge(name: &str, help: &str) -> Result { + let opts = Opts::new(name, help); + let gauge = Gauge::with_opts(opts)?; + prometheus::register(Box::new(gauge.clone()))?; + Ok(gauge) +} + /// Attempts to crate a `Histogram`, returning `Err` if the registry does not accept the counter /// (potentially due to naming conflict). 
pub fn try_create_histogram(name: &str, help: &str) -> Result { @@ -124,6 +133,24 @@ pub fn set_gauge(gauge: &Result, value: i64) { } } +pub fn maybe_set_gauge(gauge: &Result, value_opt: Option) { + if let Some(value) = value_opt { + set_gauge(gauge, value) + } +} + +pub fn set_float_gauge(gauge: &Result, value: f64) { + if let Ok(gauge) = gauge { + gauge.set(value); + } +} + +pub fn maybe_set_float_gauge(gauge: &Result, value_opt: Option) { + if let Some(value) = value_opt { + set_float_gauge(gauge, value) + } +} + /// Sets the value of a `Histogram` manually. pub fn observe(histogram: &Result, value: f64) { if let Ok(histogram) = histogram { diff --git a/eth2/utils/logging/Cargo.toml b/eth2/utils/logging/Cargo.toml index c7ccd3617f..b3c50c6d6d 100644 --- a/eth2/utils/logging/Cargo.toml +++ b/eth2/utils/logging/Cargo.toml @@ -6,6 +6,6 @@ edition = "2018" [dependencies] slog = "2.5.2" -slog-term = "2.4.2" +slog-term = "2.5.0" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index e56fab7f31..84745d224b 100644 --- a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -5,14 +5,14 @@ authors = ["Michael Sproul "] edition = "2018" [dependencies] -ethereum-types = "0.9" +ethereum-types = "0.9.1" eth2_hashing = "0.1.0" lazy_static = "1.4.0" safe_arith = { path = "../safe_arith" } [dev-dependencies] -quickcheck = "0.9.0" -quickcheck_macros = "0.8.0" +quickcheck = "0.9.2" +quickcheck_macros = "0.9.1" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/eth2/utils/remote_beacon_node/Cargo.toml b/eth2/utils/remote_beacon_node/Cargo.toml index cb43235786..6b0486401e 100644 --- a/eth2/utils/remote_beacon_node/Cargo.toml +++ b/eth2/utils/remote_beacon_node/Cargo.toml @@ -7,15 +7,15 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -reqwest = "0.9" 
-url = "1.2" -serde = "1.0" -futures = "0.1.25" +reqwest = { version = "0.10.4", features = ["json"] } +url = "2.1.1" +serde = "1.0.110" +futures = "0.3.5" types = { path = "../../../eth2/types" } rest_types = { path = "../rest_types" } -hex = "0.3" -eth2_ssz = { path = "../../../eth2/utils/ssz" } -serde_json = "^1.0" +hex = "0.4.2" +eth2_ssz = "0.1.2" +serde_json = "1.0.52" eth2_config = { path = "../../../eth2/utils/eth2_config" } proto_array_fork_choice = { path = "../../../eth2/proto_array_fork_choice" } operation_pool = { path = "../../../eth2/operation_pool" } diff --git a/eth2/utils/remote_beacon_node/src/lib.rs b/eth2/utils/remote_beacon_node/src/lib.rs index 2b9af924fd..612a7c01a3 100644 --- a/eth2/utils/remote_beacon_node/src/lib.rs +++ b/eth2/utils/remote_beacon_node/src/lib.rs @@ -4,11 +4,7 @@ //! Presently, this is only used for testing but it _could_ become a user-facing library. use eth2_config::Eth2Config; -use futures::{future, Future, IntoFuture}; -use reqwest::{ - r#async::{Client, ClientBuilder, Response}, - StatusCode, -}; +use reqwest::{Client, ClientBuilder, Response, StatusCode}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use ssz::Encode; use std::marker::PhantomData; @@ -119,33 +115,33 @@ impl HttpClient { self.url.join(path).map_err(|e| e.into()) } - pub fn json_post( - &self, - url: Url, - body: T, - ) -> impl Future { + pub async fn json_post(&self, url: Url, body: T) -> Result { self.client .post(&url.to_string()) .json(&body) .send() + .await .map_err(Error::from) } - pub fn json_get( + pub async fn json_get( &self, mut url: Url, query_pairs: Vec<(String, String)>, - ) -> impl Future { + ) -> Result { query_pairs.into_iter().for_each(|(key, param)| { url.query_pairs_mut().append_pair(&key, ¶m); }); - self.client + let response = self + .client .get(&url.to_string()) .send() - .map_err(Error::from) - .and_then(|response| error_for_status(response).map_err(Error::from)) - .and_then(|mut success| 
success.json::().map_err(Error::from)) + .await + .map_err(Error::from)?; + + let success = error_for_status(response).await.map_err(Error::from)?; + success.json::().await.map_err(Error::from) } } @@ -153,18 +149,17 @@ impl HttpClient { /// /// Distinct from `Response::error_for_status` because it includes the body of the response as /// text. This ensures the error message from the server is not discarded. -fn error_for_status( - mut response: Response, -) -> Box + Send> { +async fn error_for_status(response: Response) -> Result { let status = response.status(); if status.is_success() { - Box::new(future::ok(response)) + return Ok(response); } else { - Box::new(response.text().then(move |text_result| match text_result { + let text_result = response.text().await; + match text_result { Err(e) => Err(Error::ReqwestError(e)), Ok(body) => Err(Error::DidNotSucceed { status, body }), - })) + } } } @@ -199,94 +194,86 @@ impl Validator { } /// Produces an unsigned attestation. - pub fn produce_attestation( + pub async fn produce_attestation( &self, slot: Slot, committee_index: CommitteeIndex, - ) -> impl Future, Error = Error> { + ) -> Result, Error> { let query_params = vec![ ("slot".into(), format!("{}", slot)), ("committee_index".into(), format!("{}", committee_index)), ]; let client = self.0.clone(); - self.url("attestation") - .into_future() - .and_then(move |url| client.json_get(url, query_params)) + let url = self.url("attestation")?; + client.json_get(url, query_params).await } /// Produces an aggregate attestation. 
- pub fn produce_aggregate_attestation( + pub async fn produce_aggregate_attestation( &self, attestation_data: &AttestationData, - ) -> impl Future, Error = Error> { + ) -> Result, Error> { let query_params = vec![( "attestation_data".into(), as_ssz_hex_string(attestation_data), )]; let client = self.0.clone(); - self.url("aggregate_attestation") - .into_future() - .and_then(move |url| client.json_get(url, query_params)) + let url = self.url("aggregate_attestation")?; + client.json_get(url, query_params).await } /// Posts a list of attestations to the beacon node, expecting it to verify it and publish it to the network. - pub fn publish_attestations( + pub async fn publish_attestations( &self, attestation: Vec>, - ) -> impl Future { + ) -> Result { let client = self.0.clone(); - self.url("attestations") - .into_future() - .and_then(move |url| client.json_post::<_>(url, attestation)) - .and_then(|mut response| { - response - .text() - .map(|text| (response, text)) - .map_err(Error::from) - }) - .and_then(|(response, text)| match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - }) + let url = self.url("attestations")?; + let response = client.json_post::<_>(url, attestation).await?; + + match response.status() { + StatusCode::OK => Ok(PublishStatus::Valid), + StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( + response.text().await.map_err(Error::from)?, + )), + _ => response + .error_for_status() + .map_err(Error::from) + .map(|_| PublishStatus::Unknown), + } } /// Posts a list of signed aggregates and proofs to the beacon node, expecting it to verify it and publish it to the network. 
- pub fn publish_aggregate_and_proof( + pub async fn publish_aggregate_and_proof( &self, signed_aggregate_and_proofs: Vec>, - ) -> impl Future { + ) -> Result { let client = self.0.clone(); - self.url("aggregate_and_proofs") - .into_future() - .and_then(move |url| client.json_post::<_>(url, signed_aggregate_and_proofs)) - .and_then(|mut response| { - response - .text() - .map(|text| (response, text)) - .map_err(Error::from) - }) - .and_then(|(response, text)| match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - }) + let url = self.url("aggregate_and_proofs")?; + let response = client + .json_post::<_>(url, signed_aggregate_and_proofs) + .await?; + + match response.status() { + StatusCode::OK => Ok(PublishStatus::Valid), + StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( + response.text().await.map_err(Error::from)?, + )), + _ => response + .error_for_status() + .map_err(Error::from) + .map(|_| PublishStatus::Unknown), + } } /// Returns the duties required of the given validator pubkeys in the given epoch. 
- pub fn get_duties( + pub async fn get_duties( &self, epoch: Epoch, validator_pubkeys: &[PublicKey], - ) -> impl Future, Error = Error> { + ) -> Result, Error> { let client = self.0.clone(); let bulk_request = ValidatorDutiesRequest { @@ -297,79 +284,68 @@ impl Validator { .collect(), }; - self.url("duties") - .into_future() - .and_then(move |url| client.json_post::<_>(url, bulk_request)) - .and_then(|response| error_for_status(response).map_err(Error::from)) - .and_then(|mut success| success.json().map_err(Error::from)) + let url = self.url("duties")?; + let response = client.json_post::<_>(url, bulk_request).await?; + let success = error_for_status(response).await.map_err(Error::from)?; + success.json().await.map_err(Error::from) } /// Posts a block to the beacon node, expecting it to verify it and publish it to the network. - pub fn publish_block( - &self, - block: SignedBeaconBlock, - ) -> impl Future { + pub async fn publish_block(&self, block: SignedBeaconBlock) -> Result { let client = self.0.clone(); - self.url("block") - .into_future() - .and_then(move |url| client.json_post::<_>(url, block)) - .and_then(|mut response| { - response - .text() - .map(|text| (response, text)) - .map_err(Error::from) - }) - .and_then(|(response, text)| match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - }) + let url = self.url("block")?; + let response = client.json_post::<_>(url, block).await?; + + match response.status() { + StatusCode::OK => Ok(PublishStatus::Valid), + StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( + response.text().await.map_err(Error::from)?, + )), + _ => response + .error_for_status() + .map_err(Error::from) + .map(|_| PublishStatus::Unknown), + } } /// Requests a new (unsigned) block from the beacon node. 
- pub fn produce_block( + pub async fn produce_block( &self, slot: Slot, randao_reveal: Signature, - ) -> impl Future, Error = Error> { + ) -> Result, Error> { let client = self.0.clone(); - self.url("block").into_future().and_then(move |url| { - client.json_get::>( + let url = self.url("block")?; + client + .json_get::>( url, vec![ ("slot".into(), format!("{}", slot.as_u64())), ("randao_reveal".into(), as_ssz_hex_string(&randao_reveal)), ], ) - }) + .await } /// Subscribes a list of validators to particular slots for attestation production/publication. - pub fn subscribe( + pub async fn subscribe( &self, subscriptions: Vec, - ) -> impl Future { + ) -> Result { let client = self.0.clone(); - self.url("subscribe") - .into_future() - .and_then(move |url| client.json_post::<_>(url, subscriptions)) - .and_then(|mut response| { - response - .text() - .map(|text| (response, text)) - .map_err(Error::from) - }) - .and_then(|(response, text)| match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - }) + let url = self.url("subscribe")?; + let response = client.json_post::<_>(url, subscriptions).await?; + + match response.status() { + StatusCode::OK => Ok(PublishStatus::Valid), + StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( + response.text().await.map_err(Error::from)?, + )), + _ => response + .error_for_status() + .map_err(Error::from) + .map(|_| PublishStatus::Unknown), + } } } @@ -386,120 +362,116 @@ impl Beacon { } /// Returns the genesis time. - pub fn get_genesis_time(&self) -> impl Future { + pub async fn get_genesis_time(&self) -> Result { let client = self.0.clone(); - self.url("genesis_time") - .into_future() - .and_then(move |url| client.json_get(url, vec![])) + let url = self.url("genesis_time")?; + client.json_get(url, vec![]).await } /// Returns the genesis validators root. 
- pub fn get_genesis_validators_root(&self) -> impl Future { + pub async fn get_genesis_validators_root(&self) -> Result { let client = self.0.clone(); - self.url("genesis_validators_root") - .into_future() - .and_then(move |url| client.json_get(url, vec![])) + let url = self.url("genesis_validators_root")?; + client.json_get(url, vec![]).await } /// Returns the fork at the head of the beacon chain. - pub fn get_fork(&self) -> impl Future { + pub async fn get_fork(&self) -> Result { let client = self.0.clone(); - self.url("fork") - .into_future() - .and_then(move |url| client.json_get(url, vec![])) + let url = self.url("fork")?; + client.json_get(url, vec![]).await } /// Returns info about the head of the canonical beacon chain. - pub fn get_head(&self) -> impl Future { + pub async fn get_head(&self) -> Result { let client = self.0.clone(); - self.url("head") - .into_future() - .and_then(move |url| client.json_get::(url, vec![])) + let url = self.url("head")?; + client.json_get::(url, vec![]).await } /// Returns the set of known beacon chain head blocks. One of these will be the canonical head. - pub fn get_heads(&self) -> impl Future, Error = Error> { + pub async fn get_heads(&self) -> Result, Error> { let client = self.0.clone(); - self.url("heads") - .into_future() - .and_then(move |url| client.json_get(url, vec![])) + let url = self.url("heads")?; + client.json_get(url, vec![]).await } /// Returns the block and block root at the given slot. - pub fn get_block_by_slot( + pub async fn get_block_by_slot( &self, slot: Slot, - ) -> impl Future, Hash256), Error = Error> { + ) -> Result<(SignedBeaconBlock, Hash256), Error> { self.get_block("slot".to_string(), format!("{}", slot.as_u64())) + .await } /// Returns the block and block root at the given root. 
- pub fn get_block_by_root( + pub async fn get_block_by_root( &self, root: Hash256, - ) -> impl Future, Hash256), Error = Error> { + ) -> Result<(SignedBeaconBlock, Hash256), Error> { self.get_block("root".to_string(), root_as_string(root)) + .await } /// Returns the block and block root at the given slot. - fn get_block( + async fn get_block( &self, query_key: String, query_param: String, - ) -> impl Future, Hash256), Error = Error> { + ) -> Result<(SignedBeaconBlock, Hash256), Error> { let client = self.0.clone(); - self.url("block") - .into_future() - .and_then(move |url| { - client.json_get::>(url, vec![(query_key, query_param)]) - }) + let url = self.url("block")?; + client + .json_get::>(url, vec![(query_key, query_param)]) + .await .map(|response| (response.beacon_block, response.root)) } /// Returns the state and state root at the given slot. - pub fn get_state_by_slot( - &self, - slot: Slot, - ) -> impl Future, Hash256), Error = Error> { + pub async fn get_state_by_slot(&self, slot: Slot) -> Result<(BeaconState, Hash256), Error> { self.get_state("slot".to_string(), format!("{}", slot.as_u64())) + .await } /// Returns the state and state root at the given root. - pub fn get_state_by_root( + pub async fn get_state_by_root( &self, root: Hash256, - ) -> impl Future, Hash256), Error = Error> { + ) -> Result<(BeaconState, Hash256), Error> { self.get_state("root".to_string(), root_as_string(root)) + .await } /// Returns the root of the state at the given slot. - pub fn get_state_root(&self, slot: Slot) -> impl Future { + pub async fn get_state_root(&self, slot: Slot) -> Result { let client = self.0.clone(); - self.url("state_root").into_future().and_then(move |url| { - client.json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) - }) + let url = self.url("state_root")?; + client + .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) + .await } /// Returns the root of the block at the given slot. 
- pub fn get_block_root(&self, slot: Slot) -> impl Future { + pub async fn get_block_root(&self, slot: Slot) -> Result { let client = self.0.clone(); - self.url("block_root").into_future().and_then(move |url| { - client.json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) - }) + let url = self.url("block_root")?; + client + .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) + .await } /// Returns the state and state root at the given slot. - fn get_state( + async fn get_state( &self, query_key: String, query_param: String, - ) -> impl Future, Hash256), Error = Error> { + ) -> Result<(BeaconState, Hash256), Error> { let client = self.0.clone(); - self.url("state") - .into_future() - .and_then(move |url| { - client.json_get::>(url, vec![(query_key, query_param)]) - }) + let url = self.url("state")?; + client + .json_get::>(url, vec![(query_key, query_param)]) + .await .map(|response| (response.beacon_state, response.root)) } @@ -507,11 +479,11 @@ impl Beacon { /// /// If `state_root` is `Some`, the query will use the given state instead of the default /// canonical head state. - pub fn get_validators( + pub async fn get_validators( &self, validator_pubkeys: Vec, state_root: Option, - ) -> impl Future, Error = Error> { + ) -> Result, Error> { let client = self.0.clone(); let bulk_request = ValidatorRequest { @@ -522,21 +494,20 @@ impl Beacon { .collect(), }; - self.url("validators") - .into_future() - .and_then(move |url| client.json_post::<_>(url, bulk_request)) - .and_then(|response| error_for_status(response).map_err(Error::from)) - .and_then(|mut success| success.json().map_err(Error::from)) + let url = self.url("validators")?; + let response = client.json_post::<_>(url, bulk_request).await?; + let success = error_for_status(response).await.map_err(Error::from)?; + success.json().await.map_err(Error::from) } /// Returns all validators. 
/// /// If `state_root` is `Some`, the query will use the given state instead of the default /// canonical head state. - pub fn get_all_validators( + pub async fn get_all_validators( &self, state_root: Option, - ) -> impl Future, Error = Error> { + ) -> Result, Error> { let client = self.0.clone(); let query_params = if let Some(state_root) = state_root { @@ -545,19 +516,18 @@ impl Beacon { vec![] }; - self.url("validators/all") - .into_future() - .and_then(move |url| client.json_get(url, query_params)) + let url = self.url("validators/all")?; + client.json_get(url, query_params).await } /// Returns the active validators. /// /// If `state_root` is `Some`, the query will use the given state instead of the default /// canonical head state. - pub fn get_active_validators( + pub async fn get_active_validators( &self, state_root: Option, - ) -> impl Future, Error = Error> { + ) -> Result, Error> { let client = self.0.clone(); let query_params = if let Some(state_root) = state_root { @@ -566,53 +536,42 @@ impl Beacon { vec![] }; - self.url("validators/active") - .into_future() - .and_then(move |url| client.json_get(url, query_params)) + let url = self.url("validators/active")?; + client.json_get(url, query_params).await } /// Returns committees at the given epoch. 
- pub fn get_committees( - &self, - epoch: Epoch, - ) -> impl Future, Error = Error> { + pub async fn get_committees(&self, epoch: Epoch) -> Result, Error> { let client = self.0.clone(); - self.url("committees").into_future().and_then(move |url| { - client.json_get(url, vec![("epoch".into(), format!("{}", epoch.as_u64()))]) - }) + let url = self.url("committees")?; + client + .json_get(url, vec![("epoch".into(), format!("{}", epoch.as_u64()))]) + .await } - pub fn proposer_slashing( + pub async fn proposer_slashing( &self, proposer_slashing: ProposerSlashing, - ) -> impl Future { + ) -> Result { let client = self.0.clone(); - self.url("proposer_slashing") - .into_future() - .and_then(move |url| { - client - .json_post::<_>(url, proposer_slashing) - .and_then(|response| error_for_status(response).map_err(Error::from)) - .and_then(|mut success| success.json().map_err(Error::from)) - }) + let url = self.url("proposer_slashing")?; + let response = client.json_post::<_>(url, proposer_slashing).await?; + let success = error_for_status(response).await.map_err(Error::from)?; + success.json().await.map_err(Error::from) } - pub fn attester_slashing( + pub async fn attester_slashing( &self, attester_slashing: AttesterSlashing, - ) -> impl Future { + ) -> Result { let client = self.0.clone(); - self.url("attester_slashing") - .into_future() - .and_then(move |url| { - client - .json_post::<_>(url, attester_slashing) - .and_then(|response| error_for_status(response).map_err(Error::from)) - .and_then(|mut success| success.json().map_err(Error::from)) - }) + let url = self.url("attester_slashing")?; + let response = client.json_post::<_>(url, attester_slashing).await?; + let success = error_for_status(response).await.map_err(Error::from)?; + success.json().await.map_err(Error::from) } } @@ -628,11 +587,10 @@ impl Spec { .map_err(Into::into) } - pub fn get_eth2_config(&self) -> impl Future { + pub async fn get_eth2_config(&self) -> Result { let client = self.0.clone(); - 
self.url("eth2_config") - .into_future() - .and_then(move |url| client.json_get(url, vec![])) + let url = self.url("eth2_config")?; + client.json_get(url, vec![]).await } } @@ -648,18 +606,16 @@ impl Node { .map_err(Into::into) } - pub fn get_version(&self) -> impl Future { + pub async fn get_version(&self) -> Result { let client = self.0.clone(); - self.url("version") - .into_future() - .and_then(move |url| client.json_get(url, vec![])) + let url = self.url("version")?; + client.json_get(url, vec![]).await } - pub fn syncing_status(&self) -> impl Future { + pub async fn syncing_status(&self) -> Result { let client = self.0.clone(); - self.url("syncing") - .into_future() - .and_then(move |url| client.json_get(url, vec![])) + let url = self.url("syncing")?; + client.json_get(url, vec![]).await } } @@ -676,21 +632,17 @@ impl Advanced { } /// Gets the core `ProtoArray` struct from the node. - pub fn get_fork_choice(&self) -> impl Future { + pub async fn get_fork_choice(&self) -> Result { let client = self.0.clone(); - self.url("fork_choice") - .into_future() - .and_then(move |url| client.json_get(url, vec![])) + let url = self.url("fork_choice")?; + client.json_get(url, vec![]).await } /// Gets the core `PersistedOperationPool` struct from the node. - pub fn get_operation_pool( - &self, - ) -> impl Future, Error = Error> { + pub async fn get_operation_pool(&self) -> Result, Error> { let client = self.0.clone(); - self.url("operation_pool") - .into_future() - .and_then(move |url| client.json_get(url, vec![])) + let url = self.url("operation_pool")?; + client.json_get(url, vec![]).await } } @@ -707,31 +659,26 @@ impl Consensus { } /// Gets a `IndividualVote` for each of the given `pubkeys`. 
- pub fn get_individual_votes( + pub async fn get_individual_votes( &self, epoch: Epoch, pubkeys: Vec, - ) -> impl Future { + ) -> Result { let client = self.0.clone(); let req_body = IndividualVotesRequest { epoch, pubkeys }; - self.url("individual_votes") - .into_future() - .and_then(move |url| client.json_post::<_>(url, req_body)) - .and_then(|response| error_for_status(response).map_err(Error::from)) - .and_then(|mut success| success.json().map_err(Error::from)) + let url = self.url("individual_votes")?; + let response = client.json_post::<_>(url, req_body).await?; + let success = error_for_status(response).await.map_err(Error::from)?; + success.json().await.map_err(Error::from) } /// Gets a `VoteCount` for the given `epoch`. - pub fn get_vote_count( - &self, - epoch: Epoch, - ) -> impl Future { + pub async fn get_vote_count(&self, epoch: Epoch) -> Result { let client = self.0.clone(); let query_params = vec![("epoch".into(), format!("{}", epoch.as_u64()))]; - self.url("vote_count") - .into_future() - .and_then(move |url| client.json_get(url, query_params)) + let url = self.url("vote_count")?; + client.json_get(url, query_params).await } } diff --git a/eth2/utils/rest_types/Cargo.toml b/eth2/utils/rest_types/Cargo.toml index c2745c0603..b9867c912b 100644 --- a/eth2/utils/rest_types/Cargo.toml +++ b/eth2/utils/rest_types/Cargo.toml @@ -6,11 +6,11 @@ edition = "2018" [dependencies] types = { path = "../../types" } -eth2_ssz_derive = { path = "../ssz_derive" } -eth2_ssz = { path = "../ssz" } -eth2_hashing = { path = "../eth2_hashing" } -tree_hash = { path = "../tree_hash" } +eth2_ssz_derive = "0.1.0" +eth2_ssz = "0.1.2" +eth2_hashing = "0.1.0" +tree_hash = "0.1.0" state_processing = { path = "../../state_processing" } bls = { path = "../bls" } -serde = { version = "1.0.102", features = ["derive"] } +serde = { version = "1.0.110", features = ["derive"] } rayon = "1.3.0" diff --git a/eth2/utils/serde_hex/Cargo.toml b/eth2/utils/serde_hex/Cargo.toml index 
b4d7bf619a..2df5ff02a0 100644 --- a/eth2/utils/serde_hex/Cargo.toml +++ b/eth2/utils/serde_hex/Cargo.toml @@ -5,5 +5,5 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -serde = "1.0.102" -hex = "0.3" +serde = "1.0.110" +hex = "0.4.2" diff --git a/eth2/utils/serde_hex/src/lib.rs b/eth2/utils/serde_hex/src/lib.rs index 7b254cf88c..db84222757 100644 --- a/eth2/utils/serde_hex/src/lib.rs +++ b/eth2/utils/serde_hex/src/lib.rs @@ -1,17 +1,10 @@ -use hex::ToHex; use serde::de::{self, Visitor}; use std::fmt; pub fn encode>(data: T) -> String { - let mut hex = String::with_capacity(data.as_ref().len() * 2); - - // Writing to a string never errors, so we can unwrap here. - data.write_hex(&mut hex).unwrap(); - + let hex = hex::encode(data); let mut s = "0x".to_string(); - s.push_str(hex.as_str()); - s } diff --git a/eth2/utils/slot_clock/Cargo.toml b/eth2/utils/slot_clock/Cargo.toml index 81a7b57a9d..40da33af2d 100644 --- a/eth2/utils/slot_clock/Cargo.toml +++ b/eth2/utils/slot_clock/Cargo.toml @@ -8,4 +8,4 @@ edition = "2018" types = { path = "../../types" } lazy_static = "1.4.0" lighthouse_metrics = { path = "../lighthouse_metrics" } -parking_lot = "0.9.0" +parking_lot = "0.10.2" diff --git a/eth2/utils/ssz/Cargo.toml b/eth2/utils/ssz/Cargo.toml index 162c91ae13..e9220cdc64 100644 --- a/eth2/utils/ssz/Cargo.toml +++ b/eth2/utils/ssz/Cargo.toml @@ -14,7 +14,7 @@ eth2_ssz_derive = "0.1.0" [dependencies] ethereum-types = "0.9.1" -smallvec = "1.2.0" +smallvec = "1.4.0" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/eth2/utils/ssz_derive/Cargo.toml b/eth2/utils/ssz_derive/Cargo.toml index db25e16be0..e074f001a8 100644 --- a/eth2/utils/ssz_derive/Cargo.toml +++ b/eth2/utils/ssz_derive/Cargo.toml @@ -11,5 +11,5 @@ name = "ssz_derive" proc-macro = true [dependencies] -syn = "0.15" -quote = "0.6" +syn = "1.0.18" +quote = "1.0.4" diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs index 04ef8b9826..ae350a1cbb 100644 --- 
a/eth2/utils/ssz_derive/src/lib.rs +++ b/eth2/utils/ssz_derive/src/lib.rs @@ -54,7 +54,8 @@ fn get_serializable_field_types<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a /// The field attribute is: `#[ssz(skip_serializing)]` fn should_skip_serializing(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { - attr.path.is_ident("ssz") && attr.tts.to_string().replace(" ", "") == "(skip_serializing)" + attr.path.is_ident("ssz") + && attr.tokens.to_string().replace(" ", "") == "(skip_serializing)" }) } @@ -148,7 +149,8 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { /// The field attribute is: `#[ssz(skip_deserializing)]` fn should_skip_deserializing(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { - attr.path.is_ident("ssz") && attr.tts.to_string().replace(" ", "") == "(skip_deserializing)" + attr.path.is_ident("ssz") + && attr.tokens.to_string().replace(" ", "") == "(skip_deserializing)" }) } diff --git a/eth2/utils/ssz_types/Cargo.toml b/eth2/utils/ssz_types/Cargo.toml index eb7ffb4835..9fc841c22e 100644 --- a/eth2/utils/ssz_types/Cargo.toml +++ b/eth2/utils/ssz_types/Cargo.toml @@ -9,13 +9,13 @@ name = "ssz_types" [dependencies] tree_hash = "0.1.0" -serde = "1.0.102" -serde_derive = "1.0.102" +serde = "1.0.110" +serde_derive = "1.0.110" serde_hex = { path = "../serde_hex" } eth2_ssz = "0.1.2" -typenum = "1.11.2" -arbitrary = { version = "0.4", features = ["derive"], optional = true } +typenum = "1.12.0" +arbitrary = { version = "0.4.4", features = ["derive"], optional = true } [dev-dependencies] serde_yaml = "0.8.11" -tree_hash_derive = "0.2" +tree_hash_derive = "0.2.0" diff --git a/eth2/utils/swap_or_not_shuffle/Cargo.toml b/eth2/utils/swap_or_not_shuffle/Cargo.toml index e624964168..da2b72664d 100644 --- a/eth2/utils/swap_or_not_shuffle/Cargo.toml +++ b/eth2/utils/swap_or_not_shuffle/Cargo.toml @@ -9,9 +9,9 @@ name = "benches" harness = false [dev-dependencies] -criterion = "0.3.0" +criterion = "0.3.2" yaml-rust = "0.4.3" 
-hex = "0.3" +hex = "0.4.2" [dependencies] eth2_hashing = "0.1.0" diff --git a/eth2/utils/test_random_derive/Cargo.toml b/eth2/utils/test_random_derive/Cargo.toml index 494e9d8ebf..a02cb7fdad 100644 --- a/eth2/utils/test_random_derive/Cargo.toml +++ b/eth2/utils/test_random_derive/Cargo.toml @@ -9,5 +9,5 @@ description = "Procedural derive macros for implementation of TestRandom trait" proc-macro = true [dependencies] -syn = "0.15" -quote = "0.6" +syn = "1.0.18" +quote = "1.0.4" diff --git a/eth2/utils/test_random_derive/src/lib.rs b/eth2/utils/test_random_derive/src/lib.rs index d6e3a0f950..fabc61c7fd 100644 --- a/eth2/utils/test_random_derive/src/lib.rs +++ b/eth2/utils/test_random_derive/src/lib.rs @@ -10,7 +10,7 @@ use syn::{parse_macro_input, DeriveInput}; /// The field attribute is: `#[test_random(default)]` fn should_use_default(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { - attr.path.is_ident("test_random") && attr.tts.to_string().replace(" ", "") == "(default)" + attr.path.is_ident("test_random") && attr.tokens.to_string().replace(" ", "") == "(default)" }) } diff --git a/eth2/utils/tree_hash/Cargo.toml b/eth2/utils/tree_hash/Cargo.toml index db207ab4ac..ff84952e6f 100644 --- a/eth2/utils/tree_hash/Cargo.toml +++ b/eth2/utils/tree_hash/Cargo.toml @@ -11,16 +11,16 @@ name = "benches" harness = false [dev-dependencies] -criterion = "0.3.0" -rand = "0.7.2" -tree_hash_derive = "0.2" +criterion = "0.3.2" +rand = "0.7.3" +tree_hash_derive = "0.2.0" types = { path = "../../types" } lazy_static = "1.4.0" [dependencies] -ethereum-types = "0.9" +ethereum-types = "0.9.1" eth2_hashing = "0.1.0" -smallvec = "1.2.0" +smallvec = "1.4.0" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/eth2/utils/tree_hash_derive/Cargo.toml b/eth2/utils/tree_hash_derive/Cargo.toml index 9c3050e57f..11caabe076 100644 --- a/eth2/utils/tree_hash_derive/Cargo.toml +++ b/eth2/utils/tree_hash_derive/Cargo.toml @@ -10,5 +10,5 @@ license = "Apache-2.0" proc-macro 
= true [dependencies] -syn = "0.15" -quote = "0.6" +syn = "1.0.18" +quote = "1.0.4" diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index e233e4ed57..d57903654d 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -51,7 +51,7 @@ fn get_cache_field_for(field: &syn::Field) -> Option { let parsed_attrs = cached_tree_hash_attr_metas(&field.attrs); if let [Meta::List(MetaList { nested, .. })] = &parsed_attrs[..] { nested.iter().find_map(|x| match x { - NestedMeta::Meta(Meta::Word(cache_field_ident)) => Some(cache_field_ident.clone()), + NestedMeta::Meta(Meta::Path(path)) => path.get_ident().cloned(), _ => None, }) } else { @@ -73,7 +73,8 @@ fn cached_tree_hash_attr_metas(attrs: &[Attribute]) -> Vec { /// The field attribute is: `#[tree_hash(skip_hashing)]` fn should_skip_hashing(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { - attr.path.is_ident("tree_hash") && attr.tts.to_string().replace(" ", "") == "(skip_hashing)" + attr.path.is_ident("tree_hash") + && attr.tokens.to_string().replace(" ", "") == "(skip_hashing)" }) } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index d173cc28e4..47dace9155 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -9,23 +9,24 @@ edition = "2018" [dependencies] clap = "2.33.0" -hex = "0.3" +hex = "0.4.2" log = "0.4.8" -serde = "1.0.102" +serde = "1.0.110" serde_yaml = "0.8.11" -simple_logger = "1.3.0" +simple_logger = "1.6.0" types = { path = "../eth2/types" } state_processing = { path = "../eth2/state_processing" } eth2_ssz = "0.1.2" -regex = "1.3.1" +regex = "1.3.7" eth1_test_rig = { path = "../tests/eth1_test_rig" } -futures = "0.1.25" +futures = { version = "0.3.5", features = ["compat"] } environment = { path = "../lighthouse/environment" } web3 = "0.10.0" eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" } -dirs = "2.0" +dirs = "2.0.2" genesis = { path = "../beacon_node/genesis" } deposit_contract = { path = 
"../eth2/utils/deposit_contract" } -tree_hash = { path = "../eth2/utils/tree_hash" } +tree_hash = "0.1.0" +tokio = { version = "0.2.20", features = ["full"] } clap_utils = { path = "../eth2/utils/clap_utils" } eth2-libp2p = { path = "../beacon_node/eth2-libp2p" } diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs index 22fd84fc65..c50d33cc85 100644 --- a/lcli/src/deploy_deposit_contract.rs +++ b/lcli/src/deploy_deposit_contract.rs @@ -5,7 +5,7 @@ use deposit_contract::{ CONTRACT_DEPLOY_GAS, }; use environment::Environment; -use futures::{Future, IntoFuture}; +use futures::compat::Future01CompatExt; use std::path::PathBuf; use types::EthSpec; use web3::{ @@ -15,7 +15,7 @@ use web3::{ Web3, }; -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?; let from_address: Address = clap_utils::parse_required(matches, "from-address")?; let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?; @@ -31,18 +31,20 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< ) })?; - // It's unlikely that this will be the _actual_ deployment block, however it'll be close - // enough to serve our purposes. - // - // We only need the deposit block to put a lower bound on the block number we need to search - // for deposit logs. - let deploy_block = env - .runtime() - .block_on(web3.eth().block_number()) - .map_err(|e| format!("Failed to get block number: {}", e))?; + env.runtime().block_on(async { + // It's unlikely that this will be the _actual_ deployment block, however it'll be close + // enough to serve our purposes. + // + // We only need the deposit block to put a lower bound on the block number we need to search + // for deposit logs. 
+ let deploy_block = web3 + .eth() + .block_number() + .compat() + .await + .map_err(|e| format!("Failed to get block number: {}", e))?; - let address = env.runtime().block_on( - Contract::deploy(web3.eth(), &ABI) + let pending_contract = Contract::deploy(web3.eth(), &ABI) .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? .confirmations(confirmations) .options(Options { @@ -50,17 +52,17 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< ..Options::default() }) .execute(bytecode, (), from_address) - .into_future() - .map_err(|e| format!("Unable to execute deployment: {:?}", e)) - .and_then(|pending| { - pending.map_err(|e| format!("Unable to await pending contract: {:?}", e)) - }) - .map(|tx_receipt| tx_receipt.address()) - .map_err(|e| format!("Failed to execute deployment: {:?}", e)), - )?; + .map_err(|e| format!("Unable to execute deployment: {:?}", e))?; - println!("deposit_contract_address: {:?}", address); - println!("deposit_contract_deploy_block: {}", deploy_block); + let address = pending_contract + .compat() + .await + .map_err(|e| format!("Unable to await pending contract: {:?}", e))? + .address(); - Ok(()) + println!("deposit_contract_address: {:?}", address); + println!("deposit_contract_deploy_block: {}", deploy_block); + + Ok(()) + }) } diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 4b06d2c11b..1d267e3266 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -1,7 +1,6 @@ use clap::ArgMatches; use environment::Environment; use eth2_testnet_config::Eth2TestnetConfig; -use futures::Future; use genesis::{Eth1Config, Eth1GenesisService}; use std::path::PathBuf; use std::time::Duration; @@ -10,7 +9,7 @@ use types::EthSpec; /// Interval between polling the eth1 node for genesis information. 
pub const ETH1_GENESIS_UPDATE_INTERVAL: Duration = Duration::from_millis(7_000); -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { let endpoint = matches .value_of("eth1-endpoint") .ok_or_else(|| "eth1-endpoint not specified")?; @@ -49,19 +48,19 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let genesis_service = Eth1GenesisService::new(config, env.core_context().log.clone()); - let future = genesis_service - .wait_for_genesis_state(ETH1_GENESIS_UPDATE_INTERVAL, spec) - .map(move |genesis_state| { - eth2_testnet_config.genesis_state = Some(genesis_state); - eth2_testnet_config.force_write_to_file(testnet_dir) - }); + env.runtime().block_on(async { + let _ = genesis_service + .wait_for_genesis_state(ETH1_GENESIS_UPDATE_INTERVAL, spec) + .await + .map(move |genesis_state| { + eth2_testnet_config.genesis_state = Some(genesis_state); + eth2_testnet_config.force_write_to_file(testnet_dir) + }) + .map_err(|e| format!("Failed to find genesis: {}", e))?; - info!("Starting service to produce genesis BeaconState from eth1"); - info!("Connecting to eth1 http endpoint: {}", endpoint); + info!("Starting service to produce genesis BeaconState from eth1"); + info!("Connecting to eth1 http endpoint: {}", endpoint); - env.runtime() - .block_on(future) - .map_err(|e| format!("Failed to find genesis: {}", e))??; - - Ok(()) + Ok(()) + }) } diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 2d6e685a62..3fc259e634 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -1,9 +1,8 @@ use clap::ArgMatches; use eth2_libp2p::{ - discovery::{build_enr, CombinedKey, Keypair, ENR_FILENAME}, + discovery::{build_enr, CombinedKey, CombinedKeyExt, Keypair, ENR_FILENAME}, NetworkConfig, NETWORK_KEY_FILENAME, }; -use std::convert::TryInto; use std::fs; use std::fs::File; use std::io::Write; @@ 
-30,10 +29,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { config.enr_tcp_port = Some(tcp_port); let local_keypair = Keypair::generate_secp256k1(); - let enr_key: CombinedKey = local_keypair - .clone() - .try_into() - .map_err(|e| format!("Unable to convert keypair: {:?}", e))?; + let enr_key = CombinedKey::from_libp2p(&local_keypair)?; let enr = build_enr::(&enr_key, &config, EnrForkId::default()) .map_err(|e| format!("Unable to create ENR: {:?}", e))?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 99621c1b15..c72206bce6 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -465,7 +465,10 @@ fn main() { } } -fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> Result<(), String> { +fn run( + env_builder: EnvironmentBuilder, + matches: &ArgMatches<'_>, +) -> Result<(), String> { let env = env_builder .multi_threaded_tokio_runtime() .map_err(|e| format!("should start tokio runtime: {:?}", e))? diff --git a/lcli/src/refund_deposit_contract.rs b/lcli/src/refund_deposit_contract.rs index 719a8ef1b0..0efa557ef1 100644 --- a/lcli/src/refund_deposit_contract.rs +++ b/lcli/src/refund_deposit_contract.rs @@ -1,6 +1,6 @@ use clap::ArgMatches; use environment::Environment; -use futures::Future; +use futures::compat::Future01CompatExt; use std::path::PathBuf; use types::EthSpec; use web3::{ @@ -12,7 +12,7 @@ use web3::{ /// `keccak("steal()")[0..4]` pub const STEAL_FN_SIGNATURE: &[u8] = &[0xcf, 0x7a, 0x89, 0x65]; -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?; let from: Address = clap_utils::parse_required(matches, "from-address")?; let contract_address: Address = clap_utils::parse_required(matches, "contract-address")?; @@ -21,8 +21,9 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< Ipc::new(eth1_ipc_path).map_err(|e| 
format!("Unable to connect to eth1 IPC: {:?}", e))?; let web3 = Web3::new(transport); - env.runtime().block_on( - web3.eth() + env.runtime().block_on(async { + let _ = web3 + .eth() .send_transaction(TransactionRequest { from, to: Some(contract_address), @@ -33,8 +34,10 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< nonce: None, condition: None, }) - .map_err(|e| format!("Failed to call deposit fn: {:?}", e)), - )?; + .compat() + .await + .map_err(|e| format!("Failed to call steal fn: {:?}", e))?; - Ok(()) + Ok(()) + }) } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 5bd211801d..a449ba7f3a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -9,17 +9,17 @@ write_ssz_files = ["beacon_node/write_ssz_files"] # Writes debugging .ssz files [dependencies] beacon_node = { "path" = "../beacon_node" } -tokio = "0.1.22" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } -sloggers = "0.3.4" +tokio = "0.2.20" +slog = { version = "2.5.2", features = ["max_level_trace"] } +sloggers = "1.0.0" types = { "path" = "../eth2/types" } -clap = "2.32.0" -env_logger = "0.6.1" +clap = "2.33.0" +env_logger = "0.7.1" logging = { path = "../eth2/utils/logging" } -slog-term = "^2.4.0" -slog-async = "^2.3.0" +slog-term = "2.5.0" +slog-async = "2.5.0" environment = { path = "./environment" } -futures = "0.1.25" +futures = "0.3.5" validator_client = { "path" = "../validator_client" } account_manager = { "path" = "../account_manager" } clap_utils = { path = "../eth2/utils/clap_utils" } diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 7f4c250945..358e94491c 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -6,19 +6,19 @@ edition = "2018" [dependencies] clap = "2.33.0" -tokio = "0.1.22" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } -sloggers = "0.3.4" +tokio = "0.2.20" +slog = { version = "2.5.2", features = ["max_level_trace"] } 
+sloggers = "1.0.0" types = { "path" = "../../eth2/types" } eth2_config = { "path" = "../../eth2/utils/eth2_config" } eth2_testnet_config = { path = "../../eth2/utils/eth2_testnet_config" } -env_logger = "0.6.1" +env_logger = "0.7.1" logging = { path = "../../eth2/utils/logging" } -slog-term = "^2.4.0" -slog-async = "^2.3.0" -ctrlc = { version = "3.1.1", features = ["termination"] } -futures = "0.1.25" -parking_lot = "0.7" +slog-term = "2.5.0" +slog-async = "2.5.0" +ctrlc = { version = "3.1.4", features = ["termination"] } +futures = "0.3.5" +parking_lot = "0.10.2" slog-json = "2.3.0" [dev-dependencies] diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 20e54b0979..e6076d49f1 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -9,7 +9,7 @@ use eth2_config::Eth2Config; use eth2_testnet_config::Eth2TestnetConfig; -use futures::{sync::oneshot, Future}; +use futures::channel::oneshot; use slog::{info, o, Drain, Level, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use std::cell::RefCell; @@ -17,7 +17,7 @@ use std::ffi::OsStr; use std::fs::{rename as FsRename, OpenOptions}; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; -use tokio::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor}; +use tokio::runtime::{Builder as RuntimeBuilder, Handle, Runtime}; use types::{EthSpec, InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; @@ -75,8 +75,13 @@ impl EnvironmentBuilder { /// /// The `Runtime` used is just the standard tokio runtime. 
pub fn multi_threaded_tokio_runtime(mut self) -> Result { - self.runtime = - Some(Runtime::new().map_err(|e| format!("Failed to start runtime: {:?}", e))?); + self.runtime = Some( + RuntimeBuilder::new() + .threaded_scheduler() + .enable_all() + .build() + .map_err(|e| format!("Failed to start runtime: {:?}", e))?, + ); Ok(self) } @@ -87,7 +92,8 @@ impl EnvironmentBuilder { pub fn single_thread_tokio_runtime(mut self) -> Result { self.runtime = Some( RuntimeBuilder::new() - .core_threads(1) + .basic_scheduler() + .enable_all() .build() .map_err(|e| format!("Failed to start runtime: {:?}", e))?, ); @@ -183,10 +189,10 @@ impl EnvironmentBuilder { /// An execution context that can be used by a service. /// /// Distinct from an `Environment` because a `Context` is not able to give a mutable reference to a -/// `Runtime`, instead it only has access to a `TaskExecutor`. +/// `Runtime`, instead it only has access to a `Runtime`. #[derive(Clone)] pub struct RuntimeContext { - pub executor: TaskExecutor, + pub runtime_handle: Handle, pub log: Logger, pub eth_spec_instance: E, pub eth2_config: Eth2Config, @@ -198,7 +204,7 @@ impl RuntimeContext { /// The generated service will have the `service_name` in all it's logs. pub fn service_context(&self, service_name: String) -> Self { Self { - executor: self.executor.clone(), + runtime_handle: self.runtime_handle.clone(), log: self.log.new(o!("service" => service_name)), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), @@ -233,7 +239,7 @@ impl Environment { /// Returns a `Context` where no "service" has been added to the logger output. 
pub fn core_context(&mut self) -> RuntimeContext { RuntimeContext { - executor: self.runtime.executor(), + runtime_handle: self.runtime.handle().clone(), log: self.log.clone(), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), @@ -243,7 +249,7 @@ impl Environment { /// Returns a `Context` where the `service_name` is added to the logger output. pub fn service_context(&mut self, service_name: String) -> RuntimeContext { RuntimeContext { - executor: self.runtime.executor(), + runtime_handle: self.runtime.handle().clone(), log: self.log.new(o!("service" => service_name)), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), @@ -268,11 +274,9 @@ impl Environment { } /// Shutdown the `tokio` runtime when all tasks are idle. - pub fn shutdown_on_idle(self) -> Result<(), String> { + pub fn shutdown_on_idle(self) { self.runtime - .shutdown_on_idle() - .wait() - .map_err(|e| format!("Tokio runtime shutdown returned an error: {:?}", e)) + .shutdown_timeout(std::time::Duration::from_secs(5)) } /// Sets the logger (and all child loggers) to log to a file. diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index b98e8e8336..b3299fe202 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -209,9 +209,11 @@ fn run( )) .map_err(|e| format!("Failed to init validator client: {}", e))?; - validator - .start_service() - .map_err(|e| format!("Failed to start validator client service: {}", e))?; + environment.core_context().runtime_handle.enter(|| { + validator + .start_service() + .map_err(|e| format!("Failed to start validator client service: {}", e)) + })?; Some(validator) } else { @@ -232,5 +234,5 @@ fn run( drop(validator_client); // Shutdown the environment once all tasks have completed. 
- environment.shutdown_on_idle() + Ok(environment.shutdown_on_idle()) } diff --git a/scripts/local_testnet_beacon_node.sh b/scripts/local_testnet_beacon_node.sh new file mode 100755 index 0000000000..47e710a3fa --- /dev/null +++ b/scripts/local_testnet_beacon_node.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# +# Starts a beacon node based upon a genesis state created by +# `./local_testnet_genesis_state`. +# + +TESTNET_DIR=~/.lighthouse/local-testnet/testnet +DATADIR=~/.lighthouse/local-testnet/beacon +DEBUG_LEVEL=${1:-info} + +exec lighthouse \ + --debug-level $DEBUG_LEVEL \ + bn \ + --datadir $DATADIR \ + --testnet-dir $TESTNET_DIR \ + --dummy-eth1 \ + --http diff --git a/scripts/local_testnet_clean.sh b/scripts/local_testnet_clean.sh new file mode 100755 index 0000000000..ebf144a1bc --- /dev/null +++ b/scripts/local_testnet_clean.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# +# Removes any existing local testnet +# + +rm -rf ~/.lighthouse/local-testnet diff --git a/scripts/local_testnet_setup.sh b/scripts/local_testnet_setup.sh new file mode 100755 index 0000000000..a4e9fdfed6 --- /dev/null +++ b/scripts/local_testnet_setup.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# +# Produces a testnet specification and a genesis state where the genesis time +# is now. +# +# Optionally, supply an integer as the first argument to override the default +# validator count of 1024. +# + +TESTNET_DIR=~/.lighthouse/local-testnet/testnet +VALIDATOR_COUNT=${1:-1024} + +lcli \ + --spec mainnet \ + new-testnet \ + --deposit-contract-address 0000000000000000000000000000000000000000 \ + --testnet-dir $TESTNET_DIR \ + --min-genesis-active-validator-count $VALIDATOR_COUNT \ + --force + +echo Created tesnet directory at $TESTNET_DIR +echo "Building genesis state... 
(this might take a while)" + +lcli \ + --spec mainnet \ + interop-genesis \ + --testnet-dir $TESTNET_DIR \ + $VALIDATOR_COUNT + +echo Created genesis state in $TESTNET_DIR + +echo $VALIDATOR_COUNT > $TESTNET_DIR/validator_count.txt diff --git a/scripts/local_testnet_valdiator_client.sh b/scripts/local_testnet_valdiator_client.sh new file mode 100755 index 0000000000..b0caf9fd44 --- /dev/null +++ b/scripts/local_testnet_valdiator_client.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# +# Starts a validator client based upon a genesis state created by +# `./local_testnet_genesis_state`. +# + +TESTNET_DIR=~/.lighthouse/local-testnet/testnet +DATADIR=~/.lighthouse/local-testnet/validator +DEBUG_LEVEL=${1:-info} + +exec lighthouse \ + --debug-level $DEBUG_LEVEL \ + vc \ + --datadir $DATADIR \ + --testnet-dir $TESTNET_DIR \ + testnet \ + insecure \ + 0 \ + $(cat $TESTNET_DIR/validator_count.txt) diff --git a/scripts/whiteblock_start.sh b/scripts/whiteblock_start.sh deleted file mode 100755 index f9d1a90075..0000000000 --- a/scripts/whiteblock_start.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/bin/bash - -<" - echo "--peers=" - echo "--validator-keys=" - echo "--gen-state=" - echo "--port=" -} - -while [ "$1" != "" ]; -do - PARAM=`echo $1 | awk -F= '{print $1}'` - VALUE=`echo $1 | sed 's/^[^=]*=//g'` - - case $PARAM in - --identity) - IDENTITY=$VALUE - ;; - --peers) - PEERS+=",$VALUE" - ;; - --validator-keys) - VALIDATOR_KEYS=$VALUE - ;; - --gen-state) - GEN_STATE=$VALUE - ;; - --port) - PORT=$VALUE - ;; - --help) - usage - exit - ;; - *) - echo "ERROR: unknown parameter \"$PARAM\"" - usage - exit 1 - ;; - esac - shift -done - -./lighthouse \ - --logfile $BEACON_LOG_FILE \ - beacon \ - --p2p-priv-key $IDENTITY \ - --libp2p-addresses $PEERS \ - --port $PORT \ - testnet \ - --force \ - file \ - ssz \ - $GEN_STATE \ - & \ - -./lighthouse \ - --logfile $VALIDATOR_LOG_FILE \ - validator \ - testnet \ - --bootstrap \ - interop-yaml \ - $YAML_KEY_FILE \ - -trap 'trap - SIGTERM && kill 0' SIGINT 
SIGTERM EXIT diff --git a/tests/ef_tests/Cargo.toml b/tests/ef_tests/Cargo.toml index 252cf1f90d..d399cba37d 100644 --- a/tests/ef_tests/Cargo.toml +++ b/tests/ef_tests/Cargo.toml @@ -12,19 +12,19 @@ fake_crypto = ["bls/fake_crypto"] [dependencies] bls = { path = "../../eth2/utils/bls" } compare_fields = { path = "../../eth2/utils/compare_fields" } -ethereum-types = "0.9" -hex = "0.3" -rayon = "1.2.0" -serde = "1.0.102" -serde_derive = "1.0.102" +ethereum-types = "0.9.1" +hex = "0.4.2" +rayon = "1.3.0" +serde = "1.0.110" +serde_derive = "1.0.110" serde_repr = "0.1.5" serde_yaml = "0.8.11" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" tree_hash = "0.1.0" -tree_hash_derive = "0.2" +tree_hash_derive = "0.2.0" cached_tree_hash = { path = "../../eth2/utils/cached_tree_hash" } state_processing = { path = "../../eth2/state_processing" } swap_or_not_shuffle = { path = "../../eth2/utils/swap_or_not_shuffle" } types = { path = "../../eth2/types" } -walkdir = "2.2.9" +walkdir = "2.3.1" diff --git a/tests/ef_tests/src/cases/sanity_slots.rs b/tests/ef_tests/src/cases/sanity_slots.rs index 6baacb5017..ead9fb5a82 100644 --- a/tests/ef_tests/src/cases/sanity_slots.rs +++ b/tests/ef_tests/src/cases/sanity_slots.rs @@ -66,7 +66,7 @@ impl Case for SanitySlots { state.build_all_caches(spec).unwrap(); let mut result = (0..self.slots) - .try_for_each(|_| per_slot_processing(&mut state, None, spec)) + .try_for_each(|_| per_slot_processing(&mut state, None, spec).map(|_| ())) .map(|_| state); compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/tests/eth1_test_rig/Cargo.toml b/tests/eth1_test_rig/Cargo.toml index 11552f4633..a3723ce264 100644 --- a/tests/eth1_test_rig/Cargo.toml +++ b/tests/eth1_test_rig/Cargo.toml @@ -6,8 +6,8 @@ edition = "2018" [dependencies] web3 = "0.10.0" -tokio = "0.1.22" -futures = "0.1.25" +tokio = { version = "0.2.20", features = ["time"] } +futures = { version = "0.3.5", features = ["compat"] } types = { path = "../../eth2/types"} 
-serde_json = "1.0" +serde_json = "1.0.52" deposit_contract = { path = "../../eth2/utils/deposit_contract"} diff --git a/tests/eth1_test_rig/src/ganache.rs b/tests/eth1_test_rig/src/ganache.rs index da771cdaca..faaae0ef35 100644 --- a/tests/eth1_test_rig/src/ganache.rs +++ b/tests/eth1_test_rig/src/ganache.rs @@ -1,4 +1,4 @@ -use futures::Future; +use futures::compat::Future01CompatExt; use serde_json::json; use std::io::prelude::*; use std::io::BufReader; @@ -98,28 +98,34 @@ impl GanacheInstance { } /// Increase the timestamp on future blocks by `increase_by` seconds. - pub fn increase_time(&self, increase_by: u64) -> impl Future { + pub async fn increase_time(&self, increase_by: u64) -> Result<(), String> { self.web3 .transport() .execute("evm_increaseTime", vec![json!(increase_by)]) + .compat() + .await .map(|_json_value| ()) .map_err(|e| format!("Failed to increase time on EVM (is this ganache?): {:?}", e)) } /// Returns the current block number, as u64 - pub fn block_number(&self) -> impl Future { + pub async fn block_number(&self) -> Result { self.web3 .eth() .block_number() + .compat() + .await .map(|v| v.as_u64()) .map_err(|e| format!("Failed to get block number: {:?}", e)) } /// Mines a single block. 
- pub fn evm_mine(&self) -> impl Future { + pub async fn evm_mine(&self) -> Result<(), String> { self.web3 .transport() .execute("evm_mine", vec![]) + .compat() + .await .map(|_| ()) .map_err(|_| { "utils should mine new block with evm_mine (only works with ganache-cli!)" diff --git a/tests/eth1_test_rig/src/lib.rs b/tests/eth1_test_rig/src/lib.rs index 3ce4426d00..a861c06b58 100644 --- a/tests/eth1_test_rig/src/lib.rs +++ b/tests/eth1_test_rig/src/lib.rs @@ -10,10 +10,10 @@ mod ganache; use deposit_contract::{ encode_eth1_tx_data, testnet, ABI, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, }; -use futures::{future, stream, Future, IntoFuture, Stream}; +use futures::compat::Future01CompatExt; use ganache::GanacheInstance; -use std::time::{Duration, Instant}; -use tokio::{runtime::Runtime, timer::Delay}; +use std::time::Duration; +use tokio::time::delay_for; use types::DepositData; use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256, Keypair, Signature}; use web3::contract::{Contract, Options}; @@ -31,13 +31,14 @@ pub struct GanacheEth1Instance { } impl GanacheEth1Instance { - pub fn new() -> impl Future { - GanacheInstance::new().into_future().and_then(|ganache| { - DepositContract::deploy(ganache.web3.clone(), 0, None).map(|deposit_contract| Self { + pub async fn new() -> Result { + let ganache = GanacheInstance::new()?; + DepositContract::deploy(ganache.web3.clone(), 0, None) + .await + .map(|deposit_contract| Self { ganache, deposit_contract, }) - }) } pub fn endpoint(&self) -> String { @@ -57,19 +58,19 @@ pub struct DepositContract { } impl DepositContract { - pub fn deploy( + pub async fn deploy( web3: Web3, confirmations: usize, password: Option, - ) -> impl Future { - Self::deploy_bytecode(web3, confirmations, BYTECODE, ABI, password) + ) -> Result { + Self::deploy_bytecode(web3, confirmations, BYTECODE, ABI, password).await } - pub fn deploy_testnet( + pub async fn deploy_testnet( web3: Web3, confirmations: usize, password: Option, - ) -> 
impl Future { + ) -> Result { Self::deploy_bytecode( web3, confirmations, @@ -77,35 +78,33 @@ impl DepositContract { testnet::ABI, password, ) + .await } - fn deploy_bytecode( + async fn deploy_bytecode( web3: Web3, confirmations: usize, bytecode: &[u8], abi: &[u8], password: Option, - ) -> impl Future { - let web3_1 = web3.clone(); - - deploy_deposit_contract( + ) -> Result { + let address = deploy_deposit_contract( web3.clone(), confirmations, bytecode.to_vec(), abi.to_vec(), password, ) + .await .map_err(|e| { format!( "Failed to deploy contract: {}. Is scripts/ganache_tests_node.sh running?.", e ) - }) - .and_then(move |address| { - Contract::from_json(web3_1.eth(), address, ABI) - .map_err(|e| format!("Failed to init contract: {:?}", e)) - }) - .map(|contract| Self { contract, web3 }) + })?; + Contract::from_json(web3.clone().eth(), address, ABI) + .map_err(|e| format!("Failed to init contract: {:?}", e)) + .map(move |contract| Self { contract, web3 }) } /// The deposit contract's address in `0x00ab...` format. @@ -136,7 +135,7 @@ impl DepositContract { /// Creates a random, valid deposit and submits it to the deposit contract. /// /// The keypairs are created randomly and destroyed. - pub fn deposit_random(&self, runtime: &mut Runtime) -> Result<(), String> { + pub async fn deposit_random(&self) -> Result<(), String> { let keypair = Keypair::random(); let mut deposit = DepositData { @@ -148,21 +147,21 @@ impl DepositContract { deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - self.deposit(runtime, deposit) + self.deposit(deposit).await } /// Perfoms a blocking deposit. 
- pub fn deposit(&self, runtime: &mut Runtime, deposit_data: DepositData) -> Result<(), String> { - runtime - .block_on(self.deposit_async(deposit_data)) + pub async fn deposit(&self, deposit_data: DepositData) -> Result<(), String> { + self.deposit_async(deposit_data) + .await .map_err(|e| format!("Deposit failed: {:?}", e)) } - pub fn deposit_deterministic_async( + pub async fn deposit_deterministic_async( &self, keypair_index: usize, amount: u64, - ) -> impl Future { + ) -> Result<(), String> { let keypair = generate_deterministic_keypair(keypair_index); let mut deposit = DepositData { @@ -174,73 +173,57 @@ impl DepositContract { deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - self.deposit_async(deposit) + self.deposit_async(deposit).await } /// Performs a non-blocking deposit. - pub fn deposit_async( - &self, - deposit_data: DepositData, - ) -> impl Future { - let contract = self.contract.clone(); - let web3_1 = self.web3.clone(); - - self.web3 + pub async fn deposit_async(&self, deposit_data: DepositData) -> Result<(), String> { + let from = self + .web3 .eth() .accounts() + .compat() + .await .map_err(|e| format!("Failed to get accounts: {:?}", e)) .and_then(|accounts| { accounts .get(DEPOSIT_ACCOUNTS_INDEX) .cloned() .ok_or_else(|| "Insufficient accounts for deposit".to_string()) - }) - .and_then(move |from| { - let tx_request = TransactionRequest { - from, - to: Some(contract.address()), - gas: Some(U256::from(DEPOSIT_GAS)), - gas_price: None, - value: Some(from_gwei(deposit_data.amount)), - // Note: the reason we use this `TransactionRequest` instead of just using the - // function in `self.contract` is so that the `encode_eth1_tx_data` function gets used - // during testing. - // - // It's important that `encode_eth1_tx_data` stays correct and does not suffer from - // code-rot. 
- data: encode_eth1_tx_data(&deposit_data).map(Into::into).ok(), - nonce: None, - condition: None, - }; + })?; + let tx_request = TransactionRequest { + from, + to: Some(self.contract.address()), + gas: Some(U256::from(DEPOSIT_GAS)), + gas_price: None, + value: Some(from_gwei(deposit_data.amount)), + // Note: the reason we use this `TransactionRequest` instead of just using the + // function in `self.contract` is so that the `encode_eth1_tx_data` function gets used + // during testing. + // + // It's important that `encode_eth1_tx_data` stays correct and does not suffer from + // code-rot. + data: encode_eth1_tx_data(&deposit_data).map(Into::into).ok(), + nonce: None, + condition: None, + }; - web3_1 - .eth() - .send_transaction(tx_request) - .map_err(|e| format!("Failed to call deposit fn: {:?}", e)) - }) - .map(|_| ()) + self.web3 + .eth() + .send_transaction(tx_request) + .compat() + .await + .map_err(|e| format!("Failed to call deposit fn: {:?}", e))?; + Ok(()) } /// Peforms many deposits, each preceded by a delay. - pub fn deposit_multiple( - &self, - deposits: Vec, - ) -> impl Future { - let s = self.clone(); - stream::unfold(deposits.into_iter(), move |mut deposit_iter| { - let s = s.clone(); - match deposit_iter.next() { - Some(deposit) => Some( - Delay::new(Instant::now() + deposit.delay) - .map_err(|e| format!("Failed to execute delay: {:?}", e)) - .and_then(move |_| s.deposit_async(deposit.deposit)) - .map(move |yielded| (yielded, deposit_iter)), - ), - None => None, - } - }) - .collect() - .map(|_| ()) + pub async fn deposit_multiple(&self, deposits: Vec) -> Result<(), String> { + for deposit in deposits.into_iter() { + delay_for(deposit.delay).await; + self.deposit_async(deposit.deposit).await?; + } + Ok(()) } } @@ -260,61 +243,56 @@ fn from_gwei(gwei: u64) -> U256 { /// Deploys the deposit contract to the given web3 instance using the account with index /// `DEPLOYER_ACCOUNTS_INDEX`. 
-fn deploy_deposit_contract( +async fn deploy_deposit_contract( web3: Web3, confirmations: usize, bytecode: Vec, abi: Vec, password_opt: Option, -) -> impl Future { +) -> Result { let bytecode = String::from_utf8(bytecode).expect("bytecode must be valid utf8"); - let web3_1 = web3.clone(); - web3.eth() + let from_address = web3 + .eth() .accounts() + .compat() + .await .map_err(|e| format!("Failed to get accounts: {:?}", e)) .and_then(|accounts| { accounts .get(DEPLOYER_ACCOUNTS_INDEX) .cloned() .ok_or_else(|| "Insufficient accounts for deployer".to_string()) - }) - .and_then(move |from_address| { - let future: Box + Send> = - if let Some(password) = password_opt { - // Unlock for only a single transaction. - let duration = None; + })?; - let future = web3_1 - .personal() - .unlock_account(from_address, &password, duration) - .then(move |result| match result { - Ok(true) => Ok(from_address), - Ok(false) => Err("Eth1 node refused to unlock account".to_string()), - Err(e) => Err(format!("Eth1 unlock request failed: {:?}", e)), - }); + let deploy_address = if let Some(password) = password_opt { + let result = web3 + .personal() + .unlock_account(from_address, &password, None) + .compat() + .await; + match result { + Ok(true) => from_address, + Ok(false) => return Err("Eth1 node refused to unlock account".to_string()), + Err(e) => return Err(format!("Eth1 unlock request failed: {:?}", e)), + } + } else { + from_address + }; - Box::new(future) - } else { - Box::new(future::ok(from_address)) - }; + let pending_contract = Contract::deploy(web3.eth(), &abi) + .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? 
+ .confirmations(confirmations) + .options(Options { + gas: Some(U256::from(CONTRACT_DEPLOY_GAS)), + ..Options::default() + }) + .execute(bytecode, (), deploy_address) + .map_err(|e| format!("Failed to execute deployment: {:?}", e))?; - future - }) - .and_then(move |deploy_address| { - Contract::deploy(web3.eth(), &abi) - .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? - .confirmations(confirmations) - .options(Options { - gas: Some(U256::from(CONTRACT_DEPLOY_GAS)), - ..Options::default() - }) - .execute(bytecode, (), deploy_address) - .map_err(|e| format!("Failed to execute deployment: {:?}", e)) - }) - .and_then(|pending_contract| { - pending_contract - .map(|contract| contract.address()) - .map_err(|e| format!("Unable to resolve pending contract: {:?}", e)) - }) + pending_contract + .compat() + .await + .map(|contract| contract.address()) + .map_err(|e| format!("Unable to resolve pending contract: {:?}", e)) } diff --git a/tests/node_test_rig/Cargo.toml b/tests/node_test_rig/Cargo.toml index 013cbe0274..b13fb767f6 100644 --- a/tests/node_test_rig/Cargo.toml +++ b/tests/node_test_rig/Cargo.toml @@ -9,11 +9,11 @@ environment = { path = "../../lighthouse/environment" } beacon_node = { path = "../../beacon_node" } types = { path = "../../eth2/types" } eth2_config = { path = "../../eth2/utils/eth2_config" } -tempdir = "0.3" -reqwest = "0.9" -url = "1.2" -serde = "1.0" -futures = "0.1.25" +tempdir = "0.3.7" +reqwest = "0.10.4" +url = "2.1.1" +serde = "1.0.110" +futures = "0.3.5" genesis = { path = "../../beacon_node/genesis" } remote_beacon_node = { path = "../../eth2/utils/remote_beacon_node" } validator_client = { path = "../../validator_client" } diff --git a/tests/node_test_rig/src/lib.rs b/tests/node_test_rig/src/lib.rs index e5b9245b1e..67347a640c 100644 --- a/tests/node_test_rig/src/lib.rs +++ b/tests/node_test_rig/src/lib.rs @@ -4,7 +4,6 @@ use beacon_node::ProductionBeaconNode; use environment::RuntimeContext; -use futures::Future; use 
std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; use tempdir::TempDir; @@ -29,10 +28,10 @@ impl LocalBeaconNode { /// Starts a new, production beacon node on the tokio runtime in the given `context`. /// /// The node created is using the same types as the node we use in production. - pub fn production( + pub async fn production( context: RuntimeContext, mut client_config: ClientConfig, - ) -> impl Future { + ) -> Result { // Creates a temporary directory that will be deleted once this `TempDir` is dropped. let datadir = TempDir::new("lighthouse_node_test_rig") .expect("should create temp directory for client datadir"); @@ -40,10 +39,12 @@ impl LocalBeaconNode { client_config.data_dir = datadir.path().into(); client_config.network.network_dir = PathBuf::from(datadir.path()).join("network"); - ProductionBeaconNode::new(context, client_config).map(move |client| Self { - client: client.into_inner(), - datadir, - }) + ProductionBeaconNode::new(context, client_config) + .await + .map(move |client| Self { + client: client.into_inner(), + datadir, + }) } } @@ -103,47 +104,49 @@ impl LocalValidatorClient { /// are created in a temp dir then removed when the process exits. /// /// The validator created is using the same types as the node we use in production. - pub fn production_with_insecure_keypairs( + pub async fn production_with_insecure_keypairs( context: RuntimeContext, mut config: ValidatorConfig, keypair_indices: &[usize], - ) -> impl Future { + ) -> Result { // Creates a temporary directory that will be deleted once this `TempDir` is dropped. let datadir = TempDir::new("lighthouse-beacon-node") .expect("should create temp directory for client datadir"); config.key_source = KeySource::InsecureKeypairs(keypair_indices.to_vec()); - Self::new(context, config, datadir) + Self::new(context, config, datadir).await } /// Creates a validator client that attempts to read keys from the default data dir. 
/// /// - The validator created is using the same types as the node we use in production. /// - It is recommended to use `production_with_insecure_keypairs` for testing. - pub fn production( + pub async fn production( context: RuntimeContext, config: ValidatorConfig, - ) -> impl Future { + ) -> Result { // Creates a temporary directory that will be deleted once this `TempDir` is dropped. let datadir = TempDir::new("lighthouse-validator") .expect("should create temp directory for client datadir"); - Self::new(context, config, datadir) + Self::new(context, config, datadir).await } - fn new( + async fn new( context: RuntimeContext, mut config: ValidatorConfig, datadir: TempDir, - ) -> impl Future { + ) -> Result { config.data_dir = datadir.path().into(); - ProductionValidatorClient::new(context, config).map(move |mut client| { - client - .start_service() - .expect("should start validator services"); - Self { client, datadir } - }) + ProductionValidatorClient::new(context, config) + .await + .map(move |mut client| { + client + .start_service() + .expect("should start validator services"); + Self { client, datadir } + }) } } diff --git a/tests/simulator/Cargo.toml b/tests/simulator/Cargo.toml index 65b6614705..36a3c3af26 100644 --- a/tests/simulator/Cargo.toml +++ b/tests/simulator/Cargo.toml @@ -10,9 +10,9 @@ edition = "2018" node_test_rig = { path = "../node_test_rig" } types = { path = "../../eth2/types" } validator_client = { path = "../../validator_client" } -parking_lot = "0.9.0" -futures = "0.1.29" -tokio = "0.1.22" +parking_lot = "0.10.2" +futures = "0.3.5" +tokio = "0.2.20" eth1_test_rig = { path = "../eth1_test_rig" } env_logger = "0.7.1" clap = "2.33.0" diff --git a/tests/simulator/src/checks.rs b/tests/simulator/src/checks.rs index 8cfb0aaa23..a63f1ee43f 100644 --- a/tests/simulator/src/checks.rs +++ b/tests/simulator/src/checks.rs @@ -1,136 +1,128 @@ use crate::local_network::LocalNetwork; -use futures::{stream, Future, IntoFuture, Stream}; -use 
std::time::{Duration, Instant}; -use tokio::timer::Delay; +use std::time::Duration; use types::{Epoch, EthSpec, Slot, Unsigned}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. -pub fn verify_initial_validator_count( +pub async fn verify_initial_validator_count( network: LocalNetwork, slot_duration: Duration, initial_validator_count: usize, -) -> impl Future { - slot_delay(Slot::new(1), slot_duration) - .and_then(move |()| verify_validator_count(network, initial_validator_count)) +) -> Result<(), String> { + slot_delay(Slot::new(1), slot_duration).await; + verify_validator_count(network, initial_validator_count).await?; + Ok(()) } /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. -pub fn verify_validator_onboarding( +pub async fn verify_validator_onboarding( network: LocalNetwork, slot_duration: Duration, expected_validator_count: usize, -) -> impl Future { +) -> Result<(), String> { slot_delay( Slot::new(E::SlotsPerEth1VotingPeriod::to_u64()), slot_duration, ) - .and_then(move |()| verify_validator_count(network, expected_validator_count)) + .await; + verify_validator_count(network, expected_validator_count).await?; + Ok(()) } /// Checks that the chain has made the first possible finalization. /// /// Intended to be run as soon as chain starts. -pub fn verify_first_finalization( +pub async fn verify_first_finalization( network: LocalNetwork, slot_duration: Duration, -) -> impl Future { - epoch_delay(Epoch::new(4), slot_duration, E::slots_per_epoch()) - .and_then(|()| verify_all_finalized_at(network, Epoch::new(2))) +) -> Result<(), String> { + epoch_delay(Epoch::new(4), slot_duration, E::slots_per_epoch()).await; + verify_all_finalized_at(network, Epoch::new(2)).await?; + Ok(()) } /// Delays for `epochs`, plus half a slot extra. 
-pub fn epoch_delay( - epochs: Epoch, - slot_duration: Duration, - slots_per_epoch: u64, -) -> impl Future { +pub async fn epoch_delay(epochs: Epoch, slot_duration: Duration, slots_per_epoch: u64) { let duration = slot_duration * (epochs.as_u64() * slots_per_epoch) as u32 + slot_duration / 2; - - Delay::new(Instant::now() + duration).map_err(|e| format!("Epoch delay failed: {:?}", e)) + tokio::time::delay_for(duration).await } /// Delays for `slots`, plus half a slot extra. -fn slot_delay(slots: Slot, slot_duration: Duration) -> impl Future { +async fn slot_delay(slots: Slot, slot_duration: Duration) { let duration = slot_duration * slots.as_u64() as u32 + slot_duration / 2; - - Delay::new(Instant::now() + duration).map_err(|e| format!("Epoch delay failed: {:?}", e)) + tokio::time::delay_for(duration).await; } /// Verifies that all beacon nodes in the given network have a head state that has a finalized /// epoch of `epoch`. -pub fn verify_all_finalized_at( +pub async fn verify_all_finalized_at( network: LocalNetwork, epoch: Epoch, -) -> impl Future { - network - .remote_nodes() - .into_future() - .and_then(|remote_nodes| { - stream::unfold(remote_nodes.into_iter(), |mut iter| { - iter.next().map(|remote_node| { - remote_node - .http - .beacon() - .get_head() - .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) - .map(|epoch| (epoch, iter)) - .map_err(|e| format!("Get head via http failed: {:?}", e)) - }) - }) - .collect() - }) - .and_then(move |epochs| { - if epochs.iter().any(|node_epoch| *node_epoch != epoch) { - Err(format!( - "Nodes are not finalized at epoch {}. Finalized epochs: {:?}", - epoch, epochs - )) - } else { - Ok(()) - } - }) +) -> Result<(), String> { + let epochs = { + let mut epochs = Vec::new(); + for remote_node in network.remote_nodes()? 
{ + epochs.push( + remote_node + .http + .beacon() + .get_head() + .await + .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) + .map_err(|e| format!("Get head via http failed: {:?}", e))?, + ); + } + epochs + }; + + if epochs.iter().any(|node_epoch| *node_epoch != epoch) { + Err(format!( + "Nodes are not finalized at epoch {}. Finalized epochs: {:?}", + epoch, epochs + )) + } else { + Ok(()) + } } /// Verifies that all beacon nodes in the given `network` have a head state that contains /// `expected_count` validators. -fn verify_validator_count( +async fn verify_validator_count( network: LocalNetwork, expected_count: usize, -) -> impl Future { - network - .remote_nodes() - .into_future() - .and_then(|remote_nodes| { - stream::unfold(remote_nodes.into_iter(), |mut iter| { - iter.next().map(|remote_node| { - let beacon = remote_node.http.beacon(); - beacon - .get_head() - .map_err(|e| format!("Get head via http failed: {:?}", e)) - .and_then(move |head| { - beacon - .get_state_by_root(head.state_root) - .map(|(state, _root)| state) - .map_err(|e| format!("Get state root via http failed: {:?}", e)) - }) - .map(|state| (state.validators.len(), iter)) - }) - }) - .collect() - }) - .and_then(move |validator_counts| { - if validator_counts - .iter() - .any(|count| *count != expected_count) - { - Err(format!( - "Nodes do not all have {} validators in their state. Validator counts: {:?}", - expected_count, validator_counts - )) - } else { - Ok(()) - } - }) +) -> Result<(), String> { + let validator_counts = { + let mut validator_counts = Vec::new(); + for remote_node in network.remote_nodes()? { + let beacon = remote_node.http.beacon(); + + let head = beacon + .get_head() + .await + .map_err(|e| format!("Get head via http failed: {:?}", e))?; + + let vc = beacon + .get_state_by_root(head.state_root) + .await + .map(|(state, _root)| state) + .map_err(|e| format!("Get state root via http failed: {:?}", e))? 
+ .validators + .len(); + validator_counts.push(vc); + } + validator_counts + }; + + if validator_counts + .iter() + .any(|count| *count != expected_count) + { + Err(format!( + "Nodes do not all have {} validators in their state. Validator counts: {:?}", + expected_count, validator_counts + )) + } else { + Ok(()) + } } diff --git a/tests/simulator/src/eth1_sim.rs b/tests/simulator/src/eth1_sim.rs index 3413701c5d..ee4d083137 100644 --- a/tests/simulator/src/eth1_sim.rs +++ b/tests/simulator/src/eth1_sim.rs @@ -1,13 +1,12 @@ use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; use eth1_test_rig::GanacheEth1Instance; -use futures::{future, stream, Future, Stream}; +use futures::prelude::*; use node_test_rig::{ environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, }; use std::net::{IpAddr, Ipv4Addr}; -use std::time::{Duration, Instant}; -use tokio::timer::Interval; +use std::time::Duration; pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); @@ -50,159 +49,123 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let deposit_amount = env.eth2_config.spec.max_effective_balance; let context = env.core_context(); - let executor = context.executor.clone(); - let future = GanacheEth1Instance::new() + let main_future = async { /* * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit * validators. 
*/ - .map(move |ganache_eth1_instance| { - let deposit_contract = ganache_eth1_instance.deposit_contract; - let ganache = ganache_eth1_instance.ganache; - let eth1_endpoint = ganache.endpoint(); - let deposit_contract_address = deposit_contract.address(); + let ganache_eth1_instance = GanacheEth1Instance::new().await?; + let deposit_contract = ganache_eth1_instance.deposit_contract; + let ganache = ganache_eth1_instance.ganache; + let eth1_endpoint = ganache.endpoint(); + let deposit_contract_address = deposit_contract.address(); - // Start a timer that produces eth1 blocks on an interval. - executor.spawn( - Interval::new(Instant::now(), eth1_block_time) - .map_err(|_| eprintln!("Eth1 block timer failed")) - .for_each(move |_| ganache.evm_mine().map_err(|_| ())) - .map_err(|_| eprintln!("Eth1 evm_mine failed")) - .map(|_| ()), - ); + // Start a timer that produces eth1 blocks on an interval. + tokio::spawn(async move { + let mut interval = tokio::time::interval(eth1_block_time); + while let Some(_) = interval.next().await { + let _ = ganache.evm_mine().await; + } + }); - // Submit deposits to the deposit contract. - executor.spawn( - stream::unfold(0..total_validator_count, move |mut iter| { - iter.next().map(|i| { - println!("Submitting deposit for validator {}...", i); - deposit_contract - .deposit_deterministic_async::(i, deposit_amount) - .map(|_| ((), iter)) - }) - }) - .collect() - .map(|_| ()) - .map_err(|e| eprintln!("Error submitting deposit: {}", e)), - ); + // Submit deposits to the deposit contract. 
+ tokio::spawn(async move { + for i in 0..total_validator_count { + println!("Submitting deposit for validator {}...", i); + let _ = deposit_contract + .deposit_deterministic_async::(i, deposit_amount) + .await; + } + }); - let mut beacon_config = testing_client_config(); + let mut beacon_config = testing_client_config(); - beacon_config.genesis = ClientGenesis::DepositContract; - beacon_config.eth1.endpoint = eth1_endpoint; - beacon_config.eth1.deposit_contract_address = deposit_contract_address; - beacon_config.eth1.deposit_contract_deploy_block = 0; - beacon_config.eth1.lowest_cached_block_number = 0; - beacon_config.eth1.follow_distance = 1; - beacon_config.dummy_eth1_backend = false; - beacon_config.sync_eth1_chain = true; + beacon_config.genesis = ClientGenesis::DepositContract; + beacon_config.eth1.endpoint = eth1_endpoint; + beacon_config.eth1.deposit_contract_address = deposit_contract_address; + beacon_config.eth1.deposit_contract_deploy_block = 0; + beacon_config.eth1.lowest_cached_block_number = 0; + beacon_config.eth1.follow_distance = 1; + beacon_config.dummy_eth1_backend = false; + beacon_config.sync_eth1_chain = true; - beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); - beacon_config - }) /* * Create a new `LocalNetwork` with one beacon node. */ - .and_then(move |beacon_config| { - LocalNetwork::new(context, beacon_config.clone()) - .map(|network| (network, beacon_config)) - }) + let network = LocalNetwork::new(context, beacon_config.clone()).await?; /* * One by one, add beacon nodes to the network. 
*/ - .and_then(move |(network, beacon_config)| { - let network_1 = network.clone(); - stream::unfold(0..node_count - 1, move |mut iter| { - iter.next().map(|_| { - network_1 - .add_beacon_node(beacon_config.clone()) - .map(|()| ((), iter)) - }) - }) - .collect() - .map(|_| network) - }) + for _ in 0..node_count - 1 { + network.add_beacon_node(beacon_config.clone()).await?; + } /* * One by one, add validator clients to the network. Each validator client is attached to * a single corresponding beacon node. */ - .and_then(move |network| { - let network_1 = network.clone(); - // Note: presently the validator client future will only resolve once genesis time - // occurs. This is great for this scenario, but likely to change in the future. - // - // If the validator client future behaviour changes, we would need to add a new future - // that delays until genesis. Otherwise, all of the checks that start in the next - // future will start too early. + // Note: presently the validator client future will only resolve once genesis time + // occurs. This is great for this scenario, but likely to change in the future. + // + // If the validator client future behaviour changes, we would need to add a new future + // that delays until genesis. Otherwise, all of the checks that start in the next + // future will start too early. - stream::unfold(0..node_count, move |mut iter| { - iter.next().map(|i| { - let indices = (i * validators_per_node..(i + 1) * validators_per_node) - .collect::>(); + for i in 0..node_count { + let indices = + (i * validators_per_node..(i + 1) * validators_per_node).collect::>(); + network + .add_validator_client(ValidatorConfig::default(), i, indices) + .await?; + } - network_1 - .add_validator_client(ValidatorConfig::default(), i, indices) - .map(|()| ((), iter)) - }) - }) - .collect() - .map(|_| network) - }) /* * Start the processes that will run checks on the network as it runs. 
*/ - .and_then(move |network| { - // The `final_future` either completes immediately or never completes, depending on the value - // of `end_after_checks`. - let final_future: Box + Send> = - if end_after_checks { - Box::new(future::ok(()).map_err(|()| "".to_string())) - } else { - Box::new(future::empty().map_err(|()| "".to_string())) - }; - future::ok(()) - // Check that the chain finalizes at the first given opportunity. - .join(checks::verify_first_finalization( - network.clone(), - slot_duration, - )) - // Check that the chain starts with the expected validator count. - .join(checks::verify_initial_validator_count( - network.clone(), - slot_duration, - initial_validator_count, - )) - // Check that validators greater than `spec.min_genesis_active_validator_count` are - // onboarded at the first possible opportunity. - .join(checks::verify_validator_onboarding( - network.clone(), - slot_duration, - total_validator_count, - )) - // End now or run forever, depending on the `end_after_checks` flag. - .join(final_future) - .map(|_| network) - }) + let _err = futures::join!( + // Check that the chain finalizes at the first given opportunity. + checks::verify_first_finalization(network.clone(), slot_duration), + // Check that the chain starts with the expected validator count. + checks::verify_initial_validator_count( + network.clone(), + slot_duration, + initial_validator_count, + ), + // Check that validators greater than `spec.min_genesis_active_validator_count` are + // onboarded at the first possible opportunity. + checks::verify_validator_onboarding( + network.clone(), + slot_duration, + total_validator_count, + ) + ); + + // The `final_future` either completes immediately or never completes, depending on the value + // of `end_after_checks`. + + if !end_after_checks { + future::pending::<()>().await; + } /* * End the simulation by dropping the network. This will kill all running beacon nodes and * validator clients. 
*/ - .map(|network| { - println!( - "Simulation complete. Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), - network.validator_client_count() - ); + println!( + "Simulation complete. Finished with {} beacon nodes and {} validator clients", + network.beacon_node_count(), + network.validator_client_count() + ); - // Be explicit about dropping the network, as this kills all the nodes. This ensures - // all the checks have adequate time to pass. - drop(network) - }); + // Be explicit about dropping the network, as this kills all the nodes. This ensures + // all the checks have adequate time to pass. + drop(network); + Ok::<(), String>(()) + }; - env.runtime().block_on(future) + Ok(env.runtime().block_on(main_future).unwrap()) } diff --git a/tests/simulator/src/local_network.rs b/tests/simulator/src/local_network.rs index 22ffc4d369..61699a66e2 100644 --- a/tests/simulator/src/local_network.rs +++ b/tests/simulator/src/local_network.rs @@ -1,4 +1,3 @@ -use futures::{Future, IntoFuture}; use node_test_rig::{ environment::RuntimeContext, ClientConfig, LocalBeaconNode, LocalValidatorClient, RemoteBeaconNode, ValidatorConfig, @@ -42,23 +41,24 @@ impl Deref for LocalNetwork { impl LocalNetwork { /// Creates a new network with a single `BeaconNode`. 
- pub fn new( + pub async fn new( context: RuntimeContext, mut beacon_config: ClientConfig, - ) -> impl Future { + ) -> Result { beacon_config.network.discovery_port = BOOTNODE_PORT; beacon_config.network.libp2p_port = BOOTNODE_PORT; beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT); beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT); - LocalBeaconNode::production(context.service_context("boot_node".into()), beacon_config).map( - |beacon_node| Self { - inner: Arc::new(Inner { - context, - beacon_nodes: RwLock::new(vec![beacon_node]), - validator_clients: RwLock::new(vec![]), - }), - }, - ) + let beacon_node = + LocalBeaconNode::production(context.service_context("boot_node".into()), beacon_config) + .await?; + Ok(Self { + inner: Arc::new(Inner { + context, + beacon_nodes: RwLock::new(vec![beacon_node]), + validator_clients: RwLock::new(vec![]), + }), + }) } /// Returns the number of beacon nodes in the network. @@ -78,72 +78,65 @@ impl LocalNetwork { } /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. 
- pub fn add_beacon_node( - &self, - mut beacon_config: ClientConfig, - ) -> impl Future { + pub async fn add_beacon_node(&self, mut beacon_config: ClientConfig) -> Result<(), String> { let self_1 = self.clone(); println!("Adding beacon node.."); - self.beacon_nodes - .read() - .first() - .map(|boot_node| { - beacon_config.network.boot_nodes.push( - boot_node - .client - .enr() - .expect("bootnode must have a network"), - ); - }) - .expect("should have at least one node"); + { + let read_lock = self.beacon_nodes.read(); + + let boot_node = read_lock.first().expect("should have at least one node"); + + beacon_config.network.boot_nodes.push( + boot_node + .client + .enr() + .expect("bootnode must have a network"), + ); + } let index = self.beacon_nodes.read().len(); - LocalBeaconNode::production( + let beacon_node = LocalBeaconNode::production( self.context.service_context(format!("node_{}", index)), beacon_config, ) - .map(move |beacon_node| { - self_1.beacon_nodes.write().push(beacon_node); - }) + .await?; + self_1.beacon_nodes.write().push(beacon_node); + Ok(()) } /// Adds a validator client to the network, connecting it to the beacon node with index /// `beacon_node`. 
- pub fn add_validator_client( + pub async fn add_validator_client( &self, mut validator_config: ValidatorConfig, beacon_node: usize, keypair_indices: Vec, - ) -> impl Future { + ) -> Result<(), String> { let index = self.validator_clients.read().len(); let context = self.context.service_context(format!("validator_{}", index)); let self_1 = self.clone(); + let socket_addr = { + let read_lock = self.beacon_nodes.read(); + let beacon_node = read_lock + .get(beacon_node) + .ok_or_else(|| format!("No beacon node for index {}", beacon_node))?; + beacon_node + .client + .http_listen_addr() + .expect("Must have http started") + }; - self.beacon_nodes - .read() - .get(beacon_node) - .map(move |beacon_node| { - let socket_addr = beacon_node - .client - .http_listen_addr() - .expect("Must have http started"); - - validator_config.http_server = - format!("http://{}:{}", socket_addr.ip(), socket_addr.port()); - - validator_config - }) - .ok_or_else(|| format!("No beacon node for index {}", beacon_node)) - .into_future() - .and_then(move |validator_config| { - LocalValidatorClient::production_with_insecure_keypairs( - context, - validator_config, - &keypair_indices, - ) - }) - .map(move |validator_client| self_1.validator_clients.write().push(validator_client)) + validator_config.http_server = + format!("http://{}:{}", socket_addr.ip(), socket_addr.port()); + let validator_client = LocalValidatorClient::production_with_insecure_keypairs( + context, + validator_config, + &keypair_indices, + ) + .await?; + self_1.validator_clients.write().push(validator_client); + Ok(()) } /// For all beacon nodes in `Self`, return a HTTP client to access each nodes HTTP API. @@ -157,13 +150,14 @@ impl LocalNetwork { } /// Return current epoch of bootnode. 
- pub fn bootnode_epoch(&self) -> impl Future { + pub async fn bootnode_epoch(&self) -> Result { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); bootnode .http .beacon() .get_head() + .await .map_err(|e| format!("Cannot get head: {:?}", e)) .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) } diff --git a/tests/simulator/src/no_eth1_sim.rs b/tests/simulator/src/no_eth1_sim.rs index b4d233909f..55b19223f8 100644 --- a/tests/simulator/src/no_eth1_sim.rs +++ b/tests/simulator/src/no_eth1_sim.rs @@ -1,6 +1,6 @@ use crate::{checks, LocalNetwork}; use clap::ArgMatches; -use futures::{future, stream, Future, Stream}; +use futures::prelude::*; use node_test_rig::{ environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, }; @@ -63,88 +63,61 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); - let future = LocalNetwork::new(context, beacon_config.clone()) + let main_future = async { + let network = LocalNetwork::new(context, beacon_config.clone()).await?; /* * One by one, add beacon nodes to the network. */ - .and_then(move |network| { - let network_1 = network.clone(); - stream::unfold(0..node_count - 1, move |mut iter| { - iter.next().map(|_| { - network_1 - .add_beacon_node(beacon_config.clone()) - .map(|()| ((), iter)) - }) - }) - .collect() - .map(|_| network) - }) + for _ in 0..node_count - 1 { + network.add_beacon_node(beacon_config.clone()).await?; + } /* * One by one, add validator clients to the network. Each validator client is attached to * a single corresponding beacon node. */ - .and_then(move |network| { - let network_1 = network.clone(); - // Note: presently the validator client future will only resolve once genesis time - // occurs. This is great for this scenario, but likely to change in the future. 
- // - // If the validator client future behaviour changes, we would need to add a new future - // that delays until genesis. Otherwise, all of the checks that start in the next - // future will start too early. + // Note: presently the validator client future will only resolve once genesis time + // occurs. This is great for this scenario, but likely to change in the future. + // + // If the validator client future behaviour changes, we would need to add a new future + // that delays until genesis. Otherwise, all of the checks that start in the next + // future will start too early. - stream::unfold(0..node_count, move |mut iter| { - iter.next().map(|i| { - let indices = (i * validators_per_node..(i + 1) * validators_per_node) - .collect::>(); - - network_1 - .add_validator_client(ValidatorConfig::default(), i, indices) - .map(|()| ((), iter)) - }) - }) - .collect() - .map(|_| network) - }) + for i in 0..node_count { + let indices = + (i * validators_per_node..(i + 1) * validators_per_node).collect::>(); + network + .add_validator_client(ValidatorConfig::default(), i, indices) + .await?; + } /* * Start the processes that will run checks on the network as it runs. */ - .and_then(move |network| { - // The `final_future` either completes immediately or never completes, depending on the value - // of `end_after_checks`. - let final_future: Box + Send> = - if end_after_checks { - Box::new(future::ok(()).map_err(|()| "".to_string())) - } else { - Box::new(future::empty().map_err(|()| "".to_string())) - }; + // Check that the chain finalizes at the first given opportunity. + checks::verify_first_finalization(network.clone(), slot_duration).await?; - future::ok(()) - // Check that the chain finalizes at the first given opportunity. - .join(checks::verify_first_finalization( - network.clone(), - slot_duration, - )) - // End now or run forever, depending on the `end_after_checks` flag. 
- .join(final_future) - .map(|_| network) - }) + // The `final_future` either completes immediately or never completes, depending on the value + // of `end_after_checks`. + + if !end_after_checks { + future::pending::<()>().await; + } /* * End the simulation by dropping the network. This will kill all running beacon nodes and * validator clients. */ - .map(|network| { - println!( - "Simulation complete. Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), - network.validator_client_count() - ); + println!( + "Simulation complete. Finished with {} beacon nodes and {} validator clients", + network.beacon_node_count(), + network.validator_client_count() + ); - // Be explicit about dropping the network, as this kills all the nodes. This ensures - // all the checks have adequate time to pass. - drop(network) - }); + // Be explicit about dropping the network, as this kills all the nodes. This ensures + // all the checks have adequate time to pass. + drop(network); + Ok::<(), String>(()) + }; - env.runtime().block_on(future) + Ok(env.runtime().block_on(main_future).unwrap()) } diff --git a/tests/simulator/src/sync_sim.rs b/tests/simulator/src/sync_sim.rs index 16a62fc327..ccf11e3ac1 100644 --- a/tests/simulator/src/sync_sim.rs +++ b/tests/simulator/src/sync_sim.rs @@ -1,14 +1,13 @@ use crate::checks::{epoch_delay, verify_all_finalized_at}; use crate::local_network::LocalNetwork; use clap::ArgMatches; -use futures::{future, stream, Future, IntoFuture, Stream}; +use futures::prelude::*; use node_test_rig::ClientConfig; use node_test_rig::{ environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, }; use std::net::{IpAddr, Ipv4Addr}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use tokio::timer::Interval; use types::{Epoch, EthSpec}; pub fn run_syncing_sim(matches: &ArgMatches) -> Result<(), String> { @@ -78,110 +77,118 @@ fn syncing_sim( beacon_config.network.enr_address = 
Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); - let future = LocalNetwork::new(context, beacon_config.clone()) + let main_future = async { + /* + * Create a new `LocalNetwork` with one beacon node. + */ + let network = LocalNetwork::new(context, beacon_config.clone()).await?; + /* * Add a validator client which handles all validators from the genesis state. */ - .and_then(move |network| { - network - .add_validator_client(ValidatorConfig::default(), 0, (0..num_validators).collect()) - .map(|_| network) - }) - /* - * Start the processes that will run checks on the network as it runs. - */ - .and_then(move |network| { - // The `final_future` either completes immediately or never completes, depending on the value - // of `end_after_checks`. - let final_future: Box + Send> = - if end_after_checks { - Box::new(future::ok(()).map_err(|()| "".to_string())) - } else { - Box::new(future::empty().map_err(|()| "".to_string())) - }; + network + .add_validator_client(ValidatorConfig::default(), 0, (0..num_validators).collect()) + .await?; + + // Check all syncing strategies one after other. + pick_strategy( + &strategy, + network.clone(), + beacon_config.clone(), + slot_duration, + initial_delay, + sync_timeout, + ) + .await?; + + // The `final_future` either completes immediately or never completes, depending on the value + // of `end_after_checks`. + + if !end_after_checks { + future::pending::<()>().await; + } - future::ok(()) - // Check all syncing strategies one after other. - .join(pick_strategy( - &strategy, - network.clone(), - beacon_config.clone(), - slot_duration, - initial_delay, - sync_timeout, - )) - .join(final_future) - .map(|_| network) - }) /* * End the simulation by dropping the network. This will kill all running beacon nodes and * validator clients. */ - .map(|network| { - println!( - "Simulation complete. 
Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), - network.validator_client_count() - ); + println!( + "Simulation complete. Finished with {} beacon nodes and {} validator clients", + network.beacon_node_count(), + network.validator_client_count() + ); - // Be explicit about dropping the network, as this kills all the nodes. This ensures - // all the checks have adequate time to pass. - drop(network) - }); + // Be explicit about dropping the network, as this kills all the nodes. This ensures + // all the checks have adequate time to pass. + drop(network); + Ok::<(), String>(()) + }; - env.runtime().block_on(future) + env.runtime().block_on(main_future) } -pub fn pick_strategy( +pub async fn pick_strategy( strategy: &str, network: LocalNetwork, beacon_config: ClientConfig, slot_duration: Duration, initial_delay: u64, sync_timeout: u64, -) -> Box + Send + 'static> { +) -> Result<(), String> { match strategy { - "one-node" => Box::new(verify_one_node_sync( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - )), - "two-nodes" => Box::new(verify_two_nodes_sync( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - )), - "mixed" => Box::new(verify_in_between_sync( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - )), - "all" => Box::new(verify_syncing( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - )), - _ => Box::new(Err("Invalid strategy".into()).into_future()), + "one-node" => { + verify_one_node_sync( + network, + beacon_config, + slot_duration, + initial_delay, + sync_timeout, + ) + .await + } + "two-nodes" => { + verify_two_nodes_sync( + network, + beacon_config, + slot_duration, + initial_delay, + sync_timeout, + ) + .await + } + "mixed" => { + verify_in_between_sync( + network, + beacon_config, + slot_duration, + initial_delay, + sync_timeout, + ) + .await + } + "all" => { + verify_syncing( + network, + 
beacon_config, + slot_duration, + initial_delay, + sync_timeout, + ) + .await + } + _ => Err("Invalid strategy".into()), } } /// Verify one node added after `initial_delay` epochs is in sync /// after `sync_timeout` epochs. -pub fn verify_one_node_sync( +pub async fn verify_one_node_sync( network: LocalNetwork, beacon_config: ClientConfig, slot_duration: Duration, initial_delay: u64, sync_timeout: u64, -) -> impl Future { +) -> Result<(), String> { let epoch_duration = slot_duration * (E::slots_per_epoch() as u32); let network_c = network.clone(); // Delay for `initial_delay` epochs before adding another node to start syncing @@ -190,35 +197,34 @@ pub fn verify_one_node_sync( slot_duration, E::slots_per_epoch(), ) - .and_then(move |_| { - // Add a beacon node - network.add_beacon_node(beacon_config).map(|_| network) - }) - .and_then(move |network| { - // Check every `epoch_duration` if nodes are synced - // limited to at most `sync_timeout` epochs - Interval::new_interval(epoch_duration) - .take(sync_timeout) - .map_err(|_| "Failed to create interval".to_string()) - .take_while(move |_| check_still_syncing(&network_c)) - .for_each(|_| Ok(())) // consume the stream - .map(|_| network) - }) - .and_then(move |network| network.bootnode_epoch().map(|e| (e, network))) - .and_then(move |(epoch, network)| { - verify_all_finalized_at(network, epoch).map_err(|e| format!("One node sync error: {}", e)) - }) + .await; + // Add a beacon node + network.add_beacon_node(beacon_config).await?; + // Check every `epoch_duration` if nodes are synced + // limited to at most `sync_timeout` epochs + let mut interval = tokio::time::interval(epoch_duration); + let mut count = 0; + while let Some(_) = interval.next().await { + if count >= sync_timeout || !check_still_syncing(&network_c).await? 
{ + break; + } + count += 1; + } + let epoch = network.bootnode_epoch().await?; + verify_all_finalized_at(network, epoch) + .map_err(|e| format!("One node sync error: {}", e)) + .await } /// Verify two nodes added after `initial_delay` epochs are in sync /// after `sync_timeout` epochs. -pub fn verify_two_nodes_sync( +pub async fn verify_two_nodes_sync( network: LocalNetwork, beacon_config: ClientConfig, slot_duration: Duration, initial_delay: u64, sync_timeout: u64, -) -> impl Future { +) -> Result<(), String> { let epoch_duration = slot_duration * (E::slots_per_epoch() as u32); let network_c = network.clone(); // Delay for `initial_delay` epochs before adding another node to start syncing @@ -227,41 +233,36 @@ pub fn verify_two_nodes_sync( slot_duration, E::slots_per_epoch(), ) - .and_then(move |_| { - // Add beacon nodes - network - .add_beacon_node(beacon_config.clone()) - .map(|_| (network, beacon_config)) - .and_then(|(network, beacon_config)| { - network.add_beacon_node(beacon_config).map(|_| network) - }) - }) - .and_then(move |network| { - // Check every `epoch_duration` if nodes are synced - // limited to at most `sync_timeout` epochs - Interval::new_interval(epoch_duration) - .take(sync_timeout) - .map_err(|_| "Failed to create interval".to_string()) - .take_while(move |_| check_still_syncing(&network_c)) - .for_each(|_| Ok(())) // consume the stream - .map(|_| network) - }) - .and_then(move |network| network.bootnode_epoch().map(|e| (e, network))) - .and_then(move |(epoch, network)| { - verify_all_finalized_at(network, epoch).map_err(|e| format!("Two node sync error: {}", e)) - }) + .await; + // Add beacon nodes + network.add_beacon_node(beacon_config.clone()).await?; + network.add_beacon_node(beacon_config).await?; + // Check every `epoch_duration` if nodes are synced + // limited to at most `sync_timeout` epochs + let mut interval = tokio::time::interval(epoch_duration); + let mut count = 0; + while let Some(_) = interval.next().await { + if count >= 
sync_timeout || !check_still_syncing(&network_c).await? { + break; + } + count += 1; + } + let epoch = network.bootnode_epoch().await?; + verify_all_finalized_at(network, epoch) + .map_err(|e| format!("Two node sync error: {}", e)) + .await } /// Add 2 syncing nodes after `initial_delay` epochs, /// Add another node after `sync_timeout - 5` epochs and verify all are /// in sync after `sync_timeout + 5` epochs. -pub fn verify_in_between_sync( +pub async fn verify_in_between_sync( network: LocalNetwork, beacon_config: ClientConfig, slot_duration: Duration, initial_delay: u64, sync_timeout: u64, -) -> impl Future { +) -> Result<(), String> { let epoch_duration = slot_duration * (E::slots_per_epoch() as u32); let network_c = network.clone(); // Delay for `initial_delay` epochs before adding another node to start syncing @@ -271,52 +272,43 @@ ) - .and_then(move |_| { - // Add a beacon node - network - .add_beacon_node(beacon_config.clone()) - .map(|_| (network, beacon_config)) - .and_then(|(network, beacon_config)| { - network.add_beacon_node(beacon_config).map(|_| network) - }) - }) - .and_then(move |network| { - // Delay before adding additional syncing nodes. 
- epoch_delay( - Epoch::new(sync_timeout - 5), - slot_duration, - E::slots_per_epoch(), - ) - .map(|_| network) - }) - .and_then(move |network| { - // Add a beacon node - network.add_beacon_node(config1.clone()).map(|_| network) - }) - .and_then(move |network| { - // Check every `epoch_duration` if nodes are synced - // limited to at most `sync_timeout` epochs - Interval::new_interval(epoch_duration) - .take(sync_timeout + 5) - .map_err(|_| "Failed to create interval".to_string()) - .take_while(move |_| check_still_syncing(&network_c)) - .for_each(|_| Ok(())) // consume the stream - .map(|_| network) - }) - .and_then(move |network| network.bootnode_epoch().map(|e| (e, network))) - .and_then(move |(epoch, network)| { - verify_all_finalized_at(network, epoch).map_err(|e| format!("In between sync error: {}", e)) - }) + .await; + // Add two beacon nodes + network.add_beacon_node(beacon_config.clone()).await?; + network.add_beacon_node(beacon_config).await?; + // Delay before adding additional syncing nodes. + epoch_delay( + Epoch::new(sync_timeout - 5), + slot_duration, + E::slots_per_epoch(), + ) + .await; + // Add a beacon node + network.add_beacon_node(config1.clone()).await?; + // Check every `epoch_duration` if nodes are synced + // limited to at most `sync_timeout` epochs + let mut interval = tokio::time::interval(epoch_duration); + let mut count = 0; + while let Some(_) = interval.next().await { + if count >= sync_timeout + 5 || !check_still_syncing(&network_c).await? { + break; + } + count += 1; + } + let epoch = network.bootnode_epoch().await?; + verify_all_finalized_at(network, epoch) + .map_err(|e| format!("In between sync error: {}", e)) + .await } /// Run syncing strategies one after other. 
-pub fn verify_syncing( +pub async fn verify_syncing( network: LocalNetwork, beacon_config: ClientConfig, slot_duration: Duration, initial_delay: u64, sync_timeout: u64, -) -> impl Future { +) -> Result<(), String> { verify_one_node_sync( network.clone(), beacon_config.clone(), @@ -324,53 +316,42 @@ pub fn verify_syncing( initial_delay, sync_timeout, ) - .map(|_| println!("Completed one node sync")) - .and_then(move |_| { - verify_two_nodes_sync( - network.clone(), - beacon_config.clone(), - slot_duration, - initial_delay, - sync_timeout, - ) - .map(|_| { - println!("Completed two node sync"); - (network, beacon_config) - }) - }) - .and_then(move |(network, beacon_config)| { - verify_in_between_sync( - network, - beacon_config, - slot_duration, - initial_delay, - sync_timeout, - ) - .map(|_| println!("Completed in between sync")) - }) + .await?; + println!("Completed one node sync"); + verify_two_nodes_sync( + network.clone(), + beacon_config.clone(), + slot_duration, + initial_delay, + sync_timeout, + ) + .await?; + println!("Completed two node sync"); + verify_in_between_sync( + network, + beacon_config, + slot_duration, + initial_delay, + sync_timeout, + ) + .await?; + println!("Completed in between sync"); + Ok(()) } -pub fn check_still_syncing( - network: &LocalNetwork, -) -> impl Future { - network - .remote_nodes() - .into_future() - // get syncing status of nodes - .and_then(|remote_nodes| { - stream::unfold(remote_nodes.into_iter(), |mut iter| { - iter.next().map(|remote_node| { - remote_node - .http - .node() - .syncing_status() - .map(|status| status.is_syncing) - .map(|status| (status, iter)) - .map_err(|e| format!("Get syncing status via http failed: {:?}", e)) - }) - }) - .collect() - }) - .and_then(move |status| Ok(status.iter().any(|is_syncing| *is_syncing))) - .map_err(|e| format!("Failed syncing check: {:?}", e)) +pub async fn check_still_syncing(network: &LocalNetwork) -> Result { + // get syncing status of nodes + let mut status = Vec::new(); + 
for remote_node in network.remote_nodes()? { + status.push( + remote_node + .http + .node() + .syncing_status() + .await + .map(|status| status.is_syncing) + .map_err(|e| format!("Get syncing status via http failed: {:?}", e))?, + ) + } + Ok(status.iter().any(|is_syncing| *is_syncing)) } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 68dfc906d0..f8c8bc1fb2 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -8,6 +8,9 @@ edition = "2018" name = "validator_client" path = "src/lib.rs" +[dev-dependencies] +tokio = {version = "0.2.20", features = ["time", "rt-threaded", "macros"]} + [dependencies] eth2_ssz = "0.1.2" eth2_config = { path = "../eth2/utils/eth2_config" } @@ -17,28 +20,27 @@ eth2_interop_keypairs = { path = "../eth2/utils/eth2_interop_keypairs" } slot_clock = { path = "../eth2/utils/slot_clock" } rest_types = { path = "../eth2/utils/rest_types" } types = { path = "../eth2/types" } -serde = "1.0.102" -serde_derive = "1.0.102" -serde_json = "1.0.41" +serde = "1.0.110" +serde_derive = "1.0.110" +serde_json = "1.0.52" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } -slog-async = "2.3.0" -slog-term = "2.4.2" -tokio = "0.1.22" -tokio-timer = "0.2.12" -error-chain = "0.12.1" -bincode = "1.2.0" -futures = "0.1.29" +slog-async = "2.5.0" +slog-term = "2.5.0" +tokio = {version = "0.2.20", features = ["time"]} +error-chain = "0.12.2" +bincode = "1.2.1" +futures = { version = "0.3.5", features = ["compat"] } dirs = "2.0.2" logging = { path = "../eth2/utils/logging" } environment = { path = "../lighthouse/environment" } -parking_lot = "0.7" -exit-future = "0.1.4" -libc = "0.2.65" -eth2_ssz_derive = { path = "../eth2/utils/ssz_derive" } -hex = "0.3" +parking_lot = "0.10.2" +exit-future = "0.2.0" +libc = "0.2.69" +eth2_ssz_derive = "0.1.0" +hex = "0.4.2" deposit_contract = { path = "../eth2/utils/deposit_contract" } bls = { path = "../eth2/utils/bls" } remote_beacon_node = { 
path = "../eth2/utils/remote_beacon_node" } -tempdir = "0.3" -rayon = "1.2.0" +tempdir = "0.3.7" +rayon = "1.3.0" web3 = "0.10.0" diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 82e1a5745a..f4cfdd7dd7 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -4,15 +4,14 @@ use crate::{ }; use environment::RuntimeContext; use exit_future::Signal; -use futures::{future, Future, Stream}; +use futures::{FutureExt, StreamExt}; use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use slog::{crit, debug, info, trace}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; -use std::time::{Duration, Instant}; -use tokio::timer::{Delay, Interval}; +use tokio::time::{delay_until, interval_at, Duration, Instant}; use types::{Attestation, ChainSpec, CommitteeIndex, EthSpec, Slot}; /// Builds an `AttestationService`. @@ -119,9 +118,8 @@ impl Deref for AttestationService { impl AttestationService { /// Starts the service which periodically produces attestations. 
- pub fn start_update_service(&self, spec: &ChainSpec) -> Result { - let context = &self.context; - let log = context.log.clone(); + pub fn start_update_service(self, spec: &ChainSpec) -> Result { + let log = self.context.log.clone(); let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); let duration_to_next_slot = self @@ -129,49 +127,48 @@ impl AttestationService { .duration_to_next_slot() .ok_or_else(|| "Unable to determine duration to next slot".to_string())?; - let interval = { - Interval::new( + info!( + log, + "Attestation production service started"; + "next_update_millis" => duration_to_next_slot.as_millis() + ); + + let mut interval = { + // Note: `interval_at` panics if `slot_duration` is 0 + interval_at( Instant::now() + duration_to_next_slot + slot_duration / 3, slot_duration, ) }; let (exit_signal, exit_fut) = exit_future::signal(); - let service = self.clone(); - let log_1 = log.clone(); - let log_2 = log.clone(); - let log_3 = log.clone(); - context.executor.spawn( - exit_fut - .until( - interval - .map_err(move |e| { - crit! 
{ - log_1, - "Timer thread failed"; - "error" => format!("{}", e) - } - }) - .for_each(move |_| { - if let Err(e) = service.spawn_attestation_tasks(slot_duration) { - crit!( - log_2, - "Failed to spawn attestation tasks"; - "error" => e - ) - } else { - trace!( - log_2, - "Spawned attestation tasks"; - ) - } + let runtime_handle = self.context.runtime_handle.clone(); - Ok(()) - }), - ) - .map(move |_| info!(log_3, "Shutdown complete")), + let interval_fut = async move { + while interval.next().await.is_some() { + let log = &self.context.log; + + if let Err(e) = self.spawn_attestation_tasks(slot_duration) { + crit!( + log, + "Failed to spawn attestation tasks"; + "error" => e + ) + } else { + trace!( + log, + "Spawned attestation tasks"; + ) + } + } + }; + + let future = futures::future::select( + Box::pin(interval_fut), + exit_fut.map(move |_| info!(log, "Shutdown complete")), ); + runtime_handle.spawn(future); Ok(exit_signal) } @@ -179,13 +176,11 @@ impl AttestationService { /// For each each required attestation, spawn a new task that downloads, signs and uploads the /// attestation to the beacon node. fn spawn_attestation_tasks(&self, slot_duration: Duration) -> Result<(), String> { - let service = self.clone(); - - let slot = service + let slot = self .slot_clock .now() .ok_or_else(|| "Failed to read slot clock".to_string())?; - let duration_to_next_slot = service + let duration_to_next_slot = self .slot_clock .duration_to_next_slot() .ok_or_else(|| "Unable to determine duration to next slot".to_string())?; @@ -197,7 +192,7 @@ impl AttestationService { .checked_sub(slot_duration / 3) .unwrap_or_else(|| Duration::from_secs(0)); - let duties_by_committee_index: HashMap> = service + let duties_by_committee_index: HashMap> = self .duties_service .attesters(slot) .into_iter() @@ -219,15 +214,14 @@ impl AttestationService { .into_iter() .for_each(|(committee_index, validator_duties)| { // Spawn a separate task for each attestation. 
- service - .context - .executor - .spawn(self.clone().publish_attestations_and_aggregates( + self.inner.context.runtime_handle.spawn( + self.clone().publish_attestations_and_aggregates( slot, committee_index, validator_duties, aggregate_production_instant, - )); + ), + ); }); Ok(()) @@ -242,75 +236,64 @@ impl AttestationService { /// /// The given `validator_duties` should already be filtered to only contain those that match /// `slot` and `committee_index`. Critical errors will be logged if this is not the case. - fn publish_attestations_and_aggregates( - &self, + async fn publish_attestations_and_aggregates( + self, slot: Slot, committee_index: CommitteeIndex, validator_duties: Vec, aggregate_production_instant: Instant, - ) -> Box + Send> { + ) -> Result<(), ()> { + let log = &self.context.log; + // There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have // any validators for the given `slot` and `committee_index`. if validator_duties.is_empty() { - return Box::new(future::ok(())); + return Ok(()); } - let service_1 = self.clone(); - let log_1 = self.context.log.clone(); - let validator_duties_1 = Arc::new(validator_duties); - let validator_duties_2 = validator_duties_1.clone(); - - Box::new( - // Step 1. - // - // Download, sign and publish an `Attestation` for each validator. - self.produce_and_publish_attestations(slot, committee_index, validator_duties_1) - .and_then::<_, Box + Send>>( - move |attestation_opt| { - if let Some(attestation) = attestation_opt { - Box::new( - // Step 2. (Only if step 1 produced an attestation) - // - // First, wait until the `aggregation_production_instant` (2/3rds - // of the way though the slot). As verified in the - // `delay_triggers_when_in_the_past` test, this code will still run - // even if the instant has already elapsed. - // - // Then download, sign and publish a `SignedAggregateAndProof` for each - // validator that is elected to aggregate for this `slot` and - // `committee_index`. 
- Delay::new(aggregate_production_instant) - .map_err(|e| { - format!( - "Unable to create aggregate production delay: {:?}", - e - ) - }) - .and_then(move |()| { - service_1.produce_and_publish_aggregates( - attestation, - validator_duties_2, - ) - }), - ) - } else { - // If `produce_and_publish_attestations` did not download any - // attestations then there is no need to produce any - // `SignedAggregateAndProof`. - Box::new(future::ok(())) - } - }, + // Step 1. + // + // Download, sign and publish an `Attestation` for each validator. + let attestation_opt = self + .produce_and_publish_attestations(slot, committee_index, &validator_duties) + .await + .map_err(move |e| { + crit!( + log, + "Error during attestation routine"; + "error" => format!("{:?}", e), + "committee_index" => committee_index, + "slot" => slot.as_u64(), ) + })?; + + // Step 2. + // + // If an attestation was produced, make an aggregate. + if let Some(attestation) = attestation_opt { + // First, wait until the `aggregation_production_instant` (2/3rds + // of the way though the slot). As verified in the + // `delay_triggers_when_in_the_past` test, this code will still run + // even if the instant has already elapsed. + delay_until(aggregate_production_instant).await; + + // Then download, sign and publish a `SignedAggregateAndProof` for each + // validator that is elected to aggregate for this `slot` and + // `committee_index`. + self.produce_and_publish_aggregates(attestation, &validator_duties) + .await .map_err(move |e| { crit!( - log_1, + log, "Error during attestation routine"; "error" => format!("{:?}", e), "committee_index" => committee_index, "slot" => slot.as_u64(), ) - }), - ) + })?; + } + + Ok(()) } /// Performs the first step of the attesting process: downloading `Attestation` objects, @@ -325,139 +308,132 @@ impl AttestationService { /// /// Only one `Attestation` is downloaded from the BN. 
It is then cloned and signed by each /// validator and the list of individually-signed `Attestation` objects is returned to the BN. - fn produce_and_publish_attestations( + async fn produce_and_publish_attestations( &self, slot: Slot, committee_index: CommitteeIndex, - validator_duties: Arc>, - ) -> Box>, Error = String> + Send> { + validator_duties: &[DutyAndProof], + ) -> Result>, String> { + let log = &self.context.log; + if validator_duties.is_empty() { - return Box::new(future::ok(None)); + return Ok(None); } - let service = self.clone(); + let attestation = self + .beacon_node + .http + .validator() + .produce_attestation(slot, committee_index) + .await + .map_err(|e| format!("Failed to produce attestation: {:?}", e))?; + + // For each validator in `validator_duties`, clone the `attestation` and add + // their signature. + // + // If any validator is unable to sign, they are simply skipped. + let signed_attestations = validator_duties + .iter() + .filter_map(|duty| { + // Ensure that all required fields are present in the validator duty. + let (duty_slot, duty_committee_index, validator_committee_position, _) = + if let Some(tuple) = duty.attestation_duties() { + tuple + } else { + crit!( + log, + "Missing validator duties when signing"; + "duties" => format!("{:?}", duty) + ); + return None; + }; + + // Ensure that the attestation matches the duties. 
+ if duty_slot != attestation.data.slot + || duty_committee_index != attestation.data.index + { + crit!( + log, + "Inconsistent validator duties during signing"; + "validator" => format!("{:?}", duty.validator_pubkey()), + "duty_slot" => duty_slot, + "attestation_slot" => attestation.data.slot, + "duty_index" => duty_committee_index, + "attestation_index" => attestation.data.index, + ); + return None; + } + + let mut attestation = attestation.clone(); + + if self + .validator_store + .sign_attestation( + duty.validator_pubkey(), + validator_committee_position, + &mut attestation, + ) + .is_none() + { + crit!( + log, + "Attestation signing refused"; + "validator" => format!("{:?}", duty.validator_pubkey()), + "slot" => attestation.data.slot, + "index" => attestation.data.index, + ); + None + } else { + Some(attestation) + } + }) + .collect::>(); + + // If there are any signed attestations, publish them to the BN. Otherwise, + // just return early. + if let Some(attestation) = signed_attestations.first().cloned() { + let num_attestations = signed_attestations.len(); + let beacon_block_root = attestation.data.beacon_block_root; - Box::new( self.beacon_node .http .validator() - .produce_attestation(slot, committee_index) - .map_err(|e| format!("Failed to produce attestation: {:?}", e)) - .and_then::<_, Box + Send>>(move |attestation| { - let log = service.context.log.clone(); - - // For each validator in `validator_duties`, clone the `attestation` and add - // their signature. - // - // If any validator is unable to sign, they are simply skipped. - let signed_attestations = validator_duties - .iter() - .filter_map(|duty| { - let log = service.context.log.clone(); - - // Ensure that all required fields are present in the validator duty. 
- let (duty_slot, duty_committee_index, validator_committee_position, _) = - if let Some(tuple) = duty.attestation_duties() { - tuple - } else { - crit!( - log, - "Missing validator duties when signing"; - "duties" => format!("{:?}", duty) - ); - return None; - }; - - // Ensure that the attestation matches the duties. - if duty_slot != attestation.data.slot - || duty_committee_index != attestation.data.index - { - crit!( - log, - "Inconsistent validator duties during signing"; - "validator" => format!("{:?}", duty.validator_pubkey()), - "duty_slot" => duty_slot, - "attestation_slot" => attestation.data.slot, - "duty_index" => duty_committee_index, - "attestation_index" => attestation.data.index, - ); - return None; - } - - let mut attestation = attestation.clone(); - - if service - .validator_store - .sign_attestation( - duty.validator_pubkey(), - validator_committee_position, - &mut attestation, - ) - .is_none() - { - crit!( - log, - "Attestation signing refused"; - "validator" => format!("{:?}", duty.validator_pubkey()), - "slot" => attestation.data.slot, - "index" => attestation.data.index, - ); - None - } else { - Some(attestation) - } - }) - .collect::>(); - - // If there are any signed attestations, publish them to the BN. Otherwise, - // just return early. 
- if let Some(attestation) = signed_attestations.first().cloned() { - let num_attestations = signed_attestations.len(); - let beacon_block_root = attestation.data.beacon_block_root; - - Box::new( - service - .beacon_node - .http - .validator() - .publish_attestations(signed_attestations) - .map_err(|e| format!("Failed to publish attestation: {:?}", e)) - .map(move |publish_status| match publish_status { - PublishStatus::Valid => info!( - log, - "Successfully published attestations"; - "count" => num_attestations, - "head_block" => format!("{:?}", beacon_block_root), - "committee_index" => committee_index, - "slot" => slot.as_u64(), - "type" => "unaggregated", - ), - PublishStatus::Invalid(msg) => crit!( - log, - "Published attestation was invalid"; - "message" => msg, - "committee_index" => committee_index, - "slot" => slot.as_u64(), - "type" => "unaggregated", - ), - PublishStatus::Unknown => crit!( - log, - "Unknown condition when publishing unagg. attestation" - ), - }) - .map(|()| Some(attestation)), - ) - } else { - debug!( - log, - "No attestations to publish"; - "committee_index" => committee_index, - "slot" => slot.as_u64(), - ); - Box::new(future::ok(None)) + .publish_attestations(signed_attestations) + .await + .map_err(|e| format!("Failed to publish attestation: {:?}", e)) + .map(move |publish_status| match publish_status { + PublishStatus::Valid => info!( + log, + "Successfully published attestations"; + "count" => num_attestations, + "head_block" => format!("{:?}", beacon_block_root), + "committee_index" => committee_index, + "slot" => slot.as_u64(), + "type" => "unaggregated", + ), + PublishStatus::Invalid(msg) => crit!( + log, + "Published attestation was invalid"; + "message" => msg, + "committee_index" => committee_index, + "slot" => slot.as_u64(), + "type" => "unaggregated", + ), + PublishStatus::Unknown => { + crit!(log, "Unknown condition when publishing unagg. 
attestation") } - }), - ) + }) + .map(|()| Some(attestation)) + } else { + debug!( + log, + "No attestations to publish"; + "committee_index" => committee_index, + "slot" => slot.as_u64(), + ); + + Ok(None) + } } /// Performs the second step of the attesting process: downloading an aggregated `Attestation`, @@ -473,107 +449,105 @@ impl AttestationService { /// Only one aggregated `Attestation` is downloaded from the BN. It is then cloned and signed /// by each validator and the list of individually-signed `SignedAggregateAndProof` objects is /// returned to the BN. - fn produce_and_publish_aggregates( + async fn produce_and_publish_aggregates( &self, attestation: Attestation, - validator_duties: Arc>, - ) -> impl Future { - let service_1 = self.clone(); - let log_1 = self.context.log.clone(); + validator_duties: &[DutyAndProof], + ) -> Result<(), String> { + let log = &self.context.log; - self.beacon_node + let aggregated_attestation = self + .beacon_node .http .validator() .produce_aggregate_attestation(&attestation.data) - .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e)) - .and_then::<_, Box + Send>>( - move |aggregated_attestation| { - // For each validator, clone the `aggregated_attestation` and convert it into - // a `SignedAggregateAndProof` - let signed_aggregate_and_proofs = validator_duties - .iter() - .filter_map(|duty_and_proof| { - // Do not produce a signed aggregator for validators that are not - // subscribed aggregators. 
- let selection_proof = duty_and_proof.selection_proof.as_ref()?.clone(); + .await + .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))?; - let (duty_slot, duty_committee_index, _, validator_index) = - duty_and_proof.attestation_duties().or_else(|| { - crit!(log_1, "Missing duties when signing aggregate"); - None - })?; + // For each validator, clone the `aggregated_attestation` and convert it into + // a `SignedAggregateAndProof` + let signed_aggregate_and_proofs = validator_duties + .iter() + .filter_map(|duty_and_proof| { + // Do not produce a signed aggregator for validators that are not + // subscribed aggregators. + let selection_proof = duty_and_proof.selection_proof.as_ref()?.clone(); - let pubkey = &duty_and_proof.duty.validator_pubkey; - let slot = attestation.data.slot; - let committee_index = attestation.data.index; + let (duty_slot, duty_committee_index, _, validator_index) = + duty_and_proof.attestation_duties().or_else(|| { + crit!(log, "Missing duties when signing aggregate"); + None + })?; - if duty_slot != slot || duty_committee_index != committee_index { - crit!(log_1, "Inconsistent validator duties during signing"); - return None; - } + let pubkey = &duty_and_proof.duty.validator_pubkey; + let slot = attestation.data.slot; + let committee_index = attestation.data.index; - if let Some(signed_aggregate_and_proof) = service_1 - .validator_store - .produce_signed_aggregate_and_proof( - pubkey, - validator_index, - aggregated_attestation.clone(), - selection_proof, - ) - { - Some(signed_aggregate_and_proof) - } else { - crit!(log_1, "Failed to sign attestation"); - None - } - }) - .collect::>(); + if duty_slot != slot || duty_committee_index != committee_index { + crit!(log, "Inconsistent validator duties during signing"); + return None; + } - // If there any signed aggregates and proofs were produced, publish them to the - // BN. 
- if let Some(first) = signed_aggregate_and_proofs.first().cloned() { - let attestation = first.message.aggregate; + if let Some(signed_aggregate_and_proof) = + self.validator_store.produce_signed_aggregate_and_proof( + pubkey, + validator_index, + aggregated_attestation.clone(), + selection_proof, + ) + { + Some(signed_aggregate_and_proof) + } else { + crit!(log, "Failed to sign attestation"); + None + } + }) + .collect::>(); - Box::new(service_1 - .beacon_node - .http - .validator() - .publish_aggregate_and_proof(signed_aggregate_and_proofs) - .map(|publish_status| (attestation, publish_status)) - .map_err(|e| format!("Failed to publish aggregate and proofs: {:?}", e)) - .map(move |(attestation, publish_status)| match publish_status { - PublishStatus::Valid => info!( - log_1, - "Successfully published attestations"; - "signatures" => attestation.aggregation_bits.num_set_bits(), - "head_block" => format!("{:?}", attestation.data.beacon_block_root), - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), - "type" => "aggregated", - ), - PublishStatus::Invalid(msg) => crit!( - log_1, - "Published attestation was invalid"; - "message" => msg, - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), - "type" => "aggregated", - ), - PublishStatus::Unknown => { - crit!(log_1, "Unknown condition when publishing agg. attestation") - } - })) - } else { - debug!( - log_1, - "No signed aggregates to publish"; - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), - ); - Box::new(future::ok(())) - } - }, - ) + // If there any signed aggregates and proofs were produced, publish them to the + // BN. 
+ if let Some(first) = signed_aggregate_and_proofs.first().cloned() { + let attestation = first.message.aggregate; + + let publish_status = self + .beacon_node + .http + .validator() + .publish_aggregate_and_proof(signed_aggregate_and_proofs) + .await + .map_err(|e| format!("Failed to publish aggregate and proofs: {:?}", e))?; + match publish_status { + PublishStatus::Valid => info!( + log, + "Successfully published attestations"; + "signatures" => attestation.aggregation_bits.num_set_bits(), + "head_block" => format!("{:?}", attestation.data.beacon_block_root), + "committee_index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + "type" => "aggregated", + ), + PublishStatus::Invalid(msg) => crit!( + log, + "Published attestation was invalid"; + "message" => msg, + "committee_index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + "type" => "aggregated", + ), + PublishStatus::Unknown => { + crit!(log, "Unknown condition when publishing agg. attestation") + } + }; + Ok(()) + } else { + debug!( + log, + "No signed aggregates to publish"; + "committee_index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + ); + Ok(()) + } } } @@ -581,26 +555,18 @@ impl AttestationService { mod tests { use super::*; use parking_lot::RwLock; - use tokio::runtime::Builder as RuntimeBuilder; /// This test is to ensure that a `tokio_timer::Delay` with an instant in the past will still /// trigger. 
- #[test] - fn delay_triggers_when_in_the_past() { + #[tokio::test] + async fn delay_triggers_when_in_the_past() { let in_the_past = Instant::now() - Duration::from_secs(2); let state_1 = Arc::new(RwLock::new(in_the_past)); let state_2 = state_1.clone(); - let future = Delay::new(in_the_past) - .map_err(|_| panic!("Failed to create duration")) - .map(move |()| *state_1.write() = Instant::now()); - - let mut runtime = RuntimeBuilder::new() - .core_threads(1) - .build() - .expect("failed to start runtime"); - - runtime.block_on(future).expect("failed to complete future"); + delay_until(in_the_past) + .map(move |()| *state_1.write() = Instant::now()) + .await; assert!( *state_2.read() > in_the_past, diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 14c7043a73..3de83c4e99 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,15 +1,14 @@ use crate::{duties_service::DutiesService, validator_store::ValidatorStore}; use environment::RuntimeContext; use exit_future::Signal; -use futures::{stream, Future, IntoFuture, Stream}; +use futures::{FutureExt, StreamExt, TryFutureExt}; use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use slog::{crit, error, info, trace}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; -use std::time::{Duration, Instant}; -use tokio::timer::Interval; -use types::{ChainSpec, EthSpec}; +use tokio::time::{interval_at, Duration, Instant}; +use types::{ChainSpec, EthSpec, PublicKey, Slot}; /// Delay this period of time after the slot starts. This allows the node to process the new slot. const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); @@ -114,7 +113,7 @@ impl Deref for BlockService { impl BlockService { /// Starts the service that periodically attempts to produce blocks. 
- pub fn start_update_service(&self, spec: &ChainSpec) -> Result { + pub fn start_update_service(self, spec: &ChainSpec) -> Result { let log = self.context.log.clone(); let duration_to_next_slot = self @@ -122,145 +121,138 @@ impl BlockService { .duration_to_next_slot() .ok_or_else(|| "Unable to determine duration to next slot".to_string())?; - let interval = { + info!( + log, + "Block production service started"; + "next_update_millis" => duration_to_next_slot.as_millis() + ); + + let mut interval = { let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); - Interval::new( + // Note: interval_at panics if slot_duration = 0 + interval_at( Instant::now() + duration_to_next_slot + TIME_DELAY_FROM_SLOT, slot_duration, ) }; - let (exit_signal, exit_fut) = exit_future::signal(); - let service = self.clone(); - let log_1 = log.clone(); - let log_2 = log.clone(); + let runtime_handle = self.inner.context.runtime_handle.clone(); - self.context.executor.spawn( - exit_fut - .until( - interval - .map_err(move |e| { - crit! { - log_1, - "Timer thread failed"; - "error" => format!("{}", e) - } - }) - .for_each(move |_| service.clone().do_update().then(|_| Ok(()))), - ) - .map(move |_| info!(log_2, "Shutdown complete")), + let interval_fut = async move { + while interval.next().await.is_some() { + self.do_update().await.ok(); + } + }; + + let (exit_signal, exit_fut) = exit_future::signal(); + + let future = futures::future::select( + Box::pin(interval_fut), + exit_fut.map(move |_| info!(log, "Shutdown complete")), ); + runtime_handle.spawn(future); Ok(exit_signal) } /// Attempt to produce a block for any block producers in the `ValidatorStore`. 
- fn do_update(self) -> impl Future { - let service = self.clone(); - let log_1 = self.context.log.clone(); - let log_2 = self.context.log.clone(); + async fn do_update(&self) -> Result<(), ()> { + let log = &self.context.log; - self.slot_clock - .now() - .ok_or_else(move || { - crit!(log_1, "Duties manager failed to read slot clock"); - }) - .into_future() - .and_then(move |slot| { - let iter = service.duties_service.block_producers(slot).into_iter(); + let slot = self.slot_clock.now().ok_or_else(move || { + crit!(log, "Duties manager failed to read slot clock"); + })?; - if iter.len() == 0 { - trace!( - log_2, - "No local block proposers for this slot"; - "slot" => slot.as_u64() - ) - } else if iter.len() > 1 { - error!( - log_2, - "Multiple block proposers for this slot"; - "action" => "producing blocks for all proposers", - "num_proposers" => iter.len(), - "slot" => slot.as_u64(), - ) - } + trace!( + log, + "Block service update started"; + "slot" => slot.as_u64() + ); - stream::unfold(iter, move |mut block_producers| { - let log_1 = service.context.log.clone(); - let log_2 = service.context.log.clone(); - let service_1 = service.clone(); - let service_2 = service.clone(); - let service_3 = service.clone(); + let iter = self.duties_service.block_producers(slot).into_iter(); - block_producers.next().map(move |validator_pubkey| { - service_1 - .validator_store - .randao_reveal(&validator_pubkey, slot.epoch(E::slots_per_epoch())) - .ok_or_else(|| "Unable to produce randao reveal".to_string()) - .into_future() - .and_then(move |randao_reveal| { - service_1 - .beacon_node - .http - .validator() - .produce_block(slot, randao_reveal) - .map_err(|e| { - format!( - "Error from beacon node when producing block: {:?}", - e - ) - }) - }) - .and_then(move |block| { - service_2 - .validator_store - .sign_block(&validator_pubkey, block) - .ok_or_else(|| "Unable to sign block".to_string()) - }) - .and_then(move |block| { - service_3 - .beacon_node - .http - .validator() - 
.publish_block(block.clone()) - .map(|publish_status| (block, publish_status)) - .map_err(|e| { - format!( - "Error from beacon node when publishing block: {:?}", - e - ) - }) - }) - .map(move |(block, publish_status)| match publish_status { - PublishStatus::Valid => info!( - log_1, - "Successfully published block"; - "deposits" => block.message.body.deposits.len(), - "attestations" => block.message.body.attestations.len(), - "slot" => block.slot().as_u64(), - ), - PublishStatus::Invalid(msg) => crit!( - log_1, - "Published block was invalid"; - "message" => msg, - "slot" => block.slot().as_u64(), - ), - PublishStatus::Unknown => { - crit!(log_1, "Unknown condition when publishing block") - } - }) - .map_err(move |e| { - crit!( - log_2, - "Error whilst producing block"; - "message" => e - ) - }) - .then(|_| Ok(((), block_producers))) - }) - }) - .collect() - .map(|_| ()) - }) + if iter.len() == 0 { + trace!( + log, + "No local block proposers for this slot"; + "slot" => slot.as_u64() + ) + } else if iter.len() > 1 { + error!( + log, + "Multiple block proposers for this slot"; + "action" => "producing blocks for all proposers", + "num_proposers" => iter.len(), + "slot" => slot.as_u64(), + ) + } + + iter.for_each(|validator_pubkey| { + let service = self.clone(); + let log = log.clone(); + self.inner.context.runtime_handle.spawn( + service + .publish_block(slot, validator_pubkey) + .map_err(move |e| { + crit!( + log, + "Error whilst producing block"; + "message" => e + ) + }), + ); + }); + + Ok(()) + } + + /// Produce a block at the given slot for validator_pubkey + async fn publish_block(self, slot: Slot, validator_pubkey: PublicKey) -> Result<(), String> { + let log = &self.context.log; + + let randao_reveal = self + .validator_store + .randao_reveal(&validator_pubkey, slot.epoch(E::slots_per_epoch())) + .ok_or_else(|| "Unable to produce randao reveal".to_string())?; + + let block = self + .beacon_node + .http + .validator() + .produce_block(slot, randao_reveal) + 
.await + .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))?; + + let signed_block = self + .validator_store + .sign_block(&validator_pubkey, block) + .ok_or_else(|| "Unable to sign block".to_string())?; + + let publish_status = self + .beacon_node + .http + .validator() + .publish_block(signed_block.clone()) + .await + .map_err(|e| format!("Error from beacon node when publishing block: {:?}", e))?; + + match publish_status { + PublishStatus::Valid => info!( + log, + "Successfully published block"; + "deposits" => signed_block.message.body.deposits.len(), + "attestations" => signed_block.message.body.attestations.len(), + "slot" => signed_block.slot().as_u64(), + ), + PublishStatus::Invalid(msg) => crit!( + log, + "Published block was invalid"; + "message" => msg, + "slot" => signed_block.slot().as_u64(), + ), + PublishStatus::Unknown => crit!(log, "Unknown condition when publishing block"), + } + + Ok(()) } } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index d14880b88e..a78bf3f973 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -1,18 +1,17 @@ -use crate::validator_store::ValidatorStore; +use crate::{is_synced::is_synced, validator_store::ValidatorStore}; use environment::RuntimeContext; use exit_future::Signal; -use futures::{future, Future, IntoFuture, Stream}; +use futures::{FutureExt, StreamExt}; use parking_lot::RwLock; use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use rest_types::{ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription}; -use slog::{crit, debug, error, info, trace, warn}; +use slog::{debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::collections::HashMap; use std::convert::TryInto; use std::ops::Deref; use std::sync::Arc; -use std::time::{Duration, Instant}; -use tokio::timer::Interval; +use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, CommitteeIndex, Epoch, 
EthSpec, PublicKey, SelectionProof, Slot}; /// Delay this period of time after the slot starts. This allows the node to process the new slot. @@ -231,6 +230,17 @@ impl DutiesStore { .collect() } + fn is_aggregator(&self, validator_pubkey: &PublicKey, epoch: &Epoch) -> Option { + Some( + self.store + .read() + .get(validator_pubkey)? + .get(epoch)? + .selection_proof + .is_some(), + ) + } + fn insert( &self, epoch: Epoch, @@ -367,7 +377,7 @@ pub struct Inner { store: Arc, validator_store: ValidatorStore, pub(crate) slot_clock: T, - beacon_node: RemoteBeaconNode, + pub(crate) beacon_node: RemoteBeaconNode, context: RuntimeContext, /// If true, the duties service will poll for duties from the beacon node even if it is not /// synced. @@ -430,7 +440,7 @@ impl DutiesService { } /// Start the service that periodically polls the beacon node for validator duties. - pub fn start_update_service(&self, spec: &ChainSpec) -> Result { + pub fn start_update_service(self, spec: &ChainSpec) -> Result { let log = self.context.log.clone(); let duration_to_next_slot = self @@ -438,281 +448,244 @@ impl DutiesService { .duration_to_next_slot() .ok_or_else(|| "Unable to determine duration to next slot".to_string())?; - let interval = { + let mut interval = { let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); - Interval::new( + // Note: `interval_at` panics if `slot_duration` is 0 + interval_at( Instant::now() + duration_to_next_slot + TIME_DELAY_FROM_SLOT, slot_duration, ) }; let (exit_signal, exit_fut) = exit_future::signal(); - let service = self.clone(); - let log_1 = log.clone(); - let log_2 = log.clone(); // Run an immediate update before starting the updater service. - self.context.executor.spawn(service.clone().do_update()); + self.inner + .context + .runtime_handle + .spawn(self.clone().do_update()); - self.context.executor.spawn( - exit_fut - .until( - interval - .map_err(move |e| { - crit! 
{ - log_1, - "Timer thread failed"; - "error" => format!("{}", e) - } - }) - .for_each(move |_| service.clone().do_update().then(|_| Ok(()))), - ) - .map(move |_| info!(log_2, "Shutdown complete")), + let runtime_handle = self.inner.context.runtime_handle.clone(); + + let interval_fut = async move { + while interval.next().await.is_some() { + self.clone().do_update().await.ok(); + } + }; + + let future = futures::future::select( + Box::pin(interval_fut), + exit_fut.map(move |_| info!(log, "Shutdown complete")), ); + runtime_handle.spawn(future); Ok(exit_signal) } /// Attempt to download the duties of all managed validators for this epoch and the next. - fn do_update(&self) -> impl Future { - let service_1 = self.clone(); - let service_2 = self.clone(); - let service_3 = self.clone(); - let service_4 = self.clone(); - let log_1 = self.context.log.clone(); - let log_2 = self.context.log.clone(); + async fn do_update(self) -> Result<(), ()> { + let log = &self.context.log; - self.slot_clock + if !is_synced(&self.beacon_node, &self.slot_clock, None).await + && !self.allow_unsynced_beacon_node + { + return Ok(()); + } + + let current_epoch = self + .slot_clock .now() - .ok_or_else(move || { - error!(log_1, "Duties manager failed to read slot clock"); + .ok_or_else(|| { + error!(log, "Duties manager failed to read slot clock"); }) - .into_future() - .map(move |slot| { + .map(|slot| { let epoch = slot.epoch(E::slots_per_epoch()); if slot % E::slots_per_epoch() == 0 { let prune_below = epoch - PRUNE_DEPTH; trace!( - log_2, + log, "Pruning duties cache"; "pruning_below" => prune_below.as_u64(), "current_epoch" => epoch.as_u64(), ); - service_1.store.prune(prune_below); + self.store.prune(prune_below); } epoch - }) - .and_then(move |epoch| { - let log = service_2.context.log.clone(); + })?; - service_2 - .beacon_node - .http - .beacon() - .get_head() - .map(move |head| (epoch, head.slot.epoch(E::slots_per_epoch()))) - .map_err(move |e| { - error!( - log, - "Failed to contact 
beacon node"; - "error" => format!("{:?}", e) - ) - }) - }) - .and_then(move |(current_epoch, beacon_head_epoch)| { - let log = service_3.context.log.clone(); + let result = self.clone().update_epoch(current_epoch).await; + if let Err(e) = result { + error!( + log, + "Failed to get current epoch duties"; + "http_error" => format!("{:?}", e) + ); + } - let future: Box + Send> = if beacon_head_epoch + 1 - < current_epoch - && !service_3.allow_unsynced_beacon_node - { - error!( - log, - "Beacon node is not synced"; - "node_head_epoch" => format!("{}", beacon_head_epoch), - "current_epoch" => format!("{}", current_epoch), - ); + self.clone() + .update_epoch(current_epoch + 1) + .await + .map_err(move |e| { + error!( + log, + "Failed to get next epoch duties"; + "http_error" => format!("{:?}", e) + ); + })?; - Box::new(future::ok(())) - } else { - Box::new(service_3.update_epoch(current_epoch).then(move |result| { - if let Err(e) = result { - error!( - log, - "Failed to get current epoch duties"; - "http_error" => format!("{:?}", e) - ); - } - - let log = service_4.context.log.clone(); - service_4.update_epoch(current_epoch + 1).map_err(move |e| { - error!( - log, - "Failed to get next epoch duties"; - "http_error" => format!("{:?}", e) - ); - }) - })) - }; - - future - }) - .map(|_| ()) + Ok(()) } /// Attempt to download the duties of all managed validators for the given `epoch`. 
- fn update_epoch(self, epoch: Epoch) -> impl Future { - let service_1 = self.clone(); - let service_2 = self.clone(); - let service_3 = self; - - let pubkeys = service_1.validator_store.voting_pubkeys(); - service_1 + async fn update_epoch(self, epoch: Epoch) -> Result<(), String> { + let pubkeys = self.validator_store.voting_pubkeys(); + let all_duties = self .beacon_node .http .validator() .get_duties(epoch, pubkeys.as_slice()) - .map(move |all_duties| (epoch, all_duties)) - .map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e)) - .and_then(move |(epoch, all_duties)| { - let log = service_2.context.log.clone(); + .await + .map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e))?; - let mut new_validator = 0; - let mut new_epoch = 0; - let mut identical = 0; - let mut replaced = 0; - let mut invalid = 0; + let log = self.context.log.clone(); - // For each of the duties, attempt to insert them into our local store and build a - // list of new or changed selections proofs for any aggregating validators. - let validator_subscriptions = all_duties.into_iter().filter_map(|remote_duties| { - // Convert the remote duties into our local representation. - let duties: DutyAndProof = remote_duties - .try_into() - .map_err(|e| error!( + let mut new_validator = 0; + let mut new_epoch = 0; + let mut identical = 0; + let mut replaced = 0; + let mut invalid = 0; + + // For each of the duties, attempt to insert them into our local store and build a + // list of new or changed selections proofs for any aggregating validators. + let validator_subscriptions = all_duties + .into_iter() + .filter_map(|remote_duties| { + // Convert the remote duties into our local representation. + let duties: DutyAndProof = remote_duties + .clone() + .try_into() + .map_err(|e| { + error!( log, "Unable to convert remote duties"; "error" => e - )) - .ok()?; + ) + }) + .ok()?; - // Attempt to update our local store. 
- let outcome = service_2 - .store - .insert(epoch, duties.clone(), E::slots_per_epoch(), &service_2.validator_store) - .map_err(|e| error!( + let validator_pubkey = duties.duty.validator_pubkey.clone(); + + // Attempt to update our local store. + let outcome = self + .store + .insert(epoch, duties, E::slots_per_epoch(), &self.validator_store) + .map_err(|e| { + error!( log, "Unable to store duties"; "error" => e - )) - .ok()?; + ) + }) + .ok()?; - match &outcome { - InsertOutcome::NewValidator => { - debug!( - log, - "First duty assignment for validator"; - "proposal_slots" => format!("{:?}", &duties.duty.block_proposal_slots), - "attestation_slot" => format!("{:?}", &duties.duty.attestation_slot), - "validator" => format!("{:?}", &duties.duty.validator_pubkey) - ); - new_validator += 1; - } - InsertOutcome::NewEpoch => new_epoch += 1, - InsertOutcome::Identical => identical += 1, - InsertOutcome::Replaced { .. } => replaced += 1, - InsertOutcome::Invalid => invalid += 1, - }; - - if outcome.is_subscription_candidate() { - Some(ValidatorSubscription { - validator_index: duties.duty.validator_index?, - attestation_committee_index: duties.duty.attestation_committee_index?, - slot: duties.duty.attestation_slot?, - is_aggregator: duties.selection_proof.is_some(), - }) - } else { - None + match &outcome { + InsertOutcome::NewValidator => { + debug!( + log, + "First duty assignment for validator"; + "proposal_slots" => format!("{:?}", &remote_duties.block_proposal_slots), + "attestation_slot" => format!("{:?}", &remote_duties.attestation_slot), + "validator" => format!("{:?}", &remote_duties.validator_pubkey) + ); + new_validator += 1; } - }).collect::>(); + InsertOutcome::NewEpoch => new_epoch += 1, + InsertOutcome::Identical => identical += 1, + InsertOutcome::Replaced { .. 
} => replaced += 1, + InsertOutcome::Invalid => invalid += 1, + }; - if invalid > 0 { - error!( - log, - "Received invalid duties from beacon node"; - "bad_duty_count" => invalid, - "info" => "Duties are from wrong epoch." - ) - } + // The selection proof is computed on `store.insert`, so it's necessary to check + // with the store that the validator is an aggregator. + let is_aggregator = self.store.is_aggregator(&validator_pubkey, &epoch)?; - trace!( - log, - "Performed duties update"; - "identical" => identical, - "new_epoch" => new_epoch, - "new_validator" => new_validator, - "replaced" => replaced, - "epoch" => format!("{}", epoch) - ); - - if replaced > 0 { - warn!( - log, - "Duties changed during routine update"; - "info" => "Chain re-org likely occurred." - ) - } - - Ok(validator_subscriptions) - }) - .and_then::<_, Box + Send>>(move |validator_subscriptions| { - let log = service_3.context.log.clone(); - let count = validator_subscriptions.len(); - - if count == 0 { - debug!( - log, - "No new subscriptions required" - ); - - Box::new(future::ok(())) + if outcome.is_subscription_candidate() { + Some(ValidatorSubscription { + validator_index: remote_duties.validator_index?, + attestation_committee_index: remote_duties.attestation_committee_index?, + slot: remote_duties.attestation_slot?, + is_aggregator, + }) } else { - Box::new(service_3.beacon_node - .http - .validator() - .subscribe(validator_subscriptions) - .map_err(|e| format!("Failed to subscribe validators: {:?}", e)) - .map(move |status| { - match status { - PublishStatus::Valid => { - debug!( - log, - "Successfully subscribed validators"; - "count" => count - ) - }, - PublishStatus::Unknown => { - error!( - log, - "Unknown response from subscription"; - ) - }, - PublishStatus::Invalid(e) => { - error!( - log, - "Failed to subscribe validator"; - "error" => e - ) - }, - }; - })) + None } - }) + .collect::>(); + + if invalid > 0 { + error!( + log, + "Received invalid duties from beacon node"; + 
"bad_duty_count" => invalid, + "info" => "Duties are from wrong epoch." + ) + } + + trace!( + log, + "Performed duties update"; + "identical" => identical, + "new_epoch" => new_epoch, + "new_validator" => new_validator, + "replaced" => replaced, + "epoch" => format!("{}", epoch) + ); + + if replaced > 0 { + warn!( + log, + "Duties changed during routine update"; + "info" => "Chain re-org likely occurred" + ) + } + + let log = self.context.log.clone(); + let count = validator_subscriptions.len(); + + if count == 0 { + debug!(log, "No new subscriptions required"); + + Ok(()) + } else { + self.beacon_node + .http + .validator() + .subscribe(validator_subscriptions) + .await + .map_err(|e| format!("Failed to subscribe validators: {:?}", e)) + .map(move |status| { + match status { + PublishStatus::Valid => debug!( + log, + "Successfully subscribed validators"; + "count" => count + ), + PublishStatus::Unknown => error!( + log, + "Unknown response from subscription"; + ), + PublishStatus::Invalid(e) => error!( + log, + "Failed to subscribe validator"; + "error" => e + ), + }; + }) + } } } diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs index 9ff3a0bf56..ae979d4fd2 100644 --- a/validator_client/src/fork_service.rs +++ b/validator_client/src/fork_service.rs @@ -1,14 +1,13 @@ use environment::RuntimeContext; use exit_future::Signal; -use futures::{Future, Stream}; +use futures::{FutureExt, StreamExt}; use parking_lot::RwLock; use remote_beacon_node::RemoteBeaconNode; -use slog::{crit, info, trace}; +use slog::{debug, info, trace}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; -use std::time::{Duration, Instant}; -use tokio::timer::Interval; +use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, EthSpec, Fork}; /// Delay this period of time after the slot starts. This allows the node to process the new slot. 
@@ -101,7 +100,7 @@ impl ForkService { } /// Starts the service that periodically polls for the `Fork`. - pub fn start_update_service(&self, spec: &ChainSpec) -> Result { + pub fn start_update_service(self, spec: &ChainSpec) -> Result { let log = self.context.log.clone(); let duration_to_next_epoch = self @@ -109,63 +108,67 @@ impl ForkService { .duration_to_next_epoch(E::slots_per_epoch()) .ok_or_else(|| "Unable to determine duration to next epoch".to_string())?; - let interval = { + let mut interval = { let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); - Interval::new( + // Note: interval_at panics if `slot_duration * E::slots_per_epoch()` = 0 + interval_at( Instant::now() + duration_to_next_epoch + TIME_DELAY_FROM_SLOT, slot_duration * E::slots_per_epoch() as u32, ) }; let (exit_signal, exit_fut) = exit_future::signal(); - let service = self.clone(); - let log_1 = log.clone(); - let log_2 = log.clone(); // Run an immediate update before starting the updater service. - self.context.executor.spawn(service.clone().do_update()); + self.inner + .context + .runtime_handle + .spawn(self.clone().do_update()); - self.context.executor.spawn( - exit_fut - .until( - interval - .map_err(move |e| { - crit! { - log_1, - "Timer thread failed"; - "error" => format!("{}", e) - } - }) - .for_each(move |_| service.do_update().then(|_| Ok(()))), - ) - .map(move |_| info!(log_2, "Shutdown complete")), + let runtime_handle = self.inner.context.runtime_handle.clone(); + + let interval_fut = async move { + while interval.next().await.is_some() { + self.clone().do_update().await.ok(); + } + }; + + let future = futures::future::select( + Box::pin(interval_fut), + exit_fut.map(move |_| info!(log, "Shutdown complete")), ); + runtime_handle.spawn(future); Ok(exit_signal) } /// Attempts to download the `Fork` from the server. 
- fn do_update(&self) -> impl Future { - let service_1 = self.clone(); - let log_1 = service_1.context.log.clone(); - let log_2 = service_1.context.log.clone(); + async fn do_update(self) -> Result<(), ()> { + let log = &self.context.log; - self.inner + let fork = self + .inner .beacon_node .http .beacon() .get_fork() - .map(move |fork| *(service_1.fork.write()) = Some(fork)) - .map(move |_| trace!(log_1, "Fork update success")) - .map_err(move |e| { + .await + .map_err(|e| { trace!( - log_2, + log, "Fork update failed"; "error" => format!("Error retrieving fork: {:?}", e) ) - }) - // Returning an error will stop the interval. This is not desired, a single failure - // should not stop all future attempts. - .then(|_| Ok(())) + })?; + + if self.fork.read().as_ref() != Some(&fork) { + *(self.fork.write()) = Some(fork); + } + + debug!(log, "Fork update success"); + + // Returning an error will stop the interval. This is not desired, a single failure + // should not stop all future attempts. + Ok(()) } } diff --git a/validator_client/src/is_synced.rs b/validator_client/src/is_synced.rs new file mode 100644 index 0000000000..e1017ac771 --- /dev/null +++ b/validator_client/src/is_synced.rs @@ -0,0 +1,80 @@ +use remote_beacon_node::RemoteBeaconNode; +use rest_types::SyncingResponse; +use slog::{debug, error, Logger}; +use slot_clock::SlotClock; +use types::EthSpec; + +/// A distance in slots. +const SYNC_TOLERANCE: u64 = 4; + +/// Returns `true` if the beacon node is synced and ready for action. +/// +/// Returns `false` if: +/// +/// - The beacon node is unreachable. +/// - The beacon node indicates that it is syncing **AND** it is more than `SYNC_TOLERANCE` behind +/// the highest known slot. +/// +/// The second condition means the even if the beacon node thinks that it's syncing, we'll still +/// try to use it if it's close enough to the head. 
+pub async fn is_synced( + beacon_node: &RemoteBeaconNode, + slot_clock: &T, + log_opt: Option<&Logger>, +) -> bool { + let resp = match beacon_node.http.node().syncing_status().await { + Ok(resp) => resp, + Err(e) => { + if let Some(log) = log_opt { + error!( + log, + "Unable connect to beacon node"; + "error" => format!("{:?}", e) + ) + } + + return false; + } + }; + + match &resp { + SyncingResponse { + is_syncing: false, .. + } => true, + SyncingResponse { + is_syncing: true, + sync_status, + } => { + if let Some(log) = log_opt { + debug!( + log, + "Beacon node sync status"; + "status" => format!("{:?}", resp), + ); + } + + let now = if let Some(slot) = slot_clock.now() { + slot + } else { + // There's no good reason why we shouldn't be able to read the slot clock, so we'll + // indicate we're not synced if that's the case. + return false; + }; + + if sync_status.current_slot + SYNC_TOLERANCE >= now { + true + } else { + if let Some(log) = log_opt { + error!( + log, + "Beacon node is syncing"; + "msg" => "not receiving new duties", + "target_slot" => sync_status.highest_slot.as_u64(), + "current_slot" => sync_status.current_slot.as_u64(), + ); + } + false + } + } + } +} diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index ec7e2a743d..fa082fd1f3 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -4,6 +4,7 @@ mod cli; mod config; mod duties_service; mod fork_service; +mod is_synced; mod notifier; mod validator_store; @@ -19,18 +20,13 @@ use duties_service::{DutiesService, DutiesServiceBuilder}; use environment::RuntimeContext; use exit_future::Signal; use fork_service::{ForkService, ForkServiceBuilder}; -use futures::{ - future::{self, loop_fn, Loop}, - Future, IntoFuture, -}; use notifier::spawn_notifier; use remote_beacon_node::RemoteBeaconNode; use slog::{error, info, Logger}; use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; -use std::time::{Duration, Instant}; use std::time::{SystemTime, 
UNIX_EPOCH}; -use tokio::timer::Delay; +use tokio::time::{delay_for, Duration}; use types::EthSpec; use validator_store::ValidatorStore; @@ -47,27 +43,24 @@ pub struct ProductionValidatorClient { block_service: BlockService, attestation_service: AttestationService, exit_signals: Vec, + config: Config, } impl ProductionValidatorClient { /// Instantiates the validator client, _without_ starting the timers to trigger block /// and attestation production. - pub fn new_from_cli( + pub async fn new_from_cli( context: RuntimeContext, - cli_args: &ArgMatches, - ) -> impl Future { - Config::from_cli(&cli_args) - .into_future() - .map_err(|e| format!("Unable to initialize config: {}", e)) - .and_then(|config| Self::new(context, config)) + cli_args: &ArgMatches<'_>, + ) -> Result { + let config = Config::from_cli(&cli_args) + .map_err(|e| format!("Unable to initialize config: {}", e))?; + Self::new(context, config).await } /// Instantiates the validator client, _without_ starting the timers to trigger block /// and attestation production. 
- pub fn new( - mut context: RuntimeContext, - config: Config, - ) -> impl Future { + pub async fn new(mut context: RuntimeContext, config: Config) -> Result { let log_1 = context.log.clone(); let log_2 = context.log.clone(); let log_3 = context.log.clone(); @@ -80,204 +73,178 @@ impl ProductionValidatorClient { "datadir" => format!("{:?}", config.data_dir), ); - RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT) - .map_err(|e| format!("Unable to init beacon node http client: {}", e)) - .into_future() - .and_then(move |beacon_node| wait_for_node(beacon_node, log_2)) - .and_then(|beacon_node| { - beacon_node - .http - .spec() - .get_eth2_config() - .map(|eth2_config| (beacon_node, eth2_config)) - .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e)) - }) - .and_then(|(beacon_node, eth2_config)| { - beacon_node - .http - .beacon() - .get_genesis_time() - .map(|genesis_time| (beacon_node, eth2_config, genesis_time)) - .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e)) - }) - .and_then(move |(beacon_node, remote_eth2_config, genesis_time)| { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .into_future() - .map_err(|e| format!("Unable to read system time: {:?}", e)) - .and_then::<_, Box + Send>>(move |now| { - let log = log_3.clone(); - let genesis = Duration::from_secs(genesis_time); + let beacon_node = + RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT) + .map_err(|e| format!("Unable to init beacon node http client: {}", e))?; - // If the time now is less than (prior to) genesis, then delay until the - // genesis instant. - // - // If the validator client starts before genesis, it will get errors from - // the slot clock. 
- if now < genesis { - info!( - log, - "Starting node prior to genesis"; - "seconds_to_wait" => (genesis - now).as_secs() - ); + // TODO: check if all logs in wait_for_node are produed while awaiting + let beacon_node = wait_for_node(beacon_node, log_2).await?; + let eth2_config = beacon_node + .http + .spec() + .get_eth2_config() + .await + .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))?; + let genesis_time = beacon_node + .http + .beacon() + .get_genesis_time() + .await + .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))?; + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {:?}", e))?; + let log = log_3.clone(); + let genesis = Duration::from_secs(genesis_time); - Box::new( - Delay::new(Instant::now() + (genesis - now)) - .map_err(|e| { - format!("Unable to create genesis wait delay: {:?}", e) - }) - .map(move |_| (beacon_node, remote_eth2_config, genesis_time)), - ) - } else { - info!( - log, - "Genesis has already occurred"; - "seconds_ago" => (now - genesis).as_secs() - ); + // If the time now is less than (prior to) genesis, then delay until the + // genesis instant. + // + // If the validator client starts before genesis, it will get errors from + // the slot clock. 
+ if now < genesis { + info!( + log, + "Starting node prior to genesis"; + "seconds_to_wait" => (genesis - now).as_secs() + ); - Box::new(future::ok((beacon_node, remote_eth2_config, genesis_time))) - } - }) - }) - .and_then(|(beacon_node, eth2_config, genesis_time)| { - beacon_node - .http - .beacon() - .get_genesis_validators_root() - .map(move |genesis_validators_root| { - ( - beacon_node, - eth2_config, - genesis_time, - genesis_validators_root, - ) - }) - .map_err(|e| { - format!( - "Unable to read genesis validators root from beacon node: {:?}", - e - ) - }) - }) - .and_then( - move |(beacon_node, remote_eth2_config, genesis_time, genesis_validators_root)| { - let log = log_4.clone(); + delay_for(genesis - now).await + } else { + info!( + log, + "Genesis has already occurred"; + "seconds_ago" => (now - genesis).as_secs() + ); + } + let genesis_validators_root = beacon_node + .http + .beacon() + .get_genesis_validators_root() + .await + .map_err(|e| { + format!( + "Unable to read genesis validators root from beacon node: {:?}", + e + ) + })?; + let log = log_4.clone(); - // Do not permit a connection to a beacon node using different spec constants. - if context.eth2_config.spec_constants != remote_eth2_config.spec_constants { - return Err(format!( - "Beacon node is using an incompatible spec. Got {}, expected {}", - remote_eth2_config.spec_constants, context.eth2_config.spec_constants - )); - } + // Do not permit a connection to a beacon node using different spec constants. + if context.eth2_config.spec_constants != eth2_config.spec_constants { + return Err(format!( + "Beacon node is using an incompatible spec. Got {}, expected {}", + eth2_config.spec_constants, context.eth2_config.spec_constants + )); + } - // Note: here we just assume the spec variables of the remote node. This is very useful - // for testnets, but perhaps a security issue when it comes to mainnet. 
- // - // A damaging attack would be for a beacon node to convince the validator client of a - // different `SLOTS_PER_EPOCH` variable. This could result in slashable messages being - // produced. We are safe from this because `SLOTS_PER_EPOCH` is a type-level constant - // for Lighthouse. - context.eth2_config = remote_eth2_config; + // Note: here we just assume the spec variables of the remote node. This is very useful + // for testnets, but perhaps a security issue when it comes to mainnet. + // + // A damaging attack would be for a beacon node to convince the validator client of a + // different `SLOTS_PER_EPOCH` variable. This could result in slashable messages being + // produced. We are safe from this because `SLOTS_PER_EPOCH` is a type-level constant + // for Lighthouse. + context.eth2_config = eth2_config; - let slot_clock = SystemTimeSlotClock::new( - context.eth2_config.spec.genesis_slot, - Duration::from_secs(genesis_time), - Duration::from_millis(context.eth2_config.spec.milliseconds_per_slot), - ); + let slot_clock = SystemTimeSlotClock::new( + context.eth2_config.spec.genesis_slot, + Duration::from_secs(genesis_time), + Duration::from_millis(context.eth2_config.spec.milliseconds_per_slot), + ); - let fork_service = ForkServiceBuilder::new() - .slot_clock(slot_clock.clone()) - .beacon_node(beacon_node.clone()) - .runtime_context(context.service_context("fork".into())) - .build()?; + let fork_service = ForkServiceBuilder::new() + .slot_clock(slot_clock.clone()) + .beacon_node(beacon_node.clone()) + .runtime_context(context.service_context("fork".into())) + .build()?; - let validator_store: ValidatorStore = - match &config.key_source { - // Load pre-existing validators from the data dir. - // - // Use the `account_manager` to generate these files. 
- KeySource::Disk => ValidatorStore::load_from_disk( - config.data_dir.clone(), - genesis_validators_root, - context.eth2_config.spec.clone(), - fork_service.clone(), - log.clone(), - )?, - // Generate ephemeral insecure keypairs for testing purposes. - // - // Do not use in production. - KeySource::InsecureKeypairs(indices) => { - ValidatorStore::insecure_ephemeral_validators( - &indices, - genesis_validators_root, - context.eth2_config.spec.clone(), - fork_service.clone(), - log.clone(), - )? - } - }; + let validator_store: ValidatorStore = match &config.key_source { + // Load pre-existing validators from the data dir. + // + // Use the `account_manager` to generate these files. + KeySource::Disk => ValidatorStore::load_from_disk( + config.data_dir.clone(), + genesis_validators_root, + context.eth2_config.spec.clone(), + fork_service.clone(), + log.clone(), + )?, + // Generate ephemeral insecure keypairs for testing purposes. + // + // Do not use in production. + KeySource::InsecureKeypairs(indices) => ValidatorStore::insecure_ephemeral_validators( + &indices, + genesis_validators_root, + context.eth2_config.spec.clone(), + fork_service.clone(), + log.clone(), + )?, + }; - info!( - log, - "Loaded validator keypair store"; - "voting_validators" => validator_store.num_voting_validators() - ); + info!( + log, + "Loaded validator keypair store"; + "voting_validators" => validator_store.num_voting_validators() + ); - let duties_service = DutiesServiceBuilder::new() - .slot_clock(slot_clock.clone()) - .validator_store(validator_store.clone()) - .beacon_node(beacon_node.clone()) - .runtime_context(context.service_context("duties".into())) - .allow_unsynced_beacon_node(config.allow_unsynced_beacon_node) - .build()?; + let duties_service = DutiesServiceBuilder::new() + .slot_clock(slot_clock.clone()) + .validator_store(validator_store.clone()) + .beacon_node(beacon_node.clone()) + .runtime_context(context.service_context("duties".into())) + 
.allow_unsynced_beacon_node(config.allow_unsynced_beacon_node) + .build()?; - let block_service = BlockServiceBuilder::new() - .duties_service(duties_service.clone()) - .slot_clock(slot_clock.clone()) - .validator_store(validator_store.clone()) - .beacon_node(beacon_node.clone()) - .runtime_context(context.service_context("block".into())) - .build()?; + let block_service = BlockServiceBuilder::new() + .duties_service(duties_service.clone()) + .slot_clock(slot_clock.clone()) + .validator_store(validator_store.clone()) + .beacon_node(beacon_node.clone()) + .runtime_context(context.service_context("block".into())) + .build()?; - let attestation_service = AttestationServiceBuilder::new() - .duties_service(duties_service.clone()) - .slot_clock(slot_clock) - .validator_store(validator_store) - .beacon_node(beacon_node) - .runtime_context(context.service_context("attestation".into())) - .build()?; + let attestation_service = AttestationServiceBuilder::new() + .duties_service(duties_service.clone()) + .slot_clock(slot_clock) + .validator_store(validator_store) + .beacon_node(beacon_node) + .runtime_context(context.service_context("attestation".into())) + .build()?; - Ok(Self { - context, - duties_service, - fork_service, - block_service, - attestation_service, - exit_signals: vec![], - }) - }, - ) + Ok(Self { + context, + duties_service, + fork_service, + block_service, + attestation_service, + exit_signals: vec![], + config, + }) } pub fn start_service(&mut self) -> Result<(), String> { let duties_exit = self .duties_service + .clone() .start_update_service(&self.context.eth2_config.spec) .map_err(|e| format!("Unable to start duties service: {}", e))?; let fork_exit = self .fork_service + .clone() .start_update_service(&self.context.eth2_config.spec) .map_err(|e| format!("Unable to start fork service: {}", e))?; let block_exit = self .block_service + .clone() .start_update_service(&self.context.eth2_config.spec) .map_err(|e| format!("Unable to start block service: {}", 
e))?; let attestation_exit = self .attestation_service + .clone() .start_update_service(&self.context.eth2_config.spec) .map_err(|e| format!("Unable to start attestation service: {}", e))?; @@ -298,48 +265,39 @@ impl ProductionValidatorClient { /// Request the version from the node, looping back and trying again on failure. Exit once the node /// has been contacted. -fn wait_for_node( +async fn wait_for_node( beacon_node: RemoteBeaconNode, log: Logger, -) -> impl Future, Error = String> { +) -> Result, String> { // Try to get the version string from the node, looping until success is returned. - loop_fn(beacon_node.clone(), move |beacon_node| { + loop { let log = log.clone(); - beacon_node + let result = beacon_node .clone() .http .node() .get_version() - .map_err(|e| format!("{:?}", e)) - .then(move |result| { - let future: Box, Error = String> + Send> = match result - { - Ok(version) => { - info!( - log, - "Connected to beacon node"; - "version" => version, - ); + .await + .map_err(|e| format!("{:?}", e)); - Box::new(future::ok(Loop::Break(beacon_node))) - } - Err(e) => { - error!( - log, - "Unable to connect to beacon node"; - "error" => format!("{:?}", e), - ); + match result { + Ok(version) => { + info!( + log, + "Connected to beacon node"; + "version" => version, + ); - Box::new( - Delay::new(Instant::now() + RETRY_DELAY) - .map_err(|e| format!("Failed to trigger delay: {:?}", e)) - .and_then(|_| future::ok(Loop::Continue(beacon_node))), - ) - } - }; - - future - }) - }) - .map(|_| beacon_node) + return Ok(beacon_node); + } + Err(e) => { + error!( + log, + "Unable to connect to beacon node"; + "error" => format!("{:?}", e), + ); + delay_for(RETRY_DELAY).await; + } + } + } } diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index e60bf8cc62..9d9aa97318 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -1,39 +1,43 @@ -use crate::ProductionValidatorClient; +use crate::{is_synced::is_synced, 
ProductionValidatorClient}; use exit_future::Signal; -use futures::{Future, Stream}; +use futures::{FutureExt, StreamExt}; use slog::{error, info}; use slot_clock::SlotClock; -use std::time::{Duration, Instant}; -use tokio::timer::Interval; +use tokio::time::{interval_at, Duration, Instant}; use types::EthSpec; /// Spawns a notifier service which periodically logs information about the node. pub fn spawn_notifier(client: &ProductionValidatorClient) -> Result { let context = client.context.service_context("notifier".into()); + let runtime_handle = context.runtime_handle.clone(); + let log = context.log.clone(); + let duties_service = client.duties_service.clone(); + let allow_unsynced_beacon_node = client.config.allow_unsynced_beacon_node; let slot_duration = Duration::from_millis(context.eth2_config.spec.milliseconds_per_slot); - let duration_to_next_slot = client - .duties_service + let duration_to_next_slot = duties_service .slot_clock .duration_to_next_slot() .ok_or_else(|| "slot_notifier unable to determine time to next slot")?; - // Run this half way through each slot. + // Run the notifier half way through each slot. let start_instant = Instant::now() + duration_to_next_slot + (slot_duration / 2); + let mut interval = interval_at(start_instant, slot_duration); - // Run this each slot. 
- let interval_duration = slot_duration; + let interval_fut = async move { + let log = &context.log; - let duties_service = client.duties_service.clone(); - let log_1 = context.log.clone(); - let log_2 = context.log.clone(); - - let interval_future = Interval::new(start_instant, interval_duration) - .map_err( - move |e| error!(log_1, "Slot notifier timer failed"; "error" => format!("{:?}", e)), - ) - .for_each(move |_| { - let log = log_2.clone(); + while interval.next().await.is_some() { + if !is_synced( + &duties_service.beacon_node, + &duties_service.slot_clock, + Some(&log), + ) + .await + && !allow_unsynced_beacon_node + { + continue; + } if let Some(slot) = duties_service.slot_clock.now() { let epoch = slot.epoch(T::slots_per_epoch()); @@ -46,7 +50,7 @@ pub fn spawn_notifier(client: &ProductionValidatorClient) -> Resu error!(log, "No validators present") } else if total_validators == attesting_validators { info!( - log_2, + log, "All validators active"; "proposers" => proposing_validators, "active_validators" => attesting_validators, @@ -56,7 +60,7 @@ pub fn spawn_notifier(client: &ProductionValidatorClient) -> Resu ); } else if attesting_validators > 0 { info!( - log_2, + log, "Some validators active"; "proposers" => proposing_validators, "active_validators" => attesting_validators, @@ -66,7 +70,7 @@ pub fn spawn_notifier(client: &ProductionValidatorClient) -> Resu ); } else { info!( - log_2, + log, "Awaiting activation"; "validators" => total_validators, "epoch" => format!("{}", epoch), @@ -76,16 +80,15 @@ pub fn spawn_notifier(client: &ProductionValidatorClient) -> Resu } else { error!(log, "Unable to read slot clock"); } - - Ok(()) - }); + } + }; let (exit_signal, exit) = exit_future::signal(); - let log = context.log.clone(); - client.context.executor.spawn( - exit.until(interval_future) - .map(move |_| info!(log, "Shutdown complete")), + let future = futures::future::select( + Box::pin(interval_fut), + exit.map(move |_| info!(log, "Shutdown complete")), 
); + runtime_handle.spawn(future); Ok(exit_signal) } diff --git a/validator_client/src/validator_directory.rs b/validator_client/src/validator_directory.rs index 197e1cb44e..23643b6fe1 100644 --- a/validator_client/src/validator_directory.rs +++ b/validator_client/src/validator_directory.rs @@ -1,6 +1,6 @@ use bls::get_withdrawal_credentials; use deposit_contract::{encode_eth1_tx_data, DEPOSIT_GAS}; -use futures::{Future, IntoFuture}; +use futures::compat::Future01CompatExt; use hex; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -303,28 +303,27 @@ impl ValidatorDirectoryBuilder { Ok(self) } - pub fn submit_eth1_deposit( + pub async fn submit_eth1_deposit( self, web3: Web3, from: Address, deposit_contract: Address, - ) -> impl Future { - self.get_deposit_data() - .into_future() - .and_then(move |(deposit_data, deposit_amount)| { - web3.eth() - .send_transaction(TransactionRequest { - from, - to: Some(deposit_contract), - gas: Some(DEPOSIT_GAS.into()), - gas_price: None, - value: Some(from_gwei(deposit_amount)), - data: Some(deposit_data.into()), - nonce: None, - condition: None, - }) - .map_err(|e| format!("Failed to send transaction: {:?}", e)) + ) -> Result<(Self, Hash256), String> { + let (deposit_data, deposit_amount) = self.get_deposit_data()?; + web3.eth() + .send_transaction(TransactionRequest { + from, + to: Some(deposit_contract), + gas: Some(DEPOSIT_GAS.into()), + gas_price: None, + value: Some(from_gwei(deposit_amount)), + data: Some(deposit_data.into()), + nonce: None, + condition: None, }) + .compat() + .await + .map_err(|e| format!("Failed to send transaction: {:?}", e)) .map(|tx| (self, tx)) }