Mirror of https://github.com/sigp/lighthouse.git, synced 2026-03-03 16:51:41 +00:00
Compare commits
204 Commits
| Author | SHA1 | Date |
|---|---|---|
| | b354a83faa | |
| | 0b287f6ece | |
| | ee036cba7e | |
| | f4fe2ac533 | |
| | 7d87e11e0f | |
| | cfae5fbbc4 | |
| | 983f768034 | |
| | 138c0cf7f0 | |
| | 82a0973935 | |
| | 09a615b2c0 | |
| | 924ba66218 | |
| | 6206d8e79b | |
| | 5629126f45 | |
| | 20ee893969 | |
| | 0feb3cf19a | |
| | f26adc0a36 | |
| | d4dd25883f | |
| | c5722093d3 | |
| | 1bbecbcf26 | |
| | 31707ccf45 | |
| | 1419501f2e | |
| | 6c17b4696f | |
| | 37679b8898 | |
| | f634f073a8 | |
| | 142e033c34 | |
| | 3b5da8f35f | |
| | 3ea01ac26b | |
| | d0f1a3e59f | |
| | 4d77784bb8 | |
| | 7d8acc20a0 | |
| | 2ede9caaa6 | |
| | a37e75f44b | |
| | febb300a2d | |
| | 36d3d37cb4 | |
| | 395d99ce03 | |
| | f53dedb27d | |
| | eaa9f9744f | |
| | ba0f3daf9d | |
| | 09b40b7a5e | |
| | 9ae9df806c | |
| | edf250cea9 | |
| | 5680355b31 | |
| | a413b43fed | |
| | 5f013548c0 | |
| | 0b5be9b2c0 | |
| | e5d9d6179f | |
| | b73c497be2 | |
| | 21bcc8848d | |
| | 23a8f31f83 | |
| | ba10c80633 | |
| | 3c4daec9af | |
| | 3a888d6ef3 | |
| | 41f7547645 | |
| | ea0e936ac4 | |
| | e26da35cbf | |
| | 393782f632 | |
| | f61a7113ac | |
| | 2870172e0d | |
| | 0620f54f2f | |
| | 1c90c816b7 | |
| | e940dcea47 | |
| | b885d79ac3 | |
| | fc5e6cbbb0 | |
| | 4a01f44206 | |
| | 4b213032b2 | |
| | c80860c17e | |
| | e164371083 | |
| | 00c89c51c8 | |
| | 3c7f2d651a | |
| | e6a8635b38 | |
| | 9ae218bfac | |
| | 57e0b6a615 | |
| | e8d5d37bc1 | |
| | 275148a152 | |
| | 559b7c8faa | |
| | 025b262e01 | |
| | ac2ce2ba6b | |
| | f500b24242 | |
| | 51fbaefe41 | |
| | 20a48df80a | |
| | 5bc8fea2e0 | |
| | 5977c00edb | |
| | f631155304 | |
| | bae4835308 | |
| | e429c3eefe | |
| | 2856f5122d | |
| | 25cd91ce26 | |
| | c7f47af9fb | |
| | 9dab928572 | |
| | 536728b975 | |
| | ac89bb190a | |
| | 314c077870 | |
| | a7a79ce4b7 | |
| | 916a133043 | |
| | d4dd9fae07 | |
| | 2ccb9f48da | |
| | 821f91ec75 | |
| | fc0b8adcd7 | |
| | 721323f045 | |
| | 163fda2c26 | |
| | 1e671a61d6 | |
| | d90bd648d8 | |
| | 9f6ee212ff | |
| | 9fc290a344 | |
| | 95320f8ab0 | |
| | 66f183be02 | |
| | 6e7d5c6a7c | |
| | 3953204727 | |
| | 38b9bf98ac | |
| | 7688b5f1dd | |
| | e0e41fc8e5 | |
| | e3d9832fee | |
| | 69e15af0b2 | |
| | 02174e21d8 | |
| | 825aca0ef3 | |
| | 4ddfc032e2 | |
| | b3c01bf09d | |
| | 81a89fb773 | |
| | 259502829e | |
| | da6ab85e99 | |
| | ea76faeeee | |
| | 920bfdaade | |
| | 6d507ef863 | |
| | 07a091ad95 | |
| | decea48c78 | |
| | 710409c2ba | |
| | f3d05c15d1 | |
| | f3380c00b8 | |
| | e379ad0f4e | |
| | 305724770d | |
| | 9450a0f30d | |
| | bcb6afa0aa | |
| | 3199b1a6f2 | |
| | 06a72614cb | |
| | 065251b701 | |
| | 81c9fe3817 | |
| | e6f97bf466 | |
| | 764cb2d32a | |
| | 9db0c28051 | |
| | 6b8c96662f | |
| | 7818447fd2 | |
| | d15ec9b544 | |
| | 1a4de898bc | |
| | 7bbeca4fa1 | |
| | 6622bf9f03 | |
| | 320e72e2de | |
| | bb8b88edcf | |
| | 2dfe77a8f9 | |
| | 39bf05e3e5 | |
| | d2983c13df | |
| | 7f036a6e95 | |
| | 7ce9a252a4 | |
| | ed4b3ef471 | |
| | 7baac70056 | |
| | 208f1da81b | |
| | d9d00cc05d | |
| | e20a2deebd | |
| | 036096ef61 | |
| | 0e37a16927 | |
| | 52d60cce1d | |
| | 042e80570c | |
| | fe03ff0f21 | |
| | 197adeff0b | |
| | ce10db15da | |
| | a214032e1f | |
| | 723c7cbd27 | |
| | cb26ddebb1 | |
| | 91cb14ac41 | |
| | 08e6b4961d | |
| | d609a3f639 | |
| | 91a28e7438 | |
| | 812809913d | |
| | 5879f84d17 | |
| | 7d897a0519 | |
| | 6383c95f8b | |
| | ea4a52984c | |
| | 58a9f979e0 | |
| | 61496d8dad | |
| | 5122b2c13a | |
| | 8bc82c573d | |
| | d41a9f7aa6 | |
| | f72094ca8d | |
| | ad4e5adabc | |
| | 9718c5db07 | |
| | 103300c880 | |
| | 3c52b5c58d | |
| | e889c2eb22 | |
| | f8cac1b822 | |
| | 919c81fe7d | |
| | a88afb7409 | |
| | ea56dcb179 | |
| | d79e07902e | |
| | 681e013d31 | |
| | 0b49a8507e | |
| | ddd63c0de1 | |
| | 309cd95b2c | |
| | c93f9c351b | |
| | 314fae41fe | |
| | ac2ff01d1e | |
| | dd51a72f1f | |
| | 4331834003 | |
| | c571afb8d8 | |
| | a4b07a833c | |
| | 2d8e2dd7f5 | |
.dockerignore
@@ -2,4 +2,3 @@ tests/ef_tests/eth2.0-spec-tests
target/
*.data
*.tar.gz
.git
75 .github/workflows/test-suite.yml (vendored)

@@ -1,9 +1,19 @@
name: test-suite

on: [push]
on:
  push:
    branches:
      - master
      - staging
      - trying
      - 'pr/*'
  pull_request:

env:
  # Deny warnings in CI
  RUSTFLAGS: "-D warnings"

jobs:
  cargo-fmt:
    name: cargo-fmt
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
@@ -12,6 +22,7 @@ jobs:
      - name: Check formatting with cargo fmt
        run: make cargo-fmt
  release-tests-ubuntu:
    name: release-tests-ubuntu
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
@@ -22,7 +33,22 @@ jobs:
        run: sudo npm install -g ganache-cli
      - name: Run tests in release
        run: make test-release
  release-tests-and-install-macos:
    name: release-tests-and-install-macos
    runs-on: macos-latest
    needs: cargo-fmt
    steps:
      - uses: actions/checkout@v1
      - name: Get latest version of stable Rust
        run: rustup update stable
      - name: Install ganache-cli
        run: sudo npm install -g ganache-cli
      - name: Run tests in release
        run: make test-release
      - name: Install Lighthouse
        run: make
  debug-tests-ubuntu:
    name: debug-tests-ubuntu
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
@@ -34,6 +60,7 @@ jobs:
      - name: Run tests in debug
        run: make test-debug
  state-transition-vectors-ubuntu:
    name: state-transition-vectors-ubuntu
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
@@ -43,15 +70,17 @@ jobs:
      - name: Run state_transition_vectors in release.
        run: make run-state-transition-tests
  ef-tests-ubuntu:
    name: ef-tests-ubuntu
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
      - uses: actions/checkout@v1
      - name: Get latest version of stable Rust
        run: rustup update stable
      - name: Run eth2.0-spec-tests with and without fake_crypto
      - name: Run eth2.0-spec-tests with blst, milagro and fake_crypto
        run: make test-ef
  dockerfile-ubuntu:
    name: dockerfile-ubuntu
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
@@ -59,6 +88,7 @@ jobs:
      - name: Build the root Dockerfile
        run: docker build .
  eth1-simulator-ubuntu:
    name: eth1-simulator-ubuntu
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
@@ -68,6 +98,7 @@ jobs:
      - name: Run the beacon chain sim that starts from an eth1 contract
        run: cargo run --release --bin simulator eth1-sim
  no-eth1-simulator-ubuntu:
    name: no-eth1-simulator-ubuntu
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
@@ -77,6 +108,7 @@ jobs:
      - name: Run the beacon chain sim without an eth1 connection
        run: cargo run --release --bin simulator no-eth1-sim
  check-benchmarks:
    name: check-benchmarks
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
@@ -84,6 +116,7 @@ jobs:
      - name: Typecheck benchmark code without running it
        run: make check-benches
  clippy:
    name: clippy
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
@@ -91,9 +124,33 @@ jobs:
      - name: Lint code for quality and style with Clippy
        run: make lint
  arbitrary-check:
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
      - uses: actions/checkout@v1
      - name: Validate state_processing feature arbitrary-fuzz
        run: make arbitrary-fuzz
    name: arbitrary-check
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
      - uses: actions/checkout@v1
      - name: Validate state_processing feature arbitrary-fuzz
        run: make arbitrary-fuzz
  cargo-audit:
    name: cargo-audit
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
      - uses: actions/checkout@v1
      - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database
        run: make audit
  cargo-udeps:
    name: cargo-udeps
    runs-on: ubuntu-latest
    needs: cargo-fmt
    steps:
      - uses: actions/checkout@v1
      - name: Install a nightly compiler with rustfmt, as a kind of quality control
        run: rustup toolchain install --component=rustfmt nightly
      - name: Install cargo-udeps
        run: cargo install cargo-udeps --locked
      - name: Run cargo udeps to identify unused crates in the dependency graph
        run: make udeps
    env:
      # Allow warnings on Nightly
      RUSTFLAGS: ""
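The workflow's `RUSTFLAGS: "-D warnings"` turns every compiler warning into a hard error for the CI jobs, and the final `cargo-udeps` job deliberately resets it to `""` because nightly compilers often introduce new warnings. A minimal sketch of the same policy scoped to a single crate rather than the environment (this snippet is illustrative and not part of the diff):

```rust
// Crate-level equivalent of the CI policy above: any warning in this
// crate fails the build, just like compiling with RUSTFLAGS="-D warnings".
#![deny(warnings)]

fn main() {
    // If this function contained, say, an unused variable, the compiler
    // would emit a hard error here instead of a warning.
    println!("compiled with zero warnings");
}
```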
2506 Cargo.lock (generated)

File diff suppressed because it is too large.
113 Cargo.toml

@@ -1,67 +1,80 @@
[workspace]
members = [
    "eth2/proto_array_fork_choice",
    "eth2/operation_pool",
    "eth2/state_processing",
    "eth2/types",
    "eth2/utils/bls",
    "eth2/utils/clap_utils",
    "eth2/utils/compare_fields",
    "eth2/utils/compare_fields_derive",
    "eth2/utils/deposit_contract",
    "eth2/utils/eth2_config",
    "eth2/utils/eth2_interop_keypairs",
    "eth2/utils/eth2_key_derivation",
    "eth2/utils/eth2_keystore",
    "eth2/utils/eth2_testnet_config",
    "eth2/utils/eth2_wallet",
    "eth2/utils/logging",
    "eth2/utils/eth2_hashing",
    "eth2/utils/hashset_delay",
    "eth2/utils/lighthouse_metrics",
    "eth2/utils/merkle_proof",
    "eth2/utils/int_to_bytes",
    "eth2/utils/safe_arith",
    "eth2/utils/serde_hex",
    "eth2/utils/slot_clock",
    "eth2/utils/rest_types",
    "eth2/utils/ssz",
    "eth2/utils/ssz_derive",
    "eth2/utils/ssz_types",
    "eth2/utils/swap_or_not_shuffle",
    "eth2/utils/cached_tree_hash",
    "eth2/utils/tree_hash",
    "eth2/utils/tree_hash_derive",
    "eth2/utils/test_random_derive",
    "account_manager",

    "beacon_node",
    "beacon_node/beacon_chain",
    "beacon_node/client",
    "beacon_node/eth1",
    "beacon_node/eth2-libp2p",
    "beacon_node/eth2_libp2p",
    "beacon_node/network",
    "beacon_node/rest_api",
    "beacon_node/store",
    "beacon_node/timer",
    "beacon_node/version",
    "beacon_node/websocket_server",
    "tests/simulator",
    "tests/ef_tests",
    "tests/eth1_test_rig",
    "tests/node_test_rig",
    "tests/state_transition_vectors",

    "boot_node",

    "common/account_utils",
    "common/clap_utils",
    "common/compare_fields",
    "common/compare_fields_derive",
    "common/deposit_contract",
    "common/eth2_config",
    "common/eth2_interop_keypairs",
    "common/eth2_testnet_config",
    "common/eth2_wallet_manager",
    "common/hashset_delay",
    "common/lighthouse_metrics",
    "common/lighthouse_version",
    "common/logging",
    "common/remote_beacon_node",
    "common/rest_types",
    "common/slot_clock",
    "common/test_random_derive",
    "common/validator_dir",

    "consensus/cached_tree_hash",
    "consensus/int_to_bytes",
    "consensus/fork_choice",
    "consensus/proto_array",
    "consensus/safe_arith",
    "consensus/ssz",
    "consensus/ssz_derive",
    "consensus/ssz_types",
    "consensus/serde_hex",
    "consensus/state_processing",
    "consensus/swap_or_not_shuffle",
    "consensus/tree_hash",
    "consensus/tree_hash_derive",

    "crypto/bls",
    "crypto/eth2_hashing",
    "crypto/eth2_key_derivation",
    "crypto/eth2_keystore",
    "crypto/eth2_wallet",

    "lcli",
    "validator_client",
    "account_manager",

    "lighthouse",
    "lighthouse/environment"
    "lighthouse/environment",

    "testing/simulator",
    "testing/ef_tests",
    "testing/eth1_test_rig",
    "testing/node_test_rig",
    "testing/state_transition_vectors",

    "validator_client",
    "validator_client/slashing_protection",
]

[patch]
[patch.crates-io]
tree_hash = { path = "eth2/utils/tree_hash" }
tree_hash_derive = { path = "eth2/utils/tree_hash_derive" }
eth2_ssz = { path = "eth2/utils/ssz" }
eth2_ssz_derive = { path = "eth2/utils/ssz_derive" }
eth2_ssz_types = { path = "eth2/utils/ssz_types" }
eth2_hashing = { path = "eth2/utils/eth2_hashing" }
web3 = { git = "https://github.com/tomusdrw/rust-web3" }
tree_hash = { path = "consensus/tree_hash" }
tree_hash_derive = { path = "consensus/tree_hash_derive" }
eth2_ssz = { path = "consensus/ssz" }
eth2_ssz_derive = { path = "consensus/ssz_derive" }
eth2_ssz_types = { path = "consensus/ssz_types" }
eth2_hashing = { path = "crypto/eth2_hashing" }
leveldb-sys = { git = "https://github.com/michaelsproul/leveldb-sys", branch = "v2.0.6-cmake" }
Dockerfile
@@ -1,7 +1,10 @@
FROM rust:1.41.1 AS builder
FROM rust:1.45.1 AS builder
RUN apt-get update && apt-get install -y cmake
COPY . lighthouse
ARG PORTABLE
ENV PORTABLE $PORTABLE
RUN cd lighthouse && make
RUN cd lighthouse && cargo install --path lcli --locked
RUN cd lighthouse && make install-lcli

FROM debian:buster-slim
RUN apt-get update && apt-get install -y --no-install-recommends \
28 Makefile

@@ -1,17 +1,25 @@
.PHONY: tests

EF_TESTS = "tests/ef_tests"
STATE_TRANSITION_VECTORS = "tests/state_transition_vectors"
EF_TESTS = "testing/ef_tests"
STATE_TRANSITION_VECTORS = "testing/state_transition_vectors"

# Builds the Lighthouse binary in release (optimized).
#
# Binaries will most likely be found in `./target/release`
install:
ifeq ($(PORTABLE), true)
	cargo install --path lighthouse --force --locked --features portable
else
	cargo install --path lighthouse --force --locked
endif

# Builds the lcli binary in release (optimized).
install-lcli:
ifeq ($(PORTABLE), true)
	cargo install --path lcli --force --locked --features portable
else
	cargo install --path lcli --force --locked
endif

# Runs the full workspace tests in **release**, without downloading any additional
# test vectors.
@@ -35,6 +43,7 @@ check-benches:
run-ef-tests:
	cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests"
	cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,fake_crypto"
	cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,milagro"

# Runs only the tests/state_transition_vectors tests.
run-state-transition-tests:
@@ -51,9 +60,9 @@ test: test-release
test-full: cargo-fmt test-release test-debug test-ef

# Lints the code for bad style and potentially unsafe arithmetic using Clippy.
# Clippy lints are opt-in per-crate for now, which is why we allow all by default.
# Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
lint:
	cargo clippy --all -- -A clippy::all
	cargo clippy --all -- -D warnings

# Runs the makefile in the `ef_tests` repo.
#
@@ -65,7 +74,16 @@ make-ef-tests:

# Verifies that state_processing feature arbitrary-fuzz will compile
arbitrary-fuzz:
	cargo check --manifest-path=eth2/state_processing/Cargo.toml --features arbitrary-fuzz
	cargo check --manifest-path=consensus/state_processing/Cargo.toml --features arbitrary-fuzz

# Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
audit:
	cargo install --force cargo-audit
	cargo audit

# Runs `cargo udeps` to check for unused dependencies
udeps:
	cargo +nightly udeps --tests --all-targets --release

# Performs a `cargo` clean and cleans the `ef_tests` directory.
clean:
17 README.md

@@ -2,7 +2,7 @@

An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prime.

[![Build Status]][Build Link] [![Book Status]][Book Link] [![RustDoc Status]][RustDoc Link] [![Chat Badge]][Chat Link] [![Swagger Badge]][Swagger Link]
[![Build Status]][Build Link] [![Book Status]][Book Link] [![RustDoc Status]][RustDoc Link] [![Chat Badge]][Chat Link]

[Build Status]: https://github.com/sigp/lighthouse/workflows/test-suite/badge.svg?branch=master
[Build Link]: https://github.com/sigp/lighthouse/actions
@@ -12,8 +12,6 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim
[Book Link]: http://lighthouse-book.sigmaprime.io/
[RustDoc Status]: https://img.shields.io/badge/code--docs-master-orange
[RustDoc Link]: http://lighthouse-docs.sigmaprime.io/
[Swagger Badge]: https://img.shields.io/badge/testnet--explorer-beaconcha.in-informational
[Swagger Link]: https://lighthouse-testnet3.beaconcha.in/

[Documentation](http://lighthouse-book.sigmaprime.io/)

@@ -24,8 +22,7 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim
Lighthouse is:

- Fully open-source, licensed under Apache 2.0.
- Security-focused. Fuzzing has begun and security reviews are planned
  for late-2019.
- Security-focused. Fuzzing has begun and security reviews are underway.
- Built in [Rust](https://www.rust-lang.org/), a modern language providing unique safety guarantees and
  excellent performance (comparable to C++).
- Funded by various organisations, including Sigma Prime, the
@@ -39,7 +36,7 @@ Like all Ethereum 2.0 clients, Lighthouse is a work-in-progress.

Current development overview:

- Specification `v0.11.1` implemented, optimized and passing test vectors.
- Specification `v0.12.1` implemented, optimized and passing test vectors.
- Rust-native libp2p with Gossipsub and Discv5.
- RESTful JSON API via HTTP server.
- Events via WebSocket.
@@ -50,9 +47,11 @@ Current development overview:
- ~~**April 2019**: Inital single-client testnets.~~
- ~~**September 2019**: Inter-operability with other Ethereum 2.0 clients.~~
- ~~**Q1 2020**: `lighthouse-0.1.0` release: All major phase 0 features implemented.~~
- **Q2 2020**: Public, multi-client testnet with user-facing functionality.
- **Q2 2020**: Third-party security review.
- **Q4 2020**: Production Beacon Chain testnet (tentative).
- ~~**Q2 2020**: Public, multi-client testnet with user-facing functionality.~~
- ~~**Q2 2020**: Third-party security review.~~
- **Q3 2020**: Additional third-party security reviews.
- **Q3 2020**: Long-lived, multi-client Beacon Chain testnet
- **Q4 2020**: Production Beacon Chain (tentative).

## Documentation
account_manager/Cargo.toml
@@ -1,31 +1,33 @@
[package]
name = "account_manager"
version = "0.0.1"
version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
edition = "2018"

[dev-dependencies]
tempdir = "0.3.7"

[dependencies]
bls = { path = "../eth2/utils/bls" }
bls = { path = "../crypto/bls" }
clap = "2.33.0"
slog = "2.5.2"
slog-term = "2.5.0"
slog-async = "2.5.0"
types = { path = "../eth2/types" }
types = { path = "../consensus/types" }
state_processing = { path = "../consensus/state_processing" }
dirs = "2.0.2"
environment = { path = "../lighthouse/environment" }
deposit_contract = { path = "../eth2/utils/deposit_contract" }
deposit_contract = { path = "../common/deposit_contract" }
libc = "0.2.65"
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
hex = "0.4.2"
validator_client = { path = "../validator_client" }
rayon = "1.3.0"
eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" }
web3 = "0.10.0"
eth2_testnet_config = { path = "../common/eth2_testnet_config" }
web3 = "0.11.0"
futures = { version = "0.3.5", features = ["compat"] }
clap_utils = { path = "../eth2/utils/clap_utils" }
# reduce feature set
tokio = { version = "0.2.20", features = ["full"] }
clap_utils = { path = "../common/clap_utils" }
eth2_wallet = { path = "../crypto/eth2_wallet" }
eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
rand = "0.7.2"
validator_dir = { path = "../common/validator_dir" }
tokio = { version = "0.2.21", features = ["full"] }
eth2_keystore = { path = "../crypto/eth2_keystore" }
account_utils = { path = "../common/account_utils" }
account_manager/src/cli.rs (deleted)
@@ -1,91 +0,0 @@
use crate::deposits;
use clap::{App, Arg, SubCommand};

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new("account_manager")
        .visible_aliases(&["a", "am", "account", "account_manager"])
        .about("Utilities for generating and managing Ethereum 2.0 accounts.")
        .subcommand(
            SubCommand::with_name("validator")
                .about("Generate or manage Ethereum 2.0 validators.")
                .subcommand(deposits::cli_app())
                .subcommand(
                    SubCommand::with_name("new")
                        .about("Create a new Ethereum 2.0 validator.")
                        .arg(
                            Arg::with_name("deposit-value")
                                .short("v")
                                .long("deposit-value")
                                .value_name("GWEI")
                                .takes_value(true)
                                .default_value("32000000000")
                                .help("The deposit amount in Gwei (not Wei). Default is 32 ETH."),
                        )
                        .arg(
                            Arg::with_name("send-deposits")
                                .long("send-deposits")
                                .help("If present, submit validator deposits to an eth1 endpoint /
                                    defined by the --eth1-endpoint. Requires either the /
                                    --deposit-contract or --testnet-dir flag.")
                        )
                        .arg(
                            Arg::with_name("eth1-endpoint")
                                .short("e")
                                .long("eth1-endpoint")
                                .value_name("HTTP_SERVER")
                                .takes_value(true)
                                .default_value("http://localhost:8545")
                                .help("The URL to the Eth1 JSON-RPC HTTP API (e.g., Geth/Parity-Ethereum)."),
                        )
                        .arg(
                            Arg::with_name("account-index")
                                .short("i")
                                .long("account-index")
                                .value_name("INDEX")
                                .takes_value(true)
                                .default_value("0")
                                .help("The eth1 accounts[] index which will send the transaction"),
                        )
                        .arg(
                            Arg::with_name("password")
                                .short("p")
                                .long("password")
                                .value_name("FILE")
                                .takes_value(true)
                                .help("The password file to unlock the eth1 account (see --index)"),
                        )
                        .subcommand(
                            SubCommand::with_name("insecure")
                                .about("Produce insecure, ephemeral validators. DO NOT USE TO STORE VALUE.")
                                .arg(
                                    Arg::with_name("first")
                                        .index(1)
                                        .value_name("INDEX")
                                        .help("Index of the first validator")
                                        .takes_value(true)
                                        .required(true),
                                )
                                .arg(
                                    Arg::with_name("last")
                                        .index(2)
                                        .value_name("INDEX")
                                        .help("Index of the last validator")
                                        .takes_value(true)
                                        .required(true),
                                ),
                        )
                        .subcommand(
                            SubCommand::with_name("random")
                                .about("Produces public keys using entropy from the Rust 'rand' library.")
                                .arg(
                                    Arg::with_name("validator_count")
                                        .index(1)
                                        .value_name("INTEGER")
                                        .help("The number of new validators to generate.")
                                        .takes_value(true)
                                        .default_value("1"),
                                ),
                        )
                )
        )
}
21 account_manager/src/common.rs (new file)

@@ -0,0 +1,21 @@
use clap::ArgMatches;
use std::fs::create_dir_all;
use std::path::{Path, PathBuf};

pub fn ensure_dir_exists<P: AsRef<Path>>(path: P) -> Result<(), String> {
    let path = path.as_ref();

    if !path.exists() {
        create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?;
    }

    Ok(())
}

pub fn base_wallet_dir(matches: &ArgMatches, arg: &'static str) -> Result<PathBuf, String> {
    clap_utils::parse_path_with_default_in_home_dir(
        matches,
        arg,
        PathBuf::new().join(".lighthouse").join("wallets"),
    )
}
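A small usage sketch of the `ensure_dir_exists` helper added above. The demo path is invented for illustration; the helper itself is copied from the new file:

```rust
use std::fs::create_dir_all;
use std::path::{Path, PathBuf};

// Same pattern as the new common.rs: create the directory only if missing,
// mapping any I/O error into a human-readable String.
fn ensure_dir_exists<P: AsRef<Path>>(path: P) -> Result<(), String> {
    let path = path.as_ref();
    if !path.exists() {
        create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?;
    }
    Ok(())
}

fn main() -> Result<(), String> {
    // Hypothetical directory, standing in for ~/.lighthouse/wallets.
    let wallets: PathBuf = PathBuf::from(".lighthouse-demo").join("wallets");
    ensure_dir_exists(&wallets)?;
    ensure_dir_exists(&wallets)?; // idempotent: a second call is a no-op
    println!("ready: {:?}", wallets);
    Ok(())
}
```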
account_manager/src/deposits.rs (deleted)
@@ -1,241 +0,0 @@
use clap::{App, Arg, ArgMatches};
use clap_utils;
use environment::Environment;
use futures::compat::Future01CompatExt;
use slog::{info, Logger};
use std::fs;
use std::path::PathBuf;
use tokio::time::{delay_until, Duration, Instant};
use types::EthSpec;
use validator_client::validator_directory::ValidatorDirectoryBuilder;
use web3::{
    transports::Ipc,
    types::{Address, SyncInfo, SyncState},
    Transport, Web3,
};

const SYNCING_STATE_RETRY_DELAY: Duration = Duration::from_secs(2);

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new("deposited")
        .about("Creates new Lighthouse validator keys and directories. Each newly-created validator
            will have a deposit transaction formed and submitted to the deposit contract via
            --eth1-ipc. This application will only write each validator keys to disk if the deposit
            transaction returns successfully from the eth1 node. The process exits immediately if any
            Eth1 tx fails. Does not wait for Eth1 confirmation blocks, so there is no guarantee that a
            deposit will be accepted in the Eth1 chain. Before key generation starts, this application
            will wait until the eth1 indicates that it is not syncing via the eth_syncing endpoint")
        .arg(
            Arg::with_name("validator-dir")
                .long("validator-dir")
                .value_name("VALIDATOR_DIRECTORY")
                .help("The path where the validator directories will be created. Defaults to ~/.lighthouse/validators")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("eth1-ipc")
                .long("eth1-ipc")
                .value_name("ETH1_IPC_PATH")
                .help("Path to an Eth1 JSON-RPC IPC endpoint")
                .takes_value(true)
                .required(true)
        )
        .arg(
            Arg::with_name("from-address")
                .long("from-address")
                .value_name("FROM_ETH1_ADDRESS")
                .help("The address that will submit the eth1 deposit. Must be unlocked on the node
                    at --eth1-ipc.")
                .takes_value(true)
                .required(true)
        )
        .arg(
            Arg::with_name("deposit-gwei")
                .long("deposit-gwei")
                .value_name("DEPOSIT_GWEI")
                .help("The GWEI value of the deposit amount. Defaults to the minimum amount
                    required for an active validator (MAX_EFFECTIVE_BALANCE.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("count")
                .long("count")
                .value_name("DEPOSIT_COUNT")
                .help("The number of deposits to create, regardless of how many already exist")
                .conflicts_with("limit")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("at-most")
                .long("at-most")
                .value_name("VALIDATOR_COUNT")
                .help("Observe the number of validators in --validator-dir, only creating enough to
                    ensure reach the given count. Never deletes an existing validator.")
                .conflicts_with("count")
                .takes_value(true),
        )
}

pub fn cli_run<T: EthSpec>(
    matches: &ArgMatches<'_>,
    mut env: Environment<T>,
) -> Result<(), String> {
    let spec = env.core_context().eth2_config.spec;
    let log = env.core_context().log;

    let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
        matches,
        "validator_dir",
        PathBuf::new().join(".lighthouse").join("validators"),
    )?;
    let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?;
    let from_address: Address = clap_utils::parse_required(matches, "from-address")?;
    let deposit_gwei = clap_utils::parse_optional(matches, "deposit-gwei")?
        .unwrap_or_else(|| spec.max_effective_balance);
    let count: Option<usize> = clap_utils::parse_optional(matches, "count")?;
    let at_most: Option<usize> = clap_utils::parse_optional(matches, "at-most")?;

    let starting_validator_count = existing_validator_count(&validator_dir)?;

    let n = match (count, at_most) {
        (Some(_), Some(_)) => Err("Cannot supply --count and --at-most".to_string()),
        (None, None) => Err("Must supply either --count or --at-most".to_string()),
        (Some(count), None) => Ok(count),
        (None, Some(at_most)) => Ok(at_most.saturating_sub(starting_validator_count)),
    }?;

    if n == 0 {
        info!(
            log,
            "No need to produce and validators, exiting";
            "--count" => count,
            "--at-most" => at_most,
            "existing_validators" => starting_validator_count,
        );
        return Ok(());
    }

    let deposit_contract = env
        .testnet
        .as_ref()
        .ok_or_else(|| "Unable to run account manager without a testnet dir".to_string())?
        .deposit_contract_address()
        .map_err(|e| format!("Unable to parse deposit contract address: {}", e))?;

    if deposit_contract == Address::zero() {
        return Err("Refusing to deposit to the zero address. Check testnet configuration.".into());
    }

    let (_event_loop_handle, transport) =
        Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?;
    let web3 = Web3::new(transport);

    env.runtime()
        .block_on(poll_until_synced(web3.clone(), log.clone()))?;

    for i in 0..n {
        let tx_hash_log = log.clone();

        env.runtime()
            .block_on(async {
                ValidatorDirectoryBuilder::default()
                    .spec(spec.clone())
                    .custom_deposit_amount(deposit_gwei)
                    .thread_random_keypairs()
                    .submit_eth1_deposit(web3.clone(), from_address, deposit_contract)
                    .await
                    .map(move |(builder, tx_hash)| {
                        info!(
                            tx_hash_log,
                            "Validator deposited";
                            "eth1_tx_hash" => format!("{:?}", tx_hash),
                            "index" => format!("{}/{}", i + 1, n),
                        );
                        builder
                    })
            })?
            .create_directory(validator_dir.clone())?
            .write_keypair_files()?
            .write_eth1_data_file()?
            .build()?;
    }

    let ending_validator_count = existing_validator_count(&validator_dir)?;
    let delta = ending_validator_count.saturating_sub(starting_validator_count);

    info!(
        log,
        "Success";
        "validators_created_and_deposited" => delta,
    );

    Ok(())
}

/// Returns the number of validators that exist in the given `validator_dir`.
///
/// This function just assumes any file is a validator directory, making it likely to return a
/// higher number than accurate but never a lower one.
fn existing_validator_count(validator_dir: &PathBuf) -> Result<usize, String> {
    fs::read_dir(&validator_dir)
        .map(|iter| iter.count())
        .map_err(|e| format!("Unable to read {:?}: {}", validator_dir, e))
}

/// Run a poll on the `eth_syncing` endpoint, blocking until the node is synced.
async fn poll_until_synced<T>(web3: Web3<T>, log: Logger) -> Result<(), String>
where
    T: Transport + Send + 'static,
    <T as Transport>::Out: Send,
{
    loop {
        let sync_state = web3
            .clone()
            .eth()
            .syncing()
            .compat()
            .await
            .map_err(|e| format!("Unable to read syncing state from eth1 node: {:?}", e))?;
        match sync_state {
            SyncState::Syncing(SyncInfo {
                current_block,
                highest_block,
                ..
            }) => {
                info!(
                    log,
                    "Waiting for eth1 node to sync";
                    "est_highest_block" => format!("{}", highest_block),
                    "current_block" => format!("{}", current_block),
                );

                delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await;
            }
            SyncState::NotSyncing => {
                let block_number = web3
                    .clone()
                    .eth()
                    .block_number()
                    .compat()
                    .await
                    .map_err(|e| format!("Unable to read block number from eth1 node: {:?}", e))?;
                if block_number > 0.into() {
                    info!(
                        log,
                        "Eth1 node is synced";
                        "head_block" => format!("{}", block_number),
                    );
                    break;
                } else {
                    delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await;
                    info!(
                        log,
                        "Waiting for eth1 node to sync";
                        "current_block" => 0,
                    );
                }
            }
        }
    }
    Ok(())
}
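The deleted module's `poll_until_synced` loops on the eth1 node's `eth_syncing` endpoint, sleeping two seconds between probes until the node reports that it is synced. A minimal synchronous sketch of that polling pattern, with the web3 call stubbed out by a local `probe` function (everything here is illustrative, not the real async code above):

```rust
use std::thread::sleep;
use std::time::Duration;

const SYNCING_STATE_RETRY_DELAY: Duration = Duration::from_secs(2);

enum SyncState {
    Syncing { current_block: u64, highest_block: u64 },
    NotSyncing,
}

// Stand-in for `web3.eth().syncing()`: report "syncing" twice, then "synced".
fn probe(attempt: u32) -> SyncState {
    if attempt < 2 {
        SyncState::Syncing { current_block: 100 + u64::from(attempt), highest_block: 102 }
    } else {
        SyncState::NotSyncing
    }
}

fn poll_until_synced() {
    let mut attempt = 0;
    loop {
        match probe(attempt) {
            SyncState::Syncing { current_block, highest_block } => {
                println!("Waiting for eth1 node to sync: {}/{}", current_block, highest_block);
                sleep(SYNCING_STATE_RETRY_DELAY); // same retry delay as the real code
                attempt += 1;
            }
            SyncState::NotSyncing => {
                println!("Eth1 node is synced");
                break;
            }
        }
    }
}

fn main() {
    poll_until_synced();
}
```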
@@ -1,457 +1,37 @@
mod cli;
mod deposits;
mod common;
pub mod validator;
pub mod wallet;

use clap::App;
use clap::ArgMatches;
use deposit_contract::DEPOSIT_GAS;
use environment::{Environment, RuntimeContext};
use eth2_testnet_config::Eth2TestnetConfig;
use futures::compat::Future01CompatExt;
use futures::{FutureExt, StreamExt};
use rayon::prelude::*;
use slog::{error, info, Logger};
use std::fs;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
use types::{ChainSpec, EthSpec};
use validator_client::validator_directory::{ValidatorDirectory, ValidatorDirectoryBuilder};
use web3::{
    transports::Http,
    types::{Address, TransactionRequest, U256},
    Web3,
};
use environment::Environment;
use types::EthSpec;

pub use cli::cli_app;
pub const CMD: &str = "account_manager";
pub const SECRETS_DIR_FLAG: &str = "secrets-dir";
pub const VALIDATOR_DIR_FLAG: &str = "validator-dir";
pub const BASE_DIR_FLAG: &str = "base-dir";

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new(CMD)
        .visible_aliases(&["a", "am", "account", CMD])
        .about("Utilities for generating and managing Ethereum 2.0 accounts.")
        .subcommand(wallet::cli_app())
        .subcommand(validator::cli_app())
}

/// Run the account manager, returning an error if the operation did not succeed.
pub fn run<T: EthSpec>(matches: &ArgMatches<'_>, mut env: Environment<T>) -> Result<(), String> {
    let context = env.core_context();
    let log = context.log.clone();

    // If the `datadir` was not provided, default to the home directory. If the home directory is
    // not known, use the current directory.
    let datadir = matches
        .value_of("datadir")
        .map(PathBuf::from)
        .unwrap_or_else(|| {
            dirs::home_dir()
                .unwrap_or_else(|| PathBuf::from("."))
                .join(".lighthouse")
                .join("validators")
        });

    fs::create_dir_all(&datadir).map_err(|e| format!("Failed to create datadir: {}", e))?;

    info!(
        log,
        "Located data directory";
        "path" => format!("{:?}", datadir)
    );

pub fn run<T: EthSpec>(matches: &ArgMatches<'_>, env: Environment<T>) -> Result<(), String> {
    match matches.subcommand() {
        ("validator", Some(matches)) => match matches.subcommand() {
            ("deposited", Some(matches)) => deposits::cli_run(matches, env)?,
            ("new", Some(matches)) => run_new_validator_subcommand(matches, datadir, env)?,
            _ => {
                return Err("Invalid 'validator new' command. See --help.".to_string());
            }
        },
        _ => {
            return Err("Invalid 'validator' command. See --help.".to_string());
        (wallet::CMD, Some(matches)) => wallet::cli_run(matches)?,
        (validator::CMD, Some(matches)) => validator::cli_run(matches, env)?,
        (unknown, _) => {
            return Err(format!(
                "{} is not a valid {} command. See --help.",
                unknown, CMD
            ));
        }
    }

    Ok(())
}

/// Describes the crypto key generation methods for a validator.
enum KeygenMethod {
    /// Produce an insecure "deterministic" keypair. Used only for interop and testing.
    Insecure(usize),
    /// Generate a new key from the `rand` thread random RNG.
    ThreadRandom,
}

/// Process the subcommand for creating new validators.
fn run_new_validator_subcommand<T: EthSpec>(
    matches: &ArgMatches,
    datadir: PathBuf,
    mut env: Environment<T>,
) -> Result<(), String> {
    let mut context = env.core_context();
    let log = context.log.clone();

    // Load the testnet configuration from disk, or use the default testnet.
    let eth2_testnet_config: Eth2TestnetConfig<T> =
        if let Some(testnet_dir_str) = matches.value_of("testnet-dir") {
            let testnet_dir = testnet_dir_str
                .parse::<PathBuf>()
                .map_err(|e| format!("Unable to parse testnet-dir: {}", e))?;

            if !testnet_dir.exists() {
                return Err(format!(
                    "Testnet directory at {:?} does not exist",
                    testnet_dir
                ));
            }

            info!(
                log,
                "Loading deposit contract address";
                "testnet_dir" => format!("{:?}", &testnet_dir)
            );

            Eth2TestnetConfig::load(testnet_dir.clone())
                .map_err(|e| format!("Failed to load testnet dir at {:?}: {}", testnet_dir, e))?
        } else {
            info!(
                log,
                "Using Lighthouse testnet deposit contract";
            );

            Eth2TestnetConfig::hard_coded()
                .map_err(|e| format!("Failed to load hard_coded testnet dir: {}", e))?
        };

    context.eth2_config.spec = eth2_testnet_config
        .yaml_config
        .as_ref()
        .ok_or_else(|| "The testnet directory must contain a spec config".to_string())?
        .apply_to_chain_spec::<T>(&context.eth2_config.spec)
        .ok_or_else(|| {
            format!(
                "The loaded config is not compatible with the {} spec",
                &context.eth2_config.spec_constants
            )
        })?;

    let methods: Vec<KeygenMethod> = match matches.subcommand() {
        ("insecure", Some(matches)) => {
            let first = matches
                .value_of("first")
                .ok_or_else(|| "No first index".to_string())?
                .parse::<usize>()
                .map_err(|e| format!("Unable to parse first index: {}", e))?;
            let last = matches
                .value_of("last")
                .ok_or_else(|| "No last index".to_string())?
                .parse::<usize>()
                .map_err(|e| format!("Unable to parse first index: {}", e))?;

            (first..last).map(KeygenMethod::Insecure).collect()
        }
        ("random", Some(matches)) => {
            let count = matches
                .value_of("validator_count")
                .ok_or_else(|| "No validator count".to_string())?
                .parse::<usize>()
                .map_err(|e| format!("Unable to parse validator count: {}", e))?;

            (0..count).map(|_| KeygenMethod::ThreadRandom).collect()
        }
        _ => {
            return Err("Invalid 'validator' command. See --help.".to_string());
        }
    };

    let deposit_value = matches
        .value_of("deposit-value")
        .ok_or_else(|| "No deposit-value".to_string())?
        .parse::<u64>()
        .map_err(|e| format!("Unable to parse deposit-value: {}", e))?;

    let validators = make_validators(
        datadir.clone(),
        &methods,
        deposit_value,
        &context.eth2_config.spec,
        &log,
    )?;

    if matches.is_present("send-deposits") {
        let eth1_endpoint = matches
            .value_of("eth1-endpoint")
            .ok_or_else(|| "No eth1-endpoint".to_string())?;
        let account_index = matches
            .value_of("account-index")
            .ok_or_else(|| "No account-index".to_string())?
            .parse::<usize>()
            .map_err(|e| format!("Unable to parse account-index: {}", e))?;

        // If supplied, load the eth1 account password from file.
        let password = if let Some(password_path) = matches.value_of("password") {
            Some(
                File::open(password_path)
                    .map_err(|e| format!("Unable to open password file: {:?}", e))
                    .and_then(|mut file| {
                        let mut password = String::new();
                        file.read_to_string(&mut password)
                            .map_err(|e| format!("Unable to read password file to string: {:?}", e))
                            .map(|_| password)
                    })
                    .map(|password| {
                        // Trim the line feed from the end of the password file, if present.
                        if password.ends_with('\n') {
                            password[0..password.len() - 1].to_string()
                        } else {
                            password
                        }
                    })?,
            )
        } else {
            None
        };

        info!(
            log,
            "Submitting validator deposits";
            "eth1_node_http_endpoint" => eth1_endpoint
        );

        // Convert from `types::Address` to `web3::types::Address`.
        let deposit_contract = Address::from_slice(
            eth2_testnet_config
                .deposit_contract_address()?
                .as_fixed_bytes(),
        );

        if let Err(()) = env.runtime().block_on(deposit_validators(
            context.clone(),
            eth1_endpoint.to_string(),
            deposit_contract,
            validators.clone(),
            account_index,
            deposit_value,
            password,
        )) {
            error!(
                log,
                "Created validators but could not submit deposits";
            )
        } else {
            info!(
                log,
                "Validator deposits complete";
            );
        }
    }

    info!(
        log,
        "Generated validator directories";
        "base_path" => format!("{:?}", datadir),
        "count" => validators.len(),
    );

    Ok(())
}

/// Produces a validator directory for each of the key generation methods provided in `methods`.
fn make_validators(
    datadir: PathBuf,
    methods: &[KeygenMethod],
    deposit_value: u64,
    spec: &ChainSpec,
    log: &Logger,
) -> Result<Vec<ValidatorDirectory>, String> {
    methods
        .par_iter()
        .map(|method| {
            let mut builder = ValidatorDirectoryBuilder::default()
                .spec(spec.clone())
                .custom_deposit_amount(deposit_value);

            builder = match method {
                KeygenMethod::Insecure(index) => builder.insecure_keypairs(*index),
                KeygenMethod::ThreadRandom => builder.thread_random_keypairs(),
            };

            let validator = builder
                .create_directory(datadir.clone())?
                .write_keypair_files()?
                .write_eth1_data_file()?
                .build()?;

            let pubkey = &validator
                .voting_keypair
                .as_ref()
                .ok_or_else(|| "Generated validator must have voting keypair".to_string())?
                .pk;

            info!(
                log,
                "Saved new validator to disk";
                "voting_pubkey" => format!("{:?}", pubkey)
            );

            Ok(validator)
        })
        .collect()
}

/// For each `ValidatorDirectory`, submit a deposit transaction to the `eth1_endpoint`.
///
/// Returns success as soon as the eth1 endpoint accepts the transaction (i.e., does not wait for
/// transaction success/revert).
async fn deposit_validators<E: EthSpec>(
    context: RuntimeContext<E>,
    eth1_endpoint: String,
    deposit_contract: Address,
    validators: Vec<ValidatorDirectory>,
    account_index: usize,
    deposit_value: u64,
    password: Option<String>,
) -> Result<(), ()> {
    let log_1 = context.log.clone();
    let log_2 = context.log.clone();

    let (event_loop, transport) = Http::new(&eth1_endpoint).map_err(move |e| {
        error!(
            log_1,
            "Failed to start web3 HTTP transport";
            "error" => format!("{:?}", e)
        )
    })?;
    /*
     * Loop through the validator directories and submit the deposits.
     */
    let web3 = Web3::new(transport);

    futures::stream::iter(validators)
        .for_each(|validator| async {
            let web3 = web3.clone();
            let log = log_2.clone();
            let password = password.clone();

            let _ = deposit_validator(
                web3,
                deposit_contract,
                validator,
                deposit_value,
                account_index,
                password,
                log,
            )
            .await;
        })
        .map(|_| event_loop)
        // // Web3 gives errors if the event loop is dropped whilst performing requests.
        .map(drop)
        .await;

    Ok(())
}

/// For the given `ValidatorDirectory`, submit a deposit transaction to the `web3` node.
///
/// Returns success as soon as the eth1 endpoint accepts the transaction (i.e., does not wait for
/// transaction success/revert).
async fn deposit_validator(
    web3: Web3<Http>,
    deposit_contract: Address,
    validator: ValidatorDirectory,
    deposit_amount: u64,
    account_index: usize,
    password_opt: Option<String>,
    log: Logger,
) -> Result<(), ()> {
    let voting_keypair = validator
        .voting_keypair
        .clone()
        .ok_or_else(|| error!(log, "Validator does not have voting keypair"))?;

    let deposit_data = validator
        .deposit_data
        .clone()
        .ok_or_else(|| error!(log, "Validator does not have deposit data"))?;

    let pubkey_1 = voting_keypair.pk.clone();
    let pubkey_2 = voting_keypair.pk;

    let log_1 = log.clone();
    let log_2 = log.clone();

    // TODO: creating a future to extract the Error type
    // check if there's a better way
    let future = async move {
        let accounts = web3
            .eth()
            .accounts()
            .compat()
            .await
            .map_err(|e| format!("Failed to get accounts: {:?}", e))?;

        let from_address = accounts
            .get(account_index)
            .cloned()
            .ok_or_else(|| "Insufficient accounts for deposit".to_string())?;

        /*
         * If a password was supplied, unlock the account.
         */
        let from = if let Some(password) = password_opt {
            // Unlock for only a single transaction.
            let duration = None;

            let result = web3
                .personal()
                .unlock_account(from_address, &password, duration)
                .compat()
                .await;
            match result {
                Ok(true) => from_address,
                Ok(false) => {
                    return Err::<(), String>(
                        "Eth1 node refused to unlock account. Check password.".to_string(),
                    )
                }
                Err(e) => return Err::<(), String>(format!("Eth1 unlock request failed: {:?}", e)),
            }
        } else {
            from_address
        };

        /*
         * Submit the deposit transaction.
         */
        let tx_request = TransactionRequest {
            from,
            to: Some(deposit_contract),
            gas: Some(U256::from(DEPOSIT_GAS)),
            gas_price: None,
            value: Some(from_gwei(deposit_amount)),
            data: Some(deposit_data.into()),
            nonce: None,
            condition: None,
        };

        let tx = web3
            .eth()
            .send_transaction(tx_request)
            .compat()
            .await
            .map_err(|e| format!("Failed to call deposit fn: {:?}", e))?;
        info!(
            log_1,
            "Validator deposit successful";
            "eth1_tx_hash" => format!("{:?}", tx),
            "validator_voting_pubkey" => format!("{:?}", pubkey_1)
        );
        Ok(())
    };

    future.await.map_err(move |e| {
        error!(
            log_2,
            "Validator deposit_failed";
            "error" => e,
            "validator_voting_pubkey" => format!("{:?}", pubkey_2)
        );
    })?;
    Ok(())
}

/// Converts gwei to wei.
fn from_gwei(gwei: u64) -> U256 {
    U256::from(gwei) * U256::exp10(9)
}
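The `from_gwei` helper at the end of the removed code converts gwei to wei by multiplying by 10^9. A worked example of the same arithmetic, using `u128` in place of web3's `U256` so it stands alone:

```rust
// 1 gwei = 10^9 wei, so a 32 ETH deposit (32_000_000_000 gwei, the
// MAX_EFFECTIVE_BALANCE) becomes 32 * 10^18 wei. u128 comfortably holds this.
fn from_gwei(gwei: u64) -> u128 {
    u128::from(gwei) * 10u128.pow(9)
}

fn main() {
    let deposit_gwei: u64 = 32_000_000_000;
    assert_eq!(from_gwei(deposit_gwei), 32_000_000_000_000_000_000);
    println!("{} gwei = {} wei", deposit_gwei, from_gwei(deposit_gwei));
}
```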
207
account_manager/src/validator/create.rs
Normal file
207
account_manager/src/validator/create.rs
Normal file
@@ -0,0 +1,207 @@
|
||||
use crate::{common::ensure_dir_exists, SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG};
|
||||
use account_utils::{random_password, strip_off_newlines, validator_definitions};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use environment::Environment;
|
||||
use eth2_wallet::PlainText;
|
||||
use eth2_wallet_manager::WalletManager;
|
||||
use std::ffi::OsStr;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use types::EthSpec;
|
||||
use validator_dir::Builder as ValidatorDirBuilder;
|
||||
|
||||
pub const CMD: &str = "create";
|
||||
pub const BASE_DIR_FLAG: &str = "base-dir";
|
||||
pub const WALLET_NAME_FLAG: &str = "wallet-name";
|
||||
pub const WALLET_PASSPHRASE_FLAG: &str = "wallet-passphrase";
|
||||
pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei";
|
||||
pub const STORE_WITHDRAW_FLAG: &str = "store-withdrawal-keystore";
|
||||
pub const COUNT_FLAG: &str = "count";
|
||||
pub const AT_MOST_FLAG: &str = "at-most";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about(
|
||||
"Creates new validators from an existing EIP-2386 wallet using the EIP-2333 HD key \
|
||||
derivation scheme.",
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(WALLET_NAME_FLAG)
|
||||
.long(WALLET_NAME_FLAG)
|
||||
.value_name("WALLET_NAME")
|
||||
.help("Use the wallet identified by this name")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(WALLET_PASSPHRASE_FLAG)
|
||||
.long(WALLET_PASSPHRASE_FLAG)
|
||||
.value_name("WALLET_PASSWORD_PATH")
|
||||
.help("A path to a file containing the password which will unlock the wallet.")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path where the validator directories will be created. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SECRETS_DIR_FLAG)
|
||||
.long(SECRETS_DIR_FLAG)
|
||||
.value_name("SECRETS_DIR")
|
||||
.help(
|
||||
"The path where the validator keystore passwords will be stored. \
|
||||
Defaults to ~/.lighthouse/secrets",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(DEPOSIT_GWEI_FLAG)
|
||||
.long(DEPOSIT_GWEI_FLAG)
|
||||
.value_name("DEPOSIT_GWEI")
|
||||
.help(
|
||||
"The GWEI value of the deposit amount. Defaults to the minimum amount \
|
||||
required for an active validator (MAX_EFFECTIVE_BALANCE)",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STORE_WITHDRAW_FLAG)
|
||||
.long(STORE_WITHDRAW_FLAG)
|
||||
.help(
|
||||
"If present, the withdrawal keystore will be stored alongside the voting \
|
||||
keypair. It is generally recommended to *not* store the withdrawal key and \
|
||||
instead generate them from the wallet seed when required.",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(COUNT_FLAG)
|
||||
.long(COUNT_FLAG)
|
||||
.value_name("VALIDATOR_COUNT")
|
||||
.help("The number of validators to create, regardless of how many already exist")
|
||||
.conflicts_with("at-most")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(AT_MOST_FLAG)
|
||||
.long(AT_MOST_FLAG)
|
||||
.value_name("AT_MOST_VALIDATORS")
|
||||
.help(
|
||||
"Observe the number of validators in --validator-dir, only creating enough to \
|
||||
reach the given count. Never deletes an existing validator.",
|
||||
)
|
||||
.conflicts_with("count")
|
||||
.takes_value(true),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run<T: EthSpec>(
|
||||
matches: &ArgMatches,
|
||||
mut env: Environment<T>,
|
||||
wallet_base_dir: PathBuf,
|
||||
) -> Result<(), String> {
|
||||
let spec = env.core_context().eth2_config.spec;
|
||||
|
||||
let name: String = clap_utils::parse_required(matches, WALLET_NAME_FLAG)?;
|
||||
let wallet_password_path: PathBuf =
|
||||
        clap_utils::parse_required(matches, WALLET_PASSPHRASE_FLAG)?;
    let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
        matches,
        VALIDATOR_DIR_FLAG,
        PathBuf::new().join(".lighthouse").join("validators"),
    )?;
    let secrets_dir = clap_utils::parse_path_with_default_in_home_dir(
        matches,
        SECRETS_DIR_FLAG,
        PathBuf::new().join(".lighthouse").join("secrets"),
    )?;
    let deposit_gwei = clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)?
        .unwrap_or_else(|| spec.max_effective_balance);
    let count: Option<usize> = clap_utils::parse_optional(matches, COUNT_FLAG)?;
    let at_most: Option<usize> = clap_utils::parse_optional(matches, AT_MOST_FLAG)?;

    ensure_dir_exists(&validator_dir)?;
    ensure_dir_exists(&secrets_dir)?;

    let starting_validator_count = existing_validator_count(&validator_dir)?;

    let n = match (count, at_most) {
        (Some(_), Some(_)) => Err(format!(
            "Cannot supply --{} and --{}",
            COUNT_FLAG, AT_MOST_FLAG
        )),
        (None, None) => Err(format!(
            "Must supply either --{} or --{}",
            COUNT_FLAG, AT_MOST_FLAG
        )),
        (Some(count), None) => Ok(count),
        (None, Some(at_most)) => Ok(at_most.saturating_sub(starting_validator_count)),
    }?;

    if n == 0 {
        eprintln!(
            "No validators to create. {}={:?}, {}={:?}",
            COUNT_FLAG, count, AT_MOST_FLAG, at_most
        );
        return Ok(());
    }

    let wallet_password = fs::read(&wallet_password_path)
        .map_err(|e| format!("Unable to read {:?}: {:?}", wallet_password_path, e))
        .map(|bytes| PlainText::from(strip_off_newlines(bytes)))?;

    let mgr = WalletManager::open(&wallet_base_dir)
        .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;

    let mut wallet = mgr
        .wallet_by_name(&name)
        .map_err(|e| format!("Unable to open wallet: {:?}", e))?;

    for i in 0..n {
        let voting_password = random_password();
        let withdrawal_password = random_password();

        let keystores = wallet
            .next_validator(
                wallet_password.as_bytes(),
                voting_password.as_bytes(),
                withdrawal_password.as_bytes(),
            )
            .map_err(|e| format!("Unable to create validator keys: {:?}", e))?;

        let voting_pubkey = keystores.voting.pubkey().to_string();

        ValidatorDirBuilder::new(validator_dir.clone(), secrets_dir.clone())
            .voting_keystore(keystores.voting, voting_password.as_bytes())
            .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
            .create_eth1_tx_data(deposit_gwei, &spec)
            .store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG))
            .build()
            .map_err(|e| format!("Unable to build validator directory: {:?}", e))?;

        println!("{}/{}\t0x{}", i + 1, n, voting_pubkey);
    }

    Ok(())
}

/// Returns the number of validators that exist in the given `validator_dir`.
///
/// This function simply assumes that all files and directories, excluding the validator
/// definitions YAML, are validator directories, so it may over-count but will never
/// under-count.
fn existing_validator_count<P: AsRef<Path>>(validator_dir: P) -> Result<usize, String> {
    fs::read_dir(validator_dir.as_ref())
        .map(|iter| {
            iter.filter_map(|e| e.ok())
                .filter(|e| e.file_name() != OsStr::new(validator_definitions::CONFIG_FILENAME))
                .count()
        })
        .map_err(|e| format!("Unable to read {:?}: {}", validator_dir.as_ref(), e))
}
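To make the `--count` / `--at-most` resolution above concrete, here is a minimal, self-contained sketch; the `validators_to_create` helper and its flag-free signature are illustrative, not part of the diff:

fn validators_to_create(
    count: Option<usize>,
    at_most: Option<usize>,
    existing: usize,
) -> Result<usize, String> {
    match (count, at_most) {
        // The two flags are mutually exclusive; exactly one must be given.
        (Some(_), Some(_)) => Err("cannot supply both flags".to_string()),
        (None, None) => Err("must supply one flag".to_string()),
        (Some(count), None) => Ok(count),
        // `saturating_sub` means an `--at-most` target that is already met
        // yields zero new validators rather than underflowing.
        (None, Some(at_most)) => Ok(at_most.saturating_sub(existing)),
    }
}

fn main() {
    assert_eq!(validators_to_create(Some(4), None, 10), Ok(4));
    assert_eq!(validators_to_create(None, Some(8), 10), Ok(0)); // target already met
    assert_eq!(validators_to_create(None, Some(8), 5), Ok(3));
}

The zero case is why `cli_run` above can return early with "No validators to create." instead of treating it as an error.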
405  account_manager/src/validator/deposit.rs  Normal file
@@ -0,0 +1,405 @@
use crate::VALIDATOR_DIR_FLAG;
use clap::{App, Arg, ArgMatches};
use deposit_contract::DEPOSIT_GAS;
use environment::Environment;
use futures::{
    compat::Future01CompatExt,
    stream::{FuturesUnordered, StreamExt},
};
use slog::{info, Logger};
use state_processing::per_block_processing::verify_deposit_signature;
use std::path::PathBuf;
use tokio::time::{delay_until, Duration, Instant};
use types::EthSpec;
use validator_dir::{Eth1DepositData, Manager as ValidatorManager, ValidatorDir};
use web3::{
    transports::Http,
    transports::Ipc,
    types::{Address, SyncInfo, SyncState, TransactionRequest, U256},
    Transport, Web3,
};

pub const CMD: &str = "deposit";
pub const VALIDATOR_FLAG: &str = "validator";
pub const ETH1_IPC_FLAG: &str = "eth1-ipc";
pub const ETH1_HTTP_FLAG: &str = "eth1-http";
pub const FROM_ADDRESS_FLAG: &str = "from-address";
pub const CONFIRMATION_COUNT_FLAG: &str = "confirmation-count";
pub const CONFIRMATION_BATCH_SIZE_FLAG: &str = "confirmation-batch-size";

const GWEI: u64 = 1_000_000_000;

const SYNCING_STATE_RETRY_DELAY: Duration = Duration::from_secs(2);

const CONFIRMATIONS_POLL_TIME: Duration = Duration::from_secs(2);

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new(CMD)
        .about(
            "Submits a deposit to an Eth1 validator registration contract via an IPC or HTTP \
             endpoint of an Eth1 client (e.g., Geth, OpenEthereum, etc.). The validators must \
             already have been created and exist on the file-system. The process will exit \
             immediately with an error if any error occurs. After each deposit is submitted to \
             the Eth1 node, a file will be saved in the validator directory with the \
             transaction hash. If confirmations are set to non-zero then the application will \
             wait for confirmations before saving the transaction hash and moving onto the \
             next batch of deposits. The deposit contract address will be determined by the \
             --testnet-dir flag on the primary Lighthouse binary.",
        )
        .arg(
            Arg::with_name(VALIDATOR_DIR_FLAG)
                .long(VALIDATOR_DIR_FLAG)
                .value_name("VALIDATOR_DIRECTORY")
                .help(
                    "The path to the validator client data directory. \
                     Defaults to ~/.lighthouse/validators",
                )
                .takes_value(true),
        )
        .arg(
            Arg::with_name(VALIDATOR_FLAG)
                .long(VALIDATOR_FLAG)
                .value_name("VALIDATOR_NAME")
                .help(
                    "The name of the directory in --data-dir for which to deposit. \
                     Set to 'all' to deposit all validators in the --data-dir.",
                )
                .takes_value(true)
                .required(true),
        )
        .arg(
            Arg::with_name(ETH1_IPC_FLAG)
                .long(ETH1_IPC_FLAG)
                .value_name("ETH1_IPC_PATH")
                .help("Path to an Eth1 JSON-RPC IPC endpoint")
                .takes_value(true)
                .required(false),
        )
        .arg(
            Arg::with_name(ETH1_HTTP_FLAG)
                .long(ETH1_HTTP_FLAG)
                .value_name("ETH1_HTTP_URL")
                .help("URL to an Eth1 JSON-RPC endpoint")
                .takes_value(true)
                .required(false),
        )
        .arg(
            Arg::with_name(FROM_ADDRESS_FLAG)
                .long(FROM_ADDRESS_FLAG)
                .value_name("FROM_ETH1_ADDRESS")
                .help(
                    "The address that will submit the eth1 deposit. \
                     Must be unlocked on the node at --eth1-ipc or --eth1-http.",
                )
                .takes_value(true)
                .required(true),
        )
        .arg(
            Arg::with_name(CONFIRMATION_COUNT_FLAG)
                .long(CONFIRMATION_COUNT_FLAG)
                .value_name("CONFIRMATION_COUNT")
                .help(
                    "The number of Eth1 block confirmations required \
                     before a transaction is considered complete. Set to \
                     0 for no confirmations.",
                )
                .takes_value(true)
                .default_value("1"),
        )
        .arg(
            Arg::with_name(CONFIRMATION_BATCH_SIZE_FLAG)
                .long(CONFIRMATION_BATCH_SIZE_FLAG)
                .value_name("BATCH_SIZE")
                .help(
                    "Perform BATCH_SIZE deposits and wait for confirmations \
                     in parallel. Useful for achieving faster bulk deposits.",
                )
                .takes_value(true)
                .default_value("10"),
        )
}

#[allow(clippy::too_many_arguments)]
fn send_deposit_transactions<T1, T2: 'static>(
    mut env: Environment<T1>,
    log: Logger,
    mut eth1_deposit_datas: Vec<(ValidatorDir, Eth1DepositData)>,
    from_address: Address,
    deposit_contract: Address,
    transport: T2,
    confirmation_count: usize,
    confirmation_batch_size: usize,
) -> Result<(), String>
where
    T1: EthSpec,
    T2: Transport + std::marker::Send,
    <T2 as web3::Transport>::Out: std::marker::Send,
{
    let web3 = Web3::new(transport);
    let spec = env.eth2_config.spec.clone();

    let deposits_fut = async {
        poll_until_synced(web3.clone(), log.clone()).await?;

        for chunk in eth1_deposit_datas.chunks_mut(confirmation_batch_size) {
            let futures = FuturesUnordered::default();

            for (ref mut validator_dir, eth1_deposit_data) in chunk.iter_mut() {
                verify_deposit_signature(&eth1_deposit_data.deposit_data, &spec).map_err(|e| {
                    format!(
                        "Deposit for {:?} fails verification, \
                         are you using the correct testnet configuration?\nError: {:?}",
                        eth1_deposit_data.deposit_data.pubkey, e
                    )
                })?;

                let web3 = web3.clone();
                let log = log.clone();
                futures.push(async move {
                    let tx_hash = web3
                        .send_transaction_with_confirmation(
                            TransactionRequest {
                                from: from_address,
                                to: Some(deposit_contract),
                                gas: Some(DEPOSIT_GAS.into()),
                                gas_price: None,
                                value: Some(from_gwei(eth1_deposit_data.deposit_data.amount)),
                                data: Some(eth1_deposit_data.rlp.clone().into()),
                                nonce: None,
                                condition: None,
                            },
                            CONFIRMATIONS_POLL_TIME,
                            confirmation_count,
                        )
                        .compat()
                        .await
                        .map_err(|e| format!("Failed to send transaction: {:?}", e))?;

                    info!(
                        log,
                        "Submitted deposit";
                        "tx_hash" => format!("{:?}", tx_hash),
                    );

                    validator_dir
                        .save_eth1_deposit_tx_hash(&format!("{:?}", tx_hash))
                        .map_err(|e| {
                            format!("Failed to save tx hash {:?} to disk: {:?}", tx_hash, e)
                        })?;

                    Ok::<(), String>(())
                });
            }

            futures
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .collect::<Result<_, _>>()?;
        }

        Ok::<(), String>(())
    };

    env.runtime().block_on(deposits_fut)?;

    Ok(())
}

pub fn cli_run<T: EthSpec>(
    matches: &ArgMatches<'_>,
    mut env: Environment<T>,
) -> Result<(), String> {
    let log = env.core_context().log().clone();

    let data_dir = clap_utils::parse_path_with_default_in_home_dir(
        matches,
        VALIDATOR_DIR_FLAG,
        PathBuf::new().join(".lighthouse").join("validators"),
    )?;
    let validator: String = clap_utils::parse_required(matches, VALIDATOR_FLAG)?;
    let eth1_ipc_path: Option<PathBuf> = clap_utils::parse_optional(matches, ETH1_IPC_FLAG)?;
    let eth1_http_url: Option<String> = clap_utils::parse_optional(matches, ETH1_HTTP_FLAG)?;
    let from_address: Address = clap_utils::parse_required(matches, FROM_ADDRESS_FLAG)?;
    let confirmation_count: usize = clap_utils::parse_required(matches, CONFIRMATION_COUNT_FLAG)?;
    let confirmation_batch_size: usize =
        clap_utils::parse_required(matches, CONFIRMATION_BATCH_SIZE_FLAG)?;

    let manager = ValidatorManager::open(&data_dir)
        .map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?;

    let validators = match validator.as_ref() {
        "all" => manager
            .open_all_validators()
            .map_err(|e| format!("Unable to read all validators: {:?}", e)),
        name => {
            let path = manager
                .directory_names()
                .map_err(|e| {
                    format!(
                        "Unable to read --{} directory names: {:?}",
                        VALIDATOR_DIR_FLAG, e
                    )
                })?
                .get(name)
                .ok_or_else(|| format!("Unknown validator: {}", name))?
                .clone();

            manager
                .open_validator(&path)
                .map_err(|e| format!("Unable to open {}: {:?}", name, e))
                .map(|v| vec![v])
        }
    }?;

    let eth1_deposit_datas = validators
        .into_iter()
        .filter(|v| !v.eth1_deposit_tx_hash_exists())
        .map(|v| match v.eth1_deposit_data() {
            Ok(Some(data)) => Ok((v, data)),
            Ok(None) => Err(format!(
                "Validator is missing deposit data file: {:?}",
                v.dir()
            )),
            Err(e) => Err(format!(
                "Unable to read deposit data for {:?}: {:?}",
                v.dir(),
                e
            )),
        })
        .collect::<Result<Vec<_>, _>>()?;

    let total_gwei: u64 = eth1_deposit_datas
        .iter()
        .map(|(_, d)| d.deposit_data.amount)
        .sum();

    if eth1_deposit_datas.is_empty() {
        info!(log, "No validators to deposit");

        return Ok(());
    }

    info!(
        log,
        "Starting deposits";
        "deposit_count" => eth1_deposit_datas.len(),
        "total_eth" => total_gwei / GWEI,
    );

    let deposit_contract = env
        .testnet
        .as_ref()
        .ok_or_else(|| "Unable to run account manager without a testnet dir".to_string())?
        .deposit_contract_address()
        .map_err(|e| format!("Unable to parse deposit contract address: {}", e))?;

    if deposit_contract == Address::zero() {
        return Err("Refusing to deposit to the zero address. Check testnet configuration.".into());
    }

    match (eth1_ipc_path, eth1_http_url) {
        (Some(_), Some(_)) => Err(format!(
            "error: Cannot supply both --{} and --{}",
            ETH1_IPC_FLAG, ETH1_HTTP_FLAG
        )),
        (None, None) => Err(format!(
            "error: Must supply one of --{} or --{}",
            ETH1_IPC_FLAG, ETH1_HTTP_FLAG
        )),
        (Some(ipc_path), None) => {
            let (_event_loop_handle, ipc_transport) = Ipc::new(ipc_path)
                .map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?;
            send_deposit_transactions(
                env,
                log,
                eth1_deposit_datas,
                from_address,
                deposit_contract,
                ipc_transport,
                confirmation_count,
                confirmation_batch_size,
            )
        }
        (None, Some(http_url)) => {
            let (_event_loop_handle, http_transport) = Http::new(http_url.as_str())
                .map_err(|e| format!("Unable to connect to eth1 http RPC: {:?}", e))?;
            send_deposit_transactions(
                env,
                log,
                eth1_deposit_datas,
                from_address,
                deposit_contract,
                http_transport,
                confirmation_count,
                confirmation_batch_size,
            )
        }
    }
}

/// Converts gwei to wei.
fn from_gwei(gwei: u64) -> U256 {
    U256::from(gwei) * U256::exp10(9)
}

/// Run a poll on the `eth_syncing` endpoint, blocking until the node is synced.
async fn poll_until_synced<T>(web3: Web3<T>, log: Logger) -> Result<(), String>
where
    T: Transport + Send + 'static,
    <T as Transport>::Out: Send,
{
    loop {
        let sync_state = web3
            .clone()
            .eth()
            .syncing()
            .compat()
            .await
            .map_err(|e| format!("Unable to read syncing state from eth1 node: {:?}", e))?;

        match sync_state {
            SyncState::Syncing(SyncInfo {
                current_block,
                highest_block,
                ..
            }) => {
                info!(
                    log,
                    "Waiting for eth1 node to sync";
                    "est_highest_block" => format!("{}", highest_block),
                    "current_block" => format!("{}", current_block),
                );

                delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await;
            }
            SyncState::NotSyncing => {
                let block_number = web3
                    .clone()
                    .eth()
                    .block_number()
                    .compat()
                    .await
                    .map_err(|e| format!("Unable to read block number from eth1 node: {:?}", e))?;

                if block_number > 0.into() {
                    info!(
                        log,
                        "Eth1 node is synced";
                        "head_block" => format!("{}", block_number),
                    );
                    break;
                } else {
                    delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await;
                    info!(
                        log,
                        "Waiting for eth1 node to sync";
                        "current_block" => 0,
                    );
                }
            }
        }
    }

    Ok(())
}
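The batching logic in `send_deposit_transactions` is easy to miss among the error handling, so here is a stripped-down sketch of the same pattern under assumed stand-in names (`submit` replaces `send_transaction_with_confirmation`); it is not the Lighthouse implementation:

use futures::stream::{FuturesUnordered, StreamExt};

// Stand-in for one deposit transaction plus its confirmation wait.
async fn submit(i: usize) -> Result<usize, String> {
    Ok(i)
}

async fn run_batches(items: Vec<usize>, batch_size: usize) -> Result<(), String> {
    for chunk in items.chunks(batch_size) {
        // All sends within a chunk run concurrently...
        let futures = FuturesUnordered::new();
        for &i in chunk {
            futures.push(submit(i));
        }
        // ...but the whole chunk must finish (and succeed) before the next
        // chunk starts; the first error aborts the remaining chunks.
        futures
            .collect::<Vec<_>>()
            .await
            .into_iter()
            .collect::<Result<Vec<_>, _>>()?;
    }
    Ok(())
}

fn main() {
    futures::executor::block_on(async {
        run_batches((0..25).collect(), 10).await.unwrap();
    });
}

This is why `--confirmation-batch-size` trades throughput against the number of transactions left unconfirmed if the process is interrupted.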
214  account_manager/src/validator/import.rs  Normal file
@@ -0,0 +1,214 @@
use crate::{common::ensure_dir_exists, VALIDATOR_DIR_FLAG};
use account_utils::{
    eth2_keystore::Keystore,
    read_password_from_user,
    validator_definitions::{
        recursively_find_voting_keystores, ValidatorDefinition, ValidatorDefinitions,
        CONFIG_FILENAME,
    },
};
use clap::{App, Arg, ArgMatches};
use std::fs;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;

pub const CMD: &str = "import";
pub const KEYSTORE_FLAG: &str = "keystore";
pub const DIR_FLAG: &str = "directory";
pub const STDIN_PASSWORD_FLAG: &str = "stdin-passwords";

pub const PASSWORD_PROMPT: &str = "Enter the keystore password, or press enter to omit it:";
pub const KEYSTORE_REUSE_WARNING: &str = "DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH \
ANOTHER CLIENT, OR YOU WILL GET SLASHED.";

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new(CMD)
        .about(
            "Imports one or more EIP-2335 keystores into a Lighthouse VC directory, \
             requesting passwords interactively. The directory flag provides a convenient \
             method for importing a directory of keys generated by the eth2-deposit-cli \
             Python utility.",
        )
        .arg(
            Arg::with_name(KEYSTORE_FLAG)
                .long(KEYSTORE_FLAG)
                .value_name("KEYSTORE_PATH")
                .help("Path to a single keystore to be imported.")
                .conflicts_with(DIR_FLAG)
                .required_unless(DIR_FLAG)
                .takes_value(true),
        )
        .arg(
            Arg::with_name(DIR_FLAG)
                .long(DIR_FLAG)
                .value_name("KEYSTORES_DIRECTORY")
                .help(
                    "Path to a directory which contains zero or more keystores \
                     for import. This directory and all sub-directories will be \
                     searched and any file name which contains 'keystore' and \
                     has the '.json' extension will be attempted to be imported.",
                )
                .conflicts_with(KEYSTORE_FLAG)
                .required_unless(KEYSTORE_FLAG)
                .takes_value(true),
        )
        .arg(
            Arg::with_name(VALIDATOR_DIR_FLAG)
                .long(VALIDATOR_DIR_FLAG)
                .value_name("VALIDATOR_DIRECTORY")
                .help(
                    "The path where the validator directories will be created. \
                     Defaults to ~/.lighthouse/validators",
                )
                .takes_value(true),
        )
        .arg(
            Arg::with_name(STDIN_PASSWORD_FLAG)
                .long(STDIN_PASSWORD_FLAG)
                .help("If present, read passwords from stdin instead of tty."),
        )
}

pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
    let keystore: Option<PathBuf> = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?;
    let keystores_dir: Option<PathBuf> = clap_utils::parse_optional(matches, DIR_FLAG)?;
    let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
        matches,
        VALIDATOR_DIR_FLAG,
        PathBuf::new().join(".lighthouse").join("validators"),
    )?;
    let stdin_password = matches.is_present(STDIN_PASSWORD_FLAG);

    ensure_dir_exists(&validator_dir)?;

    let mut defs = ValidatorDefinitions::open_or_create(&validator_dir)
        .map_err(|e| format!("Unable to open {}: {:?}", CONFIG_FILENAME, e))?;

    // Collect the paths for the keystores that should be imported.
    let keystore_paths = match (keystore, keystores_dir) {
        (Some(keystore), None) => vec![keystore],
        (None, Some(keystores_dir)) => {
            let mut keystores = vec![];

            recursively_find_voting_keystores(&keystores_dir, &mut keystores)
                .map_err(|e| format!("Unable to search {:?}: {:?}", keystores_dir, e))?;

            if keystores.is_empty() {
                eprintln!("No keystores found in {:?}", keystores_dir);
                return Ok(());
            }

            keystores
        }
        _ => {
            return Err(format!(
                "Must supply either --{} or --{}",
                KEYSTORE_FLAG, DIR_FLAG
            ))
        }
    };

    eprintln!("WARNING: {}", KEYSTORE_REUSE_WARNING);

    // For each keystore:
    //
    // - Obtain the keystore password, if the user desires.
    // - Copy the keystore into the `validator_dir`.
    // - Add the keystore to the validator definitions file.
    //
    // Skip keystores that already exist, but exit early if any operation fails.
    let mut num_imported_keystores = 0;
    for src_keystore in &keystore_paths {
        let keystore = Keystore::from_json_file(src_keystore)
            .map_err(|e| format!("Unable to read keystore JSON {:?}: {:?}", src_keystore, e))?;

        eprintln!();
        eprintln!("Keystore found at {:?}:", src_keystore);
        eprintln!();
        eprintln!(" - Public key: 0x{}", keystore.pubkey());
        eprintln!(" - UUID: {}", keystore.uuid());
        eprintln!();
        eprintln!(
            "If you enter the password it will be stored as plain-text in {} so that it is not \
             required each time the validator client starts.",
            CONFIG_FILENAME
        );

        let password_opt = loop {
            eprintln!();
            eprintln!("{}", PASSWORD_PROMPT);

            let password = read_password_from_user(stdin_password)?;

            if password.as_ref().is_empty() {
                eprintln!("Continuing without password.");
                sleep(Duration::from_secs(1)); // Provides nicer UX.
                break None;
            }

            match keystore.decrypt_keypair(password.as_ref()) {
                Ok(_) => {
                    eprintln!("Password is correct.");
                    eprintln!();
                    sleep(Duration::from_secs(1)); // Provides nicer UX.
                    break Some(password);
                }
                Err(eth2_keystore::Error::InvalidPassword) => {
                    eprintln!("Invalid password");
                }
                Err(e) => return Err(format!("Error whilst decrypting keypair: {:?}", e)),
            }
        };

        // The keystore is placed in a directory that matches the name of the public key. This
        // provides some loose protection against adding the same keystore twice.
        let dest_dir = validator_dir.join(format!("0x{}", keystore.pubkey()));
        if dest_dir.exists() {
            eprintln!(
                "Skipping import of keystore for existing public key: {:?}",
                src_keystore
            );
            continue;
        }

        fs::create_dir_all(&dest_dir)
            .map_err(|e| format!("Unable to create import directory: {:?}", e))?;

        // Retain the keystore file name, but place it in the new directory.
        let dest_keystore = src_keystore
            .file_name()
            .and_then(|file_name| file_name.to_str())
            .map(|file_name_str| dest_dir.join(file_name_str))
            .ok_or_else(|| format!("Badly formatted file name: {:?}", src_keystore))?;

        // Copy the keystore to the new location.
        fs::copy(&src_keystore, &dest_keystore)
            .map_err(|e| format!("Unable to copy keystore: {:?}", e))?;

        eprintln!("Successfully imported keystore.");
        num_imported_keystores += 1;

        let validator_def =
            ValidatorDefinition::new_keystore_with_password(&dest_keystore, password_opt)
                .map_err(|e| format!("Unable to create new validator definition: {:?}", e))?;

        defs.push(validator_def);

        defs.save(&validator_dir)
            .map_err(|e| format!("Unable to save {}: {:?}", CONFIG_FILENAME, e))?;

        eprintln!("Successfully updated {}.", CONFIG_FILENAME);
    }

    eprintln!();
    eprintln!(
        "Successfully imported {} validators ({} skipped).",
        num_imported_keystores,
        keystore_paths.len() - num_imported_keystores
    );
    eprintln!();
    eprintln!("WARNING: {}", KEYSTORE_REUSE_WARNING);

    Ok(())
}
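The password prompt above relies on `loop` being an expression in Rust, so `break Some(..)` / `break None` produce `password_opt` directly. A small stand-alone sketch of that pattern, with a hypothetical `try_password` in place of `keystore.decrypt_keypair` and a scripted input in place of the tty:

fn try_password(p: &str) -> Result<(), &'static str> {
    if p == "correct horse" { Ok(()) } else { Err("Invalid password") }
}

fn main() {
    let mut attempts = vec!["wrong", "correct horse"].into_iter();
    let password_opt = loop {
        // Stand-in for `read_password_from_user`; an empty entry means
        // "omit the password".
        let password = attempts.next().unwrap_or_default();
        if password.is_empty() {
            break None;
        }
        match try_password(password) {
            Ok(()) => break Some(password.to_string()),
            Err(e) => eprintln!("{}", e), // loop again, i.e. re-prompt
        }
    };
    assert_eq!(password_opt, Some("correct horse".to_string()));
}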
41  account_manager/src/validator/list.rs  Normal file
@@ -0,0 +1,41 @@
use crate::VALIDATOR_DIR_FLAG;
use clap::{App, Arg, ArgMatches};
use std::path::PathBuf;
use validator_dir::Manager as ValidatorManager;

pub const CMD: &str = "list";

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new(CMD)
        .arg(
            Arg::with_name(VALIDATOR_DIR_FLAG)
                .long(VALIDATOR_DIR_FLAG)
                .value_name("VALIDATOR_DIRECTORY")
                .help(
                    "The path to search for validator directories. \
                     Defaults to ~/.lighthouse/validators",
                )
                .takes_value(true),
        )
        .about("Lists the names of all validators.")
}

pub fn cli_run(matches: &ArgMatches<'_>) -> Result<(), String> {
    let data_dir = clap_utils::parse_path_with_default_in_home_dir(
        matches,
        VALIDATOR_DIR_FLAG,
        PathBuf::new().join(".lighthouse").join("validators"),
    )?;

    let mgr = ValidatorManager::open(&data_dir)
        .map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?;

    for (name, _path) in mgr
        .directory_names()
        .map_err(|e| format!("Unable to list validators: {:?}", e))?
    {
        println!("{}", name)
    }

    Ok(())
}
42  account_manager/src/validator/mod.rs  Normal file
@@ -0,0 +1,42 @@
pub mod create;
pub mod deposit;
pub mod import;
pub mod list;

use crate::common::base_wallet_dir;
use clap::{App, Arg, ArgMatches};
use environment::Environment;
use types::EthSpec;

pub const CMD: &str = "validator";

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new(CMD)
        .about("Provides commands for managing Eth2 validators.")
        .arg(
            Arg::with_name("base-dir")
                .long("base-dir")
                .value_name("BASE_DIRECTORY")
                .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets")
                .takes_value(true),
        )
        .subcommand(create::cli_app())
        .subcommand(deposit::cli_app())
        .subcommand(import::cli_app())
        .subcommand(list::cli_app())
}

pub fn cli_run<T: EthSpec>(matches: &ArgMatches, env: Environment<T>) -> Result<(), String> {
    let base_wallet_dir = base_wallet_dir(matches, "base-dir")?;

    match matches.subcommand() {
        (create::CMD, Some(matches)) => create::cli_run::<T>(matches, env, base_wallet_dir),
        (deposit::CMD, Some(matches)) => deposit::cli_run::<T>(matches, env),
        (import::CMD, Some(matches)) => import::cli_run(matches),
        (list::CMD, Some(matches)) => list::cli_run(matches),
        (unknown, _) => Err(format!(
            "{} does not have a {} command. See --help",
            CMD, unknown
        )),
    }
}
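The dispatch in `cli_run` above is the standard clap v2 pattern: each submodule exports a `CMD` name and the parent matches on `ArgMatches::subcommand()`. A compilable miniature of the same shape (the `demo`/`list` names are invented for illustration):

use clap::{App, ArgMatches, SubCommand};

fn run_list(_matches: &ArgMatches) -> Result<(), String> {
    println!("listing...");
    Ok(())
}

fn main() -> Result<(), String> {
    let matches = App::new("demo")
        .subcommand(SubCommand::with_name("list"))
        .get_matches_from(vec!["demo", "list"]);

    // `subcommand()` yields the name plus the sub-`ArgMatches`, so unknown
    // names can be reported rather than silently ignored.
    match matches.subcommand() {
        ("list", Some(sub)) => run_list(sub),
        (unknown, _) => Err(format!("demo does not have a {} command", unknown)),
    }
}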
164  account_manager/src/wallet/create.rs  Normal file
@@ -0,0 +1,164 @@
use crate::BASE_DIR_FLAG;
use account_utils::{random_password, strip_off_newlines};
use clap::{App, Arg, ArgMatches};
use eth2_wallet::{
    bip39::{Language, Mnemonic, MnemonicType},
    PlainText,
};
use eth2_wallet_manager::{WalletManager, WalletType};
use std::ffi::OsStr;
use std::fs::{self, File};
use std::io::prelude::*;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};

pub const CMD: &str = "create";
pub const HD_TYPE: &str = "hd";
pub const NAME_FLAG: &str = "name";
pub const PASSPHRASE_FLAG: &str = "passphrase-file";
pub const TYPE_FLAG: &str = "type";
pub const MNEMONIC_FLAG: &str = "mnemonic-output-path";

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new(CMD)
        .about("Creates a new HD (hierarchical-deterministic) EIP-2386 wallet.")
        .arg(
            Arg::with_name(NAME_FLAG)
                .long(NAME_FLAG)
                .value_name("WALLET_NAME")
                .help(
                    "The wallet will be created with this name. It is not allowed to \
                     create two wallets with the same name for the same --base-dir.",
                )
                .takes_value(true)
                .required(true),
        )
        .arg(
            Arg::with_name(PASSPHRASE_FLAG)
                .long(PASSPHRASE_FLAG)
                .value_name("WALLET_PASSWORD_PATH")
                .help(
                    "A path to a file containing the password which will unlock the wallet. \
                     If the file does not exist, a random password will be generated and \
                     saved at that path. To avoid confusion, if the file does not already \
                     exist it must include a '.pass' suffix.",
                )
                .takes_value(true)
                .required(true),
        )
        .arg(
            Arg::with_name(TYPE_FLAG)
                .long(TYPE_FLAG)
                .value_name("WALLET_TYPE")
                .help(
                    "The type of wallet to create. Only HD (hierarchical-deterministic) \
                     wallets are supported at present.",
                )
                .takes_value(true)
                .possible_values(&[HD_TYPE])
                .default_value(HD_TYPE),
        )
        .arg(
            Arg::with_name(MNEMONIC_FLAG)
                .long(MNEMONIC_FLAG)
                .value_name("MNEMONIC_PATH")
                .help(
                    "If present, the mnemonic will be saved to this file. DO NOT SHARE THE MNEMONIC.",
                )
                .takes_value(true),
        )
}

pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> {
    let name: String = clap_utils::parse_required(matches, NAME_FLAG)?;
    let wallet_password_path: PathBuf = clap_utils::parse_required(matches, PASSPHRASE_FLAG)?;
    let mnemonic_output_path: Option<PathBuf> = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?;
    let type_field: String = clap_utils::parse_required(matches, TYPE_FLAG)?;

    let wallet_type = match type_field.as_ref() {
        HD_TYPE => WalletType::Hd,
        unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)),
    };

    let mgr = WalletManager::open(&base_dir)
        .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;

    // Create a new random mnemonic.
    //
    // The `tiny-bip39` crate uses `thread_rng()` for this entropy.
    let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English);

    // Create a random password if the file does not exist.
    if !wallet_password_path.exists() {
        // To prevent users from accidentally supplying their password to the PASSPHRASE_FLAG
        // and creating a file with that name, we require that the password has a .pass suffix.
        if wallet_password_path.extension() != Some(OsStr::new("pass")) {
            return Err(format!(
                "Only creates a password file if that file ends in .pass: {:?}",
                wallet_password_path
            ));
        }

        create_with_600_perms(&wallet_password_path, random_password().as_bytes())
            .map_err(|e| format!("Unable to write to {:?}: {:?}", wallet_password_path, e))?;
    }

    let wallet_password = fs::read(&wallet_password_path)
        .map_err(|e| format!("Unable to read {:?}: {:?}", wallet_password_path, e))
        .map(|bytes| PlainText::from(strip_off_newlines(bytes)))?;

    let wallet = mgr
        .create_wallet(name, wallet_type, &mnemonic, wallet_password.as_bytes())
        .map_err(|e| format!("Unable to create wallet: {:?}", e))?;

    if let Some(path) = mnemonic_output_path {
        create_with_600_perms(&path, mnemonic.phrase().as_bytes())
            .map_err(|e| format!("Unable to write mnemonic to {:?}: {:?}", path, e))?;
    }

    println!("Your wallet's 12-word BIP-39 mnemonic is:");
    println!();
    println!("\t{}", mnemonic.phrase());
    println!();
    println!("This mnemonic can be used to fully restore your wallet, should");
    println!("you lose the JSON file or your password.");
    println!();
    println!("It is very important that you DO NOT SHARE this mnemonic as it will");
    println!("reveal the private keys of all validators and keys generated with");
    println!("this wallet. That would be catastrophic.");
    println!();
    println!("It is also important to store a backup of this mnemonic so you can");
    println!("recover your private keys in the case of data loss. Writing it on");
    println!("a piece of paper and storing it in a safe place would be prudent.");
    println!();
    println!("Your wallet's UUID is:");
    println!();
    println!("\t{}", wallet.wallet().uuid());
    println!();
    println!("You do not need to backup your UUID or keep it secret.");

    Ok(())
}

/// Creates a file with `600 (-rw-------)` permissions.
pub fn create_with_600_perms<P: AsRef<Path>>(path: P, bytes: &[u8]) -> Result<(), String> {
    let path = path.as_ref();

    let mut file =
        File::create(&path).map_err(|e| format!("Unable to create {:?}: {}", path, e))?;

    let mut perm = file
        .metadata()
        .map_err(|e| format!("Unable to get {:?} metadata: {}", path, e))?
        .permissions();

    perm.set_mode(0o600);

    file.set_permissions(perm)
        .map_err(|e| format!("Unable to set {:?} permissions: {}", path, e))?;

    file.write_all(bytes)
        .map_err(|e| format!("Unable to write to {:?}: {}", path, e))?;

    Ok(())
}
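A brief usage sketch for `create_with_600_perms` above, checking that only the owner read/write bits end up set; this assumes the function is in scope, and the temp-file path and payload are illustrative:

use std::os::unix::fs::PermissionsExt;

fn main() -> Result<(), String> {
    let path = std::env::temp_dir().join("example.pass");
    create_with_600_perms(&path, b"correct horse battery staple")?;

    let mode = std::fs::metadata(&path)
        .map_err(|e| format!("{}", e))?
        .permissions()
        .mode();
    // Only the low nine bits carry the rwx permissions.
    assert_eq!(mode & 0o777, 0o600);
    Ok(())
}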
24  account_manager/src/wallet/list.rs  Normal file
@@ -0,0 +1,24 @@
use crate::BASE_DIR_FLAG;
use clap::App;
use eth2_wallet_manager::WalletManager;
use std::path::PathBuf;

pub const CMD: &str = "list";

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new(CMD).about("Lists the names of all wallets.")
}

pub fn cli_run(base_dir: PathBuf) -> Result<(), String> {
    let mgr = WalletManager::open(&base_dir)
        .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;

    for (name, _uuid) in mgr
        .wallets()
        .map_err(|e| format!("Unable to list wallets: {:?}", e))?
    {
        println!("{}", name)
    }

    Ok(())
}
38  account_manager/src/wallet/mod.rs  Normal file
@@ -0,0 +1,38 @@
pub mod create;
pub mod list;

use crate::{
    common::{base_wallet_dir, ensure_dir_exists},
    BASE_DIR_FLAG,
};
use clap::{App, Arg, ArgMatches};

pub const CMD: &str = "wallet";

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new(CMD)
        .about("Manage wallets, from which validator keys can be derived.")
        .arg(
            Arg::with_name(BASE_DIR_FLAG)
                .long(BASE_DIR_FLAG)
                .value_name("BASE_DIRECTORY")
                .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets")
                .takes_value(true),
        )
        .subcommand(create::cli_app())
        .subcommand(list::cli_app())
}

pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
    let base_dir = base_wallet_dir(matches, BASE_DIR_FLAG)?;
    ensure_dir_exists(&base_dir)?;

    match matches.subcommand() {
        (create::CMD, Some(matches)) => create::cli_run(matches, base_dir),
        (list::CMD, Some(_)) => list::cli_run(base_dir),
        (unknown, _) => Err(format!(
            "{} does not have a {} command. See --help",
            CMD, unknown
        )),
    }
}
@@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "0.1.2"
version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"]
edition = "2018"

@@ -9,35 +9,34 @@ name = "beacon_node"
path = "src/lib.rs"

[dev-dependencies]
node_test_rig = { path = "../tests/node_test_rig" }
node_test_rig = { path = "../testing/node_test_rig" }

[features]
write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing.

[dependencies]
eth2_config = { path = "../eth2/utils/eth2_config" }
eth2_config = { path = "../common/eth2_config" }
beacon_chain = { path = "beacon_chain" }
types = { path = "../eth2/types" }
types = { path = "../consensus/types" }
store = { path = "./store" }
client = { path = "client" }
version = { path = "version" }
clap = "2.33.0"
rand = "0.7.3"
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
slog-term = "2.5.0"
slog-async = "2.5.0"
ctrlc = { version = "3.1.4", features = ["termination"] }
tokio = {version = "0.2.20", features = ["time"] }
tokio = { version = "0.2.21", features = ["time"] }
exit-future = "0.2.0"
env_logger = "0.7.1"
dirs = "2.0.2"
logging = { path = "../eth2/utils/logging" }
logging = { path = "../common/logging" }
futures = "0.3.5"
environment = { path = "../lighthouse/environment" }
genesis = { path = "genesis" }
eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" }
eth2-libp2p = { path = "./eth2-libp2p" }
eth2_testnet_config = { path = "../common/eth2_testnet_config" }
eth2_libp2p = { path = "./eth2_libp2p" }
eth2_ssz = "0.1.2"
toml = "0.5.6"
serde = "1.0.110"
clap_utils = { path = "../eth2/utils/clap_utils" }
clap_utils = { path = "../common/clap_utils" }
hyper = "0.13.5"
lighthouse_version = { path = "../common/lighthouse_version" }
@@ -1,6 +1,6 @@
[package]
name = "beacon_chain"
version = "0.1.2"
version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"]
edition = "2018"

@@ -9,44 +9,50 @@ default = ["participation_metrics"]
write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing.
participation_metrics = [] # Exposes validator participation metrics to Prometheus.

[dev-dependencies]
int_to_bytes = { path = "../../consensus/int_to_bytes" }

[dependencies]
eth2_config = { path = "../../eth2/utils/eth2_config" }
merkle_proof = { path = "../../eth2/utils/merkle_proof" }
eth2_config = { path = "../../common/eth2_config" }
merkle_proof = { path = "../../consensus/merkle_proof" }
store = { path = "../store" }
parking_lot = "0.10.2"
parking_lot = "0.11.0"
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
smallvec = "1.4.1"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
log = "0.4.8"
operation_pool = { path = "../../eth2/operation_pool" }
operation_pool = { path = "../operation_pool" }
rayon = "1.3.0"
serde = "1.0.110"
serde_derive = "1.0.110"
serde_yaml = "0.8.11"
serde_json = "1.0.52"
slog = { version = "2.5.2", features = ["max_level_trace"] }
slog-term = "2.6.0"
sloggers = "1.0.0"
slot_clock = { path = "../../eth2/utils/slot_clock" }
slot_clock = { path = "../../common/slot_clock" }
eth2_hashing = "0.1.0"
eth2_ssz = "0.1.2"
eth2_ssz_types = { path = "../../eth2/utils/ssz_types" }
eth2_ssz_types = { path = "../../consensus/ssz_types" }
eth2_ssz_derive = "0.1.0"
state_processing = { path = "../../eth2/state_processing" }
state_processing = { path = "../../consensus/state_processing" }
tree_hash = "0.1.0"
types = { path = "../../eth2/types" }
tokio = "0.2.20"
types = { path = "../../consensus/types" }
tokio = "0.2.21"
eth1 = { path = "../eth1" }
websocket_server = { path = "../websocket_server" }
futures = "0.3.5"
genesis = { path = "../genesis" }
integer-sqrt = "0.1.3"
rand = "0.7.3"
proto_array_fork_choice = { path = "../../eth2/proto_array_fork_choice" }
lru = "0.4.3"
proto_array = { path = "../../consensus/proto_array" }
lru = "0.5.1"
tempfile = "3.1.0"
bitvec = "0.17.4"
bls = { path = "../../eth2/utils/bls" }
safe_arith = { path = "../../eth2/utils/safe_arith" }

[dev-dependencies]
lazy_static = "1.4.0"
bls = { path = "../../crypto/bls" }
safe_arith = { path = "../../consensus/safe_arith" }
fork_choice = { path = "../../consensus/fork_choice" }
environment = { path = "../../lighthouse/environment" }
bus = "2.2.3"
derivative = "2.1.1"
itertools = "0.9.0"
@@ -23,7 +23,7 @@
|
||||
//! -------------------------------------
|
||||
//! |
|
||||
//! ▼
|
||||
//! ForkChoiceVerifiedAttestation
|
||||
//! impl SignatureVerifiedAttestation
|
||||
//! ```
|
||||
|
||||
use crate::{
|
||||
@@ -52,7 +52,7 @@ use std::borrow::Cow;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{
|
||||
Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation,
|
||||
RelativeEpoch, SelectionProof, SignedAggregateAndProof, Slot,
|
||||
RelativeEpoch, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
|
||||
};
|
||||
|
||||
/// Returned when an attestation was not successfully verified. It might not have been verified for
|
||||
@@ -62,70 +62,170 @@ use types::{
|
||||
/// other than `BeaconChainError`).
|
||||
/// - The application encountered an internal error whilst attempting to determine validity
|
||||
/// (the `BeaconChainError` variant)
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
/// The attestation is from a slot that is later than the current slot (with respect to the
|
||||
/// gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
FutureSlot {
|
||||
attestation_slot: Slot,
|
||||
latest_permissible_slot: Slot,
|
||||
},
|
||||
/// The attestation is from a slot that is prior to the earliest permissible slot (with
|
||||
/// respect to the gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
PastSlot {
|
||||
attestation_slot: Slot,
|
||||
earliest_permissible_slot: Slot,
|
||||
},
|
||||
/// The attestations aggregation bits were empty when they shouldn't be.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
EmptyAggregationBitfield,
|
||||
/// The `selection_proof` on the aggregate attestation does not elect it as an aggregator.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
InvalidSelectionProof { aggregator_index: u64 },
|
||||
/// The `selection_proof` on the aggregate attestation selects it as a validator, however the
|
||||
/// aggregator index is not in the committee for that attestation.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
AggregatorNotInCommittee { aggregator_index: u64 },
|
||||
/// The aggregator index refers to a validator index that we have not seen.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
AggregatorPubkeyUnknown(u64),
|
||||
/// The attestation has been seen before; either in a block, on the gossip network or from a
|
||||
/// local validator.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this attestation is valid, however we have already observed it and do not
|
||||
/// need to observe it again.
|
||||
AttestationAlreadyKnown(Hash256),
|
||||
/// There has already been an aggregation observed for this validator, we refuse to process a
|
||||
/// second.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this attestation is valid, however we have already observed an aggregate
|
||||
/// attestation from this validator for this epoch and should not observe another.
|
||||
AggregatorAlreadyKnown(u64),
|
||||
/// The aggregator index is higher than the maximum possible validator count.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
ValidatorIndexTooHigh(usize),
|
||||
/// The `attestation.data.beacon_block_root` block is unknown.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The attestation points to a block we have not yet imported. It's unclear if the attestation
|
||||
/// is valid or not.
|
||||
UnknownHeadBlock { beacon_block_root: Hash256 },
|
||||
/// The `attestation.data.slot` is not from the same epoch as `data.target.epoch` and therefore
|
||||
/// the attestation is invalid.
|
||||
/// The `attestation.data.slot` is not from the same epoch as `data.target.epoch`.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
BadTargetEpoch,
|
||||
/// The target root of the attestation points to a block that we have not verified.
|
||||
///
|
||||
/// This is invalid behaviour whilst we first check for `UnknownHeadBlock`.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
UnknownTargetRoot(Hash256),
|
||||
/// A signature on the attestation is invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
InvalidSignature,
|
||||
/// There is no committee for the slot and committee index of this attestation and the
|
||||
/// attestation should not have been produced.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
NoCommitteeForSlotAndIndex { slot: Slot, index: CommitteeIndex },
|
||||
/// The unaggregated attestation doesn't have only one aggregation bit set.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
NotExactlyOneAggregationBitSet(usize),
|
||||
/// We have already observed an attestation for the `validator_index` and refuse to process
|
||||
/// another.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this attestation is valid, however we have already observed a
|
||||
/// single-participant attestation from this validator for this epoch and should not observe
|
||||
/// another.
|
||||
PriorAttestationKnown { validator_index: u64, epoch: Epoch },
|
||||
/// The attestation is for an epoch in the future (with respect to the gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
FutureEpoch {
|
||||
attestation_epoch: Epoch,
|
||||
current_epoch: Epoch,
|
||||
},
|
||||
/// The attestation is for an epoch in the past (with respect to the gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
PastEpoch {
|
||||
attestation_epoch: Epoch,
|
||||
current_epoch: Epoch,
|
||||
},
|
||||
/// The attestation is attesting to a state that is later than itself. (Viz., attesting to the
|
||||
/// future).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
AttestsToFutureBlock { block: Slot, attestation: Slot },
|
||||
/// The attestation was received on an invalid attestation subnet.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
InvalidSubnetId {
|
||||
received: SubnetId,
|
||||
expected: SubnetId,
|
||||
},
|
||||
/// The attestation failed the `state_processing` verification stage.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
Invalid(AttestationValidationError),
|
||||
/// There was an error whilst processing the attestation. It is not known if it is valid or invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// We were unable to process this attestation due to an internal error. It's unclear if the
|
||||
/// attestation is valid.
|
||||
BeaconChainError(BeaconChainError),
|
||||
}
|
||||
|
||||
@@ -158,65 +258,21 @@ impl<T: BeaconChainTypes> Clone for VerifiedUnaggregatedAttestation<T> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Wraps an `indexed_attestation` that is valid for application to fork choice. The
|
||||
/// `indexed_attestation` will have been generated via the `VerifiedAggregatedAttestation` or
|
||||
/// `VerifiedUnaggregatedAttestation` wrappers.
|
||||
pub struct ForkChoiceVerifiedAttestation<'a, T: BeaconChainTypes> {
|
||||
indexed_attestation: &'a IndexedAttestation<T::EthSpec>,
|
||||
}
|
||||
|
||||
/// A helper trait implemented on wrapper types that can be progressed to a state where they can be
|
||||
/// verified for application to fork choice.
|
||||
pub trait IntoForkChoiceVerifiedAttestation<'a, T: BeaconChainTypes> {
|
||||
fn into_fork_choice_verified_attestation(
|
||||
&'a self,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<ForkChoiceVerifiedAttestation<'a, T>, Error>;
|
||||
pub trait SignatureVerifiedAttestation<T: BeaconChainTypes> {
|
||||
fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec>;
|
||||
}
|
||||
|
||||
impl<'a, T: BeaconChainTypes> IntoForkChoiceVerifiedAttestation<'a, T>
|
||||
for VerifiedAggregatedAttestation<T>
|
||||
{
|
||||
/// Progresses the `VerifiedAggregatedAttestation` to a stage where it is valid for application
|
||||
/// to the fork-choice rule (or not).
|
||||
fn into_fork_choice_verified_attestation(
|
||||
&'a self,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<ForkChoiceVerifiedAttestation<T>, Error> {
|
||||
ForkChoiceVerifiedAttestation::from_signature_verified_components(
|
||||
&self.indexed_attestation,
|
||||
chain,
|
||||
)
|
||||
impl<'a, T: BeaconChainTypes> SignatureVerifiedAttestation<T> for VerifiedAggregatedAttestation<T> {
|
||||
fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> {
|
||||
&self.indexed_attestation
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: BeaconChainTypes> IntoForkChoiceVerifiedAttestation<'a, T>
|
||||
for VerifiedUnaggregatedAttestation<T>
|
||||
{
|
||||
/// Progresses the `Attestation` to a stage where it is valid for application to the
|
||||
/// fork-choice rule (or not).
|
||||
fn into_fork_choice_verified_attestation(
|
||||
&'a self,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<ForkChoiceVerifiedAttestation<T>, Error> {
|
||||
ForkChoiceVerifiedAttestation::from_signature_verified_components(
|
||||
&self.indexed_attestation,
|
||||
chain,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: BeaconChainTypes> IntoForkChoiceVerifiedAttestation<'a, T>
|
||||
for ForkChoiceVerifiedAttestation<'a, T>
|
||||
{
|
||||
/// Simply returns itself.
|
||||
fn into_fork_choice_verified_attestation(
|
||||
&'a self,
|
||||
_: &BeaconChain<T>,
|
||||
) -> Result<ForkChoiceVerifiedAttestation<T>, Error> {
|
||||
Ok(Self {
|
||||
indexed_attestation: self.indexed_attestation,
|
||||
})
|
||||
impl<T: BeaconChainTypes> SignatureVerifiedAttestation<T> for VerifiedUnaggregatedAttestation<T> {
|
||||
fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> {
|
||||
&self.indexed_attestation
|
||||
}
|
||||
}
|
||||
|
||||
@@ -235,12 +291,7 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
|
||||
// We do not queue future attestations for later processing.
|
||||
verify_propagation_slot_range(chain, attestation)?;
|
||||
|
||||
// Ensure the aggregated attestation has not already been seen locally.
|
||||
//
|
||||
// TODO: this part of the code is not technically to spec, however I have raised a PR to
|
||||
// change it:
|
||||
//
|
||||
// https://github.com/ethereum/eth2.0-specs/pull/1749
|
||||
// Ensure the valid aggregated attestation has not already been seen locally.
|
||||
let attestation_root = attestation.tree_hash_root();
|
||||
if chain
|
||||
.observed_attestations
|
||||
@@ -278,56 +329,36 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
|
||||
// attestation and do not delay consideration for later.
|
||||
verify_head_block_is_known(chain, &attestation)?;
|
||||
|
||||
let indexed_attestation = map_attestation_committee(chain, attestation, |committee| {
|
||||
// Note: this clones the signature which is known to be a relatively slow operation.
|
||||
//
|
||||
// Future optimizations should remove this clone.
|
||||
let selection_proof =
|
||||
SelectionProof::from(signed_aggregate.message.selection_proof.clone());
|
||||
|
||||
if !selection_proof
|
||||
.is_aggregator(committee.committee.len(), &chain.spec)
|
||||
.map_err(|e| Error::BeaconChainError(e.into()))?
|
||||
{
|
||||
return Err(Error::InvalidSelectionProof { aggregator_index });
|
||||
}
|
||||
|
||||
/*
|
||||
* I have raised a PR that will likely get merged in v0.12.0:
|
||||
*
|
||||
* https://github.com/ethereum/eth2.0-specs/pull/1732
|
||||
*
|
||||
* If this PR gets merged, uncomment this code and remove the code below.
|
||||
*
|
||||
if !committee
|
||||
.committee
|
||||
.iter()
|
||||
.any(|validator_index| *validator_index as u64 == aggregator_index)
|
||||
{
|
||||
return Err(Error::AggregatorNotInCommittee { aggregator_index });
|
||||
}
|
||||
*/
|
||||
|
||||
get_indexed_attestation(committee.committee, &attestation)
|
||||
.map_err(|e| BeaconChainError::from(e).into())
|
||||
})?;
|
||||
|
||||
// Ensure the aggregator is in the attestation.
|
||||
//
|
||||
// I've raised an issue with this here:
|
||||
//
|
||||
// https://github.com/ethereum/eth2.0-specs/pull/1732
|
||||
//
|
||||
// I suspect PR my will get merged in v0.12 and we'll need to delete this code and
|
||||
// uncomment the code above.
|
||||
if !indexed_attestation
|
||||
.attesting_indices
|
||||
.iter()
|
||||
.any(|validator_index| *validator_index as u64 == aggregator_index)
|
||||
{
|
||||
return Err(Error::AggregatorNotInCommittee { aggregator_index });
|
||||
// Ensure that the attestation has participants.
|
||||
if attestation.aggregation_bits.is_zero() {
|
||||
return Err(Error::EmptyAggregationBitfield);
|
||||
}
|
||||
|
||||
let indexed_attestation =
|
||||
map_attestation_committee(chain, attestation, |(committee, _)| {
|
||||
// Note: this clones the signature which is known to be a relatively slow operation.
|
||||
//
|
||||
// Future optimizations should remove this clone.
|
||||
let selection_proof =
|
||||
SelectionProof::from(signed_aggregate.message.selection_proof.clone());
|
||||
|
||||
if !selection_proof
|
||||
.is_aggregator(committee.committee.len(), &chain.spec)
|
||||
.map_err(|e| Error::BeaconChainError(e.into()))?
|
||||
{
|
||||
return Err(Error::InvalidSelectionProof { aggregator_index });
|
||||
}
|
||||
|
||||
// Ensure the aggregator is a member of the committee for which it is aggregating.
|
||||
if !committee.committee.contains(&(aggregator_index as usize)) {
|
||||
return Err(Error::AggregatorNotInCommittee { aggregator_index });
|
||||
}
|
||||
|
||||
get_indexed_attestation(committee.committee, &attestation)
|
||||
.map_err(|e| BeaconChainError::from(e).into())
|
||||
})?;
|
||||
|
||||
// Ensure that all signatures are valid.
|
||||
if !verify_signed_aggregate_signatures(chain, &signed_aggregate, &indexed_attestation)? {
|
||||
return Err(Error::InvalidSignature);
|
||||
}
|
||||
@@ -351,7 +382,7 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
|
||||
if chain
|
||||
.observed_aggregators
|
||||
.observe_validator(&attestation, aggregator_index as usize)
|
||||
.map_err(|e| BeaconChainError::from(e))?
|
||||
.map_err(BeaconChainError::from)?
|
||||
{
|
||||
return Err(Error::PriorAttestationKnown {
|
||||
validator_index: aggregator_index,
|
||||
@@ -370,14 +401,6 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
|
||||
chain.add_to_block_inclusion_pool(self)
|
||||
}
|
||||
|
||||
/// A helper function to add this aggregate to `beacon_chain.fork_choice`.
|
||||
pub fn add_to_fork_choice(
|
||||
&self,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<ForkChoiceVerifiedAttestation<T>, Error> {
|
||||
chain.apply_attestation_to_fork_choice(self)
|
||||
}
|
||||
|
||||
/// Returns the underlying `attestation` for the `signed_aggregate`.
|
||||
pub fn attestation(&self) -> &Attestation<T::EthSpec> {
|
||||
&self.signed_aggregate.message.aggregate
|
||||
@@ -387,8 +410,12 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
|
||||
impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
|
||||
/// Returns `Ok(Self)` if the `attestation` is valid to be (re)published on the gossip
|
||||
/// network.
|
||||
///
|
||||
/// `subnet_id` is the subnet from which we received this attestation. This function will
|
||||
/// verify that it was received on the correct subnet.
|
||||
pub fn verify(
|
||||
attestation: Attestation<T::EthSpec>,
|
||||
subnet_id: SubnetId,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Self, Error> {
|
||||
// Ensure attestation is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a
|
||||
@@ -408,7 +435,23 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
|
||||
// attestation and do not delay consideration for later.
|
||||
verify_head_block_is_known(chain, &attestation)?;
|
||||
|
||||
let indexed_attestation = obtain_indexed_attestation(chain, &attestation)?;
|
||||
let (indexed_attestation, committees_per_slot) =
|
||||
obtain_indexed_attestation_and_committees_per_slot(chain, &attestation)?;
|
||||
|
||||
let expected_subnet_id = SubnetId::compute_subnet_for_attestation_data::<T::EthSpec>(
|
||||
&indexed_attestation.data,
|
||||
committees_per_slot,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::from)?;
|
||||
|
||||
// Ensure the attestation is from the correct subnet.
|
||||
if subnet_id != expected_subnet_id {
|
||||
return Err(Error::InvalidSubnetId {
|
||||
received: subnet_id,
|
||||
expected: expected_subnet_id,
|
||||
});
|
||||
}

        let validator_index = *indexed_attestation
            .attesting_indices
@@ -422,7 +465,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
        if chain
            .observed_attesters
            .validator_has_been_observed(&attestation, validator_index as usize)
-           .map_err(|e| BeaconChainError::from(e))?
+           .map_err(BeaconChainError::from)?
        {
            return Err(Error::PriorAttestationKnown {
                validator_index,
@@ -442,7 +485,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
        if chain
            .observed_attesters
            .observe_validator(&attestation, validator_index as usize)
-           .map_err(|e| BeaconChainError::from(e))?
+           .map_err(BeaconChainError::from)?
        {
            return Err(Error::PriorAttestationKnown {
                validator_index,
@@ -475,114 +518,6 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
    }
}

- impl<'a, T: BeaconChainTypes> ForkChoiceVerifiedAttestation<'a, T> {
-     /// Returns `Ok(Self)` if the `attestation` is valid to be applied to the beacon chain fork
-     /// choice.
-     ///
-     /// The supplied `indexed_attestation` MUST have a valid signature, this function WILL NOT
-     /// CHECK THE SIGNATURE. Use the `VerifiedAggregatedAttestation` or
-     /// `VerifiedUnaggregatedAttestation` structs to do signature verification.
-     fn from_signature_verified_components(
-         indexed_attestation: &'a IndexedAttestation<T::EthSpec>,
-         chain: &BeaconChain<T>,
-     ) -> Result<Self, Error> {
-         // There is no point in processing an attestation with an empty bitfield. Reject
-         // it immediately.
-         //
-         // This is not in the specification, however it should be transparent to other nodes. We
-         // return early here to avoid wasting precious resources verifying the rest of it.
-         if indexed_attestation.attesting_indices.len() == 0 {
-             return Err(Error::EmptyAggregationBitfield);
-         }
-
-         let slot_now = chain.slot()?;
-         let epoch_now = slot_now.epoch(T::EthSpec::slots_per_epoch());
-         let target = indexed_attestation.data.target.clone();
-
-         // Attestation must be from the current or previous epoch.
-         if target.epoch > epoch_now {
-             return Err(Error::FutureEpoch {
-                 attestation_epoch: target.epoch,
-                 current_epoch: epoch_now,
-             });
-         } else if target.epoch + 1 < epoch_now {
-             return Err(Error::PastEpoch {
-                 attestation_epoch: target.epoch,
-                 current_epoch: epoch_now,
-             });
-         }
-
-         if target.epoch
-             != indexed_attestation
-                 .data
-                 .slot
-                 .epoch(T::EthSpec::slots_per_epoch())
-         {
-             return Err(Error::BadTargetEpoch);
-         }
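
The verification removed above enforces two epoch rules: the target epoch must be the current or previous epoch, and it must agree with the epoch implied by the attestation's slot. A standalone sketch of that predicate, using plain integers in place of the `Epoch`/`Slot` newtypes:

```rust
/// Sketch of the "current or previous epoch" window enforced above; plain
/// integers stand in for Lighthouse's `Epoch`/`Slot` newtypes.
const SLOTS_PER_EPOCH: u64 = 32;

fn epoch_of(slot: u64) -> u64 {
    slot / SLOTS_PER_EPOCH
}

/// Returns true iff `target_epoch` is acceptable at `epoch_now` and agrees
/// with the epoch implied by the attestation's slot.
fn target_epoch_is_valid(target_epoch: u64, attestation_slot: u64, epoch_now: u64) -> bool {
    // Not from the future...
    target_epoch <= epoch_now
        // ...no older than the previous epoch...
        && target_epoch + 1 >= epoch_now
        // ...and consistent with the slot the attestation points at.
        && target_epoch == epoch_of(attestation_slot)
}

fn main() {
    let epoch_now = 10;
    assert!(target_epoch_is_valid(10, 10 * SLOTS_PER_EPOCH, epoch_now)); // current epoch
    assert!(target_epoch_is_valid(9, 9 * SLOTS_PER_EPOCH + 1, epoch_now)); // previous epoch
    assert!(!target_epoch_is_valid(8, 8 * SLOTS_PER_EPOCH, epoch_now)); // too old
    assert!(!target_epoch_is_valid(11, 11 * SLOTS_PER_EPOCH, epoch_now)); // future
}
```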
-
-         // Attestation target must be for a known block.
-         if !chain.fork_choice.contains_block(&target.root) {
-             return Err(Error::UnknownTargetRoot(target.root));
-         }
-
-         // TODO: we're not testing an assert from the spec:
-         //
-         // `assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch)`
-         //
-         // I think this check is redundant and I've raised an issue here:
-         //
-         // https://github.com/ethereum/eth2.0-specs/pull/1755
-         //
-         // To resolve this todo, observe the outcome of the above PR.
-
-         // Load the slot and state root for `attestation.data.beacon_block_root`.
-         //
-         // This indirectly checks to see if the `attestation.data.beacon_block_root` is in our fork
-         // choice. Any known, non-finalized block should be in fork choice, so this check
-         // immediately filters out attestations that attest to a block that has not been processed.
-         //
-         // Attestations must be for a known block. If the block is unknown, we simply drop the
-         // attestation and do not delay consideration for later.
-         let (block_slot, _state_root) = chain
-             .fork_choice
-             .block_slot_and_state_root(&indexed_attestation.data.beacon_block_root)
-             .ok_or_else(|| Error::UnknownHeadBlock {
-                 beacon_block_root: indexed_attestation.data.beacon_block_root,
-             })?;
-
-         // TODO: currently we do not check the FFG source/target. This is what the spec dictates
-         // but it seems wrong.
-         //
-         // I have opened an issue on the specs repo for this:
-         //
-         // https://github.com/ethereum/eth2.0-specs/issues/1636
-         //
-         // We should revisit this code once that issue has been resolved.
-
-         // Attestations must not be for blocks in the future. If this is the case, the attestation
-         // should not be considered.
-         if block_slot > indexed_attestation.data.slot {
-             return Err(Error::AttestsToFutureBlock {
-                 block: block_slot,
-                 attestation: indexed_attestation.data.slot,
-             });
-         }
-
-         // Note: we're not checking the "attestations can only affect the fork choice of subsequent
-         // slots" part of the spec, we do this upstream.
-
-         Ok(Self {
-             indexed_attestation,
-         })
-     }
-
-     /// Returns the wrapped `IndexedAttestation`.
-     pub fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> {
-         &self.indexed_attestation
-     }
- }

/// Returns `Ok(())` if the `attestation.data.beacon_block_root` is known to this chain.
///
/// The block root may not be known for two reasons:
@@ -599,6 +534,7 @@ fn verify_head_block_is_known<T: BeaconChainTypes>(
) -> Result<(), Error> {
    if chain
        .fork_choice
+       .read()
        .contains_block(&attestation.data.beacon_block_root)
    {
        Ok(())
@@ -663,7 +599,7 @@ pub fn verify_attestation_signature<T: BeaconChainTypes>(
        .canonical_head
        .try_read_for(HEAD_LOCK_TIMEOUT)
        .ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout)
-       .map(|head| head.beacon_state.fork.clone())?;
+       .map(|head| head.beacon_state.fork)?;

    let signature_set = indexed_attestation_signature_set_from_pubkeys(
        |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
@@ -680,7 +616,7 @@ pub fn verify_attestation_signature<T: BeaconChainTypes>(
    let _signature_verification_timer =
        metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SIGNATURE_TIMES);

-   if signature_set.is_valid() {
+   if signature_set.verify() {
        Ok(())
    } else {
        Err(Error::InvalidSignature)
@@ -691,8 +627,8 @@ pub fn verify_attestation_signature<T: BeaconChainTypes>(
/// includes three signatures:
///
/// - `signed_aggregate.signature`
- /// - `signed_aggregate.signature.message.selection proof`
- /// - `signed_aggregate.signature.message.aggregate.signature`
+ /// - `signed_aggregate.message.selection_proof`
+ /// - `signed_aggregate.message.aggregate.signature`
///
/// # Returns
///
@@ -718,7 +654,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
        .canonical_head
        .try_read_for(HEAD_LOCK_TIMEOUT)
        .ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout)
-       .map(|head| head.beacon_state.fork.clone())?;
+       .map(|head| head.beacon_state.fork)?;

    let signature_sets = vec![
        signed_aggregate_selection_proof_signature_set(
@@ -748,22 +684,26 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
            .map_err(BeaconChainError::SignatureSetError)?,
    ];

-   Ok(verify_signature_sets(signature_sets))
+   Ok(verify_signature_sets(signature_sets.iter()))
}
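
The change above passes an iterator of signature sets into a single batch verification. A rough illustration of the "collect, then verify all" pattern, with signature sets modelled as simple predicates rather than real BLS objects:

```rust
/// Illustrative sketch only: each "signature set" is a boxed predicate, not a
/// real BLS signature set.
type SignatureSet = Box<dyn Fn() -> bool>;

fn verify_signature_sets<'a>(sets: impl Iterator<Item = &'a SignatureSet>) -> bool {
    // The aggregate is only valid if there is at least one set and *every*
    // constituent set verifies (an empty batch should not pass by default).
    let mut sets = sets.peekable();
    sets.peek().is_some() && sets.all(|set| set())
}

fn main() {
    let sets: Vec<SignatureSet> = vec![
        Box::new(|| true),  // signed_aggregate.signature
        Box::new(|| true),  // selection proof
        Box::new(|| false), // aggregate attestation signature (invalid)
    ];
    assert!(!verify_signature_sets(sets.iter()));
}
```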

- /// Returns the `indexed_attestation` for the `attestation` using the public keys cached in the
- /// `chain`.
- pub fn obtain_indexed_attestation<T: BeaconChainTypes>(
+ /// Assists in readability.
+ type CommitteesPerSlot = u64;
+
+ /// Returns the `indexed_attestation` and committee count per slot for the `attestation` using the
+ /// public keys cached in the `chain`.
+ pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    attestation: &Attestation<T::EthSpec>,
- ) -> Result<IndexedAttestation<T::EthSpec>, Error> {
-     map_attestation_committee(chain, attestation, |committee| {
+ ) -> Result<(IndexedAttestation<T::EthSpec>, CommitteesPerSlot), Error> {
+     map_attestation_committee(chain, attestation, |(committee, committees_per_slot)| {
        get_indexed_attestation(committee.committee, &attestation)
+           .map(|attestation| (attestation, committees_per_slot))
            .map_err(|e| BeaconChainError::from(e).into())
    })
}
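
Callers use the closure-based API along the lines of this reduced model, where the committee borrows from cache- or state-local data and therefore can only be handed to a closure rather than returned. The types below are stand-ins, not the real `BeaconCommittee`:

```rust
/// Reduced model of the "map" pattern used by `map_attestation_committee`:
/// the committee may borrow from data that only lives for the duration of the
/// call, so the caller supplies a closure instead of receiving the borrow.
struct Committee<'a> {
    committee: &'a [usize],
}

fn with_committee<R, F>(map_fn: F) -> Result<R, String>
where
    F: Fn((Committee<'_>, u64)) -> Result<R, String>,
{
    // Imagine this Vec lives in a cache entry or a temporary `BeaconState`;
    // it cannot be returned, only borrowed for the closure call.
    let indices = vec![11, 42, 7];
    let committees_per_slot = 4;
    map_fn((Committee { committee: &indices }, committees_per_slot))
}

fn main() -> Result<(), String> {
    // The closure extracts owned data (here, a copy of the indices) before returning.
    let (owned, per_slot) =
        with_committee(|(committee, per_slot)| Ok((committee.committee.to_vec(), per_slot)))?;
    assert_eq!(owned, vec![11, 42, 7]);
    assert_eq!(per_slot, 4);
    Ok(())
}
```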

- /// Runs the `map_fn` with the committee for the given `attestation`.
+ /// Runs the `map_fn` with the committee and committee count per slot for the given `attestation`.
///
/// This function exists in this odd "map" pattern because efficiently obtaining the committee for
/// an attestation can be complex. It might involve reading straight from the
@@ -779,7 +719,7 @@ pub fn map_attestation_committee<'a, T, F, R>(
) -> Result<R, Error>
where
    T: BeaconChainTypes,
-   F: Fn(BeaconCommittee) -> Result<R, Error>,
+   F: Fn((BeaconCommittee, CommitteesPerSlot)) -> Result<R, Error>,
{
    let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch());
    let target = &attestation.data.target;
@@ -791,9 +731,10 @@ where
    // processing an attestation that does not include our latest finalized block in its chain.
    //
    // We do not delay consideration for later, we simply drop the attestation.
-   let (target_block_slot, target_block_state_root) = chain
+   let target_block = chain
        .fork_choice
-       .block_slot_and_state_root(&target.root)
+       .read()
+       .get_block(&target.root)
        .ok_or_else(|| Error::UnknownTargetRoot(target.root))?;

    // Obtain the shuffling cache, timing how long we wait.
@@ -808,9 +749,10 @@ where
    metrics::stop_timer(cache_wait_timer);

    if let Some(committee_cache) = shuffling_cache.get(attestation_epoch, target.root) {
+       let committees_per_slot = committee_cache.committees_per_slot();
        committee_cache
            .get_beacon_committee(attestation.data.slot, attestation.data.index)
-           .map(map_fn)
+           .map(|committee| map_fn((committee, committees_per_slot)))
            .unwrap_or_else(|| {
                Err(Error::NoCommitteeForSlotAndIndex {
                    slot: attestation.data.slot,
@@ -826,15 +768,15 @@ where
            chain.log,
            "Attestation processing cache miss";
            "attn_epoch" => attestation_epoch.as_u64(),
-           "target_block_epoch" => target_block_slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(),
+           "target_block_epoch" => target_block.slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(),
        );

        let state_read_timer =
            metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES);

        let mut state = chain
-           .get_state(&target_block_state_root, Some(target_block_slot))?
-           .ok_or_else(|| BeaconChainError::MissingBeaconState(target_block_state_root))?;
+           .get_state(&target_block.state_root, Some(target_block.slot))?
+           .ok_or_else(|| BeaconChainError::MissingBeaconState(target_block.state_root))?;

        metrics::stop_timer(state_read_timer);
        let state_skip_timer =
@@ -847,7 +789,7 @@ where
            // The state roots are not useful for the shuffling, so there's no need to
            // compute them.
            per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec)
-               .map_err(|e| BeaconChainError::from(e))?;
+               .map_err(BeaconChainError::from)?;
        }

        metrics::stop_timer(state_skip_timer);
@@ -859,11 +801,11 @@ where

        state
            .build_committee_cache(relative_epoch, &chain.spec)
-           .map_err(|e| BeaconChainError::from(e))?;
+           .map_err(BeaconChainError::from)?;

        let committee_cache = state
            .committee_cache(relative_epoch)
-           .map_err(|e| BeaconChainError::from(e))?;
+           .map_err(BeaconChainError::from)?;

        chain
            .shuffling_cache
@@ -873,9 +815,10 @@ where

        metrics::stop_timer(committee_building_timer);

+       let committees_per_slot = committee_cache.committees_per_slot();
        committee_cache
            .get_beacon_committee(attestation.data.slot, attestation.data.index)
-           .map(map_fn)
+           .map(|committee| map_fn((committee, committees_per_slot)))
            .unwrap_or_else(|| {
                Err(Error::NoCommitteeForSlotAndIndex {
                    slot: attestation.data.slot,

@@ -1,6 +1,6 @@
use crate::attestation_verification::{
-   Error as AttestationError, ForkChoiceVerifiedAttestation, IntoForkChoiceVerifiedAttestation,
-   VerifiedAggregatedAttestation, VerifiedUnaggregatedAttestation,
+   Error as AttestationError, SignatureVerifiedAttestation, VerifiedAggregatedAttestation,
+   VerifiedUnaggregatedAttestation,
};
use crate::block_verification::{
    check_block_relevancy, get_block_root, signature_verify_chain_segment, BlockError,
@@ -9,7 +9,6 @@ use crate::block_verification::{
use crate::errors::{BeaconChainError as Error, BlockProductionError};
use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend};
use crate::events::{EventHandler, EventKind};
- use crate::fork_choice::{Error as ForkChoiceError, ForkChoice};
use crate::head_tracker::HeadTracker;
use crate::metrics;
use crate::migrate::Migrate;
@@ -17,20 +16,26 @@ use crate::naive_aggregation_pool::{Error as NaiveAggregationError, NaiveAggrega
use crate::observed_attestations::{Error as AttestationObservationError, ObservedAttestations};
use crate::observed_attesters::{ObservedAggregators, ObservedAttesters};
use crate::observed_block_producers::ObservedBlockProducers;
+ use crate::observed_operations::{ObservationOutcome, ObservedOperations};
use crate::persisted_beacon_chain::PersistedBeaconChain;
+ use crate::persisted_fork_choice::PersistedForkChoice;
use crate::shuffling_cache::ShufflingCache;
use crate::snapshot_cache::SnapshotCache;
use crate::timeout_rw_lock::TimeoutRwLock;
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
+ use crate::BeaconForkChoiceStore;
use crate::BeaconSnapshot;
+ use fork_choice::ForkChoice;
+ use itertools::process_results;
use operation_pool::{OperationPool, PersistedOperationPool};
use parking_lot::RwLock;
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
- use state_processing::per_block_processing::errors::{
-     AttestationValidationError, AttesterSlashingValidationError, ExitValidationError,
-     ProposerSlashingValidationError,
+ use state_processing::{
+     common::get_indexed_attestation, per_block_processing,
+     per_block_processing::errors::AttestationValidationError, per_slot_processing,
+     BlockSignatureStrategy, SigVerifiedOp,
};
- use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy};
use std::borrow::Cow;
use std::cmp::Ordering;
use std::collections::HashMap;
@@ -38,18 +43,11 @@ use std::collections::HashSet;
use std::io::prelude::*;
use std::sync::Arc;
use std::time::{Duration, Instant};
- use store::iter::{
-     BlockRootsIterator, ParentRootBlockIterator, ReverseBlockRootIterator,
-     ReverseStateRootIterator, StateRootsIterator,
- };
- use store::{Error as DBError, Store};
+ use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator};
+ use store::{Error as DBError, HotColdDB, StoreOp};
use types::*;

- // Text included in blocks.
- // Must be 32-bytes or panic.
- //
- // |-------must be this long------|
- pub const GRAFFITI: &str = "sigp/lighthouse-0.1.2-prerelease";
+ pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;

/// The time-out before failure during an operation to take a read/write RwLock on the canonical
/// head.
@@ -73,20 +71,20 @@ pub const FORK_CHOICE_DB_KEY: [u8; 32] = [0; 32];

/// The result of a chain segment processing.
#[derive(Debug)]
- pub enum ChainSegmentResult {
+ pub enum ChainSegmentResult<T: EthSpec> {
    /// Processing this chain segment finished successfully.
    Successful { imported_blocks: usize },
    /// There was an error processing this chain segment. Before the error, some blocks could
    /// have been imported.
    Failed {
        imported_blocks: usize,
-       error: BlockError,
+       error: BlockError<T>,
    },
}
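
A caller of the now-generic `ChainSegmentResult` can still report partial progress on failure. A sketch with a simplified error type in place of `BlockError<T>`:

```rust
/// Sketch of consuming `ChainSegmentResult`; the enum is reproduced with a
/// plain `String` error for illustration only.
#[derive(Debug)]
enum ChainSegmentResult {
    Successful { imported_blocks: usize },
    Failed { imported_blocks: usize, error: String },
}

fn report(result: ChainSegmentResult) {
    match result {
        ChainSegmentResult::Successful { imported_blocks } => {
            println!("imported {} blocks", imported_blocks)
        }
        // Even on failure, some prefix of the segment may have been imported.
        ChainSegmentResult::Failed { imported_blocks, error } => {
            println!("imported {} blocks before error: {}", imported_blocks, error)
        }
    }
}

fn main() {
    report(ChainSegmentResult::Failed {
        imported_blocks: 3,
        error: "invalid signature".into(),
    });
}
```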

- /// The accepted clock drift for nodes gossiping blocks and attestations (spec v0.11.0). See:
+ /// The accepted clock drift for nodes gossiping blocks and attestations. See:
///
- /// https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/p2p-interface.md#configuration
+ /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration
pub const MAXIMUM_GOSSIP_CLOCK_DISPARITY: Duration = Duration::from_millis(500);
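
The 500ms constant bounds how far a gossiped message's nominal time may deviate from the local wall clock. A sketch of such a tolerance check (not Lighthouse's actual slot-clock code):

```rust
/// Sketch of a gossip clock-disparity tolerance; not Lighthouse's real
/// slot-clock logic.
use std::time::Duration;

const MAXIMUM_GOSSIP_CLOCK_DISPARITY: Duration = Duration::from_millis(500);

/// Accept a message whose nominal time falls within
/// `[now - disparity, now + disparity]` of our wall clock `now`.
fn within_tolerance(now: Duration, message_time: Duration) -> bool {
    let earliest = now
        .checked_sub(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
        .unwrap_or(Duration::ZERO);
    let latest = now + MAXIMUM_GOSSIP_CLOCK_DISPARITY;
    message_time >= earliest && message_time <= latest
}

fn main() {
    let now = Duration::from_secs(100);
    assert!(within_tolerance(now, Duration::from_millis(100_400))); // 400ms ahead: ok
    assert!(!within_tolerance(now, Duration::from_millis(100_600))); // 600ms ahead: too far
}
```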

#[derive(Debug, PartialEq)]
@@ -150,10 +148,11 @@ pub struct HeadInfo {
}

pub trait BeaconChainTypes: Send + Sync + 'static {
-   type Store: store::Store<Self::EthSpec>;
-   type StoreMigrator: Migrate<Self::Store, Self::EthSpec>;
+   type HotStore: store::ItemStore<Self::EthSpec>;
+   type ColdStore: store::ItemStore<Self::EthSpec>;
+   type StoreMigrator: Migrate<Self::EthSpec, Self::HotStore, Self::ColdStore>;
    type SlotClock: slot_clock::SlotClock;
-   type Eth1Chain: Eth1ChainBackend<Self::EthSpec, Self::Store>;
+   type Eth1Chain: Eth1ChainBackend<Self::EthSpec>;
    type EthSpec: types::EthSpec;
    type EventHandler: EventHandler<Self::EthSpec>;
}
@@ -163,7 +162,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static {
pub struct BeaconChain<T: BeaconChainTypes> {
    pub spec: ChainSpec,
    /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB.
-   pub store: Arc<T::Store>,
+   pub store: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    /// Database migrator for running background maintenance on the store.
    pub store_migrator: T::StoreMigrator,
    /// Reports the current slot, typically based upon the system clock.
@@ -176,7 +175,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
    ///
    /// This pool accepts `Attestation` objects that only have one aggregation bit set and provides
    /// a method to get an aggregated `Attestation` for some `AttestationData`.
-   pub naive_aggregation_pool: NaiveAggregationPool<T::EthSpec>,
+   pub naive_aggregation_pool: RwLock<NaiveAggregationPool<T::EthSpec>>,
    /// Contains a store of attestations which have been observed by the beacon chain.
    pub observed_attestations: ObservedAttestations<T::EthSpec>,
    /// Maintains a record of which validators have been seen to attest in recent epochs.
@@ -186,17 +185,27 @@ pub struct BeaconChain<T: BeaconChainTypes> {
    pub observed_aggregators: ObservedAggregators<T::EthSpec>,
    /// Maintains a record of which validators have proposed blocks for each slot.
    pub observed_block_producers: ObservedBlockProducers<T::EthSpec>,
+   /// Maintains a record of which validators have submitted voluntary exits.
+   pub observed_voluntary_exits: ObservedOperations<SignedVoluntaryExit, T::EthSpec>,
+   /// Maintains a record of which validators we've seen proposer slashings for.
+   pub observed_proposer_slashings: ObservedOperations<ProposerSlashing, T::EthSpec>,
+   /// Maintains a record of which validators we've seen attester slashings for.
+   pub observed_attester_slashings: ObservedOperations<AttesterSlashing<T::EthSpec>, T::EthSpec>,
    /// Provides information from the Ethereum 1 (PoW) chain.
-   pub eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec, T::Store>>,
+   pub eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
    /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received.
    pub(crate) canonical_head: TimeoutRwLock<BeaconSnapshot<T::EthSpec>>,
    /// The root of the genesis block.
    pub genesis_block_root: Hash256,
+   /// The root of the list of genesis validators, used during syncing.
+   pub genesis_validators_root: Hash256,
+
+   #[allow(clippy::type_complexity)]
    /// A state-machine that is updated with information from the network and chooses a canonical
    /// head block.
-   pub fork_choice: ForkChoice<T>,
+   pub fork_choice: RwLock<
+       ForkChoice<BeaconForkChoiceStore<T::EthSpec, T::HotStore, T::ColdStore>, T::EthSpec>,
+   >,
    /// A handler for events generated by the beacon chain.
    pub event_handler: T::EventHandler,
    /// Used to track the heads of the beacon chain.
@@ -211,6 +220,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
    pub disabled_forks: Vec<String>,
    /// Logging to CLI, etc.
    pub(crate) log: Logger,
+   /// Arbitrary bytes included in the blocks.
+   pub(crate) graffiti: Graffiti,
}

type BeaconBlockAndState<T> = (BeaconBlock<T>, BeaconState<T>);
@@ -241,16 +252,23 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

        let fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE);

-       self.store.put(
+       let fork_choice = self.fork_choice.read();
+
+       self.store.put_item(
            &Hash256::from_slice(&FORK_CHOICE_DB_KEY),
-           &self.fork_choice.as_ssz_container(),
+           &PersistedForkChoice {
+               fork_choice: fork_choice.to_persisted(),
+               fork_choice_store: fork_choice.fc_store().to_persisted(),
+           },
        )?;

+       drop(fork_choice);
+
        metrics::stop_timer(fork_choice_timer);
        let head_timer = metrics::start_timer(&metrics::PERSIST_HEAD);

        self.store
-           .put(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY), &persisted_head)?;
+           .put_item(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY), &persisted_head)?;

        metrics::stop_timer(head_timer);

@@ -264,31 +282,27 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    /// This operation is typically slow and causes a lot of allocations. It should be used
    /// sparingly.
    pub fn persist_op_pool(&self) -> Result<(), Error> {
-       let timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);
+       let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);

-       self.store.put(
+       self.store.put_item(
            &Hash256::from_slice(&OP_POOL_DB_KEY),
            &PersistedOperationPool::from_operation_pool(&self.op_pool),
        )?;

-       metrics::stop_timer(timer);
-
        Ok(())
    }

    /// Persists `self.eth1_chain` and its caches to disk.
    pub fn persist_eth1_cache(&self) -> Result<(), Error> {
-       let timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);
+       let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);

        if let Some(eth1_chain) = self.eth1_chain.as_ref() {
-           self.store.put(
+           self.store.put_item(
                &Hash256::from_slice(&ETH1_CACHE_DB_KEY),
                &eth1_chain.as_ssz_container(),
            )?;
        }

-       metrics::stop_timer(timer);
-
        Ok(())
    }

@@ -324,30 +338,31 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    /// returned may be earlier than the wall-clock slot.
    pub fn rev_iter_block_roots(
        &self,
-   ) -> Result<ReverseBlockRootIterator<T::EthSpec, T::Store>, Error> {
+   ) -> Result<impl Iterator<Item = Result<(Hash256, Slot), Error>>, Error> {
        let head = self.head()?;

        let iter = BlockRootsIterator::owned(self.store.clone(), head.beacon_state);

-       Ok(ReverseBlockRootIterator::new(
-           (head.beacon_block_root, head.beacon_block.slot()),
-           iter,
-       ))
+       Ok(
+           std::iter::once(Ok((head.beacon_block_root, head.beacon_block.slot())))
+               .chain(iter)
+               .map(|result| result.map_err(|e| e.into())),
+       )
    }
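
The root iterators above now yield `Result` items, and `itertools::process_results` (imported earlier in this diff) lets callers keep using plain iterator adaptors while short-circuiting on the first error. A self-contained sketch with integers standing in for `Hash256`/`Slot`:

```rust
/// Sketch of the `process_results` pattern used throughout this diff; plain
/// integers stand in for `Hash256`/`Slot`.
use itertools::process_results;

fn main() {
    let roots: Vec<Result<(u32, u64), String>> =
        vec![Ok((0xaa, 3)), Ok((0xbb, 2)), Ok((0xcc, 1))];

    // Find the root at slot 2; any Err in the stream would abort the search.
    let found: Result<Option<u32>, String> =
        process_results(roots.into_iter(), |mut iter| {
            iter.find(|(_, slot)| *slot == 2).map(|(root, _)| root)
        });

    assert_eq!(found, Ok(Some(0xbb)));
}
```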

    pub fn forwards_iter_block_roots(
        &self,
        start_slot: Slot,
-   ) -> Result<<T::Store as Store<T::EthSpec>>::ForwardsBlockRootsIterator, Error> {
+   ) -> Result<impl Iterator<Item = Result<(Hash256, Slot), Error>>, Error> {
        let local_head = self.head()?;

-       Ok(T::Store::forwards_block_roots_iterator(
+       let iter = HotColdDB::forwards_block_roots_iterator(
            self.store.clone(),
            start_slot,
            local_head.beacon_state,
            local_head.beacon_block_root,
            &self.spec,
-       ))
+       )?;
+
+       Ok(iter.map(|result| result.map_err(Into::into)))
    }

    /// Traverse backwards from `block_root` to find the block roots of its ancestors.
@@ -362,7 +377,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    pub fn rev_iter_block_roots_from(
        &self,
        block_root: Hash256,
-   ) -> Result<ReverseBlockRootIterator<T::EthSpec, T::Store>, Error> {
+   ) -> Result<impl Iterator<Item = Result<(Hash256, Slot), Error>>, Error> {
        let block = self
            .get_block(&block_root)?
            .ok_or_else(|| Error::MissingBeaconBlock(block_root))?;
@@ -370,10 +385,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            .get_state(&block.state_root(), Some(block.slot()))?
            .ok_or_else(|| Error::MissingBeaconState(block.state_root()))?;
        let iter = BlockRootsIterator::owned(self.store.clone(), state);
-       Ok(ReverseBlockRootIterator::new(
-           (block_root, block.slot()),
-           iter,
-       ))
+       Ok(std::iter::once(Ok((block_root, block.slot())))
+           .chain(iter)
+           .map(|result| result.map_err(|e| e.into())))
    }

    /// Traverse backwards from `block_root` to find the root of the ancestor block at `slot`.
@@ -382,10 +396,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        block_root: Hash256,
        slot: Slot,
    ) -> Result<Option<Hash256>, Error> {
-       Ok(self
-           .rev_iter_block_roots_from(block_root)?
-           .find(|(_, ancestor_slot)| *ancestor_slot == slot)
-           .map(|(ancestor_block_root, _)| ancestor_block_root))
+       process_results(self.rev_iter_block_roots_from(block_root)?, |mut iter| {
+           iter.find(|(_, ancestor_slot)| *ancestor_slot == slot)
+               .map(|(ancestor_block_root, _)| ancestor_block_root)
+       })
    }

    /// Iterates across all `(state_root, slot)` pairs from the head of the chain (inclusive) to
@@ -399,16 +413,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    /// returned may be earlier than the wall-clock slot.
    pub fn rev_iter_state_roots(
        &self,
-   ) -> Result<ReverseStateRootIterator<T::EthSpec, T::Store>, Error> {
+   ) -> Result<impl Iterator<Item = Result<(Hash256, Slot), Error>>, Error> {
        let head = self.head()?;
        let slot = head.beacon_state.slot;

        let iter = StateRootsIterator::owned(self.store.clone(), head.beacon_state);

-       Ok(ReverseStateRootIterator::new(
-           (head.beacon_state_root, slot),
-           iter,
-       ))
+       let iter = std::iter::once(Ok((head.beacon_state_root, slot)))
+           .chain(iter)
+           .map(|result| result.map_err(Into::into));
+       Ok(iter)
    }

    /// Returns the block at the given slot, if any. Only returns blocks in the canonical chain.
@@ -420,13 +432,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        &self,
        slot: Slot,
    ) -> Result<Option<SignedBeaconBlock<T::EthSpec>>, Error> {
-       let root = self
-           .rev_iter_block_roots()?
-           .find(|(_, this_slot)| *this_slot == slot)
-           .map(|(root, _)| root);
+       let root = process_results(self.rev_iter_block_roots()?, |mut iter| {
+           iter.find(|(_, this_slot)| *this_slot == slot)
+               .map(|(root, _)| root)
+       })?;

        if let Some(block_root) = root {
-           Ok(self.store.get(&block_root)?)
+           Ok(self.store.get_item(&block_root)?)
        } else {
            Ok(None)
        }
@@ -483,9 +495,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            slot: head.beacon_block.slot(),
            block_root: head.beacon_block_root,
            state_root: head.beacon_state_root,
-           current_justified_checkpoint: head.beacon_state.current_justified_checkpoint.clone(),
-           finalized_checkpoint: head.beacon_state.finalized_checkpoint.clone(),
-           fork: head.beacon_state.fork.clone(),
+           current_justified_checkpoint: head.beacon_state.current_justified_checkpoint,
+           finalized_checkpoint: head.beacon_state.finalized_checkpoint,
+           fork: head.beacon_state.fork,
            genesis_time: head.beacon_state.genesis_time,
            genesis_validators_root: head.beacon_state.genesis_validators_root,
        })
@@ -569,12 +581,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            Ok(state)
        }
        Ordering::Less => {
-           let state_root = self
-               .rev_iter_state_roots()?
-               .take_while(|(_root, current_slot)| *current_slot >= slot)
-               .find(|(_root, current_slot)| *current_slot == slot)
-               .map(|(root, _slot)| root)
-               .ok_or_else(|| Error::NoStateForSlot(slot))?;
+           let state_root = process_results(self.rev_iter_state_roots()?, |iter| {
+               iter.take_while(|(_, current_slot)| *current_slot >= slot)
+                   .find(|(_, current_slot)| *current_slot == slot)
+                   .map(|(root, _slot)| root)
+           })?
+           .ok_or_else(|| Error::NoStateForSlot(slot))?;

            Ok(self
                .get_state(&state_root, Some(slot))?
@@ -649,10 +661,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    ///
    /// Returns None if a block doesn't exist at the slot.
    pub fn root_at_slot(&self, target_slot: Slot) -> Result<Option<Hash256>, Error> {
-       Ok(self
-           .rev_iter_block_roots()?
-           .find(|(_root, slot)| *slot == target_slot)
-           .map(|(root, _slot)| root))
+       process_results(self.rev_iter_block_roots()?, |mut iter| {
+           iter.find(|(_, slot)| *slot == target_slot)
+               .map(|(root, _)| root)
+       })
    }

    /// Returns the block proposer for a given slot.
@@ -735,7 +747,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        &self,
        data: &AttestationData,
    ) -> Result<Option<Attestation<T::EthSpec>>, Error> {
-       self.naive_aggregation_pool.get(data).map_err(Into::into)
+       self.naive_aggregation_pool
+           .read()
+           .get(data)
+           .map_err(Into::into)
    }

    /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`.
@@ -843,14 +858,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            data: AttestationData {
                slot,
                index,
-               beacon_block_root: beacon_block_root,
-               source: state.current_justified_checkpoint.clone(),
+               beacon_block_root,
+               source: state.current_justified_checkpoint,
                target: Checkpoint {
                    epoch,
                    root: target_root,
                },
            },
-           signature: AggregateSignature::empty_signature(),
+           signature: AggregateSignature::empty(),
        })
    }

@@ -862,12 +877,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    pub fn verify_unaggregated_attestation_for_gossip(
        &self,
        attestation: Attestation<T::EthSpec>,
+       subnet_id: SubnetId,
    ) -> Result<VerifiedUnaggregatedAttestation<T>, AttestationError> {
        metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS);
        let _timer =
            metrics::start_timer(&metrics::UNAGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES);

-       VerifiedUnaggregatedAttestation::verify(attestation, self).map(|v| {
+       VerifiedUnaggregatedAttestation::verify(attestation, subnet_id, self).map(|v| {
            metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES);
            v
        })
@@ -892,23 +908,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    /// Accepts some attestation-type object and attempts to verify it in the context of fork
    /// choice. If it is valid it is applied to `self.fork_choice`.
    ///
-   /// Common items that implement `IntoForkChoiceVerifiedAttestation`:
+   /// Common items that implement `SignatureVerifiedAttestation`:
    ///
    /// - `VerifiedUnaggregatedAttestation`
    /// - `VerifiedAggregatedAttestation`
-   /// - `ForkChoiceVerifiedAttestation`
    pub fn apply_attestation_to_fork_choice<'a>(
        &self,
-       unverified_attestation: &'a impl IntoForkChoiceVerifiedAttestation<'a, T>,
-   ) -> Result<ForkChoiceVerifiedAttestation<'a, T>, AttestationError> {
-       let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_APPLY_TO_FORK_CHOICE);
+       verified: &'a impl SignatureVerifiedAttestation<T>,
+   ) -> Result<(), Error> {
+       let _timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);

-       let verified = unverified_attestation.into_fork_choice_verified_attestation(self)?;
-       let indexed_attestation = verified.indexed_attestation();
        self.fork_choice
-           .process_indexed_attestation(indexed_attestation)
-           .map_err(|e| Error::from(e))?;
-       Ok(verified)
+           .write()
+           .on_attestation(self.slot()?, verified.indexed_attestation())
+           .map_err(Into::into)
    }
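
The definition of `SignatureVerifiedAttestation` is not part of this diff; judging from its use above, a plausible minimal shape is a trait that exposes the verified `IndexedAttestation`. The following is an assumption-labelled sketch, not the real trait:

```rust
/// ASSUMPTION: a guessed minimal shape for `SignatureVerifiedAttestation`,
/// based only on how it is called above; not Lighthouse's actual definition.
struct IndexedAttestation {
    attesting_indices: Vec<u64>,
}

trait SignatureVerifiedAttestation {
    fn indexed_attestation(&self) -> &IndexedAttestation;
}

struct VerifiedUnaggregated {
    indexed: IndexedAttestation,
}

impl SignatureVerifiedAttestation for VerifiedUnaggregated {
    fn indexed_attestation(&self) -> &IndexedAttestation {
        &self.indexed
    }
}

/// Fork choice only needs the indexed form, so it can accept any verified wrapper.
fn apply_to_fork_choice(verified: &impl SignatureVerifiedAttestation) {
    println!(
        "applying {} indices",
        verified.indexed_attestation().attesting_indices.len()
    );
}

fn main() {
    let v = VerifiedUnaggregated {
        indexed: IndexedAttestation { attesting_indices: vec![1, 2, 3] },
    };
    apply_to_fork_choice(&v);
}
```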

    /// Accepts an `VerifiedUnaggregatedAttestation` and attempts to apply it to the "naive
@@ -927,7 +940,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

        let attestation = unaggregated_attestation.attestation();

-       match self.naive_aggregation_pool.insert(attestation) {
+       match self.naive_aggregation_pool.write().insert(attestation) {
            Ok(outcome) => trace!(
                self.log,
                "Stored unaggregated attestation";
@@ -978,8 +991,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            .try_read_for(HEAD_LOCK_TIMEOUT)
            .ok_or_else(|| Error::CanonicalHeadLockTimeout)?
            .beacon_state
-           .fork
-           .clone();
+           .fork;

        self.op_pool
            .insert_attestation(
@@ -1044,8 +1056,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        // pivot block is the same as the current state's pivot block. If it is, then the
        // attestation's shuffling is the same as the current state's.
        // To account for skipped slots, find the first block at *or before* the pivot slot.
-       let fork_choice_lock = self.fork_choice.core_proto_array();
+       let fork_choice_lock = self.fork_choice.read();
        let pivot_block_root = fork_choice_lock
+           .proto_array()
+           .core_proto_array()
            .iter_block_roots(block_root)
            .find(|(_, slot)| *slot <= pivot_slot)
            .map(|(block_root, _)| block_root);
@@ -1065,81 +1079,68 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        }
    }

-   /// Accept some exit and queue it for inclusion in an appropriate block.
-   pub fn process_voluntary_exit(
+   /// Verify a voluntary exit before allowing it to propagate on the gossip network.
+   pub fn verify_voluntary_exit_for_gossip(
        &self,
        exit: SignedVoluntaryExit,
-   ) -> Result<(), ExitValidationError> {
-       match self.wall_clock_state() {
-           Ok(state) => {
-               if self.eth1_chain.is_some() {
-                   self.op_pool.insert_voluntary_exit(exit, &state, &self.spec)
-               } else {
-                   Ok(())
-               }
-           }
-           Err(e) => {
-               error!(
-                   &self.log,
-                   "Unable to process voluntary exit";
-                   "error" => format!("{:?}", e),
-                   "reason" => "no state"
-               );
-               Ok(())
-           }
-       }
+   ) -> Result<ObservationOutcome<SignedVoluntaryExit>, Error> {
+       // NOTE: this could be more efficient if it avoided cloning the head state
+       let wall_clock_state = self.wall_clock_state()?;
+       Ok(self
+           .observed_voluntary_exits
+           .verify_and_observe(exit, &wall_clock_state, &self.spec)?)
    }

+   /// Accept a pre-verified exit and queue it for inclusion in an appropriate block.
+   pub fn import_voluntary_exit(&self, exit: SigVerifiedOp<SignedVoluntaryExit>) {
+       if self.eth1_chain.is_some() {
+           self.op_pool.insert_voluntary_exit(exit)
+       }
+   }
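
`ObservationOutcome` distinguishes a first-seen operation from a duplicate; its exact variants are not shown in this diff, so the two-variant shape below is an assumption based on usage:

```rust
/// ASSUMPTION: the two-variant shape of `ObservationOutcome` is inferred from
/// its use here, not copied from the crate's actual definition.
enum ObservationOutcome<T> {
    New(T),
    AlreadyKnown,
}

fn handle_gossip_exit(outcome: ObservationOutcome<&'static str>) {
    match outcome {
        // First observation: propagate on gossip and queue for a block.
        ObservationOutcome::New(exit) => println!("import + propagate: {}", exit),
        // Seen before: do not re-propagate.
        ObservationOutcome::AlreadyKnown => println!("ignore duplicate"),
    }
}

fn main() {
    handle_gossip_exit(ObservationOutcome::New("exit for validator 7"));
    handle_gossip_exit(ObservationOutcome::AlreadyKnown);
}
```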

    /// Verify a proposer slashing before allowing it to propagate on the gossip network.
    pub fn verify_proposer_slashing_for_gossip(
        &self,
        proposer_slashing: ProposerSlashing,
    ) -> Result<ObservationOutcome<ProposerSlashing>, Error> {
        let wall_clock_state = self.wall_clock_state()?;
        Ok(self.observed_proposer_slashings.verify_and_observe(
            proposer_slashing,
            &wall_clock_state,
            &self.spec,
        )?)
    }

    /// Accept some proposer slashing and queue it for inclusion in an appropriate block.
-   pub fn process_proposer_slashing(
-       &self,
-       proposer_slashing: ProposerSlashing,
-   ) -> Result<(), ProposerSlashingValidationError> {
-       match self.wall_clock_state() {
-           Ok(state) => {
-               if self.eth1_chain.is_some() {
-                   self.op_pool
-                       .insert_proposer_slashing(proposer_slashing, &state, &self.spec)
-               } else {
-                   Ok(())
-               }
-           }
-           Err(e) => {
-               error!(
-                   &self.log,
-                   "Unable to process proposer slashing";
-                   "error" => format!("{:?}", e),
-                   "reason" => "no state"
-               );
-               Ok(())
-           }
-       }
+   pub fn import_proposer_slashing(&self, proposer_slashing: SigVerifiedOp<ProposerSlashing>) {
+       if self.eth1_chain.is_some() {
+           self.op_pool.insert_proposer_slashing(proposer_slashing)
+       }
    }

-   /// Accept some attester slashing and queue it for inclusion in an appropriate block.
-   pub fn process_attester_slashing(
+   /// Verify an attester slashing before allowing it to propagate on the gossip network.
+   pub fn verify_attester_slashing_for_gossip(
        &self,
        attester_slashing: AttesterSlashing<T::EthSpec>,
-   ) -> Result<(), AttesterSlashingValidationError> {
-       match self.wall_clock_state() {
-           Ok(state) => {
-               if self.eth1_chain.is_some() {
-                   self.op_pool
-                       .insert_attester_slashing(attester_slashing, &state, &self.spec)
-               } else {
-                   Ok(())
-               }
-           }
-           Err(e) => {
-               error!(
-                   &self.log,
-                   "Unable to process attester slashing";
-                   "error" => format!("{:?}", e),
-                   "reason" => "no state"
-               );
-               Ok(())
-           }
-       }
+   ) -> Result<ObservationOutcome<AttesterSlashing<T::EthSpec>>, Error> {
+       let wall_clock_state = self.wall_clock_state()?;
+       Ok(self.observed_attester_slashings.verify_and_observe(
+           attester_slashing,
+           &wall_clock_state,
+           &self.spec,
+       )?)
    }

+   /// Accept some attester slashing and queue it for inclusion in an appropriate block.
+   pub fn import_attester_slashing(
+       &self,
+       attester_slashing: SigVerifiedOp<AttesterSlashing<T::EthSpec>>,
+   ) -> Result<(), Error> {
+       if self.eth1_chain.is_some() {
+           self.op_pool
+               .insert_attester_slashing(attester_slashing, self.head_info()?.fork)
+       }
+       Ok(())
+   }

    /// Attempt to verify and import a chain of blocks to `self`.
@@ -1155,7 +1156,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    pub fn process_chain_segment(
        &self,
        chain_segment: Vec<SignedBeaconBlock<T::EthSpec>>,
-   ) -> ChainSegmentResult {
+   ) -> ChainSegmentResult<T::EthSpec> {
        let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len());
        let mut imported_blocks = 0;

@@ -1228,14 +1229,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            }
        }

-       while !filtered_chain_segment.is_empty() {
+       while let Some((_root, block)) = filtered_chain_segment.first() {
            // Determine the epoch of the first block in the remaining segment.
-           let start_epoch = filtered_chain_segment
-               .first()
-               .map(|(_root, block)| block)
-               .expect("chain_segment cannot be empty")
-               .slot()
-               .epoch(T::EthSpec::slots_per_epoch());
+           let start_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());

            // The `last_index` indicates the position of the last block that is in the current
            // epoch of `start_epoch`.
@@ -1293,7 +1289,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    pub fn verify_block_for_gossip(
        &self,
        block: SignedBeaconBlock<T::EthSpec>,
-   ) -> Result<GossipVerifiedBlock<T>, BlockError> {
+   ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
        let slot = block.message.slot;
        let graffiti_string = String::from_utf8(block.message.body.graffiti[..].to_vec())
            .unwrap_or_else(|_| format!("{:?}", &block.message.body.graffiti[..]));
@@ -1339,9 +1335,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    pub fn process_block<B: IntoFullyVerifiedBlock<T>>(
        &self,
        unverified_block: B,
-   ) -> Result<Hash256, BlockError> {
+   ) -> Result<Hash256, BlockError<T::EthSpec>> {
        // Start the Prometheus timer.
-       let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
+       let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);

        // Increment the Prometheus counter for block processing requests.
        metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS);
@@ -1350,13 +1346,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        let block = unverified_block.block().clone();

        // A small closure to group the verification and import errors.
-       let import_block = |unverified_block: B| -> Result<Hash256, BlockError> {
+       let import_block = |unverified_block: B| -> Result<Hash256, BlockError<T::EthSpec>> {
            let fully_verified = unverified_block.into_fully_verified_block(self)?;
            self.import_block(fully_verified)
        };

        // Verify and import the block.
-       let result = match import_block(unverified_block) {
+       match import_block(unverified_block) {
            // The block was successfully verified and imported. Yay.
            Ok(block_root) => {
                trace!(
@@ -1370,7 +1366,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES);

                let _ = self.event_handler.register(EventKind::BeaconBlockImported {
-                   block_root: block_root,
+                   block_root,
                    block: Box::new(block),
                });

@@ -1407,12 +1403,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

                Err(other)
            }
-       };
-
-       // Stop the Prometheus timer.
-       metrics::stop_timer(full_timer);
-
-       result
+       }
    }

    /// Accepts a fully-verified block and imports it into the chain without performing any
@@ -1423,13 +1414,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    fn import_block(
        &self,
        fully_verified_block: FullyVerifiedBlock<T>,
-   ) -> Result<Hash256, BlockError> {
+   ) -> Result<Hash256, BlockError<T::EthSpec>> {
        let signed_block = fully_verified_block.block;
        let block = &signed_block.message;
        let block_root = fully_verified_block.block_root;
        let state = fully_verified_block.state;
        let parent_block = fully_verified_block.parent_block;
-       let intermediate_states = fully_verified_block.intermediate_states;
+       let current_slot = self.slot()?;
+       let mut ops = fully_verified_block.intermediate_states;

        let attestation_observation_timer =
            metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);
@@ -1449,9 +1441,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

        metrics::stop_timer(attestation_observation_timer);

-       let fork_choice_register_timer =
-           metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER);
-
        // If there are new validators in this block, update our pubkey cache.
        //
        // We perform this _before_ adding the block to fork choice because the pubkey cache is
@@ -1487,20 +1476,35 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            shuffling_cache.insert(state.current_epoch(), target_root, committee_cache);
        }

+       let mut fork_choice = self.fork_choice.write();
+
        // Register the new block with the fork choice service.
-       if let Err(e) = self
-           .fork_choice
-           .process_block(self, &state, block, block_root)
        {
-           error!(
-               self.log,
-               "Add block to fork choice failed";
-               "block_root" => format!("{}", block_root),
-               "error" => format!("{:?}", e),
-           )
+           let _fork_choice_block_timer =
+               metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
+           fork_choice
+               .on_block(current_slot, block, block_root, &state)
+               .map_err(|e| BlockError::BeaconChainError(e.into()))?;
        }

-       metrics::stop_timer(fork_choice_register_timer);
+       // Register each attestation in the block with the fork choice service.
+       for attestation in &block.body.attestations[..] {
+           let _fork_choice_attestation_timer =
+               metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
+
+           let committee =
+               state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
+           let indexed_attestation = get_indexed_attestation(committee.committee, attestation)
+               .map_err(|e| BlockError::BeaconChainError(e.into()))?;
+
+           match fork_choice.on_attestation(current_slot, &indexed_attestation) {
+               Ok(()) => Ok(()),
+               // Ignore invalid attestations whilst importing attestations from a block. The
+               // block might be very old and therefore the attestations useless to fork choice.
+               Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()),
+               Err(e) => Err(BlockError::BeaconChainError(e.into())),
+           }?;
+       }

        metrics::observe(
            &metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
@@ -1509,18 +1513,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

        let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE);

-       // Store all the states between the parent block state and this block's slot before storing
-       // the final state.
-       intermediate_states.commit(&*self.store)?;
+       // Store all the states between the parent block state and this block's slot, the block and state.
+       ops.push(StoreOp::PutBlock(block_root.into(), signed_block.clone()));
+       ops.push(StoreOp::PutState(
+           block.state_root.into(),
+           Cow::Borrowed(&state),
+       ));
+       self.store.do_atomically(ops)?;

-       // Store the block and state.
-       // NOTE: we store the block *after* the state to guard against inconsistency in the event of
-       // a crash, as states are usually looked up from blocks, not the other way around. A better
-       // solution would be to use a database transaction (once our choice of database and API
-       // settles down).
-       // See: https://github.com/sigp/lighthouse/issues/692
-       self.store.put_state(&block.state_root, &state)?;
-       self.store.put_block(&block_root, signed_block.clone())?;
+       // The fork choice write-lock is dropped *after* the on-disk database has been updated.
+       // This prevents inconsistency between the two at the expense of concurrency.
+       drop(fork_choice);
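
The new `do_atomically` call replaces the old "state first, block second" ordering hack with a single batched write: either every pending op lands or none do, so a crash can no longer leave a block on disk without its state. A reduced model of the idea, with an in-memory map standing in for the real database:

```rust
/// Sketch of the atomic-batch write pattern; `StoreOp` and this in-memory
/// store are simplified stand-ins for the real database API.
use std::collections::HashMap;

enum StoreOp {
    PutBlock(u64, String),
    PutState(u64, String),
}

#[derive(Default)]
struct Store {
    blocks: HashMap<u64, String>,
    states: HashMap<u64, String>,
}

impl Store {
    /// Apply all ops; a real store would commit them in one transaction.
    fn do_atomically(&mut self, ops: Vec<StoreOp>) {
        for op in ops {
            match op {
                StoreOp::PutBlock(root, block) => {
                    self.blocks.insert(root, block);
                }
                StoreOp::PutState(root, state) => {
                    self.states.insert(root, state);
                }
            }
        }
    }
}

fn main() {
    let mut store = Store::default();
    let ops = vec![
        StoreOp::PutState(1, "state".into()),
        StoreOp::PutBlock(1, "block".into()),
    ];
    store.do_atomically(ops);
    assert!(store.blocks.contains_key(&1) && store.states.contains_key(&1));
}
```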

        let parent_root = block.parent_root;
        let slot = block.slot;
@@ -1610,11 +1613,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            state.latest_block_header.canonical_root()
        };

-       let mut graffiti: [u8; 32] = [0; 32];
-       graffiti.copy_from_slice(GRAFFITI.as_bytes());
-
-       let (proposer_slashings, attester_slashings) =
-           self.op_pool.get_slashings(&state, &self.spec);
+       let (proposer_slashings, attester_slashings) = self.op_pool.get_slashings(&state);

        let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?;
        let deposits = eth1_chain
@@ -1636,6 +1635,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            })
        };

+       // Iterate through the naive aggregation pool and ensure all the attestations from there
+       // are included in the operation pool.
+       for attestation in self.naive_aggregation_pool.read().iter() {
+           if let Err(e) = self.op_pool.insert_attestation(
+               attestation.clone(),
+               &state.fork,
+               state.genesis_validators_root,
+               &self.spec,
+           ) {
+               // Don't stop block production if there's an error, just create a log.
+               error!(
+                   self.log,
+                   "Attestation did not transfer to op pool";
+                   "reason" => format!("{:?}", e)
+               );
+           }
+       }
+
        let mut block = SignedBeaconBlock {
            message: BeaconBlock {
                slot: state.slot,
@@ -1645,7 +1662,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                body: BeaconBlockBody {
                    randao_reveal,
                    eth1_data,
-                   graffiti,
+                   graffiti: self.graffiti,
                    proposer_slashings: proposer_slashings.into(),
                    attester_slashings: attester_slashings.into(),
                    attestations: self
@@ -1658,7 +1675,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                },
            },
            // The block is not signed here, that is the task of a validator client.
-           signature: Signature::empty_signature(),
+           signature: Signature::empty(),
        };

        per_block_processing(
@@ -1690,7 +1707,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    /// Execute the fork choice algorithm and enthrone the result as the canonical head.
    pub fn fork_choice(&self) -> Result<(), Error> {
        metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS);
-       let overall_timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES);
+       let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES);

        let result = self.fork_choice_internal();

@@ -1698,14 +1715,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS);
        }

-       metrics::stop_timer(overall_timer);
-
        result
    }

    fn fork_choice_internal(&self) -> Result<(), Error> {
        // Determine the root of the block that is the head of the chain.
-       let beacon_block_root = self.fork_choice.find_head(&self)?;
+       let beacon_block_root = self.fork_choice.write().get_head(self.slot()?)?;

        let current_head = self.head_info()?;
        let old_finalized_root = current_head.finalized_checkpoint.root;
@@ -1723,7 +1738,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            .snapshot_cache
            .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
            .and_then(|snapshot_cache| snapshot_cache.get_cloned(beacon_block_root))
-           .map::<Result<_, Error>, _>(|snapshot| Ok(snapshot))
+           .map::<Result<_, Error>, _>(Ok)
            .unwrap_or_else(|| {
                let beacon_block = self
                    .get_block(&beacon_block_root)?
@@ -1858,7 +1873,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    pub fn per_slot_task(&self) {
        trace!(self.log, "Running beacon chain per slot tasks");
        if let Some(slot) = self.slot_clock.now() {
-           self.naive_aggregation_pool.prune(slot);
+           self.naive_aggregation_pool.write().prune(slot);
        }
    }

@@ -1885,7 +1900,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            new_epoch: new_finalized_epoch,
        })
    } else {
-       self.fork_choice.prune()?;
+       self.fork_choice.write().prune()?;

        self.observed_block_producers
            .prune(new_finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()));
@@ -1908,7 +1923,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        .get_state(&finalized_block.state_root, Some(finalized_block.slot))?
        .ok_or_else(|| Error::MissingBeaconState(finalized_block.state_root))?;

-   self.op_pool.prune_all(&finalized_state, &self.spec);
+   self.op_pool
+       .prune_all(&finalized_state, self.head_info()?.fork);

    // TODO: configurable max finality distance
    let max_finality_distance = 0;
@@ -1934,7 +1950,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
    pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> {
        Ok(!self
            .store
-           .exists::<SignedBeaconBlock<T::EthSpec>>(beacon_block_root)?)
+           .item_exists::<SignedBeaconBlock<T::EthSpec>>(beacon_block_root)?)
    }

    /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
@@ -2014,8 +2030,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        let mut finalized_blocks: HashSet<Hash256> = HashSet::new();

        let genesis_block_hash = Hash256::zero();
-       write!(output, "digraph beacon {{\n").unwrap();
-       write!(output, "\t_{:?}[label=\"genesis\"];\n", genesis_block_hash).unwrap();
+       writeln!(output, "digraph beacon {{").unwrap();
+       writeln!(output, "\t_{:?}[label=\"genesis\"];", genesis_block_hash).unwrap();

        // Canonical head needs to be processed first as otherwise finalized blocks aren't detected
        // properly.
@@ -2049,36 +2065,36 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        }

        if block_hash == canonical_head_hash {
-           write!(
+           writeln!(
                output,
-               "\t_{:?}[label=\"{} ({})\" shape=box3d];\n",
+               "\t_{:?}[label=\"{} ({})\" shape=box3d];",
                block_hash,
                block_hash,
                signed_beacon_block.slot()
            )
            .unwrap();
        } else if finalized_blocks.contains(&block_hash) {
-           write!(
+           writeln!(
                output,
-               "\t_{:?}[label=\"{} ({})\" shape=Msquare];\n",
+               "\t_{:?}[label=\"{} ({})\" shape=Msquare];",
                block_hash,
                block_hash,
                signed_beacon_block.slot()
            )
            .unwrap();
        } else {
-           write!(
+           writeln!(
                output,
-               "\t_{:?}[label=\"{} ({})\" shape=box];\n",
+               "\t_{:?}[label=\"{} ({})\" shape=box];",
                block_hash,
                block_hash,
                signed_beacon_block.slot()
            )
            .unwrap();
        }
-       write!(
+       writeln!(
            output,
-           "\t_{:?} -> _{:?};\n",
+           "\t_{:?} -> _{:?};",
            block_hash,
            signed_beacon_block.parent_root()
        )
@@ -2086,7 +2102,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        }
    }

-   write!(output, "}}\n").unwrap();
+   writeln!(output, "}}").unwrap();
}

// Used for debugging
@@ -2138,8 +2154,8 @@ impl From<BeaconStateError> for Error {
    }
}

- impl ChainSegmentResult {
-     pub fn to_block_error(self) -> Result<(), BlockError> {
+ impl<T: EthSpec> ChainSegmentResult<T> {
+     pub fn into_block_error(self) -> Result<(), BlockError<T>> {
        match self {
            ChainSegmentResult::Failed { error, .. } => Err(error),
            ChainSegmentResult::Successful { .. } => Ok(()),

beacon_node/beacon_chain/src/beacon_fork_choice_store.rs (new file, 352 lines)
@@ -0,0 +1,352 @@
+ //! Defines the `BeaconForkChoiceStore` which provides the persistent storage for the `ForkChoice`
+ //! struct.
+ //!
+ //! Additionally, the private `BalancesCache` struct is defined; a cache designed to avoid database
+ //! reads when fork choice requires the validator balances of the justified state.
+
+ use crate::{metrics, BeaconSnapshot};
+ use fork_choice::ForkChoiceStore;
+ use ssz_derive::{Decode, Encode};
+ use std::marker::PhantomData;
+ use std::sync::Arc;
+ use store::{Error as StoreError, HotColdDB, ItemStore};
+ use types::{
+     BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, SignedBeaconBlock,
+     Slot,
+ };
+
+ #[derive(Debug)]
+ pub enum Error {
+     UnableToReadSlot,
+     UnableToReadTime,
+     InvalidGenesisSnapshot(Slot),
+     AncestorUnknown { ancestor_slot: Slot },
+     UninitializedBestJustifiedBalances,
+     FailedToReadBlock(StoreError),
+     MissingBlock(Hash256),
+     FailedToReadState(StoreError),
+     MissingState(Hash256),
+     InvalidPersistedBytes(ssz::DecodeError),
+     BeaconStateError(BeaconStateError),
+ }
+
+ impl From<BeaconStateError> for Error {
+     fn from(e: BeaconStateError) -> Self {
+         Error::BeaconStateError(e)
+     }
+ }
+
+ /// The number of validator balance sets that are cached within `BalancesCache`.
+ const MAX_BALANCE_CACHE_SIZE: usize = 4;
+
+ /// Returns the effective balances for every validator in the given `state`.
+ ///
+ /// Any validator who is not active in the epoch of the given `state` is assigned a balance of
+ /// zero.
+ pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
+     state
+         .validators
+         .iter()
+         .map(|validator| {
+             if validator.is_active_at(state.current_epoch()) {
+                 validator.effective_balance
+             } else {
+                 0
+             }
+         })
+         .collect()
+ }
|
||||
|
||||
/// An item that is stored in the `BalancesCache`.
|
||||
#[derive(PartialEq, Clone, Debug, Encode, Decode)]
|
||||
struct CacheItem {
|
||||
/// The block root at which `self.balances` are valid.
|
||||
block_root: Hash256,
|
||||
/// The effective balances from a `BeaconState` validator registry.
|
||||
balances: Vec<u64>,
|
||||
}
|
||||
|
||||
/// Provides a cache to avoid reading `BeaconState` from disk when updating the current justified
|
||||
/// checkpoint.
|
||||
///
|
||||
/// It is effectively a mapping of `epoch_boundary_block_root -> state.balances`.
|
||||
#[derive(PartialEq, Clone, Default, Debug, Encode, Decode)]
|
||||
struct BalancesCache {
|
||||
items: Vec<CacheItem>,
|
||||
}
|
||||
|
||||
impl BalancesCache {
|
||||
/// Inspect the given `state` and determine the root of the block at the first slot of
|
||||
/// `state.current_epoch`. If there is not already some entry for the given block root, then
|
||||
/// add the effective balances from the `state` to the cache.
|
||||
pub fn process_state<E: EthSpec>(
|
||||
&mut self,
|
||||
block_root: Hash256,
|
||||
state: &BeaconState<E>,
|
||||
) -> Result<(), Error> {
|
||||
// We are only interested in balances from states that are at the start of an epoch,
|
||||
// because this is where the `current_justified_checkpoint.root` will point.
|
||||
if !Self::is_first_block_in_epoch(block_root, state)? {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch());
|
||||
let epoch_boundary_root = if epoch_boundary_slot == state.slot {
|
||||
block_root
|
||||
} else {
|
||||
// This call remains sensible as long as `state.block_roots` is larger than a single
|
||||
// epoch.
|
||||
*state.get_block_root(epoch_boundary_slot)?
|
||||
};
|
||||
|
||||
if self.position(epoch_boundary_root).is_none() {
|
||||
let item = CacheItem {
|
||||
block_root: epoch_boundary_root,
|
||||
balances: get_effective_balances(state),
|
||||
};
|
||||
|
||||
if self.items.len() == MAX_BALANCE_CACHE_SIZE {
|
||||
self.items.remove(0);
|
||||
}
|
||||
|
||||
self.items.push(item);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns `true` if the given `block_root` is the first/only block to have been processed in
|
||||
/// the epoch of the given `state`.
|
||||
///
|
||||
/// We can determine if it is the first block by looking back through `state.block_roots` to
|
||||
/// see if there is a block in the current epoch with a different root.
|
||||
fn is_first_block_in_epoch<E: EthSpec>(
|
||||
block_root: Hash256,
|
||||
state: &BeaconState<E>,
|
||||
) -> Result<bool, Error> {
|
||||
let mut prior_block_found = false;
|
||||
|
||||
for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) {
|
||||
if slot < state.slot {
|
||||
if *state.get_block_root(slot)? != block_root {
|
||||
prior_block_found = true;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(!prior_block_found)
|
||||
}
|
||||
|
||||
fn position(&self, block_root: Hash256) -> Option<usize> {
|
||||
self.items
|
||||
.iter()
|
||||
.position(|item| item.block_root == block_root)
|
||||
}
|
||||
|
||||
/// Get the balances for the given `block_root`, if any.
|
||||
///
|
||||
/// If some balances are found, they are removed from the cache.
|
||||
pub fn get(&mut self, block_root: Hash256) -> Option<Vec<u64>> {
|
||||
let i = self.position(block_root)?;
|
||||
Some(self.items.remove(i).balances)
|
||||
}
|
||||
}
|
||||
|
||||
/// Implements `fork_choice::ForkChoiceStore` in order to provide a persistent backing to the
|
||||
/// `fork_choice::ForkChoice` struct.
|
||||
#[derive(Debug)]
|
||||
pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
|
||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
balances_cache: BalancesCache,
|
||||
time: Slot,
|
||||
finalized_checkpoint: Checkpoint,
|
||||
justified_checkpoint: Checkpoint,
|
||||
justified_balances: Vec<u64>,
|
||||
best_justified_checkpoint: Checkpoint,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<E, Hot, Cold> PartialEq for BeaconForkChoiceStore<E, Hot, Cold>
|
||||
where
|
||||
E: EthSpec,
|
||||
Hot: ItemStore<E>,
|
||||
Cold: ItemStore<E>,
|
||||
{
|
||||
/// This implementation ignores the `store` and `slot_clock`.
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.balances_cache == other.balances_cache
|
||||
&& self.time == other.time
|
||||
&& self.finalized_checkpoint == other.finalized_checkpoint
|
||||
&& self.justified_checkpoint == other.justified_checkpoint
|
||||
&& self.justified_balances == other.justified_balances
|
||||
&& self.best_justified_checkpoint == other.best_justified_checkpoint
|
||||
}
|
||||
}
|
||||
|
||||
impl<E, Hot, Cold> BeaconForkChoiceStore<E, Hot, Cold>
|
||||
where
|
||||
E: EthSpec,
|
||||
Hot: ItemStore<E>,
|
||||
Cold: ItemStore<E>,
|
||||
{
|
||||
/// Initialize `Self` from some `anchor` checkpoint which may or may not be the genesis state.
|
||||
///
|
||||
/// ## Specification
|
||||
///
|
||||
/// Equivalent to:
|
||||
///
|
||||
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_forkchoice_store
|
||||
///
|
||||
/// ## Notes:
|
||||
///
|
||||
/// It is assumed that `anchor` is already persisted in `store`.
|
||||
pub fn get_forkchoice_store(
|
||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
anchor: &BeaconSnapshot<E>,
|
||||
) -> Self {
|
||||
let anchor_state = &anchor.beacon_state;
|
||||
let mut anchor_block_header = anchor_state.latest_block_header.clone();
|
||||
if anchor_block_header.state_root == Hash256::zero() {
|
||||
anchor_block_header.state_root = anchor.beacon_state_root;
|
||||
}
|
||||
let anchor_root = anchor_block_header.canonical_root();
|
||||
let anchor_epoch = anchor_state.current_epoch();
|
||||
let justified_checkpoint = Checkpoint {
|
||||
epoch: anchor_epoch,
|
||||
root: anchor_root,
|
||||
};
|
||||
let finalized_checkpoint = justified_checkpoint;
|
||||
|
||||
Self {
|
||||
store,
|
||||
balances_cache: <_>::default(),
|
||||
time: anchor_state.slot,
|
||||
justified_checkpoint,
|
||||
justified_balances: anchor_state.balances.clone().into(),
|
||||
finalized_checkpoint,
|
||||
best_justified_checkpoint: justified_checkpoint,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Save the current state of `Self` to a `PersistedForkChoiceStore` which can be stored to the
|
||||
/// on-disk database.
|
||||
pub fn to_persisted(&self) -> PersistedForkChoiceStore {
|
||||
PersistedForkChoiceStore {
|
||||
balances_cache: self.balances_cache.clone(),
|
||||
time: self.time,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
justified_balances: self.justified_balances.clone(),
|
||||
best_justified_checkpoint: self.best_justified_checkpoint,
|
||||
}
|
||||
}
|
||||
|
||||
/// Restore `Self` from a previously-generated `PersistedForkChoiceStore`.
|
||||
pub fn from_persisted(
|
||||
persisted: PersistedForkChoiceStore,
|
||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
) -> Result<Self, Error> {
|
||||
Ok(Self {
|
||||
store,
|
||||
balances_cache: persisted.balances_cache,
|
||||
time: persisted.time,
|
||||
finalized_checkpoint: persisted.finalized_checkpoint,
|
||||
justified_checkpoint: persisted.justified_checkpoint,
|
||||
justified_balances: persisted.justified_balances,
|
||||
best_justified_checkpoint: persisted.best_justified_checkpoint,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<E, Hot, Cold> ForkChoiceStore<E> for BeaconForkChoiceStore<E, Hot, Cold>
|
||||
where
|
||||
E: EthSpec,
|
||||
Hot: ItemStore<E>,
|
||||
Cold: ItemStore<E>,
|
||||
{
|
||||
type Error = Error;
|
||||
|
||||
fn get_current_slot(&self) -> Slot {
|
||||
self.time
|
||||
}
|
||||
|
||||
fn set_current_slot(&mut self, slot: Slot) {
|
||||
self.time = slot
|
||||
}
|
||||
|
||||
fn on_verified_block(
|
||||
&mut self,
|
||||
_block: &BeaconBlock<E>,
|
||||
block_root: Hash256,
|
||||
state: &BeaconState<E>,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.balances_cache.process_state(block_root, state)
|
||||
}
|
||||
|
||||
fn justified_checkpoint(&self) -> &Checkpoint {
|
||||
&self.justified_checkpoint
|
||||
}
|
||||
|
||||
fn justified_balances(&self) -> &[u64] {
|
||||
&self.justified_balances
|
||||
}
|
||||
|
||||
fn best_justified_checkpoint(&self) -> &Checkpoint {
|
||||
&self.best_justified_checkpoint
|
||||
}
|
||||
|
||||
fn finalized_checkpoint(&self) -> &Checkpoint {
|
||||
&self.finalized_checkpoint
|
||||
}
|
||||
|
||||
fn set_finalized_checkpoint(&mut self, checkpoint: Checkpoint) {
|
||||
self.finalized_checkpoint = checkpoint
|
||||
}
|
||||
|
||||
fn set_justified_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<(), Error> {
|
||||
self.justified_checkpoint = checkpoint;
|
||||
|
||||
if let Some(balances) = self.balances_cache.get(self.justified_checkpoint.root) {
|
||||
metrics::inc_counter(&metrics::BALANCES_CACHE_HITS);
|
||||
self.justified_balances = balances;
|
||||
} else {
|
||||
metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);
|
||||
let justified_block = self
|
||||
.store
|
||||
.get_item::<SignedBeaconBlock<E>>(&self.justified_checkpoint.root)
|
||||
.map_err(Error::FailedToReadBlock)?
|
||||
.ok_or_else(|| Error::MissingBlock(self.justified_checkpoint.root))?
|
||||
.message;
|
||||
|
||||
self.justified_balances = self
|
||||
.store
|
||||
.get_state(&justified_block.state_root, Some(justified_block.slot))
|
||||
.map_err(Error::FailedToReadState)?
|
||||
.ok_or_else(|| Error::MissingState(justified_block.state_root))?
|
||||
.balances
|
||||
.into();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint) {
|
||||
self.best_justified_checkpoint = checkpoint
|
||||
}
|
||||
}
|
||||
|
||||
/// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
|
||||
#[derive(Encode, Decode)]
|
||||
pub struct PersistedForkChoiceStore {
|
||||
balances_cache: BalancesCache,
|
||||
time: Slot,
|
||||
finalized_checkpoint: Checkpoint,
|
||||
justified_checkpoint: Checkpoint,
|
||||
justified_balances: Vec<u64>,
|
||||
best_justified_checkpoint: Checkpoint,
|
||||
}
|
||||
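The `BalancesCache` above holds at most MAX_BALANCE_CACHE_SIZE entries and evicts the oldest entry once full, a simple FIFO policy. A minimal standalone sketch of that eviction behaviour (the key/payload types here are illustrative stand-ins, not the types used above):

fn main() {
    const MAX_SIZE: usize = 4;
    // Each entry pairs a key (standing in for a block root) with a payload
    // (standing in for a balance set).
    let mut items: Vec<(u64, Vec<u64>)> = Vec::new();

    for key in 0..6u64 {
        if items.iter().all(|(k, _)| *k != key) {
            // Evict the oldest entry once the cache is full, exactly as
            // `BalancesCache::process_state` does with `items.remove(0)`.
            if items.len() == MAX_SIZE {
                items.remove(0);
            }
            items.push((key, vec![32_000_000_000; 8]));
        }
    }

    // Keys 0 and 1 have been evicted; 2..=5 remain.
    let keys: Vec<u64> = items.iter().map(|(k, _)| *k).collect();
    assert_eq!(keys, vec![2, 3, 4, 5]);
}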
@@ -62,17 +62,13 @@ use std::borrow::Cow;
use std::convert::TryFrom;
use std::fs;
use std::io::Write;
-use store::{Error as DBError, StateBatch};
+use store::{Error as DBError, HotStateSummary, StoreOp};
use tree_hash::TreeHash;
use types::{
    BeaconBlock, BeaconState, BeaconStateError, ChainSpec, CloneConfig, EthSpec, Hash256,
    PublicKey, RelativeEpoch, SignedBeaconBlock, Slot,
};

-mod block_processing_outcome;
-
-pub use block_processing_outcome::BlockProcessingOutcome;

/// Maximum block slot number. Block with slots bigger than this constant will NOT be processed.
const MAXIMUM_BLOCK_SLOT_NUMBER: u64 = 4_294_967_296; // 2^32

@@ -86,55 +82,124 @@ const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files");
///
/// - The block is malformed/invalid (indicated by all results other than `BeaconChainError`).
/// - We encountered an error whilst trying to verify the block (a `BeaconChainError`).
-#[derive(Debug, PartialEq)]
-pub enum BlockError {
+#[derive(Debug)]
+pub enum BlockError<T: EthSpec> {
    /// The parent block was unknown.
-    ParentUnknown(Hash256),
+    ///
+    /// ## Peer scoring
+    ///
+    /// It's unclear if this block is valid, but it cannot be processed without already knowing
+    /// its parent.
+    ParentUnknown(Box<SignedBeaconBlock<T>>),
    /// The block slot is greater than the present slot.
+    ///
+    /// ## Peer scoring
+    ///
+    /// Assuming the local clock is correct, the peer has sent an invalid message.
    FutureSlot {
        present_slot: Slot,
        block_slot: Slot,
    },
    /// The block state_root does not match the generated state.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The peer has incompatible state transition logic and is faulty.
    StateRootMismatch { block: Hash256, local: Hash256 },
    /// The block was a genesis block, these blocks cannot be re-imported.
    GenesisBlock,
    /// The slot is finalized, no need to import.
+    ///
+    /// ## Peer scoring
+    ///
+    /// It's unclear if this block is valid, but this block is for a finalized slot and is
+    /// therefore useless to us.
    WouldRevertFinalizedSlot {
        block_slot: Slot,
        finalized_slot: Slot,
    },
    /// Block is already known, no need to re-import.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The block is valid and we have already imported a block with this hash.
    BlockIsAlreadyKnown,
    /// A block for this proposer and slot has already been observed.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The `proposer` has already proposed a block at this slot. The existing block may or may not
+    /// be equal to the given block.
    RepeatProposal { proposer: u64, slot: Slot },
    /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
+    ///
+    /// ## Peer scoring
+    ///
+    /// We set a very, very high maximum slot number and this block exceeds it. There's no good
+    /// reason to be sending these blocks, they're from future slots.
+    ///
+    /// The block is invalid and the peer is faulty.
    BlockSlotLimitReached,
    /// The `BeaconBlock` has a `proposer_index` that does not match the index we computed locally.
    ///
-    /// The block is invalid.
+    /// ## Peer scoring
+    ///
+    /// The block is invalid and the peer is faulty.
    IncorrectBlockProposer { block: u64, local_shuffling: u64 },
    /// The proposal signature is invalid.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The block is invalid and the peer is faulty.
    ProposalSignatureInvalid,
    /// The `block.proposal_index` is not known.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The block is invalid and the peer is faulty.
    UnknownValidator(u64),
    /// A signature in the block is invalid (exactly which is unknown).
+    ///
+    /// ## Peer scoring
+    ///
+    /// The block is invalid and the peer is faulty.
    InvalidSignature,
-    /// The provided block is from an earlier slot than its parent.
+    /// The provided block is from a later slot than its parent.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The block is invalid and the peer is faulty.
    BlockIsNotLaterThanParent { block_slot: Slot, state_slot: Slot },
    /// At least one block in the chain segment did not have its parent root set to the root of
    /// the prior block.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The chain of blocks is invalid and the peer is faulty.
    NonLinearParentRoots,
    /// The slots of the blocks in the chain segment were not strictly increasing. I.e., a child
    /// had a lower slot than a parent.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The chain of blocks is invalid and the peer is faulty.
    NonLinearSlots,
    /// The block failed the specification's `per_block_processing` function, it is invalid.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The block is invalid and the peer is faulty.
    PerBlockProcessingError(BlockProcessingError),
    /// There was an error whilst processing the block. It is not necessarily invalid.
+    ///
+    /// ## Peer scoring
+    ///
+    /// We were unable to process this block due to an internal error. It's unclear if the block is
+    /// valid.
    BeaconChainError(BeaconChainError),
}
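Each variant above now documents its peer-scoring consequence, so a consumer can match on the error to decide how to treat the peer that sent the block. A hedged sketch of that mapping (the `PeerAction` enum and `score_block_error` function are illustrative, not part of the lighthouse API; they assume the `BlockError` enum defined above):

enum PeerAction {
    /// The peer sent something provably invalid; penalise it.
    Penalise,
    /// Validity is unclear (e.g. unknown parent); take no action.
    Ignore,
}

fn score_block_error<T: EthSpec>(error: &BlockError<T>) -> PeerAction {
    match error {
        // It's unclear whether these blocks are valid; don't punish the peer.
        BlockError::ParentUnknown(_)
        | BlockError::WouldRevertFinalizedSlot { .. }
        | BlockError::BlockIsAlreadyKnown
        | BlockError::BeaconChainError(_) => PeerAction::Ignore,
        // Everything else indicates an invalid block from a faulty peer.
        _ => PeerAction::Penalise,
    }
}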
-impl From<BlockSignatureVerifierError> for BlockError {
+impl<T: EthSpec> From<BlockSignatureVerifierError> for BlockError<T> {
    fn from(e: BlockSignatureVerifierError) -> Self {
        match e {
            // Make a special distinction for `IncorrectBlockProposer` since it indicates an
@@ -151,25 +216,25 @@ impl From<BlockSignatureVerifierError> for BlockError {
    }
}

-impl From<BeaconChainError> for BlockError {
+impl<T: EthSpec> From<BeaconChainError> for BlockError<T> {
    fn from(e: BeaconChainError) -> Self {
        BlockError::BeaconChainError(e)
    }
}

-impl From<BeaconStateError> for BlockError {
+impl<T: EthSpec> From<BeaconStateError> for BlockError<T> {
    fn from(e: BeaconStateError) -> Self {
        BlockError::BeaconChainError(BeaconChainError::BeaconStateError(e))
    }
}

-impl From<SlotProcessingError> for BlockError {
+impl<T: EthSpec> From<SlotProcessingError> for BlockError<T> {
    fn from(e: SlotProcessingError) -> Self {
        BlockError::BeaconChainError(BeaconChainError::SlotProcessingError(e))
    }
}

-impl From<DBError> for BlockError {
+impl<T: EthSpec> From<DBError> for BlockError<T> {
    fn from(e: DBError) -> Self {
        BlockError::BeaconChainError(BeaconChainError::DBError(e))
    }
@@ -186,15 +251,17 @@ impl From<DBError> for BlockError {
/// The given `chain_segment` must span no more than two epochs, otherwise an error will be
/// returned.
pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
-    chain_segment: Vec<(Hash256, SignedBeaconBlock<T::EthSpec>)>,
+    mut chain_segment: Vec<(Hash256, SignedBeaconBlock<T::EthSpec>)>,
    chain: &BeaconChain<T>,
-) -> Result<Vec<SignatureVerifiedBlock<T>>, BlockError> {
-    let (mut parent, slot) = if let Some(block) = chain_segment.first().map(|(_, block)| block) {
-        let parent = load_parent(&block.message, chain)?;
-        (parent, block.slot())
-    } else {
+) -> Result<Vec<SignatureVerifiedBlock<T>>, BlockError<T::EthSpec>> {
+    if chain_segment.is_empty() {
        return Ok(vec![]);
-    };
+    }
+
+    let (first_root, first_block) = chain_segment.remove(0);
+    let (mut parent, first_block) = load_parent(first_block, chain)?;
+    let slot = first_block.slot();
+    chain_segment.insert(0, (first_root, first_block));

    let highest_slot = chain_segment
        .last()
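The new version temporarily removes the first block from the segment because `load_parent` now takes the block by value, then reinserts it at the front so the segment is intact for the remaining verification. A minimal sketch of that take-use-reinsert pattern on a plain Vec (the element types are illustrative stand-ins):

fn main() {
    // Stand-in for the (root, block) pairs of a chain segment.
    let mut segment: Vec<(u64, String)> = vec![
        (1, "block_a".into()),
        (2, "block_b".into()),
    ];

    // Take ownership of the first element and use it by value...
    let (first_root, first_block) = segment.remove(0);
    let derived = first_block.len(); // stand-in for load_parent(first_block, ...)

    // ...then restore it to the front so the segment is unchanged.
    segment.insert(0, (first_root, first_block));

    assert_eq!(derived, 7);
    assert_eq!(segment[0].0, 1);
}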
@@ -263,12 +330,12 @@ pub struct SignatureVerifiedBlock<T: BeaconChainTypes> {
/// Note: a `FullyVerifiedBlock` is not _forever_ valid to be imported, it may later become invalid
/// due to finality or some other event. A `FullyVerifiedBlock` should be imported into the
/// `BeaconChain` immediately after it is instantiated.
-pub struct FullyVerifiedBlock<T: BeaconChainTypes> {
+pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> {
    pub block: SignedBeaconBlock<T::EthSpec>,
    pub block_root: Hash256,
    pub state: BeaconState<T::EthSpec>,
    pub parent_block: SignedBeaconBlock<T::EthSpec>,
-    pub intermediate_states: StateBatch<T::EthSpec>,
+    pub intermediate_states: Vec<StoreOp<'a, T::EthSpec>>,
}

/// Implemented on types that can be converted into a `FullyVerifiedBlock`.
@@ -278,7 +345,7 @@ pub trait IntoFullyVerifiedBlock<T: BeaconChainTypes> {
    fn into_fully_verified_block(
        self,
        chain: &BeaconChain<T>,
-    ) -> Result<FullyVerifiedBlock<T>, BlockError>;
+    ) -> Result<FullyVerifiedBlock<T>, BlockError<T::EthSpec>>;

    fn block(&self) -> &SignedBeaconBlock<T::EthSpec>;
}
@@ -291,7 +358,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
    pub fn new(
        block: SignedBeaconBlock<T::EthSpec>,
        chain: &BeaconChain<T>,
-    ) -> Result<Self, BlockError> {
+    ) -> Result<Self, BlockError<T::EthSpec>> {
        // Do not gossip or process blocks from future slots.
        let present_slot_with_tolerance = chain
            .slot_clock
@@ -319,7 +386,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
            });
        }

-        let mut parent = load_parent(&block.message, chain)?;
+        let (mut parent, block) = load_parent(block, chain)?;
        let block_root = get_block_root(&block);

        let state = cheap_state_advance_to_obtain_committees(
@@ -388,7 +455,7 @@ impl<T: BeaconChainTypes> IntoFullyVerifiedBlock<T> for GossipVerifiedBlock<T> {
    fn into_fully_verified_block(
        self,
        chain: &BeaconChain<T>,
-    ) -> Result<FullyVerifiedBlock<T>, BlockError> {
+    ) -> Result<FullyVerifiedBlock<T>, BlockError<T::EthSpec>> {
        let fully_verified = SignatureVerifiedBlock::from_gossip_verified_block(self, chain)?;
        fully_verified.into_fully_verified_block(chain)
    }
@@ -406,8 +473,8 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
    pub fn new(
        block: SignedBeaconBlock<T::EthSpec>,
        chain: &BeaconChain<T>,
-    ) -> Result<Self, BlockError> {
-        let mut parent = load_parent(&block.message, chain)?;
+    ) -> Result<Self, BlockError<T::EthSpec>> {
+        let (mut parent, block) = load_parent(block, chain)?;
        let block_root = get_block_root(&block);

        let state = cheap_state_advance_to_obtain_committees(
@@ -438,7 +505,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
    pub fn from_gossip_verified_block(
        from: GossipVerifiedBlock<T>,
        chain: &BeaconChain<T>,
-    ) -> Result<Self, BlockError> {
+    ) -> Result<Self, BlockError<T::EthSpec>> {
        let mut parent = from.parent;
        let block = from.block;

@@ -471,12 +538,12 @@ impl<T: BeaconChainTypes> IntoFullyVerifiedBlock<T> for SignatureVerifiedBlock<T
    fn into_fully_verified_block(
        self,
        chain: &BeaconChain<T>,
-    ) -> Result<FullyVerifiedBlock<T>, BlockError> {
-        let block = self.block;
-        let parent = self
-            .parent
-            .map(Result::Ok)
-            .unwrap_or_else(|| load_parent(&block.message, chain))?;
+    ) -> Result<FullyVerifiedBlock<T>, BlockError<T::EthSpec>> {
+        let (parent, block) = if let Some(parent) = self.parent {
+            (parent, self.block)
+        } else {
+            load_parent(self.block, chain)?
+        };

        FullyVerifiedBlock::from_signature_verified_components(
            block,
@@ -497,7 +564,7 @@ impl<T: BeaconChainTypes> IntoFullyVerifiedBlock<T> for SignedBeaconBlock<T::Eth
    fn into_fully_verified_block(
        self,
        chain: &BeaconChain<T>,
-    ) -> Result<FullyVerifiedBlock<T>, BlockError> {
+    ) -> Result<FullyVerifiedBlock<T>, BlockError<T::EthSpec>> {
        SignatureVerifiedBlock::new(self, chain)?.into_fully_verified_block(chain)
    }

@@ -506,7 +573,7 @@ impl<T: BeaconChainTypes> IntoFullyVerifiedBlock<T> for SignedBeaconBlock<T::Eth
    }
}

-impl<T: BeaconChainTypes> FullyVerifiedBlock<T> {
+impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
    /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See
    /// the struct-level documentation for more information.
    ///
@@ -519,7 +586,7 @@ impl<T: BeaconChainTypes> FullyVerifiedBlock<T> {
        block_root: Hash256,
        parent: BeaconSnapshot<T::EthSpec>,
        chain: &BeaconChain<T>,
-    ) -> Result<Self, BlockError> {
+    ) -> Result<Self, BlockError<T::EthSpec>> {
        // Reject any block if its parent is not known to fork choice.
        //
        // A block that is not in fork choice is either:
@@ -530,8 +597,12 @@ impl<T: BeaconChainTypes> FullyVerifiedBlock<T> {
        // because it will revert finalization. Note that the finalized block is stored in fork
        // choice, so we will not reject any child of the finalized block (this is relevant during
        // genesis).
-        if !chain.fork_choice.contains_block(&block.parent_root()) {
-            return Err(BlockError::ParentUnknown(block.parent_root()));
+        if !chain
+            .fork_choice
+            .read()
+            .contains_block(&block.parent_root())
+        {
+            return Err(BlockError::ParentUnknown(Box::new(block)));
        }

        /*
@@ -548,7 +619,7 @@ impl<T: BeaconChainTypes> FullyVerifiedBlock<T> {

        // Keep a batch of any states that were "skipped" (block-less) in between the parent state
        // slot and the block slot. These will be stored in the database.
-        let mut intermediate_states = StateBatch::new();
+        let mut intermediate_states: Vec<StoreOp<T::EthSpec>> = Vec::new();

        // The block must have a higher slot than its parent.
        if block.slot() <= parent.beacon_state.slot {
@@ -571,12 +642,22 @@ impl<T: BeaconChainTypes> FullyVerifiedBlock<T> {
            // Computing the state root here is time-equivalent to computing it during slot
            // processing, but we get early access to it.
            let state_root = state.update_tree_hash_cache()?;
-            intermediate_states.add_state(state_root, &state)?;
+
+            let op = if state.slot % T::EthSpec::slots_per_epoch() == 0 {
+                StoreOp::PutState(state_root.into(), Cow::Owned(state.clone()))
+            } else {
+                StoreOp::PutStateSummary(
+                    state_root.into(),
+                    HotStateSummary::new(&state_root, &state)?,
+                )
+            };
+            intermediate_states.push(op);
            state_root
        };

-        per_slot_processing(&mut state, Some(state_root), &chain.spec)?
-            .map(|summary| summaries.push(summary));
+        if let Some(summary) = per_slot_processing(&mut state, Some(state_root), &chain.spec)? {
+            summaries.push(summary)
+        }
    }

    expose_participation_metrics(&summaries);
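The new intermediate-state handling persists a full state only when the slot sits on an epoch boundary, and a cheap summary for every other slot. A minimal sketch of that branching decision (the `Op` enum and constants are simplified stand-ins for the `StoreOp` variants above):

fn main() {
    const SLOTS_PER_EPOCH: u64 = 32;

    // Persist a full state at epoch boundaries, and only a lightweight
    // summary for all other slots.
    #[derive(Debug, PartialEq)]
    enum Op {
        PutState,
        PutStateSummary,
    }

    let op_for_slot = |slot: u64| {
        if slot % SLOTS_PER_EPOCH == 0 {
            Op::PutState
        } else {
            Op::PutStateSummary
        }
    };

    assert_eq!(op_for_slot(64), Op::PutState); // epoch boundary
    assert_eq!(op_for_slot(65), Op::PutStateSummary); // mid-epoch
}

Full states are large, so writing one per skipped slot is expensive; summaries keep enough information to rebuild the state later while bounding disk usage, which appears to be the motivation behind the split above.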
@@ -670,7 +751,7 @@ impl<T: BeaconChainTypes> FullyVerifiedBlock<T> {
fn check_block_against_finalized_slot<T: BeaconChainTypes>(
    block: &BeaconBlock<T::EthSpec>,
    chain: &BeaconChain<T>,
-) -> Result<(), BlockError> {
+) -> Result<(), BlockError<T::EthSpec>> {
    let finalized_slot = chain
        .head_info()?
        .finalized_checkpoint
@@ -698,7 +779,7 @@ pub fn check_block_relevancy<T: BeaconChainTypes>(
    signed_block: &SignedBeaconBlock<T::EthSpec>,
    block_root: Option<Hash256>,
    chain: &BeaconChain<T>,
-) -> Result<Hash256, BlockError> {
+) -> Result<Hash256, BlockError<T::EthSpec>> {
    let block = &signed_block.message;

    // Do not process blocks from the future.
@@ -727,7 +808,7 @@ pub fn check_block_relevancy<T: BeaconChainTypes>(

    // Check if the block is already known. We know it is post-finalization, so it is
    // sufficient to check the fork choice.
-    if chain.fork_choice.contains_block(&block_root) {
+    if chain.fork_choice.read().contains_block(&block_root) {
        return Err(BlockError::BlockIsAlreadyKnown);
    }

@@ -751,12 +832,11 @@ pub fn get_block_root<E: EthSpec>(block: &SignedBeaconBlock<E>) -> Hash256 {
///
/// Returns `Err(BlockError::ParentUnknown)` if the parent is not found, or if an error occurs
/// whilst attempting the operation.
+#[allow(clippy::type_complexity)]
fn load_parent<T: BeaconChainTypes>(
-    block: &BeaconBlock<T::EthSpec>,
+    block: SignedBeaconBlock<T::EthSpec>,
    chain: &BeaconChain<T>,
-) -> Result<BeaconSnapshot<T::EthSpec>, BlockError> {
-    let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ);
-
+) -> Result<(BeaconSnapshot<T::EthSpec>, SignedBeaconBlock<T::EthSpec>), BlockError<T::EthSpec>> {
+    // Reject any block if its parent is not known to fork choice.
+    //
+    // A block that is not in fork choice is either:
@@ -767,50 +847,58 @@ fn load_parent<T: BeaconChainTypes>(
    // because it will revert finalization. Note that the finalized block is stored in fork
    // choice, so we will not reject any child of the finalized block (this is relevant during
    // genesis).
-    if !chain.fork_choice.contains_block(&block.parent_root) {
-        return Err(BlockError::ParentUnknown(block.parent_root));
+    if !chain
+        .fork_choice
+        .read()
+        .contains_block(&block.parent_root())
+    {
+        return Err(BlockError::ParentUnknown(Box::new(block)));
    }

-    // Load the parent block and state from disk, returning early if it's not available.
-    let result = chain
+    let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ);
+
+    let result = if let Some(snapshot) = chain
        .snapshot_cache
        .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
-        .and_then(|mut snapshot_cache| snapshot_cache.try_remove(block.parent_root))
-        .map(|snapshot| Ok(Some(snapshot)))
-        .unwrap_or_else(|| {
-            // Load the block's parent block from the database, returning invalid if that block is
-            // not found.
-            //
-            // We don't return a DBInconsistent error here since it's possible for a block to
-            // exist in fork choice but not in the database yet. In such a case we simply
-            // indicate that we don't yet know the parent.
-            let parent_block = if let Some(block) = chain.get_block(&block.parent_root)? {
-                block
-            } else {
-                return Ok(None);
-            };
+        .and_then(|mut snapshot_cache| snapshot_cache.try_remove(block.parent_root()))
+    {
+        Ok((snapshot, block))
+    } else {
+        // Load the block's parent block from the database, returning invalid if that block is
+        // not found.
+        //
+        // We don't return a DBInconsistent error here since it's possible for a block to
+        // exist in fork choice but not in the database yet. In such a case we simply
+        // indicate that we don't yet know the parent.
+        let root = block.parent_root();
+        let parent_block = if let Some(block) = chain
+            .get_block(&block.parent_root())
+            .map_err(BlockError::BeaconChainError)?
+        {
+            block
+        } else {
+            return Err(BlockError::ParentUnknown(Box::new(block)));
+        };

-            // Load the parent block's state from the database, returning an error if it is not
-            // found. It is an error because if we know the parent block we should also know the
-            // parent state.
-            let parent_state_root = parent_block.state_root();
-            let parent_state = chain
-                .get_state(&parent_state_root, Some(parent_block.slot()))?
-                .ok_or_else(|| {
-                    BeaconChainError::DBInconsistent(format!(
-                        "Missing state {:?}",
-                        parent_state_root
-                    ))
-                })?;
+        // Load the parent block's state from the database, returning an error if it is not
+        // found. It is an error because if we know the parent block we should also know the
+        // parent state.
+        let parent_state_root = parent_block.state_root();
+        let parent_state = chain
+            .get_state(&parent_state_root, Some(parent_block.slot()))?
+            .ok_or_else(|| {
+                BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root))
+            })?;

-            Ok(Some(BeaconSnapshot {
+        Ok((
+            BeaconSnapshot {
                beacon_block: parent_block,
-                beacon_block_root: block.parent_root,
+                beacon_block_root: root,
                beacon_state: parent_state,
                beacon_state_root: parent_state_root,
-            }))
-        })
-        .map_err(BlockError::BeaconChainError)?
-        .ok_or_else(|| BlockError::ParentUnknown(block.parent_root));
+            },
+            block,
+        ))
+    };

    metrics::stop_timer(db_read_timer);
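`load_parent` first tries to pull the parent snapshot out of the in-memory snapshot cache and only falls back to a (much slower) database read on a miss. A minimal sketch of that cache-then-DB fallback, using `HashMap`s as stand-ins for the snapshot cache and the store:

use std::collections::HashMap;

fn load(cache: &mut HashMap<u64, String>, db: &HashMap<u64, String>, root: u64) -> Option<String> {
    // Fast path: consume the entry from the in-memory cache, mirroring
    // `snapshot_cache.try_remove(...)` above.
    if let Some(snapshot) = cache.remove(&root) {
        return Some(snapshot);
    }
    // Slow path: fall back to the database read.
    db.get(&root).cloned()
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(1, "cached snapshot".to_string());
    let mut db = HashMap::new();
    db.insert(2, "snapshot from disk".to_string());

    assert_eq!(load(&mut cache, &db, 1).as_deref(), Some("cached snapshot"));
    assert_eq!(load(&mut cache, &db, 2).as_deref(), Some("snapshot from disk"));
    assert_eq!(load(&mut cache, &db, 3), None); // parent unknown
}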
@@ -832,7 +920,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
    state: &'a mut BeaconState<E>,
    block_slot: Slot,
    spec: &ChainSpec,
-) -> Result<Cow<'a, BeaconState<E>>, BlockError> {
+) -> Result<Cow<'a, BeaconState<E>>, BlockError<E>> {
    let block_epoch = block_slot.epoch(E::slots_per_epoch());

    if state.current_epoch() == block_epoch {
@@ -864,7 +952,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
/// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`.
fn get_validator_pubkey_cache<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
-) -> Result<RwLockReadGuard<ValidatorPubkeyCache>, BlockError> {
+) -> Result<RwLockReadGuard<ValidatorPubkeyCache>, BlockError<T::EthSpec>> {
    chain
        .validator_pubkey_cache
        .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
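`cheap_state_advance_to_obtain_committees` returns a `Cow` so it can hand back the caller's state untouched when no advance is needed, and an owned clone only when it must mutate. A minimal sketch of that shape (the `advance` function and `Vec<u64>` state are illustrative stand-ins, not the beacon-state logic):

use std::borrow::Cow;

fn advance(state: &mut Vec<u64>, target_len: usize) -> Cow<'_, Vec<u64>> {
    if state.len() >= target_len {
        // No work needed: borrow the existing state.
        Cow::Borrowed(state)
    } else {
        // Clone and mutate the copy, leaving the original intact.
        let mut owned = state.clone();
        owned.resize(target_len, 0);
        Cow::Owned(owned)
    }
}

fn main() {
    let mut state = vec![1, 2, 3];
    assert!(matches!(advance(&mut state, 2), Cow::Borrowed(_)));
    assert_eq!(advance(&mut state, 5).len(), 5);
}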
beacon_node/beacon_chain/src/block_processing_outcome.rs (deleted file)
@@ -1,130 +0,0 @@
use crate::{BeaconChainError, BlockError};
use state_processing::BlockProcessingError;
use types::{Hash256, Slot};

/// This is a legacy object that is being kept around to reduce merge conflicts.
///
/// TODO: As soon as this is merged into master, it should be removed as soon as possible.
#[derive(Debug, PartialEq)]
pub enum BlockProcessingOutcome {
    /// Block was valid and imported into the block graph.
    Processed {
        block_root: Hash256,
    },
    InvalidSignature,
    /// The proposal signature is invalid.
    ProposalSignatureInvalid,
    /// The `block.proposal_index` is not known.
    UnknownValidator(u64),
    /// The parent block was unknown.
    ParentUnknown(Hash256),
    /// The block slot is greater than the present slot.
    FutureSlot {
        present_slot: Slot,
        block_slot: Slot,
    },
    /// The block state_root does not match the generated state.
    StateRootMismatch {
        block: Hash256,
        local: Hash256,
    },
    /// The block was a genesis block, these blocks cannot be re-imported.
    GenesisBlock,
    /// The slot is finalized, no need to import.
    WouldRevertFinalizedSlot {
        block_slot: Slot,
        finalized_slot: Slot,
    },
    /// Block is already known, no need to re-import.
    BlockIsAlreadyKnown,
    /// A block for this proposer and slot has already been observed.
    RepeatProposal {
        proposer: u64,
        slot: Slot,
    },
    /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
    BlockSlotLimitReached,
    /// The provided block is from an earlier slot than its parent.
    BlockIsNotLaterThanParent {
        block_slot: Slot,
        state_slot: Slot,
    },
    /// The `BeaconBlock` has a `proposer_index` that does not match the index we computed locally.
    ///
    /// The block is invalid.
    IncorrectBlockProposer {
        block: u64,
        local_shuffling: u64,
    },
    /// At least one block in the chain segment did not have its parent root set to the root of
    /// the prior block.
    NonLinearParentRoots,
    /// The slots of the blocks in the chain segment were not strictly increasing. I.e., a child
    /// had a lower slot than a parent.
    NonLinearSlots,
    /// The block could not be applied to the state, it is invalid.
    PerBlockProcessingError(BlockProcessingError),
}

impl BlockProcessingOutcome {
    pub fn shim(
        result: Result<Hash256, BlockError>,
    ) -> Result<BlockProcessingOutcome, BeaconChainError> {
        match result {
            Ok(block_root) => Ok(BlockProcessingOutcome::Processed { block_root }),
            Err(BlockError::ParentUnknown(root)) => Ok(BlockProcessingOutcome::ParentUnknown(root)),
            Err(BlockError::FutureSlot {
                present_slot,
                block_slot,
            }) => Ok(BlockProcessingOutcome::FutureSlot {
                present_slot,
                block_slot,
            }),
            Err(BlockError::StateRootMismatch { block, local }) => {
                Ok(BlockProcessingOutcome::StateRootMismatch { block, local })
            }
            Err(BlockError::GenesisBlock) => Ok(BlockProcessingOutcome::GenesisBlock),
            Err(BlockError::WouldRevertFinalizedSlot {
                block_slot,
                finalized_slot,
            }) => Ok(BlockProcessingOutcome::WouldRevertFinalizedSlot {
                block_slot,
                finalized_slot,
            }),
            Err(BlockError::BlockIsAlreadyKnown) => Ok(BlockProcessingOutcome::BlockIsAlreadyKnown),
            Err(BlockError::RepeatProposal { proposer, slot }) => {
                Ok(BlockProcessingOutcome::RepeatProposal { proposer, slot })
            }
            Err(BlockError::BlockSlotLimitReached) => {
                Ok(BlockProcessingOutcome::BlockSlotLimitReached)
            }
            Err(BlockError::ProposalSignatureInvalid) => {
                Ok(BlockProcessingOutcome::ProposalSignatureInvalid)
            }
            Err(BlockError::UnknownValidator(i)) => Ok(BlockProcessingOutcome::UnknownValidator(i)),
            Err(BlockError::InvalidSignature) => Ok(BlockProcessingOutcome::InvalidSignature),
            Err(BlockError::BlockIsNotLaterThanParent {
                block_slot,
                state_slot,
            }) => Ok(BlockProcessingOutcome::BlockIsNotLaterThanParent {
                block_slot,
                state_slot,
            }),
            Err(BlockError::IncorrectBlockProposer {
                block,
                local_shuffling,
            }) => Ok(BlockProcessingOutcome::IncorrectBlockProposer {
                block,
                local_shuffling,
            }),
            Err(BlockError::NonLinearParentRoots) => {
                Ok(BlockProcessingOutcome::NonLinearParentRoots)
            }
            Err(BlockError::NonLinearSlots) => Ok(BlockProcessingOutcome::NonLinearSlots),
            Err(BlockError::PerBlockProcessingError(e)) => {
                Ok(BlockProcessingOutcome::PerBlockProcessingError(e))
            }
            Err(BlockError::BeaconChainError(e)) => Err(e),
        }
    }
}
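The `shim` in the deleted file flattened the richer `Result<Hash256, BlockError>` into a legacy enum so older call sites kept compiling: block-level failures became `Ok(outcome)` while internal errors kept propagating as `Err`. A minimal sketch of that pattern with illustrative stand-in types (none of these names are lighthouse types):

enum LegacyOutcome {
    Processed(u64),
    Invalid(&'static str),
}

#[derive(Debug)]
enum InternalError {
    Database,
}

enum NewError {
    Invalid(&'static str),
    Internal(InternalError),
}

// The shim pattern: recoverable block failures become an `Ok(outcome)` for
// legacy callers, while internal errors continue to propagate as `Err`.
fn shim(result: Result<u64, NewError>) -> Result<LegacyOutcome, InternalError> {
    match result {
        Ok(root) => Ok(LegacyOutcome::Processed(root)),
        Err(NewError::Invalid(reason)) => Ok(LegacyOutcome::Invalid(reason)),
        Err(NewError::Internal(e)) => Err(e),
    }
}

fn main() {
    assert!(matches!(shim(Ok(42)), Ok(LegacyOutcome::Processed(42))));
    assert!(matches!(
        shim(Err(NewError::Internal(InternalError::Database))),
        Err(InternalError::Database)
    ));
}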
@@ -3,58 +3,80 @@ use crate::beacon_chain::{
};
use crate::eth1_chain::{CachingEth1Backend, SszEth1};
use crate::events::NullEventHandler;
-use crate::fork_choice::SszForkChoice;
use crate::head_tracker::HeadTracker;
use crate::migrate::Migrate;
use crate::persisted_beacon_chain::PersistedBeaconChain;
+use crate::persisted_fork_choice::PersistedForkChoice;
use crate::shuffling_cache::ShufflingCache;
use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE};
use crate::timeout_rw_lock::TimeoutRwLock;
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
use crate::{
-    BeaconChain, BeaconChainTypes, BeaconSnapshot, Eth1Chain, Eth1ChainBackend, EventHandler,
-    ForkChoice,
+    BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, Eth1Chain,
+    Eth1ChainBackend, EventHandler,
};
use eth1::Config as Eth1Config;
+use fork_choice::ForkChoice;
use operation_pool::{OperationPool, PersistedOperationPool};
-use proto_array_fork_choice::ProtoArrayForkChoice;
+use parking_lot::RwLock;
use slog::{info, Logger};
use slot_clock::{SlotClock, TestingSlotClock};
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
-use store::Store;
+use store::{HotColdDB, ItemStore};
use types::{
-    BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Signature, SignedBeaconBlock, Slot,
+    BeaconBlock, BeaconState, ChainSpec, EthSpec, Graffiti, Hash256, Signature, SignedBeaconBlock,
+    Slot,
};

pub const PUBKEY_CACHE_FILENAME: &str = "pubkey_cache.ssz";

/// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing
/// functionality and only exists to satisfy the type system.
-pub struct Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>(
+pub struct Witness<
+    TStoreMigrator,
+    TSlotClock,
+    TEth1Backend,
+    TEthSpec,
+    TEventHandler,
+    THotStore,
+    TColdStore,
+>(
    PhantomData<(
-        TStore,
        TStoreMigrator,
        TSlotClock,
        TEth1Backend,
        TEthSpec,
        TEventHandler,
+        THotStore,
+        TColdStore,
    )>,
);
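The `Witness` struct is a zero-sized type: it is never instantiated and exists only to pin a set of generic parameters so that `BeaconChainTypes` can be implemented once per configuration. A minimal sketch of the phantom-witness pattern (the trait and type names here are illustrative, not the lighthouse ones):

use std::marker::PhantomData;

// A zero-sized "witness" that only carries type parameters.
trait Config {
    type Clock;
    type Store;
}

struct MyWitness<TClock, TStore>(PhantomData<(TClock, TStore)>);

impl<TClock, TStore> Config for MyWitness<TClock, TStore> {
    type Clock = TClock;
    type Store = TStore;
}

// A consumer can now be generic over one parameter instead of many.
fn describe<C: Config>() -> &'static str {
    std::any::type_name::<C::Store>()
}

fn main() {
    // No value of MyWitness is ever constructed; it's purely a type-level tag.
    println!("{}", describe::<MyWitness<u32, String>>());
}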
-impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> BeaconChainTypes
-    for Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
+impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
+    BeaconChainTypes
+    for Witness<
+        TStoreMigrator,
+        TSlotClock,
+        TEth1Backend,
+        TEthSpec,
+        TEventHandler,
+        THotStore,
+        TColdStore,
+    >
where
-    TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
+    THotStore: ItemStore<TEthSpec> + 'static,
+    TColdStore: ItemStore<TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
    TSlotClock: SlotClock + 'static,
-    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
+    TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
{
-    type Store = TStore;
+    type HotStore = THotStore;
+    type ColdStore = TColdStore;
    type StoreMigrator = TStoreMigrator;
    type SlotClock = TSlotClock;
    type Eth1Chain = TEth1Backend;
@@ -71,7 +93,8 @@ where
///
/// See the tests for a complete working example.
pub struct BeaconChainBuilder<T: BeaconChainTypes> {
-    store: Option<Arc<T::Store>>,
+    #[allow(clippy::type_complexity)]
+    store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
    store_migrator: Option<T::StoreMigrator>,
    canonical_head: Option<BeaconSnapshot<T::EthSpec>>,
    /// The finalized checkpoint to anchor the chain. May be genesis or a higher
@@ -79,8 +102,7 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
    pub finalized_snapshot: Option<BeaconSnapshot<T::EthSpec>>,
    genesis_block_root: Option<Hash256>,
    op_pool: Option<OperationPool<T::EthSpec>>,
-    fork_choice: Option<ForkChoice<T>>,
-    eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec, T::Store>>,
+    eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
    event_handler: Option<T::EventHandler>,
    slot_clock: Option<T::SlotClock>,
    head_tracker: Option<HeadTracker>,
@@ -90,17 +112,27 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
    spec: ChainSpec,
    disabled_forks: Vec<String>,
    log: Option<Logger>,
+    graffiti: Graffiti,
}

-impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
+impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
    BeaconChainBuilder<
-        Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
+        Witness<
+            TStoreMigrator,
+            TSlotClock,
+            TEth1Backend,
+            TEthSpec,
+            TEventHandler,
+            THotStore,
+            TColdStore,
+        >,
    >
where
-    TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
+    THotStore: ItemStore<TEthSpec> + 'static,
+    TColdStore: ItemStore<TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
    TSlotClock: SlotClock + 'static,
-    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
+    TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
{
@@ -116,7 +148,6 @@ where
            finalized_snapshot: None,
            genesis_block_root: None,
            op_pool: None,
-            fork_choice: None,
            eth1_chain: None,
            event_handler: None,
            slot_clock: None,
@@ -127,6 +158,7 @@ where
            validator_pubkey_cache: None,
            spec: TEthSpec::default_spec(),
            log: None,
+            graffiti: Graffiti::default(),
        }
    }

@@ -142,7 +174,7 @@ where
    /// Sets the store (database).
    ///
    /// Should generally be called early in the build chain.
-    pub fn store(mut self, store: Arc<TStore>) -> Self {
+    pub fn store(mut self, store: Arc<HotColdDB<TEthSpec, THotStore, TColdStore>>) -> Self {
        self.store = Some(store);
        self
    }
@@ -184,7 +216,7 @@ where
            .ok_or_else(|| "get_persisted_eth1_backend requires a store.".to_string())?;

        store
-            .get::<SszEth1>(&Hash256::from_slice(&ETH1_CACHE_DB_KEY))
+            .get_item::<SszEth1>(&Hash256::from_slice(&ETH1_CACHE_DB_KEY))
            .map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e))
    }

@@ -196,7 +228,7 @@ where
            .ok_or_else(|| "store_contains_beacon_chain requires a store.".to_string())?;

        Ok(store
-            .get::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
+            .get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
            .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
            .is_some())
    }
@@ -227,7 +259,7 @@ where
            .ok_or_else(|| "resume_from_db requires a store.".to_string())?;

        let chain = store
-            .get::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
+            .get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
            .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
            .ok_or_else(|| {
                "No persisted beacon chain found in store. Try purging the beacon chain database."
@@ -242,7 +274,7 @@ where

        let head_block_root = chain.canonical_head_block_root;
        let head_block = store
-            .get::<SignedBeaconBlock<TEthSpec>>(&head_block_root)
+            .get_item::<SignedBeaconBlock<TEthSpec>>(&head_block_root)
            .map_err(|e| format!("DB error when reading head block: {:?}", e))?
            .ok_or_else(|| "Head block not found in store".to_string())?;
        let head_state_root = head_block.state_root();
@@ -253,15 +285,15 @@ where

        self.op_pool = Some(
            store
-                .get::<PersistedOperationPool<TEthSpec>>(&Hash256::from_slice(&OP_POOL_DB_KEY))
+                .get_item::<PersistedOperationPool<TEthSpec>>(&Hash256::from_slice(&OP_POOL_DB_KEY))
                .map_err(|e| format!("DB error whilst reading persisted op pool: {:?}", e))?
-                .map(|persisted| persisted.into_operation_pool(&head_state, &self.spec))
-                .unwrap_or_else(|| OperationPool::new()),
+                .map(PersistedOperationPool::into_operation_pool)
+                .unwrap_or_else(OperationPool::new),
        );

        let finalized_block_root = head_state.finalized_checkpoint.root;
        let finalized_block = store
-            .get::<SignedBeaconBlock<TEthSpec>>(&finalized_block_root)
+            .get_item::<SignedBeaconBlock<TEthSpec>>(&finalized_block_root)
            .map_err(|e| format!("DB error when reading finalized block: {:?}", e))?
            .ok_or_else(|| "Finalized block not found in store".to_string())?;
        let finalized_state_root = finalized_block.state_root();
@@ -317,16 +349,18 @@ where
            .put_state(&beacon_state_root, &beacon_state)
            .map_err(|e| format!("Failed to store genesis state: {:?}", e))?;
        store
-            .put(&beacon_block_root, &beacon_block)
+            .put_item(&beacon_block_root, &beacon_block)
            .map_err(|e| format!("Failed to store genesis block: {:?}", e))?;

        // Store the genesis block under the `ZERO_HASH` key.
-        store.put(&Hash256::zero(), &beacon_block).map_err(|e| {
-            format!(
-                "Failed to store genesis block under 0x00..00 alias: {:?}",
-                e
-            )
-        })?;
+        store
+            .put_item(&Hash256::zero(), &beacon_block)
+            .map_err(|e| {
+                format!(
+                    "Failed to store genesis block under 0x00..00 alias: {:?}",
+                    e
+                )
+            })?;

        self.finalized_snapshot = Some(BeaconSnapshot {
            beacon_block_root,
@@ -366,6 +400,12 @@ where
        self
    }

+    /// Sets the `graffiti` field.
+    pub fn graffiti(mut self, graffiti: Graffiti) -> Self {
+        self.graffiti = graffiti;
+        self
+    }
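The new `graffiti` setter follows the same consuming-builder shape as the rest of `BeaconChainBuilder`: each setter takes `self` by value, stores the field, and returns `self` so calls can be chained, with required fields only checked at `build` time. A minimal sketch of that shape (all names here are illustrative, not the lighthouse builder):

#[derive(Default)]
struct Builder {
    graffiti: Option<String>,
    log_level: Option<u8>,
}

impl Builder {
    // Take `self` by value, mutate, and hand it back for chaining.
    fn graffiti(mut self, graffiti: &str) -> Self {
        self.graffiti = Some(graffiti.to_string());
        self
    }

    fn log_level(mut self, level: u8) -> Self {
        self.log_level = Some(level);
        self
    }

    // Required fields are only validated at the end, returning a `String`
    // error in the same style as `BeaconChainBuilder::build`.
    fn build(self) -> Result<String, String> {
        let graffiti = self
            .graffiti
            .ok_or_else(|| "Cannot build without graffiti".to_string())?;
        Ok(format!("graffiti={} level={}", graffiti, self.log_level.unwrap_or(0)))
    }
}

fn main() {
    let out = Builder::default().graffiti("hello").log_level(3).build().unwrap();
    assert_eq!(out, "graffiti=hello level=3");
}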
|
||||
/// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied.
|
||||
///
|
||||
/// An error will be returned at runtime if all required parameters have not been configured.
|
||||
@@ -377,13 +417,28 @@ where
|
||||
self,
|
||||
) -> Result<
|
||||
BeaconChain<
|
||||
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
|
||||
Witness<
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>,
|
||||
String,
|
||||
> {
|
||||
let log = self
|
||||
.log
|
||||
.ok_or_else(|| "Cannot build without a logger".to_string())?;
|
||||
let slot_clock = self
|
||||
.slot_clock
|
||||
.ok_or_else(|| "Cannot build without a slot_clock.".to_string())?;
|
||||
let store = self
|
||||
.store
|
||||
.clone()
|
||||
.ok_or_else(|| "Cannot build without a store.".to_string())?;
|
||||
|
||||
// If this beacon chain is being loaded from disk, use the stored head. Otherwise, just use
|
||||
// the finalized checkpoint (which is probably genesis).
|
||||
@@ -407,25 +462,38 @@ where
|
||||
.pubkey_cache_path
|
||||
.ok_or_else(|| "Cannot build without a pubkey cache path".to_string())?;
|
||||
|
||||
let validator_pubkey_cache = self
|
||||
.validator_pubkey_cache
|
||||
.map(|cache| Ok(cache))
|
||||
.unwrap_or_else(|| {
|
||||
ValidatorPubkeyCache::new(&canonical_head.beacon_state, pubkey_cache_path)
|
||||
.map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e))
|
||||
})?;
|
||||
let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| {
|
||||
ValidatorPubkeyCache::new(&canonical_head.beacon_state, pubkey_cache_path)
|
||||
.map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e))
|
||||
})?;
|
||||
|
||||
let persisted_fork_choice = store
|
||||
.get_item::<PersistedForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
|
||||
.map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?;
|
||||
|
||||
let fork_choice = if let Some(persisted) = persisted_fork_choice {
|
||||
let fc_store =
|
||||
BeaconForkChoiceStore::from_persisted(persisted.fork_choice_store, store.clone())
|
||||
.map_err(|e| format!("Unable to load ForkChoiceStore: {:?}", e))?;
|
||||
|
||||
ForkChoice::from_persisted(persisted.fork_choice, fc_store)
|
||||
.map_err(|e| format!("Unable to parse persisted fork choice from disk: {:?}", e))?
|
||||
} else {
|
||||
let genesis = &canonical_head;
|
||||
|
||||
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), genesis);
|
||||
|
||||
ForkChoice::from_genesis(fc_store, &genesis.beacon_block.message)
|
||||
.map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?
|
||||
};
|
||||
|
||||
let beacon_chain = BeaconChain {
|
||||
spec: self.spec,
|
||||
store: self
|
||||
.store
|
||||
.ok_or_else(|| "Cannot build without store".to_string())?,
|
||||
store,
|
||||
store_migrator: self
|
||||
.store_migrator
|
||||
.ok_or_else(|| "Cannot build without store migrator".to_string())?,
|
||||
slot_clock: self
|
||||
.slot_clock
|
||||
.ok_or_else(|| "Cannot build without slot clock".to_string())?,
|
||||
slot_clock,
|
||||
op_pool: self
|
||||
.op_pool
|
||||
.ok_or_else(|| "Cannot build without op pool".to_string())?,
|
||||
@@ -439,15 +507,17 @@ where
|
||||
observed_aggregators: <_>::default(),
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
observed_block_producers: <_>::default(),
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
observed_voluntary_exits: <_>::default(),
|
||||
observed_proposer_slashings: <_>::default(),
|
||||
observed_attester_slashings: <_>::default(),
|
||||
eth1_chain: self.eth1_chain,
|
||||
genesis_validators_root: canonical_head.beacon_state.genesis_validators_root,
|
||||
canonical_head: TimeoutRwLock::new(canonical_head.clone()),
|
||||
genesis_block_root: self
|
||||
.genesis_block_root
|
||||
.ok_or_else(|| "Cannot build without a genesis block root".to_string())?,
|
||||
fork_choice: self
|
||||
.fork_choice
|
||||
.ok_or_else(|| "Cannot build without a fork choice".to_string())?,
|
||||
fork_choice: RwLock::new(fork_choice),
|
||||
event_handler: self
|
||||
.event_handler
|
||||
.ok_or_else(|| "Cannot build without an event handler".to_string())?,
|
||||
@@ -460,6 +530,7 @@ where
|
||||
validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache),
|
||||
disabled_forks: self.disabled_forks,
|
||||
log: log.clone(),
|
||||
graffiti: self.graffiti,
|
||||
};
|
||||
|
||||
let head = beacon_chain
|
||||
@@ -478,83 +549,22 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
|
||||
BeaconChainBuilder<
|
||||
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
{
|
||||
/// Initializes a fork choice with the `ThreadSafeReducedTree` backend.
|
||||
///
|
||||
/// If this builder is being "resumed" from disk, then rebuild the last fork choice stored to
|
||||
/// the database. Otherwise, create a new, empty fork choice.
|
||||
pub fn reduced_tree_fork_choice(mut self) -> Result<Self, String> {
|
||||
let store = self
|
||||
.store
|
||||
.clone()
|
||||
.ok_or_else(|| "reduced_tree_fork_choice requires a store.".to_string())?;
|
||||
|
||||
let persisted_fork_choice = store
|
||||
.get::<SszForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
|
||||
.map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?;
|
||||
|
||||
let fork_choice = if let Some(persisted) = persisted_fork_choice {
|
||||
ForkChoice::from_ssz_container(persisted)
|
||||
.map_err(|e| format!("Unable to read persisted fork choice from disk: {:?}", e))?
|
||||
} else {
|
||||
let finalized_snapshot = &self
|
||||
.finalized_snapshot
|
||||
.as_ref()
|
||||
.ok_or_else(|| "reduced_tree_fork_choice requires a finalized_snapshot")?;
|
||||
let genesis_block_root = self
|
||||
.genesis_block_root
|
||||
.ok_or_else(|| "reduced_tree_fork_choice requires a genesis_block_root")?;
|
||||
|
||||
let backend = ProtoArrayForkChoice::new(
|
||||
finalized_snapshot.beacon_block.message.slot,
|
||||
finalized_snapshot.beacon_block.message.state_root,
|
||||
// Note: here we set the `justified_epoch` to be the same as the epoch of the
|
||||
// finalized checkpoint. Whilst this finalized checkpoint may actually point to
|
||||
// a _later_ justified checkpoint, that checkpoint won't yet exist in the fork
|
||||
// choice.
|
||||
finalized_snapshot.beacon_state.current_epoch(),
|
||||
finalized_snapshot.beacon_state.current_epoch(),
|
||||
finalized_snapshot.beacon_block_root,
|
||||
)?;
|
||||
|
||||
ForkChoice::new(
|
||||
backend,
|
||||
genesis_block_root,
|
||||
&finalized_snapshot.beacon_state,
|
||||
)
|
||||
};
|
||||
|
||||
self.fork_choice = Some(fork_choice);
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
impl<TStoreMigrator, TSlotClock, TEthSpec, TEventHandler, THotStore, TColdStore>
    BeaconChainBuilder<
        Witness<
            TStore,
            TStoreMigrator,
            TSlotClock,
            CachingEth1Backend<TEthSpec, TStore>,
            CachingEth1Backend<TEthSpec>,
            TEthSpec,
            TEventHandler,
            THotStore,
            TColdStore,
        >,
    >
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
    THotStore: ItemStore<TEthSpec> + 'static,
    TColdStore: ItemStore<TEthSpec> + 'static,
    TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
    TSlotClock: SlotClock + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
@@ -570,30 +580,33 @@ where
            .log
            .as_ref()
            .ok_or_else(|| "dummy_eth1_backend requires a log".to_string())?;
        let store = self
            .store
            .clone()
            .ok_or_else(|| "dummy_eth1_backend requires a store.".to_string())?;

        let backend = CachingEth1Backend::new(Eth1Config::default(), log.clone(), store);
        let backend =
            CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone());

        let mut eth1_chain = Eth1Chain::new(backend);
        eth1_chain.use_dummy_backend = true;

        self.eth1_chain = Some(eth1_chain);
        self.eth1_chain = Some(Eth1Chain::new_dummy(backend));

        Ok(self)
    }
}

impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
impl<TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
    BeaconChainBuilder<
        Witness<TStore, TStoreMigrator, TestingSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
        Witness<
            TStoreMigrator,
            TestingSlotClock,
            TEth1Backend,
            TEthSpec,
            TEventHandler,
            THotStore,
            TColdStore,
        >,
    >
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    THotStore: ItemStore<TEthSpec> + 'static,
    TColdStore: ItemStore<TEthSpec> + 'static,
    TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
{
@@ -618,22 +631,24 @@ where
    }
}

impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>
    BeaconChainBuilder<
        Witness<
            TStore,
            TStoreMigrator,
            TSlotClock,
            TEth1Backend,
            TEthSpec,
            NullEventHandler<TEthSpec>,
            THotStore,
            TColdStore,
        >,
    >
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
    THotStore: ItemStore<TEthSpec> + 'static,
    TColdStore: ItemStore<TEthSpec> + 'static,
    TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
    TSlotClock: SlotClock + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
    TEthSpec: EthSpec + 'static,
{
    /// Sets the `BeaconChain` event handler to `NullEventHandler`.
@@ -651,7 +666,7 @@ fn genesis_block<T: EthSpec>(
        message: BeaconBlock::empty(&spec),
        // Empty signature, which should NEVER be read. This isn't to-spec, but makes the genesis
        // block consistent with every other block.
        signature: Signature::empty_signature(),
        signature: Signature::empty(),
    };
    genesis_block.message.state_root = genesis_state
        .update_tree_hash_cache()
@@ -663,12 +678,14 @@ fn genesis_block<T: EthSpec>(
#[cfg(test)]
mod test {
    use super::*;
    use crate::migrate::{MemoryStore, NullMigrator};
    use crate::migrate::NullMigrator;
    use eth2_hashing::hash;
    use genesis::{generate_deterministic_keypairs, interop_genesis_state};
    use sloggers::{null::NullLoggerBuilder, Build};
    use ssz::Encode;
    use std::time::Duration;
    use store::config::StoreConfig;
    use store::{HotColdDB, MemoryStore};
    use tempfile::tempdir;
    use types::{EthSpec, MinimalEthSpec, Slot};

@@ -685,7 +702,12 @@ mod test {
        let genesis_time = 13_371_337;

        let log = get_logger();
        let store = Arc::new(MemoryStore::open());
        let store: HotColdDB<
            MinimalEthSpec,
            MemoryStore<MinimalEthSpec>,
            MemoryStore<MinimalEthSpec>,
        > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone())
        .unwrap();
        let spec = MinimalEthSpec::default_spec();
        let data_dir = tempdir().expect("should create temporary data_dir");

@@ -698,7 +720,7 @@ mod test {

        let chain = BeaconChainBuilder::new(MinimalEthSpec)
            .logger(log.clone())
            .store(store)
            .store(Arc::new(store))
            .store_migrator(NullMigrator)
            .data_dir(data_dir.path().to_path_buf())
            .genesis_state(genesis_state)
@@ -708,8 +730,6 @@ mod test {
            .null_event_handler()
            .testing_slot_clock(Duration::from_secs(1))
            .expect("should configure testing slot clock")
            .reduced_tree_fork_choice()
            .expect("should add fork choice to builder")
            .build()
            .expect("should build");

@@ -1,17 +1,20 @@
use crate::beacon_chain::ForkChoiceError;
use crate::eth1_chain::Error as Eth1ChainError;
use crate::fork_choice::Error as ForkChoiceError;
use crate::naive_aggregation_pool::Error as NaiveAggregationError;
use crate::observed_attestations::Error as ObservedAttestationsError;
use crate::observed_attesters::Error as ObservedAttestersError;
use crate::observed_block_producers::Error as ObservedBlockProducersError;
use operation_pool::OpPoolError;
use safe_arith::ArithError;
use ssz::DecodeError;
use ssz_types::Error as SszTypesError;
use state_processing::{
    block_signature_verifier::Error as BlockSignatureVerifierError,
    per_block_processing::errors::AttestationValidationError,
    signature_sets::Error as SignatureSetError, BlockProcessingError, SlotProcessingError,
    per_block_processing::errors::{
        AttestationValidationError, AttesterSlashingValidationError, ExitValidationError,
        ProposerSlashingValidationError,
    },
    signature_sets::Error as SignatureSetError,
    BlockProcessingError, SlotProcessingError,
};
use std::time::Duration;
use types::*;
@@ -26,7 +29,7 @@ macro_rules! easy_from_to {
    };
}

#[derive(Debug, PartialEq)]
#[derive(Debug)]
pub enum BeaconChainError {
    InsufficientValidators,
    UnableToReadSlot,
@@ -50,6 +53,9 @@ pub enum BeaconChainError {
    },
    CannotAttestToFutureState,
    AttestationValidationError(AttestationValidationError),
    ExitValidationError(ExitValidationError),
    ProposerSlashingValidationError(ProposerSlashingValidationError),
    AttesterSlashingValidationError(AttesterSlashingValidationError),
    StateSkipTooLarge {
        start_slot: Slot,
        requested_slot: Slot,
@@ -62,7 +68,7 @@ pub enum BeaconChainError {
    AttestationCacheLockTimeout,
    ValidatorPubkeyCacheLockTimeout,
    IncorrectStateForAttestation(RelativeEpochError),
    InvalidValidatorPubkeyBytes(DecodeError),
    InvalidValidatorPubkeyBytes(bls::Error),
    ValidatorPubkeyCacheIncomplete(usize),
    SignatureSetError(SignatureSetError),
    BlockSignatureVerifierError(state_processing::block_signature_verifier::Error),
@@ -78,6 +84,9 @@ pub enum BeaconChainError {

easy_from_to!(SlotProcessingError, BeaconChainError);
easy_from_to!(AttestationValidationError, BeaconChainError);
easy_from_to!(ExitValidationError, BeaconChainError);
easy_from_to!(ProposerSlashingValidationError, BeaconChainError);
easy_from_to!(AttesterSlashingValidationError, BeaconChainError);
easy_from_to!(SszTypesError, BeaconChainError);
easy_from_to!(OpPoolError, BeaconChainError);
easy_from_to!(NaiveAggregationError, BeaconChainError);
@@ -87,7 +96,7 @@ easy_from_to!(ObservedBlockProducersError, BeaconChainError);
easy_from_to!(BlockSignatureVerifierError, BeaconChainError);
easy_from_to!(ArithError, BeaconChainError);

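The `easy_from_to!` invocations above presumably expand to `From` conversions that wrap each source error in the enum variant of the same name; the macro body is truncated by the hunk above, so this is a sketch of the likely expansion, not the exact code:

// Hand-written equivalent of one invocation, e.g.
// `easy_from_to!(ExitValidationError, BeaconChainError);`
impl From<ExitValidationError> for BeaconChainError {
    fn from(e: ExitValidationError) -> BeaconChainError {
        // Wrap the inner error in the variant of the same name, enabling `?`
        // conversions from `Result<_, ExitValidationError>`.
        BeaconChainError::ExitValidationError(e)
    }
}
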
#[derive(Debug, PartialEq)]
#[derive(Debug)]
pub enum BlockProductionError {
    UnableToGetBlockRootFromState,
    UnableToReadSlot,

@@ -1,4 +1,5 @@
use crate::metrics;
use environment::TaskExecutor;
use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService};
use eth2_hashing::hash;
use slog::{debug, error, trace, Logger};
@@ -9,8 +10,7 @@ use std::cmp::Ordering;
use std::collections::HashMap;
use std::iter::DoubleEndedIterator;
use std::marker::PhantomData;
use std::sync::Arc;
use store::{DBColumn, Error as StoreError, SimpleStoreItem, Store};
use store::{DBColumn, Error as StoreError, StoreItem};
use types::{
    BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned,
    DEPOSIT_TREE_DEPTH,
@@ -19,7 +19,7 @@ use types::{
type BlockNumber = u64;
type Eth1DataVoteCount = HashMap<(Eth1Data, BlockNumber), u64>;

#[derive(Debug, PartialEq)]
#[derive(Debug)]
pub enum Error {
    /// Unable to return an Eth1Data for the given epoch.
    EpochUnavailable,
@@ -59,7 +59,7 @@ pub struct SszEth1 {
    backend_bytes: Vec<u8>,
}

impl SimpleStoreItem for SszEth1 {
impl StoreItem for SszEth1 {
    fn db_column() -> DBColumn {
        DBColumn::Eth1Cache
    }
@@ -74,24 +74,22 @@ impl SimpleStoreItem for SszEth1 {
}

/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`.
pub struct Eth1Chain<T, E, S>
pub struct Eth1Chain<T, E>
where
    T: Eth1ChainBackend<E, S>,
    T: Eth1ChainBackend<E>,
    E: EthSpec,
    S: Store<E>,
{
    backend: T,
    /// When `true`, the backend will be ignored and dummy data from the 2019 Canada interop method
    /// will be used instead.
    pub use_dummy_backend: bool,
    _phantom: PhantomData<(E, S)>,
    use_dummy_backend: bool,
    _phantom: PhantomData<E>,
}

impl<T, E, S> Eth1Chain<T, E, S>
impl<T, E> Eth1Chain<T, E>
where
    T: Eth1ChainBackend<E, S>,
    T: Eth1ChainBackend<E>,
    E: EthSpec,
    S: Store<E>,
{
    pub fn new(backend: T) -> Self {
        Self {
@@ -101,6 +99,13 @@ where
        }
    }

    pub fn new_dummy(backend: T) -> Self {
        Self {
            use_dummy_backend: true,
            ..Self::new(backend)
        }
    }

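`new_dummy` relies on Rust's struct-update syntax: `..Self::new(backend)` fills every remaining field from a freshly constructed instance, so only the overridden flag is spelled out. The same pattern on a self-contained type (illustrative, not from this crate):

// Struct-update syntax in isolation: construct a default, override one field.
struct Settings {
    use_dummy_backend: bool,
    retries: u32,
}

impl Settings {
    fn new() -> Self {
        Settings {
            use_dummy_backend: false,
            retries: 3,
        }
    }

    fn new_dummy() -> Self {
        // All fields other than `use_dummy_backend` come from `Self::new()`.
        Settings {
            use_dummy_backend: true,
            ..Self::new()
        }
    }
}
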
    /// Returns the `Eth1Data` that should be included in a block being produced for the given
    /// `state`.
    pub fn eth1_data_for_block_production(
@@ -109,7 +114,7 @@ where
        spec: &ChainSpec,
    ) -> Result<Eth1Data, Error> {
        if self.use_dummy_backend {
            let dummy_backend: DummyEth1ChainBackend<E, S> = DummyEth1ChainBackend::default();
            let dummy_backend: DummyEth1ChainBackend<E> = DummyEth1ChainBackend::default();
            dummy_backend.eth1_data(state, spec)
        } else {
            self.backend.eth1_data(state, spec)
@@ -131,7 +136,7 @@ where
        spec: &ChainSpec,
    ) -> Result<Vec<Deposit>, Error> {
        if self.use_dummy_backend {
            let dummy_backend: DummyEth1ChainBackend<E, S> = DummyEth1ChainBackend::default();
            let dummy_backend: DummyEth1ChainBackend<E> = DummyEth1ChainBackend::default();
            dummy_backend.queued_deposits(state, eth1_data_vote, spec)
        } else {
            self.backend.queued_deposits(state, eth1_data_vote, spec)
@@ -144,11 +149,11 @@ where
    pub fn from_ssz_container(
        ssz_container: &SszEth1,
        config: Eth1Config,
        store: Arc<S>,
        log: &Logger,
        spec: ChainSpec,
    ) -> Result<Self, String> {
        let backend =
            Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, store, log.clone())?;
            Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, log.clone(), spec)?;
        Ok(Self {
            use_dummy_backend: ssz_container.use_dummy_backend,
            backend,
@@ -170,7 +175,7 @@ where
    }
}

pub trait Eth1ChainBackend<T: EthSpec, S: Store<T>>: Sized + Send + Sync {
pub trait Eth1ChainBackend<T: EthSpec>: Sized + Send + Sync {
    /// Returns the `Eth1Data` that should be included in a block being produced for the given
    /// `state`.
    fn eth1_data(&self, beacon_state: &BeaconState<T>, spec: &ChainSpec)
@@ -197,8 +202,8 @@ pub trait Eth1ChainBackend<T: EthSpec, S: Store<T>>: Sized + Send + Sync {
    fn from_bytes(
        bytes: &[u8],
        config: Eth1Config,
        store: Arc<S>,
        log: Logger,
        spec: ChainSpec,
    ) -> Result<Self, String>;
}

@@ -207,9 +212,9 @@ pub trait Eth1ChainBackend<T: EthSpec, S: Store<T>>: Sized + Send + Sync {
/// Never creates deposits, therefore the validator set is static.
///
/// This was used in the 2019 Canada interop workshops.
pub struct DummyEth1ChainBackend<T: EthSpec, S: Store<T>>(PhantomData<(T, S)>);
pub struct DummyEth1ChainBackend<T: EthSpec>(PhantomData<T>);

impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for DummyEth1ChainBackend<T, S> {
impl<T: EthSpec> Eth1ChainBackend<T> for DummyEth1ChainBackend<T> {
    /// Produce some deterministic junk based upon the current epoch.
    fn eth1_data(&self, state: &BeaconState<T>, _spec: &ChainSpec) -> Result<Eth1Data, Error> {
        let current_epoch = state.current_epoch();
@@ -245,14 +250,14 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for DummyEth1ChainBackend<T
    fn from_bytes(
        _bytes: &[u8],
        _config: Eth1Config,
        _store: Arc<S>,
        _log: Logger,
        _spec: ChainSpec,
    ) -> Result<Self, String> {
        Ok(Self(PhantomData))
    }
}

impl<T: EthSpec, S: Store<T>> Default for DummyEth1ChainBackend<T, S> {
impl<T: EthSpec> Default for DummyEth1ChainBackend<T> {
    fn default() -> Self {
        Self(PhantomData)
    }
@@ -264,45 +269,40 @@ impl<T: EthSpec, S: Store<T>> Default for DummyEth1ChainBackend<T, S> {
/// The `core` connects to some external eth1 client (e.g., Parity/Geth) and polls it for
/// information.
#[derive(Clone)]
pub struct CachingEth1Backend<T: EthSpec, S> {
pub struct CachingEth1Backend<T: EthSpec> {
    pub core: HttpService,
    store: Arc<S>,
    log: Logger,
    _phantom: PhantomData<T>,
}

impl<T: EthSpec, S: Store<T>> CachingEth1Backend<T, S> {
impl<T: EthSpec> CachingEth1Backend<T> {
    /// Instantiates `self` with empty caches.
    ///
    /// Does not connect to the eth1 node or start any tasks to keep the cache updated.
    pub fn new(config: Eth1Config, log: Logger, store: Arc<S>) -> Self {
    pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self {
        Self {
            core: HttpService::new(config, log.clone()),
            store,
            core: HttpService::new(config, log.clone(), spec),
            log,
            _phantom: PhantomData,
        }
    }

    /// Starts the routine which connects to the external eth1 node and updates the caches.
    pub fn start(&self, exit: tokio::sync::oneshot::Receiver<()>) {
        // don't need to spawn as a task is being spawned in auto_update
        // TODO: check if this is correct
        HttpService::auto_update(self.core.clone(), exit);
    pub fn start(&self, handle: TaskExecutor) {
        HttpService::auto_update(self.core.clone(), handle);
    }

    /// Instantiates `self` from an existing service.
    pub fn from_service(service: HttpService, store: Arc<S>) -> Self {
    pub fn from_service(service: HttpService) -> Self {
        Self {
            log: service.log.clone(),
            core: service,
            store,
            _phantom: PhantomData,
        }
    }
}

impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S> {
impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
    fn eth1_data(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Result<Eth1Data, Error> {
        let period = T::SlotsPerEth1VotingPeriod::to_u64();
        let voting_period_start_slot = (state.slot / period) * period;
@@ -331,7 +331,7 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S
        //
        // Here we choose the eth1_data corresponding to the latest block in our voting window.
        // If no votes exist, choose `state.eth1_data` as default vote.
        let default_vote = votes_to_consider
        votes_to_consider
            .iter()
            .max_by(|(_, x), (_, y)| x.cmp(y))
            .map(|vote| {
@@ -355,8 +355,7 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S
                );
                metrics::inc_counter(&metrics::DEFAULT_ETH1_VOTES);
                vote
            });
            default_vote
            })
        };

        debug!(
@@ -410,13 +409,12 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S
    fn from_bytes(
        bytes: &[u8],
        config: Eth1Config,
        store: Arc<S>,
        log: Logger,
        spec: ChainSpec,
    ) -> Result<Self, String> {
        let inner = HttpService::from_bytes(bytes, config, log.clone())?;
        let inner = HttpService::from_bytes(bytes, config, log.clone(), spec)?;
        Ok(Self {
            core: inner,
            store,
            log,
            _phantom: PhantomData,
        })
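The vote-selection hunk above folds the old `default_vote` binding into a single expression: take the candidate with the highest vote count, falling back to a default when no votes exist. The shape of that expression on plain types (a sketch, not the crate's exact logic):

use std::collections::HashMap;

// Pick the most-voted candidate, or the fallback when the map is empty.
fn most_voted_or(votes: &HashMap<String, u64>, fallback: String) -> String {
    votes
        .iter()
        .max_by(|(_, x), (_, y)| x.cmp(y))
        .map(|(candidate, _count)| candidate.clone())
        .unwrap_or(fallback)
}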
@@ -573,17 +571,22 @@ mod test {
mod eth1_chain_json_backend {
    use super::*;
    use eth1::DepositLog;
    use store::MemoryStore;
    use types::test_utils::{generate_deterministic_keypair, TestingDepositBuilder};
    use types::{
        test_utils::{generate_deterministic_keypair, TestingDepositBuilder},
        EthSpec, MainnetEthSpec,
    };

    fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E, MemoryStore<E>>, E, MemoryStore<E>> {
    fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E>, E> {
        let eth1_config = Eth1Config {
            ..Eth1Config::default()
        };

        let log = null_logger().unwrap();
        let store = Arc::new(MemoryStore::open());
        Eth1Chain::new(CachingEth1Backend::new(eth1_config, log, store))
        Eth1Chain::new(CachingEth1Backend::new(
            eth1_config,
            log,
            MainnetEthSpec::default_spec(),
        ))
    }

    fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog {
@@ -597,6 +600,7 @@ mod test {
            deposit_data,
            block_number: i,
            index: i,
            signature_is_valid: true,
        }
    }

@@ -1,6 +1,10 @@
use bus::Bus;
use parking_lot::Mutex;
use serde_derive::{Deserialize, Serialize};
use slog::{error, Logger};
use std::marker::PhantomData;
use types::{Attestation, Epoch, EthSpec, Hash256, SignedBeaconBlock};
use std::sync::Arc;
use types::{Attestation, Epoch, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockHash};
pub use websocket_server::WebSocketSender;

pub trait EventHandler<T: EthSpec>: Sized + Send + Sync {
@@ -18,6 +22,84 @@ impl<T: EthSpec> EventHandler<T> for WebSocketSender<T> {
    }
}

pub struct ServerSentEvents<T: EthSpec> {
    // Bus<> is itself Sync + Send. We use Mutex<> here only because the surrounding code does
    // not enforce mutability statically (i.e. relies on interior mutability).
    head_changed_queue: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
    log: Logger,
    _phantom: PhantomData<T>,
}

impl<T: EthSpec> ServerSentEvents<T> {
    pub fn new(log: Logger) -> (Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>) {
        let bus = Bus::new(T::slots_per_epoch() as usize);
        let mutex = Mutex::new(bus);
        let arc = Arc::new(mutex);
        let this = Self {
            head_changed_queue: arc.clone(),
            log,
            _phantom: PhantomData,
        };
        (this, arc)
    }
}

impl<T: EthSpec> EventHandler<T> for ServerSentEvents<T> {
    fn register(&self, kind: EventKind<T>) -> Result<(), String> {
        match kind {
            EventKind::BeaconHeadChanged {
                current_head_beacon_block_root,
                ..
            } => {
                let mut guard = self.head_changed_queue.lock();
                if guard
                    .try_broadcast(current_head_beacon_block_root.into())
                    .is_err()
                {
                    error!(
                        self.log,
                        "Head change streaming queue full";
                        "dropped_change" => format!("{}", current_head_beacon_block_root),
                    );
                }
                Ok(())
            }
            _ => Ok(()),
        }
    }
}

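On the consuming side, the second element of the tuple returned by `ServerSentEvents::new` is what an SSE endpoint would use to stream head changes: register a reader while holding the lock, then block on it with the lock released. A sketch against the `bus` crate's `add_rx`/`recv` API, with the payload type simplified to `u64`:

use bus::Bus;
use parking_lot::Mutex;
use std::sync::Arc;

// Sketch of a consumer of the head-change queue.
fn stream_heads(queue: Arc<Mutex<Bus<u64>>>) {
    // `add_rx` registers a new reader; take it under the lock and drop the
    // guard immediately so `try_broadcast` callers are not blocked.
    let mut rx = queue.lock().add_rx();

    // Each broadcast head change arrives as its own message.
    while let Ok(head) = rx.recv() {
        println!("new head: {}", head);
    }
}
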
// An event handler that pushes events to both the websockets handler and the SSE handler.
// Named after the unix `tee` command. Meant as a temporary solution before ditching WebSockets
// completely once SSE functions well enough.
pub struct TeeEventHandler<E: EthSpec> {
    websockets_handler: WebSocketSender<E>,
    sse_handler: ServerSentEvents<E>,
}

impl<E: EthSpec> TeeEventHandler<E> {
    #[allow(clippy::type_complexity)]
    pub fn new(
        log: Logger,
        websockets_handler: WebSocketSender<E>,
    ) -> Result<(Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>), String> {
        let (sse_handler, bus) = ServerSentEvents::new(log);
        let result = Self {
            websockets_handler,
            sse_handler,
        };
        Ok((result, bus))
    }
}

impl<E: EthSpec> EventHandler<E> for TeeEventHandler<E> {
    fn register(&self, kind: EventKind<E>) -> Result<(), String> {
        self.websockets_handler.register(kind.clone())?;
        self.sse_handler.register(kind)?;
        Ok(())
    }
}

impl<T: EthSpec> EventHandler<T> for NullEventHandler<T> {
    fn register(&self, _kind: EventKind<T>) -> Result<(), String> {
        Ok(())
@@ -30,7 +112,7 @@ impl<T: EthSpec> Default for NullEventHandler<T> {
    }
}

#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(
    bound = "T: EthSpec",
    rename_all = "snake_case",

@@ -1,300 +0,0 @@
mod checkpoint_manager;

use crate::{errors::BeaconChainError, metrics, BeaconChain, BeaconChainTypes};
use checkpoint_manager::{get_effective_balances, CheckpointManager, CheckpointWithBalances};
use parking_lot::{RwLock, RwLockReadGuard};
use proto_array_fork_choice::{core::ProtoArray, ProtoArrayForkChoice};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use state_processing::common::get_indexed_attestation;
use std::marker::PhantomData;
use store::{DBColumn, Error as StoreError, SimpleStoreItem};
use types::{BeaconBlock, BeaconState, BeaconStateError, Epoch, Hash256, IndexedAttestation, Slot};

type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, PartialEq)]
pub enum Error {
    MissingBlock(Hash256),
    MissingState(Hash256),
    BackendError(String),
    BeaconStateError(BeaconStateError),
    StoreError(StoreError),
    BeaconChainError(Box<BeaconChainError>),
    UnknownBlockSlot(Hash256),
    UnknownJustifiedBlock(Hash256),
    UnknownJustifiedState(Hash256),
    UnableToJsonEncode(String),
    InvalidAttestation,
}

pub struct ForkChoice<T: BeaconChainTypes> {
    backend: ProtoArrayForkChoice,
    /// Used for resolving the `0x00..00` alias back to genesis.
    ///
    /// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root
    /// whenever the struct was instantiated.
    genesis_block_root: Hash256,
    checkpoint_manager: RwLock<CheckpointManager>,
    _phantom: PhantomData<T>,
}

impl<T: BeaconChainTypes> PartialEq for ForkChoice<T> {
    /// This implementation ignores the `store`.
    fn eq(&self, other: &Self) -> bool {
        self.backend == other.backend
            && self.genesis_block_root == other.genesis_block_root
            && *self.checkpoint_manager.read() == *other.checkpoint_manager.read()
    }
}

impl<T: BeaconChainTypes> ForkChoice<T> {
    /// Instantiate a new fork chooser.
    ///
    /// "Genesis" does not necessarily need to be the absolute genesis, it can be some finalized
    /// block.
    pub fn new(
        backend: ProtoArrayForkChoice,
        genesis_block_root: Hash256,
        genesis_state: &BeaconState<T::EthSpec>,
    ) -> Self {
        let genesis_checkpoint = CheckpointWithBalances {
            epoch: genesis_state.current_epoch(),
            root: genesis_block_root,
            balances: get_effective_balances(genesis_state),
        };

        Self {
            backend,
            genesis_block_root,
            checkpoint_manager: RwLock::new(CheckpointManager::new(genesis_checkpoint)),
            _phantom: PhantomData,
        }
    }

    /// Run the fork choice rule to determine the head.
    pub fn find_head(&self, chain: &BeaconChain<T>) -> Result<Hash256> {
        let timer = metrics::start_timer(&metrics::FORK_CHOICE_FIND_HEAD_TIMES);

        let remove_alias = |root| {
            if root == Hash256::zero() {
                self.genesis_block_root
            } else {
                root
            }
        };

        let mut manager = self.checkpoint_manager.write();
        manager.maybe_update(chain.slot()?, chain)?;

        let result = self
            .backend
            .find_head(
                manager.current.justified.epoch,
                remove_alias(manager.current.justified.root),
                manager.current.finalized.epoch,
                &manager.current.justified.balances,
            )
            .map_err(Into::into);

        metrics::stop_timer(timer);

        result
    }

    /// Returns true if the given block is known to fork choice.
    pub fn contains_block(&self, block_root: &Hash256) -> bool {
        self.backend.contains_block(block_root)
    }

    /// Returns the slot and state root for the given block root, if known.
    pub fn block_slot_and_state_root(&self, block_root: &Hash256) -> Option<(Slot, Hash256)> {
        self.backend.block_slot_and_state_root(block_root)
    }

    /// Process all attestations in the given `block`.
    ///
    /// Assumes the block (and therefore its attestations) are valid. It is a logic error to
    /// provide an invalid block.
    pub fn process_block(
        &self,
        chain: &BeaconChain<T>,
        state: &BeaconState<T::EthSpec>,
        block: &BeaconBlock<T::EthSpec>,
        block_root: Hash256,
    ) -> Result<()> {
        let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);

        self.checkpoint_manager
            .write()
            .process_state(block_root, state, chain, &self.backend)?;
        self.checkpoint_manager
            .write()
            .maybe_update(chain.slot()?, chain)?;

        // Note: we never count the block as a latest message, only attestations.
        for attestation in &block.body.attestations {
            // If the `data.beacon_block_root` block is not known to the fork choice, simply ignore
            // the vote.
            if self
                .backend
                .contains_block(&attestation.data.beacon_block_root)
            {
                let committee =
                    state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
                let indexed_attestation =
                    get_indexed_attestation(committee.committee, &attestation)
                        .map_err(|_| Error::InvalidAttestation)?;
                self.process_indexed_attestation(&indexed_attestation)?;
            }
        }

        // This does not apply a vote to the block, it just makes fork choice aware of the block so
        // it can still be identified as the head even if it doesn't have any votes.
        self.backend.process_block(
            block.slot,
            block_root,
            block.parent_root,
            block.state_root,
            state.current_justified_checkpoint.epoch,
            state.finalized_checkpoint.epoch,
        )?;

        metrics::stop_timer(timer);

        Ok(())
    }

    /// Process an attestation which references `block` in `attestation.data.beacon_block_root`.
    ///
    /// Assumes the attestation is valid.
    pub fn process_indexed_attestation(
        &self,
        attestation: &IndexedAttestation<T::EthSpec>,
    ) -> Result<()> {
        let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);

        let block_hash = attestation.data.beacon_block_root;

        // Ignore any attestations to the zero hash.
        //
        // This is an edge case that results from the spec aliasing the zero hash to the genesis
        // block. Attesters may attest to the zero hash if they have never seen a block.
        //
        // We have two options here:
        //
        // 1. Apply all zero-hash attestations to the zero hash.
        // 2. Ignore all attestations to the zero hash.
        //
        // (1) becomes weird once we hit finality and fork choice drops the genesis block. (2) is
        // fine because votes to the genesis block are not useful; all validators implicitly attest
        // to genesis just by being present in the chain.
        //
        // Additionally, don't add any block hash to fork choice unless we have imported the block.
        if block_hash != Hash256::zero() {
            for validator_index in attestation.attesting_indices.iter() {
                self.backend.process_attestation(
                    *validator_index as usize,
                    block_hash,
                    attestation.data.target.epoch,
                )?;
            }
        }

        metrics::stop_timer(timer);

        Ok(())
    }

    /// Returns the latest message for a given validator, if any.
    ///
    /// Returns `(block_root, target_epoch)`.
    pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> {
        self.backend.latest_message(validator_index)
    }

    /// Trigger a prune on the underlying fork choice backend.
    pub fn prune(&self) -> Result<()> {
        let finalized_root = self.checkpoint_manager.read().current.finalized.root;

        self.backend.maybe_prune(finalized_root).map_err(Into::into)
    }

    /// Returns a read-lock to the core `ProtoArray` struct.
    ///
    /// Should only be used when encoding/decoding during troubleshooting.
    pub fn core_proto_array(&self) -> RwLockReadGuard<ProtoArray> {
        self.backend.core_proto_array()
    }

    /// Returns a `SszForkChoice` which contains the current state of `Self`.
    pub fn as_ssz_container(&self) -> SszForkChoice {
        SszForkChoice {
            genesis_block_root: self.genesis_block_root.clone(),
            checkpoint_manager: self.checkpoint_manager.read().clone(),
            backend_bytes: self.backend.as_bytes(),
        }
    }

    /// Instantiates `Self` from a prior `SszForkChoice`.
    ///
    /// The created `Self` will have the same state as the `Self` that created the `SszForkChoice`.
    pub fn from_ssz_container(ssz_container: SszForkChoice) -> Result<Self> {
        let backend = ProtoArrayForkChoice::from_bytes(&ssz_container.backend_bytes)?;

        Ok(Self {
            backend,
            genesis_block_root: ssz_container.genesis_block_root,
            checkpoint_manager: RwLock::new(ssz_container.checkpoint_manager),
            _phantom: PhantomData,
        })
    }
}

/// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes.
///
/// This is used when persisting the state of the `BeaconChain` to disk.
#[derive(Encode, Decode, Clone)]
pub struct SszForkChoice {
    genesis_block_root: Hash256,
    checkpoint_manager: CheckpointManager,
    backend_bytes: Vec<u8>,
}

impl From<BeaconStateError> for Error {
    fn from(e: BeaconStateError) -> Error {
        Error::BeaconStateError(e)
    }
}

impl From<BeaconChainError> for Error {
    fn from(e: BeaconChainError) -> Error {
        Error::BeaconChainError(Box::new(e))
    }
}

impl From<StoreError> for Error {
    fn from(e: StoreError) -> Error {
        Error::StoreError(e)
    }
}

impl From<String> for Error {
    fn from(e: String) -> Error {
        Error::BackendError(e)
    }
}

impl SimpleStoreItem for SszForkChoice {
    fn db_column() -> DBColumn {
        DBColumn::ForkChoice
    }

    fn as_store_bytes(&self) -> Vec<u8> {
        self.as_ssz_bytes()
    }

    fn from_store_bytes(bytes: &[u8]) -> std::result::Result<Self, StoreError> {
        Self::from_ssz_bytes(bytes).map_err(Into::into)
    }
}
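With this `SimpleStoreItem` impl, persisting fork choice is a plain SSZ round-trip through the store; `reduced_tree_fork_choice` in the builder above performs the read half via `store.get::<SszForkChoice>(..)`. The symmetry of the two trait methods, sketched directly:

// What the store does with a `SimpleStoreItem` on write and read: SSZ-encode
// into the item's column, decode on the way back out.
fn round_trip(item: &SszForkChoice) -> std::result::Result<SszForkChoice, StoreError> {
    let bytes = item.as_store_bytes();
    SszForkChoice::from_store_bytes(&bytes)
}
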
@@ -1,340 +0,0 @@
use super::Error;
use crate::{metrics, BeaconChain, BeaconChainTypes};
use proto_array_fork_choice::ProtoArrayForkChoice;
use ssz_derive::{Decode, Encode};
use types::{BeaconState, Checkpoint, Epoch, EthSpec, Hash256, Slot};

const MAX_BALANCE_CACHE_SIZE: usize = 4;

/// An item that is stored in the `BalancesCache`.
#[derive(PartialEq, Clone, Encode, Decode)]
struct CacheItem {
    /// The block root at which `self.balances` are valid.
    block_root: Hash256,
    /// The `state.balances` list.
    balances: Vec<u64>,
}

/// Provides a cache to avoid reading `BeaconState` from disk when updating the current justified
/// checkpoint.
///
/// It should store a mapping of `epoch_boundary_block_root -> state.balances`.
#[derive(PartialEq, Clone, Default, Encode, Decode)]
struct BalancesCache {
    items: Vec<CacheItem>,
}

impl BalancesCache {
    /// Inspect the given `state` and determine the root of the block at the first slot of
    /// `state.current_epoch`. If there is not already some entry for the given block root, then
    /// add `state.balances` to the cache.
    pub fn process_state<E: EthSpec>(
        &mut self,
        block_root: Hash256,
        state: &BeaconState<E>,
    ) -> Result<(), Error> {
        // We are only interested in balances from states that are at the start of an epoch,
        // because this is where the `current_justified_checkpoint.root` will point.
        if !Self::is_first_block_in_epoch(block_root, state)? {
            return Ok(());
        }

        let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch());
        let epoch_boundary_root = if epoch_boundary_slot == state.slot {
            block_root
        } else {
            // This call remains sensible as long as `state.block_roots` is larger than a single
            // epoch.
            *state.get_block_root(epoch_boundary_slot)?
        };

        if self.position(epoch_boundary_root).is_none() {
            let item = CacheItem {
                block_root: epoch_boundary_root,
                balances: get_effective_balances(state),
            };

            if self.items.len() == MAX_BALANCE_CACHE_SIZE {
                self.items.remove(0);
            }

            self.items.push(item);
        }

        Ok(())
    }

    /// Returns `true` if the given `block_root` is the first/only block to have been processed in
    /// the epoch of the given `state`.
    ///
    /// We can determine if it is the first block by looking back through `state.block_roots` to
    /// see if there is a block in the current epoch with a different root.
    fn is_first_block_in_epoch<E: EthSpec>(
        block_root: Hash256,
        state: &BeaconState<E>,
    ) -> Result<bool, Error> {
        let mut prior_block_found = false;

        for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) {
            if slot < state.slot {
                if *state.get_block_root(slot)? != block_root {
                    prior_block_found = true;
                    break;
                }
            } else {
                break;
            }
        }

        Ok(!prior_block_found)
    }

    fn position(&self, block_root: Hash256) -> Option<usize> {
        self.items
            .iter()
            .position(|item| item.block_root == block_root)
    }

    /// Get the balances for the given `block_root`, if any.
    ///
    /// If some balances are found, they are removed from the cache.
    pub fn get(&mut self, block_root: Hash256) -> Option<Vec<u64>> {
        let i = self.position(block_root)?;
        Some(self.items.remove(i).balances)
    }
}

/// Returns the effective balances for every validator in the given `state`.
///
/// Any validator who is not active in the epoch of the given `state` is assigned a balance of
/// zero.
pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
    state
        .validators
        .iter()
        .map(|validator| {
            if validator.is_active_at(state.current_epoch()) {
                validator.effective_balance
            } else {
                0
            }
        })
        .collect()
}

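A concrete reading of `get_effective_balances`: an exited or not-yet-activated validator keeps its index in the list but contributes zero weight to fork choice. The same masking on plain tuples of `(effective_balance, is_active)`:

// Mask inactive validators to zero while preserving index positions.
fn masked_balances(validators: &[(u64, bool)]) -> Vec<u64> {
    validators
        .iter()
        .map(|&(balance, active)| if active { balance } else { 0 })
        .collect()
}

// masked_balances(&[(32, true), (32, false), (16, true)]) == vec![32, 0, 16]
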
/// A `types::Checkpoint` that also stores the validator balances from a `BeaconState`.
///
/// Useful because we need to track the justified checkpoint balances.
#[derive(PartialEq, Clone, Encode, Decode)]
pub struct CheckpointWithBalances {
    pub epoch: Epoch,
    pub root: Hash256,
    /// These are the balances of the state with `self.root`.
    ///
    /// Importantly, these are _not_ the balances of the first state that we saw that has
    /// `self.epoch` and `self.root` as `state.current_justified_checkpoint`. These are the
    /// balances of the state from the block with `state.current_justified_checkpoint.root`.
    pub balances: Vec<u64>,
}

impl Into<Checkpoint> for CheckpointWithBalances {
    fn into(self) -> Checkpoint {
        Checkpoint {
            epoch: self.epoch,
            root: self.root,
        }
    }
}

/// A pair of checkpoints, representing `state.current_justified_checkpoint` and
/// `state.finalized_checkpoint` for some `BeaconState`.
#[derive(PartialEq, Clone, Encode, Decode)]
pub struct FFGCheckpoints {
    pub justified: CheckpointWithBalances,
    pub finalized: Checkpoint,
}

/// A struct to manage the justified and finalized checkpoints to be used for `ForkChoice`.
///
/// This struct exists to manage the `should_update_justified_checkpoint` logic in the fork choice
/// section of the spec:
///
/// https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/fork-choice.md#should_update_justified_checkpoint
#[derive(PartialEq, Clone, Encode, Decode)]
pub struct CheckpointManager {
    /// The current FFG checkpoints that should be used for finding the head.
    pub current: FFGCheckpoints,
    /// The best-known checkpoints that should be moved to `self.current` when the time is right.
    best: FFGCheckpoints,
    /// The epoch at which `self.current` should become `self.best`, if any.
    update_at: Option<Epoch>,
    /// A cache used to try and avoid DB reads when updating `self.current` and `self.best`.
    balances_cache: BalancesCache,
}

impl CheckpointManager {
    /// Create a new checkpoint cache from `genesis_checkpoint` derived from the genesis block.
    pub fn new(genesis_checkpoint: CheckpointWithBalances) -> Self {
        let ffg_checkpoint = FFGCheckpoints {
            justified: genesis_checkpoint.clone(),
            finalized: genesis_checkpoint.into(),
        };
        Self {
            current: ffg_checkpoint.clone(),
            best: ffg_checkpoint,
            update_at: None,
            balances_cache: BalancesCache::default(),
        }
    }

    /// Potentially updates `self.current`, if the conditions are correct.
    ///
    /// Should be called before running the fork choice `find_head` function to ensure
    /// `self.current` is up-to-date.
    pub fn maybe_update<T: BeaconChainTypes>(
        &mut self,
        current_slot: Slot,
        chain: &BeaconChain<T>,
    ) -> Result<(), Error> {
        if self.best.justified.epoch > self.current.justified.epoch {
            let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());

            match self.update_at {
                None => {
                    if self.best.justified.epoch > self.current.justified.epoch {
                        if Self::compute_slots_since_epoch_start::<T>(current_slot)
                            < chain.spec.safe_slots_to_update_justified
                        {
                            self.current = self.best.clone();
                        } else {
                            self.update_at = Some(current_epoch + 1)
                        }
                    }
                }
                Some(epoch) if epoch <= current_epoch => {
                    self.current = self.best.clone();
                    self.update_at = None
                }
                _ => {}
            }
        }

        Ok(())
    }

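`maybe_update` mirrors `should_update_justified_checkpoint` from the spec: a better justified checkpoint is adopted immediately only early in an epoch, otherwise adoption waits for the next epoch boundary. The gating condition on plain integers (the slot values below are illustrative):

// Adopt immediately only within the first `safe_slots` of the epoch.
fn adopt_now(slot: u64, slots_per_epoch: u64, safe_slots: u64) -> bool {
    slot % slots_per_epoch < safe_slots
}

// With 32 slots per epoch and `safe_slots_to_update_justified = 8`:
// adopt_now(68, 32, 8) == true   (slot 68 is 4 slots into its epoch)
// adopt_now(77, 32, 8) == false  (13 slots in, so `update_at` is deferred)
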
    /// Checks the given `state` (must correspond to the given `block_root`) to see if it contains
    /// a `current_justified_checkpoint` that is better than `self.best_justified_checkpoint`. If
    /// so, the value is updated.
    ///
    /// Note: this does not update `self.justified_checkpoint`.
    pub fn process_state<T: BeaconChainTypes>(
        &mut self,
        block_root: Hash256,
        state: &BeaconState<T::EthSpec>,
        chain: &BeaconChain<T>,
        proto_array: &ProtoArrayForkChoice,
    ) -> Result<(), Error> {
        // Only proceed if the new checkpoint is better than our current checkpoint.
        if state.current_justified_checkpoint.epoch > self.current.justified.epoch
            && state.finalized_checkpoint.epoch >= self.current.finalized.epoch
        {
            let candidate = FFGCheckpoints {
                justified: CheckpointWithBalances {
                    epoch: state.current_justified_checkpoint.epoch,
                    root: state.current_justified_checkpoint.root,
                    balances: self
                        .get_balances_for_block(state.current_justified_checkpoint.root, chain)?,
                },
                finalized: state.finalized_checkpoint.clone(),
            };

            // Using the given `state`, determine its ancestor at the slot of our current justified
            // epoch. Later, this will be compared to the root of the current justified checkpoint
            // to determine if this state is descendant of our current justified state.
            let new_checkpoint_ancestor = Self::get_block_root_at_slot(
                state,
                chain,
                candidate.justified.root,
                self.current
                    .justified
                    .epoch
                    .start_slot(T::EthSpec::slots_per_epoch()),
            )?;

            let candidate_justified_block_slot = proto_array
                .block_slot(&candidate.justified.root)
                .ok_or_else(|| Error::UnknownBlockSlot(candidate.justified.root))?;

            // If the new justified checkpoint is an ancestor of the current justified checkpoint,
            // it is always safe to change it.
            if new_checkpoint_ancestor == Some(self.current.justified.root)
                && candidate_justified_block_slot
                    >= candidate
                        .justified
                        .epoch
                        .start_slot(T::EthSpec::slots_per_epoch())
            {
                self.current = candidate.clone()
            }

            if candidate.justified.epoch > self.best.justified.epoch {
                // Always update the best checkpoint, if it's better.
                self.best = candidate;
            }

            // Add the state's balances to the balances cache to avoid a state read later.
            self.balances_cache.process_state(block_root, state)?;
        }

        Ok(())
    }

    fn get_balances_for_block<T: BeaconChainTypes>(
        &mut self,
        block_root: Hash256,
        chain: &BeaconChain<T>,
    ) -> Result<Vec<u64>, Error> {
        if let Some(balances) = self.balances_cache.get(block_root) {
            metrics::inc_counter(&metrics::BALANCES_CACHE_HITS);

            Ok(balances)
        } else {
            metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);

            let block = chain
                .get_block(&block_root)?
                .ok_or_else(|| Error::UnknownJustifiedBlock(block_root))?;

            let state = chain
                .get_state(&block.state_root(), Some(block.slot()))?
                .ok_or_else(|| Error::UnknownJustifiedState(block.state_root()))?;

            Ok(get_effective_balances(&state))
        }
    }

    /// Attempts to get the block root for the given `slot`.
    ///
    /// First, the `state` is used to see if the slot is within the distance of its historical
    /// lists. Then, the `chain` is used which will anchor the search at the given
    /// `justified_root`.
    fn get_block_root_at_slot<T: BeaconChainTypes>(
        state: &BeaconState<T::EthSpec>,
        chain: &BeaconChain<T>,
        justified_root: Hash256,
        slot: Slot,
    ) -> Result<Option<Hash256>, Error> {
        match state.get_block_root(slot) {
            Ok(root) => Ok(Some(*root)),
            Err(_) => chain
                .get_ancestor_block_root(justified_root, slot)
                .map_err(Into::into),
        }
    }

    /// Calculate how far `slot` lies from the start of its epoch.
    fn compute_slots_since_epoch_start<T: BeaconChainTypes>(slot: Slot) -> u64 {
        let slots_per_epoch = T::EthSpec::slots_per_epoch();
        (slot - slot.epoch(slots_per_epoch).start_slot(slots_per_epoch)).as_u64()
    }
}
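`compute_slots_since_epoch_start` is just the distance into the current epoch: with 32 slots per epoch, slot 37 belongs to epoch 1, whose start slot is 32, giving 37 - 32 = 5. On plain integers:

// Plain-integer form of the helper above.
fn slots_since_epoch_start(slot: u64, slots_per_epoch: u64) -> u64 {
    slot - (slot / slots_per_epoch) * slots_per_epoch
}

// slots_since_epoch_start(37, 32) == 5
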
@@ -4,13 +4,13 @@ extern crate lazy_static;

pub mod attestation_verification;
mod beacon_chain;
mod beacon_fork_choice_store;
mod beacon_snapshot;
mod block_verification;
pub mod builder;
mod errors;
pub mod eth1_chain;
pub mod events;
mod fork_choice;
mod head_tracker;
mod metrics;
pub mod migrate;
@@ -18,7 +18,9 @@ mod naive_aggregation_pool;
mod observed_attestations;
mod observed_attesters;
mod observed_block_producers;
pub mod observed_operations;
mod persisted_beacon_chain;
mod persisted_fork_choice;
mod shuffling_cache;
mod snapshot_cache;
pub mod test_utils;
@@ -27,15 +29,15 @@ mod validator_pubkey_cache;

pub use self::beacon_chain::{
    AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, ChainSegmentResult,
    StateSkipConfig,
    ForkChoiceError, StateSkipConfig,
};
pub use self::beacon_snapshot::BeaconSnapshot;
pub use self::errors::{BeaconChainError, BlockProductionError};
pub use attestation_verification::Error as AttestationError;
pub use block_verification::{BlockError, BlockProcessingOutcome, GossipVerifiedBlock};
pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
pub use block_verification::{BlockError, GossipVerifiedBlock};
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
pub use events::EventHandler;
pub use fork_choice::ForkChoice;
pub use metrics::scrape_for_metrics;
pub use parking_lot;
pub use slot_clock;

@@ -49,10 +49,6 @@ lazy_static! {
        "beacon_block_processing_db_write_seconds",
        "Time spent writing a newly processed block and state to DB"
    );
    pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result<Histogram> = try_create_histogram(
        "beacon_block_processing_fork_choice_register_seconds",
        "Time spent registering the new block with fork choice (but not finding head)"
    );
    pub static ref BLOCK_PROCESSING_ATTESTATION_OBSERVATION: Result<Histogram> = try_create_histogram(
        "beacon_block_processing_attestation_observation_seconds",
        "Time spent hashing and remembering all the attestations in the block"
@@ -115,10 +111,6 @@ lazy_static! {
    /*
     * General Attestation Processing
     */
    pub static ref ATTESTATION_PROCESSING_APPLY_TO_FORK_CHOICE: Result<Histogram> = try_create_histogram(
        "beacon_attestation_processing_apply_to_fork_choice",
        "Time spent applying an attestation to fork choice"
    );
    pub static ref ATTESTATION_PROCESSING_APPLY_TO_AGG_POOL: Result<Histogram> = try_create_histogram(
        "beacon_attestation_processing_apply_to_agg_pool",
        "Time spent applying an attestation to the naive aggregation pool"

@@ -7,15 +7,18 @@ use std::mem;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use store::hot_cold_store::{process_finalization, HotColdDBError};
use store::iter::{ParentRootBlockIterator, RootsIterator};
use store::{hot_cold_store::HotColdDBError, Error, SimpleDiskStore, Store, StoreOp};
pub use store::{DiskStore, MemoryStore};
use store::{Error, ItemStore, StoreOp};
pub use store::{HotColdDB, MemoryStore};
use types::*;
use types::{BeaconState, EthSpec, Hash256, Slot};

/// Trait for migration processes that update the database upon finalization.
pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
    fn new(db: Arc<S>, log: Logger) -> Self;
pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
    Send + Sync + 'static
{
    fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self;

    fn process_finalization(
        &self,
@@ -29,18 +32,23 @@ pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
    }

    /// Traverses live heads and prunes blocks and states of chains that we know can't be built
    /// upon because finalization would prohibit it. This is a optimisation intended to save disk
    /// upon because finalization would prohibit it. This is an optimisation intended to save disk
    /// space.
    ///
    /// Assumptions:
    /// * It is called after every finalization.
    fn prune_abandoned_forks(
        store: Arc<S>,
        store: Arc<HotColdDB<E, Hot, Cold>>,
        head_tracker: Arc<HeadTracker>,
        old_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_slot: Slot,
    ) -> Result<(), BeaconChainError> {
        // There will never be any blocks to prune if there is only a single head in the chain.
        if head_tracker.heads().len() == 1 {
            return Ok(());
        }

        let old_finalized_slot = store
            .get_block(&old_finalized_block_hash.into())?
            .ok_or_else(|| BeaconChainError::MissingBeaconBlock(old_finalized_block_hash.into()))?
@@ -83,9 +91,10 @@ pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
            .ok_or_else(|| BeaconStateError::MissingBeaconBlock(head_hash.into()))?
            .state_root();

        let iterator = std::iter::once((head_hash, head_state_hash, head_slot))
        let iter = std::iter::once(Ok((head_hash, head_state_hash, head_slot)))
            .chain(RootsIterator::from_block(Arc::clone(&store), head_hash)?);
        for (block_hash, state_hash, slot) in iterator {
        for maybe_tuple in iter {
            let (block_hash, state_hash, slot) = maybe_tuple?;
            if slot < old_finalized_slot {
                // We must assume here any candidate chains include old_finalized_block_hash,
                // i.e. there aren't any forks starting at a block that is a strict ancestor of
@@ -143,16 +152,16 @@ pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
            }
        }

        let batch: Vec<StoreOp> = abandoned_blocks
        let batch: Vec<StoreOp<E>> = abandoned_blocks
            .into_iter()
            .map(|block_hash| StoreOp::DeleteBlock(block_hash))
            .map(StoreOp::DeleteBlock)
            .chain(
                abandoned_states
                    .into_iter()
                    .map(|(slot, state_hash)| StoreOp::DeleteState(state_hash, slot)),
            )
            .collect();
        store.do_atomically(&batch)?;
        store.do_atomically(batch)?;
        for head_hash in abandoned_heads.into_iter() {
            head_tracker.remove_head(head_hash);
        }
@@ -164,14 +173,8 @@ pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
/// Migrator that does nothing, for stores that don't need migration.
pub struct NullMigrator;

impl<E: EthSpec> Migrate<SimpleDiskStore<E>, E> for NullMigrator {
    fn new(_: Arc<SimpleDiskStore<E>>, _: Logger) -> Self {
        NullMigrator
    }
}

impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
    fn new(_: Arc<MemoryStore<E>>, _: Logger) -> Self {
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold> for NullMigrator {
    fn new(_: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
        NullMigrator
    }
}
@@ -179,12 +182,14 @@ impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
/// Migrator that immediately calls the store's migration function, blocking the current execution.
///
/// Mostly useful for tests.
pub struct BlockingMigrator<S> {
    db: Arc<S>,
pub struct BlockingMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
    db: Arc<HotColdDB<E, Hot, Cold>>,
}

impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
    fn new(db: Arc<S>, _: Logger) -> Self {
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
    for BlockingMigrator<E, Hot, Cold>
{
    fn new(db: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
        BlockingMigrator { db }
    }

@@ -197,7 +202,7 @@ impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
        old_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_block_hash: SignedBeaconBlockHash,
    ) {
        if let Err(e) = S::process_finalization(self.db.clone(), state_root, &new_finalized_state) {
        if let Err(e) = process_finalization(self.db.clone(), state_root, &new_finalized_state) {
            // This migrator is only used for testing, so we just log to stderr without a logger.
            eprintln!("Migration error: {:?}", e);
        }
@@ -224,14 +229,16 @@ type MpscSender<E> = mpsc::Sender<(
)>;

/// Migrator that runs a background thread to migrate state from the hot to the cold database.
pub struct BackgroundMigrator<E: EthSpec> {
    db: Arc<DiskStore<E>>,
pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
    db: Arc<HotColdDB<E, Hot, Cold>>,
    tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
    log: Logger,
}

impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
    fn new(db: Arc<DiskStore<E>>, log: Logger) -> Self {
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
    for BackgroundMigrator<E, Hot, Cold>
{
    fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self {
        let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone()));
        Self { db, tx_thread, log }
    }
@@ -282,18 +289,19 @@ impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
    }
}

impl<E: EthSpec> BackgroundMigrator<E> {
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Hot, Cold> {
    /// Return true if a migration needs to be performed, given a new `finalized_slot`.
    fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool {
        let finality_distance = finalized_slot - self.db.get_split_slot();
        finality_distance > max_finality_distance
    }

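`needs_migration` is a distance check against the hot/cold split point: migrate once finality has moved more than `max_finality_distance` slots past the split slot. On plain integers (all values illustrative):

// The same check on plain integers.
fn needs_migration(finalized_slot: u64, split_slot: u64, max_distance: u64) -> bool {
    finalized_slot - split_slot > max_distance
}

// needs_migration(1100, 1024, 64) == true  (distance 76)
// needs_migration(1080, 1024, 64) == false (distance 56)
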
#[allow(clippy::type_complexity)]
|
||||
/// Spawn a new child thread to run the migration process.
|
||||
///
|
||||
/// Return a channel handle for sending new finalized states to the thread.
|
||||
fn spawn_thread(
|
||||
db: Arc<DiskStore<E>>,
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
log: Logger,
|
||||
) -> (
|
||||
mpsc::Sender<(
|
||||
@@ -317,7 +325,7 @@ impl<E: EthSpec> BackgroundMigrator<E> {
|
||||
new_finalized_slot,
|
||||
)) = rx.recv()
|
||||
{
|
||||
match DiskStore::process_finalization(db.clone(), state_root, &state) {
|
||||
match process_finalization(db.clone(), state_root, &state) {
|
||||
Ok(()) => {}
|
||||
Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
|
||||
debug!(
|
||||
|
||||
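Across these hunks the migrator types drop their single whole-store parameter (`S: Store<E>` or a concrete `MemoryStore`/`DiskStore`) in favour of one `HotColdDB` parameterised by separate hot and cold `ItemStore` backends. A self-contained sketch of that shape; `EthSpec`, `ItemStore`, and `HotColdDB` below are stand-in definitions, not the real Lighthouse types:

```rust
use std::marker::PhantomData;
use std::sync::Arc;

trait EthSpec {}
trait ItemStore<E: EthSpec> {}

// Stand-in for the real HotColdDB: one database type, two storage backends.
struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
    _phantom: PhantomData<(E, Hot, Cold)>,
}

// After the refactor the trait is generic over both backends, so a single
// impl covers memory-backed tests and disk-backed production alike.
trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>: Sized {
    fn new(db: Arc<HotColdDB<E, Hot, Cold>>) -> Self;
}

struct NullMigrator;

impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold> for NullMigrator {
    fn new(_: Arc<HotColdDB<E, Hot, Cold>>) -> Self {
        NullMigrator
    }
}

fn main() {
    struct Mainnet;
    impl EthSpec for Mainnet {}
    struct Mem;
    impl ItemStore<Mainnet> for Mem {}

    // The backend choice is made once, at the type level.
    let db = Arc::new(HotColdDB::<Mainnet, Mem, Mem> {
        _phantom: PhantomData,
    });
    let _migrator = NullMigrator::new(db);
}
```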
@@ -1,5 +1,4 @@
 use crate::metrics;
-use parking_lot::RwLock;
 use std::collections::HashMap;
 use types::{Attestation, AttestationData, EthSpec, Slot};
 
@@ -120,6 +119,11 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
         Ok(self.map.get(data).cloned())
     }
 
+    /// Iterate all attestations in `self`.
+    pub fn iter(&self) -> impl Iterator<Item = &Attestation<E>> {
+        self.map.iter().map(|(_key, attestation)| attestation)
+    }
+
     pub fn len(&self) -> usize {
         self.map.len()
     }
@@ -147,15 +151,15 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
 /// than that will also be refused. Pruning is done automatically based upon the attestations it
 /// receives and it can be triggered manually.
 pub struct NaiveAggregationPool<E: EthSpec> {
-    lowest_permissible_slot: RwLock<Slot>,
-    maps: RwLock<HashMap<Slot, AggregatedAttestationMap<E>>>,
+    lowest_permissible_slot: Slot,
+    maps: HashMap<Slot, AggregatedAttestationMap<E>>,
 }
 
 impl<E: EthSpec> Default for NaiveAggregationPool<E> {
     fn default() -> Self {
         Self {
-            lowest_permissible_slot: RwLock::new(Slot::new(0)),
-            maps: RwLock::new(HashMap::new()),
+            lowest_permissible_slot: Slot::new(0),
+            maps: HashMap::new(),
         }
     }
 }
@@ -168,10 +172,10 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
     ///
    /// The pool may be pruned if the given `attestation.data` has a slot higher than any
    /// previously seen.
-    pub fn insert(&self, attestation: &Attestation<E>) -> Result<InsertOutcome, Error> {
+    pub fn insert(&mut self, attestation: &Attestation<E>) -> Result<InsertOutcome, Error> {
         let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_INSERT);
         let slot = attestation.data.slot;
-        let lowest_permissible_slot: Slot = *self.lowest_permissible_slot.read();
+        let lowest_permissible_slot = self.lowest_permissible_slot;
 
         // Reject any attestations that are too old.
         if slot < lowest_permissible_slot {
@@ -183,16 +187,16 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
 
-        let lock_timer =
-            metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_MAPS_WRITE_LOCK);
-        let mut maps = self.maps.write();
-        drop(lock_timer);
-
-        let outcome = if let Some(map) = maps.get_mut(&slot) {
+        let outcome = if let Some(map) = self.maps.get_mut(&slot) {
             map.insert(attestation)
         } else {
             let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP);
             // To avoid re-allocations, try and determine a rough initial capacity for the new item
             // by obtaining the mean size of all items in earlier epoch.
-            let (count, sum) = maps
+            let (count, sum) = self
+                .maps
                 .iter()
                 // Only include epochs that are less than the given slot in the average. This should
                 // generally avoid including recent epochs that are still "filling up".
@@ -205,12 +209,11 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
 
             let mut item = AggregatedAttestationMap::new(initial_capacity);
             let outcome = item.insert(attestation);
-            maps.insert(slot, item);
+            self.maps.insert(slot, item);
 
             outcome
         };
 
-        drop(maps);
         self.prune(slot);
 
         outcome
@@ -219,16 +222,20 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
     /// Returns an aggregated `Attestation` with the given `data`, if any.
     pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> {
         self.maps
-            .read()
             .iter()
             .find(|(slot, _map)| **slot == data.slot)
             .map(|(_slot, map)| map.get(data))
             .unwrap_or_else(|| Ok(None))
     }
 
+    /// Iterate all attestations in all slots of `self`.
+    pub fn iter(&self) -> impl Iterator<Item = &Attestation<E>> {
+        self.maps.iter().map(|(_slot, map)| map.iter()).flatten()
+    }
+
     /// Removes any attestations with a slot lower than `current_slot` and bars any future
     /// attestations with a slot lower than `current_slot - SLOTS_RETAINED`.
-    pub fn prune(&self, current_slot: Slot) {
+    pub fn prune(&mut self, current_slot: Slot) {
         let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_PRUNE);
 
         // Taking advantage of saturating subtraction on `Slot`.
@@ -236,30 +243,34 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
 
         // No need to prune if the lowest permissible slot has not changed and the queue length is
         // less than the maximum
-        if *self.lowest_permissible_slot.read() == lowest_permissible_slot
-            && self.maps.read().len() <= SLOTS_RETAINED
+        if self.lowest_permissible_slot == lowest_permissible_slot
+            && self.maps.len() <= SLOTS_RETAINED
         {
             return;
         }
 
-        *self.lowest_permissible_slot.write() = lowest_permissible_slot;
-        let mut maps = self.maps.write();
+        self.lowest_permissible_slot = lowest_permissible_slot;
 
         // Remove any maps that are definitely expired.
-        maps.retain(|slot, _map| *slot >= lowest_permissible_slot);
+        self.maps
+            .retain(|slot, _map| *slot >= lowest_permissible_slot);
 
         // If we have too many maps, remove the lowest amount to ensure we only have
         // `SLOTS_RETAINED` left.
-        if maps.len() > SLOTS_RETAINED {
-            let mut slots = maps.iter().map(|(slot, _map)| *slot).collect::<Vec<_>>();
+        if self.maps.len() > SLOTS_RETAINED {
+            let mut slots = self
+                .maps
+                .iter()
+                .map(|(slot, _map)| *slot)
+                .collect::<Vec<_>>();
             // Sort is generally pretty slow, however `SLOTS_RETAINED` is quite low so it should be
             // negligible.
             slots.sort_unstable();
             slots
                 .into_iter()
-                .take(maps.len().saturating_sub(SLOTS_RETAINED))
+                .take(self.maps.len().saturating_sub(SLOTS_RETAINED))
                .for_each(|slot| {
-                    maps.remove(&slot);
+                    self.maps.remove(&slot);
                })
        }
    }
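The hunks above strip the pool's interior mutability: the `RwLock` wrappers come off the fields, `insert`/`prune` take `&mut self`, and synchronisation moves to whoever owns the pool. A minimal sketch of the pattern with stand-in types, using a single outer `std::sync::RwLock` at the call site:

```rust
use std::collections::HashMap;
use std::sync::RwLock;

type Slot = u64;
type Attestation = String; // stand-in for the real attestation type

#[derive(Default)]
struct NaiveAggregationPool {
    lowest_permissible_slot: Slot,
    maps: HashMap<Slot, Vec<Attestation>>,
}

impl NaiveAggregationPool {
    // `&mut self`: no per-field locks, so no lock-ordering hazards inside the pool.
    fn insert(&mut self, slot: Slot, att: Attestation) {
        self.maps.entry(slot).or_default().push(att);
        self.prune(slot);
    }

    fn prune(&mut self, current_slot: Slot) {
        // 32 stands in for SLOTS_RETAINED.
        let lowest = current_slot.saturating_sub(32);
        self.lowest_permissible_slot = lowest;
        self.maps.retain(|slot, _| *slot >= lowest);
    }
}

fn main() {
    // Shared access is pushed to the caller: one lock around the whole pool.
    let pool = RwLock::new(NaiveAggregationPool::default());
    pool.write().unwrap().insert(1, "att".into());
    assert_eq!(pool.read().unwrap().maps.len(), 1);
}
```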
@@ -304,7 +315,7 @@ mod tests {
     fn single_attestation() {
         let mut a = get_attestation(Slot::new(0));
 
-        let pool = NaiveAggregationPool::default();
+        let mut pool = NaiveAggregationPool::default();
 
         assert_eq!(
             pool.insert(&a),
@@ -352,7 +363,7 @@ mod tests {
         sign(&mut a_0, 0, genesis_validators_root);
         sign(&mut a_1, 1, genesis_validators_root);
 
-        let pool = NaiveAggregationPool::default();
+        let mut pool = NaiveAggregationPool::default();
 
         assert_eq!(
             pool.insert(&a_0),
@@ -409,7 +420,7 @@ mod tests {
         let mut base = get_attestation(Slot::new(0));
         sign(&mut base, 0, Hash256::random());
 
-        let pool = NaiveAggregationPool::default();
+        let mut pool = NaiveAggregationPool::default();
 
         for i in 0..SLOTS_RETAINED * 2 {
             let slot = Slot::from(i);
@@ -424,22 +435,16 @@ mod tests {
 
         if i < SLOTS_RETAINED {
             let len = i + 1;
-            assert_eq!(
-                pool.maps.read().len(),
-                len,
-                "the pool should have length {}",
-                len
-            );
+            assert_eq!(pool.maps.len(), len, "the pool should have length {}", len);
         } else {
             assert_eq!(
-                pool.maps.read().len(),
+                pool.maps.len(),
                 SLOTS_RETAINED,
                 "the pool should have length SLOTS_RETAINED"
             );
 
             let mut pool_slots = pool
                 .maps
-                .read()
                 .iter()
                 .map(|(slot, _map)| *slot)
                 .collect::<Vec<_>>();
@@ -463,7 +468,7 @@ mod tests {
         let mut base = get_attestation(Slot::new(0));
         sign(&mut base, 0, Hash256::random());
 
-        let pool = NaiveAggregationPool::default();
+        let mut pool = NaiveAggregationPool::default();
 
         for i in 0..=MAX_ATTESTATIONS_PER_SLOT {
             let mut a = base.clone();
@@ -88,9 +88,9 @@ impl Item for EpochBitfield {
             .unwrap_or_else(|| {
                 self.bitfield
                     .resize(validator_index.saturating_add(1), false);
-                self.bitfield
-                    .get_mut(validator_index)
-                    .map(|mut bit| *bit = true);
+                if let Some(mut bit) = self.bitfield.get_mut(validator_index) {
+                    *bit = true;
+                }
                 false
             })
     }
@@ -197,7 +197,7 @@ impl<T: Item, E: EthSpec> AutoPruningContainer<T, E> {
             .map(|(_epoch, item)| item.len())
             .fold((0, 0), |(count, sum), len| (count + 1, sum + len));
 
-        let initial_capacity = sum.checked_div(count).unwrap_or(T::default_capacity());
+        let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity);
 
         let mut item = T::with_capacity(initial_capacity);
         item.insert(validator_index);
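The `unwrap_or` to `unwrap_or_else` change above is the usual lazy-fallback fix (clippy's `or_fun_call` lint): the eager form computes the default even when it is thrown away. A small illustration:

```rust
// Stand-in for a fallback that is non-trivial to compute.
fn default_capacity() -> usize {
    println!("computing fallback");
    128
}

fn main() {
    let measured: Option<usize> = Some(64);

    // Eager: `default_capacity()` runs (and prints) even though the Option
    // is `Some` and the result is discarded.
    let eager = measured.unwrap_or(default_capacity());

    // Lazy: the function is only called if the Option is `None`.
    let lazy = measured.unwrap_or_else(default_capacity);

    assert_eq!((eager, lazy), (64, 64));
}
```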
 beacon_node/beacon_chain/src/observed_operations.rs (new file, 104 lines)
@@ -0,0 +1,104 @@
+use derivative::Derivative;
+use parking_lot::Mutex;
+use smallvec::SmallVec;
+use state_processing::{SigVerifiedOp, VerifyOperation};
+use std::collections::HashSet;
+use std::iter::FromIterator;
+use std::marker::PhantomData;
+use types::{
+    AttesterSlashing, BeaconState, ChainSpec, EthSpec, ProposerSlashing, SignedVoluntaryExit,
+};
+
+/// Number of validator indices to store on the stack in `observed_validators`.
+pub const SMALL_VEC_SIZE: usize = 8;
+
+/// Stateful tracker for exit/slashing operations seen on the network.
+///
+/// Implements the conditions for gossip verification of exits and slashings from the P2P spec.
+#[derive(Debug, Derivative)]
+#[derivative(Default(bound = "T: ObservableOperation<E>, E: EthSpec"))]
+pub struct ObservedOperations<T: ObservableOperation<E>, E: EthSpec> {
+    /// Indices of validators for whom we have already seen an instance of an operation `T`.
+    ///
+    /// For voluntary exits, this is the set of all `signed_voluntary_exit.message.validator_index`.
+    /// For proposer slashings, this is the set of all `proposer_slashing.index`.
+    /// For attester slashings, this is the set of all validators who would be slashed by
+    /// previously seen attester slashings, i.e. those validators in the intersection of
+    /// `attestation_1.attester_indices` and `attestation_2.attester_indices`.
+    observed_validator_indices: Mutex<HashSet<u64>>,
+    _phantom: PhantomData<(T, E)>,
+}
+
+/// Was the observed operation new and valid for further processing, or a useless duplicate?
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum ObservationOutcome<T> {
+    New(SigVerifiedOp<T>),
+    AlreadyKnown,
+}
+
+/// Trait for exits and slashings which can be observed using `ObservedOperations`.
+pub trait ObservableOperation<E: EthSpec>: VerifyOperation<E> + Sized {
+    /// The set of validator indices involved in this operation.
+    ///
+    /// See the comment on `observed_validator_indices` above for detail.
+    fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]>;
+}
+
+impl<E: EthSpec> ObservableOperation<E> for SignedVoluntaryExit {
+    fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
+        std::iter::once(self.message.validator_index).collect()
+    }
+}
+
+impl<E: EthSpec> ObservableOperation<E> for ProposerSlashing {
+    fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
+        std::iter::once(self.signed_header_1.message.proposer_index).collect()
+    }
+}
+
+impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
+    fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
+        let attestation_1_indices =
+            HashSet::<u64>::from_iter(self.attestation_1.attesting_indices.iter().copied());
+        let attestation_2_indices =
+            HashSet::<u64>::from_iter(self.attestation_2.attesting_indices.iter().copied());
+        attestation_1_indices
+            .intersection(&attestation_2_indices)
+            .copied()
+            .collect()
+    }
+}
+
+impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> {
+    pub fn verify_and_observe(
+        &self,
+        op: T,
+        head_state: &BeaconState<E>,
+        spec: &ChainSpec,
+    ) -> Result<ObservationOutcome<T>, T::Error> {
+        let mut observed_validator_indices = self.observed_validator_indices.lock();
+        let new_validator_indices = op.observed_validators();
+
+        // If all of the new validator indices have been previously observed, short-circuit
+        // the validation. This implements the uniqueness check part of the spec, which for attester
+        // slashings reads:
+        //
+        // At least one index in the intersection of the attesting indices of each attestation has
+        // not yet been seen in any prior attester_slashing.
+        if new_validator_indices
+            .iter()
+            .all(|index| observed_validator_indices.contains(index))
+        {
+            return Ok(ObservationOutcome::AlreadyKnown);
+        }
+
+        // Validate the op using operation-specific logic (`verify_attester_slashing`, etc).
+        let verified_op = op.validate(head_state, spec)?;
+
+        // Add the relevant indices to the set of known indices to prevent processing of duplicates
+        // in the future.
+        observed_validator_indices.extend(new_validator_indices);
+
+        Ok(ObservationOutcome::New(verified_op))
+    }
+}
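The key idea in the new module is to check set membership before paying for signature verification. A self-contained sketch of that short-circuit, with a closure standing in for the real `validate` call and plain `std` types in place of the Lighthouse ones:

```rust
use std::collections::HashSet;
use std::sync::Mutex;

#[derive(Debug, PartialEq)]
enum Outcome {
    New,
    AlreadyKnown,
}

#[derive(Default)]
struct ObservedOps {
    observed: Mutex<HashSet<u64>>,
}

impl ObservedOps {
    fn verify_and_observe(
        &self,
        indices: &[u64],
        verify: impl FnOnce() -> bool, // stand-in for the signature checks
    ) -> Result<Outcome, ()> {
        let mut observed = self.observed.lock().unwrap();
        // Short-circuit: nothing new can be learned from this operation.
        if indices.iter().all(|i| observed.contains(i)) {
            return Ok(Outcome::AlreadyKnown);
        }
        // Only now pay for full verification.
        if !verify() {
            return Err(());
        }
        observed.extend(indices.iter().copied());
        Ok(Outcome::New)
    }
}

fn main() {
    let ops = ObservedOps::default();
    assert_eq!(ops.verify_and_observe(&[1, 2], || true), Ok(Outcome::New));
    // Every index already seen: verification is skipped entirely.
    assert_eq!(
        ops.verify_and_observe(&[1], || true),
        Ok(Outcome::AlreadyKnown)
    );
}
```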
@@ -1,7 +1,7 @@
 use crate::head_tracker::SszHeadTracker;
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
-use store::{DBColumn, Error as StoreError, SimpleStoreItem};
+use store::{DBColumn, Error as StoreError, StoreItem};
 use types::Hash256;
 
 #[derive(Clone, Encode, Decode)]
@@ -11,7 +11,7 @@ pub struct PersistedBeaconChain {
     pub ssz_head_tracker: SszHeadTracker,
 }
 
-impl SimpleStoreItem for PersistedBeaconChain {
+impl StoreItem for PersistedBeaconChain {
     fn db_column() -> DBColumn {
         DBColumn::BeaconChain
     }
 beacon_node/beacon_chain/src/persisted_fork_choice.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
+use crate::beacon_fork_choice_store::PersistedForkChoiceStore as ForkChoiceStore;
+use fork_choice::PersistedForkChoice as ForkChoice;
+use ssz::{Decode, Encode};
+use ssz_derive::{Decode, Encode};
+use store::{DBColumn, Error, StoreItem};
+
+#[derive(Encode, Decode)]
+pub struct PersistedForkChoice {
+    pub fork_choice: ForkChoice,
+    pub fork_choice_store: ForkChoiceStore,
+}
+
+impl StoreItem for PersistedForkChoice {
+    fn db_column() -> DBColumn {
+        DBColumn::ForkChoice
+    }
+
+    fn as_store_bytes(&self) -> Vec<u8> {
+        self.as_ssz_bytes()
+    }
+
+    fn from_store_bytes(bytes: &[u8]) -> std::result::Result<Self, Error> {
+        Self::from_ssz_bytes(bytes).map_err(Into::into)
+    }
+}
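`PersistedForkChoice` follows the `StoreItem` pattern: a type declares which column it lives in and how it round-trips through bytes. A stand-in sketch of the same shape; the byte format here is little-endian `u64`, not the SSZ encoding the real code uses:

```rust
use std::convert::TryInto;

#[derive(Debug, PartialEq)]
enum DBColumn {
    ForkChoice,
}

#[derive(Debug)]
enum Error {
    Decode,
}

trait StoreItem: Sized {
    fn db_column() -> DBColumn;
    fn as_store_bytes(&self) -> Vec<u8>;
    fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error>;
}

#[derive(Debug, PartialEq)]
struct PersistedCounter(u64); // stand-in for a persisted chain structure

impl StoreItem for PersistedCounter {
    fn db_column() -> DBColumn {
        DBColumn::ForkChoice
    }
    fn as_store_bytes(&self) -> Vec<u8> {
        self.0.to_le_bytes().to_vec()
    }
    fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
        let arr: [u8; 8] = bytes.try_into().map_err(|_| Error::Decode)?;
        Ok(PersistedCounter(u64::from_le_bytes(arr)))
    }
}

fn main() {
    // Round-trip: what the database layer does for every persisted item.
    let item = PersistedCounter(42);
    let bytes = item.as_store_bytes();
    assert_eq!(PersistedCounter::from_store_bytes(&bytes).unwrap(), item);
    assert_eq!(PersistedCounter::db_column(), DBColumn::ForkChoice);
}
```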
@@ -99,7 +99,7 @@ mod test {
     use super::*;
     use types::{
         test_utils::{generate_deterministic_keypair, TestingBeaconStateBuilder},
-        BeaconBlock, Epoch, MainnetEthSpec, Signature, SignedBeaconBlock, Slot,
+        BeaconBlock, Epoch, MainnetEthSpec, SignedBeaconBlock, Slot,
     };
 
     const CACHE_SIZE: usize = 4;
@@ -115,7 +115,9 @@ mod test {
             beacon_state_root: Hash256::from_low_u64_be(i),
             beacon_block: SignedBeaconBlock {
                 message: BeaconBlock::empty(&spec),
-                signature: Signature::new(&[42], &generate_deterministic_keypair(0).sk),
+                signature: generate_deterministic_keypair(0)
+                    .sk
+                    .sign(Hash256::from_low_u64_be(42)),
             },
             beacon_block_root: Hash256::from_low_u64_be(i),
         }
@@ -18,13 +18,13 @@ use std::borrow::Cow;
 use std::collections::HashMap;
 use std::sync::Arc;
 use std::time::Duration;
-use store::{DiskStore, MemoryStore, Store};
+use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore};
 use tempfile::{tempdir, TempDir};
 use tree_hash::TreeHash;
 use types::{
     AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, EthSpec,
-    Hash256, Keypair, SecretKey, SelectionProof, Signature, SignedAggregateAndProof,
-    SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot, Slot,
+    Hash256, Keypair, SecretKey, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock,
+    SignedBeaconBlockHash, SignedRoot, Slot, SubnetId,
 };
 
 pub use types::test_utils::generate_deterministic_keypairs;
@@ -34,17 +34,19 @@ pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;
 // This parameter is required by a builder but not used because we use the `TestingSlotClock`.
 pub const HARNESS_SLOT_TIME: Duration = Duration::from_secs(1);
 
-pub type BaseHarnessType<TStore, TStoreMigrator, TEthSpec> = Witness<
-    TStore,
+pub type BaseHarnessType<TStoreMigrator, TEthSpec, THotStore, TColdStore> = Witness<
     TStoreMigrator,
     TestingSlotClock,
-    CachingEth1Backend<TEthSpec, TStore>,
+    CachingEth1Backend<TEthSpec>,
     TEthSpec,
     NullEventHandler<TEthSpec>,
+    THotStore,
+    TColdStore,
 >;
 
-pub type HarnessType<E> = BaseHarnessType<MemoryStore<E>, NullMigrator, E>;
-pub type DiskHarnessType<E> = BaseHarnessType<DiskStore<E>, BlockingMigrator<DiskStore<E>>, E>;
+pub type HarnessType<E> = BaseHarnessType<NullMigrator, E, MemoryStore<E>, MemoryStore<E>>;
+pub type DiskHarnessType<E> =
+    BaseHarnessType<BlockingMigrator<E, LevelDB<E>, LevelDB<E>>, E, LevelDB<E>, LevelDB<E>>;
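The `Witness` change swaps the whole-store parameter for separate hot and cold item-store parameters, and the two aliases each pin a configuration once so test code never spells out the full list. A compilable sketch of the same aliasing trick; every name below is a stand-in for the real Lighthouse type:

```rust
use std::marker::PhantomData;

trait EthSpec {}
trait ItemStore<E: EthSpec> {}

struct MemoryStore<E: EthSpec>(PhantomData<E>);
struct LevelDB<E: EthSpec>(PhantomData<E>);
impl<E: EthSpec> ItemStore<E> for MemoryStore<E> {}
impl<E: EthSpec> ItemStore<E> for LevelDB<E> {}

// Stand-in for the real Witness: migrator + spec + hot/cold backends.
struct Witness<M, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    PhantomData<(M, E, Hot, Cold)>,
);

struct NullMigrator;
struct BlockingMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    PhantomData<(E, Hot, Cold)>,
);

// The aliases choose a concrete configuration exactly once.
type HarnessType<E> = Witness<NullMigrator, E, MemoryStore<E>, MemoryStore<E>>;
type DiskHarnessType<E> =
    Witness<BlockingMigrator<E, LevelDB<E>, LevelDB<E>>, E, LevelDB<E>, LevelDB<E>>;

struct MainnetEthSpec;
impl EthSpec for MainnetEthSpec {}

fn main() {
    // Using the aliases: the concrete type is fully determined by `E`.
    let _memory: Option<HarnessType<MainnetEthSpec>> = None;
    let _disk: Option<DiskHarnessType<MainnetEthSpec>> = None;
}
```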
 /// Indicates how the `BeaconChainHarness` should produce blocks.
 #[derive(Clone, Copy, Debug)]
@@ -84,12 +86,12 @@ pub struct BeaconChainHarness<T: BeaconChainTypes> {
 
 impl<E: EthSpec> BeaconChainHarness<HarnessType<E>> {
     /// Instantiate a new harness with `validator_count` initial validators.
-    pub fn new(eth_spec_instance: E, keypairs: Vec<Keypair>) -> Self {
+    pub fn new(eth_spec_instance: E, keypairs: Vec<Keypair>, config: StoreConfig) -> Self {
         // Setting the target aggregators to really high means that _all_ validators in the
         // committee are required to produce an aggregate. This is overkill, however with small
         // validator counts it's the only way to be certain there is _at least one_ aggregator per
         // committee.
-        Self::new_with_target_aggregators(eth_spec_instance, keypairs, 1 << 32)
+        Self::new_with_target_aggregators(eth_spec_instance, keypairs, 1 << 32, config)
     }
 
     /// Instantiate a new harness with `validator_count` initial validators and a custom
@@ -98,6 +100,7 @@ impl<E: EthSpec> BeaconChainHarness<HarnessType<E>> {
         eth_spec_instance: E,
         keypairs: Vec<Keypair>,
         target_aggregators_per_committee: u64,
+        config: StoreConfig,
     ) -> Self {
         let data_dir = tempdir().expect("should create temporary data_dir");
         let mut spec = E::default_spec();
@@ -105,11 +108,11 @@ impl<E: EthSpec> BeaconChainHarness<HarnessType<E>> {
         spec.target_aggregators_per_committee = target_aggregators_per_committee;
 
         let log = NullLoggerBuilder.build().expect("logger should build");
 
+        let store = HotColdDB::open_ephemeral(config, spec.clone(), log.clone()).unwrap();
         let chain = BeaconChainBuilder::new(eth_spec_instance)
-            .logger(log.clone())
+            .logger(log)
             .custom_spec(spec.clone())
-            .store(Arc::new(MemoryStore::open()))
+            .store(Arc::new(store))
             .store_migrator(NullMigrator)
             .data_dir(data_dir.path().to_path_buf())
             .genesis_state(
@@ -122,8 +125,6 @@ impl<E: EthSpec> BeaconChainHarness<HarnessType<E>> {
             .null_event_handler()
             .testing_slot_clock(HARNESS_SLOT_TIME)
             .expect("should configure testing slot clock")
-            .reduced_tree_fork_choice()
-            .expect("should add fork choice to builder")
             .build()
             .expect("should build");
 
@@ -140,7 +141,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
     /// Instantiate a new harness with `validator_count` initial validators.
     pub fn new_with_disk_store(
         eth_spec_instance: E,
-        store: Arc<DiskStore<E>>,
+        store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
         keypairs: Vec<Keypair>,
     ) -> Self {
         let data_dir = tempdir().expect("should create temporary data_dir");
@@ -152,10 +153,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
             .logger(log.clone())
             .custom_spec(spec.clone())
             .store(store.clone())
-            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(
-                store,
-                log.clone(),
-            ))
+            .store_migrator(BlockingMigrator::new(store, log.clone()))
             .data_dir(data_dir.path().to_path_buf())
             .genesis_state(
                 interop_genesis_state::<E>(&keypairs, HARNESS_GENESIS_TIME, &spec)
@@ -167,8 +165,6 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
             .null_event_handler()
             .testing_slot_clock(HARNESS_SLOT_TIME)
             .expect("should configure testing slot clock")
-            .reduced_tree_fork_choice()
-            .expect("should add fork choice to builder")
             .build()
             .expect("should build");
 
@@ -183,7 +179,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
     /// Instantiate a new harness with `validator_count` initial validators.
     pub fn resume_from_disk_store(
         eth_spec_instance: E,
-        store: Arc<DiskStore<E>>,
+        store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
         keypairs: Vec<Keypair>,
         data_dir: TempDir,
     ) -> Self {
@@ -195,7 +191,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
             .logger(log.clone())
             .custom_spec(spec)
             .store(store.clone())
-            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(
+            .store_migrator(<BlockingMigrator<_, _, _> as Migrate<E, _, _>>::new(
                 store,
                 log.clone(),
             ))
@@ -207,8 +203,6 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
             .null_event_handler()
             .testing_slot_clock(Duration::from_secs(1))
             .expect("should configure testing slot clock")
-            .reduced_tree_fork_choice()
-            .expect("should add fork choice to builder")
             .build()
             .expect("should build");
 
@@ -221,11 +215,12 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
     }
 }
 
-impl<S, M, E> BeaconChainHarness<BaseHarnessType<S, M, E>>
+impl<M, E, Hot, Cold> BeaconChainHarness<BaseHarnessType<M, E, Hot, Cold>>
 where
-    S: Store<E>,
-    M: Migrate<S, E>,
+    M: Migrate<E, Hot, Cold>,
     E: EthSpec,
+    Hot: ItemStore<E>,
+    Cold: ItemStore<E>,
 {
     /// Advance the slot of the `BeaconChain`.
     ///
@@ -249,6 +244,35 @@ where
         block_strategy: BlockStrategy,
         attestation_strategy: AttestationStrategy,
     ) -> Hash256 {
+        let mut i = 0;
+        self.extend_chain_while(
+            |_, _| {
+                i += 1;
+                i <= num_blocks
+            },
+            block_strategy,
+            attestation_strategy,
+        )
+    }
+
+    /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the
+    /// last-produced block (the head of the chain).
+    ///
+    /// Chain will be extended while `predicate` returns `true`.
+    ///
+    /// The `block_strategy` dictates where the new blocks will be placed.
+    ///
+    /// The `attestation_strategy` dictates which validators will attest to the newly created
+    /// blocks.
+    pub fn extend_chain_while<F>(
+        &self,
+        mut predicate: F,
+        block_strategy: BlockStrategy,
+        attestation_strategy: AttestationStrategy,
+    ) -> Hash256
+    where
+        F: FnMut(&SignedBeaconBlock<E>, &BeaconState<E>) -> bool,
+    {
         let mut state = {
             // Determine the slot for the first block (or skipped block).
             let state_slot = match block_strategy {
@@ -271,13 +295,17 @@ where
 
         let mut head_block_root = None;
 
-        for _ in 0..num_blocks {
+        loop {
+            let (block, new_state) = self.build_block(state.clone(), slot, block_strategy);
+
+            if !predicate(&block, &new_state) {
+                break;
+            }
+
             while self.chain.slot().expect("should have a slot") < slot {
                 self.advance_slot();
             }
 
-            let (block, new_state) = self.build_block(state.clone(), slot, block_strategy);
-
             let block_root = self
                 .chain
                 .process_block(block)
@@ -295,6 +323,39 @@ where
         head_block_root.expect("did not produce any blocks")
     }
 
+    /// A simple method to produce a block at the current slot without applying it to the chain.
+    ///
+    /// Always uses `BlockStrategy::OnCanonicalHead`.
+    pub fn get_block(&self) -> (SignedBeaconBlock<E>, BeaconState<E>) {
+        let state = self
+            .chain
+            .state_at_slot(
+                self.chain.slot().unwrap() - 1,
+                StateSkipConfig::WithStateRoots,
+            )
+            .unwrap();
+
+        let slot = self.chain.slot().unwrap();
+
+        self.build_block(state, slot, BlockStrategy::OnCanonicalHead)
+    }
+
+    /// A simple method to produce and process all attestations at the current slot. Always uses
+    /// `AttestationStrategy::AllValidators`.
+    pub fn generate_all_attestations(&self) {
+        let slot = self.chain.slot().unwrap();
+        let (state, block_root) = {
+            let head = self.chain.head().unwrap();
+            (head.beacon_state.clone(), head.beacon_block_root)
+        };
+        self.add_attestations_for_slot(
+            &AttestationStrategy::AllValidators,
+            &state,
+            block_root,
+            slot,
+        );
+    }
+
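The diff above reimplements `extend_chain(num_blocks, ..)` on top of the new `extend_chain_while(predicate, ..)`: a counter closure turns "run until the predicate fails" into "run exactly N times". The same shape in isolation, with a unit of work standing in for block production:

```rust
// Generic "run while" primitive, like the harness's extend_chain_while.
fn extend_while<F: FnMut() -> bool>(mut predicate: F) -> u64 {
    let mut produced = 0;
    while predicate() {
        produced += 1; // stand-in for building and processing one block
    }
    produced
}

// The fixed-count wrapper, mirroring the harness code: increment first,
// then compare, so the predicate admits exactly `num_blocks` iterations.
fn extend_n(num_blocks: u64) -> u64 {
    let mut i = 0;
    extend_while(|| {
        i += 1;
        i <= num_blocks
    })
}

fn main() {
    assert_eq!(extend_n(0), 0);
    assert_eq!(extend_n(5), 5);
}
```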
     /// Returns current canonical head slot
     pub fn get_chain_slot(&self) -> Slot {
         self.chain.slot().unwrap()
@@ -332,6 +393,7 @@ where
         (block_root.into(), new_state)
     }
 
+    #[allow(clippy::type_complexity)]
     /// `add_block()` repeated `num_blocks` times.
     pub fn add_blocks(
         &self,
@@ -361,6 +423,7 @@ where
         (blocks, states, slot, head_hash, state)
     }
 
+    #[allow(clippy::type_complexity)]
     /// A wrapper on `add_blocks()` to avoid passing enums explicitly.
     pub fn add_canonical_chain_blocks(
         &self,
@@ -385,6 +448,7 @@ where
         )
     }
 
+    #[allow(clippy::type_complexity)]
     /// A wrapper on `add_blocks()` to avoid passing enums explicitly.
     pub fn add_stray_blocks(
         &self,
@@ -443,7 +507,7 @@ where
         };
 
         let sk = &self.keypairs[proposer_index].sk;
-        let fork = &state.fork.clone();
+        let fork = &state.fork;
 
         let randao_reveal = {
             let epoch = slot.epoch(E::slots_per_epoch());
@@ -451,7 +515,7 @@ where
             self.spec
                 .get_domain(epoch, Domain::Randao, fork, state.genesis_validators_root);
             let message = epoch.signing_root(domain);
-            Signature::new(message.as_bytes(), sk)
+            sk.sign(message)
         };
 
         let (block, state) = self
@@ -475,12 +539,16 @@ where
         state: &BeaconState<E>,
         head_block_root: Hash256,
         attestation_slot: Slot,
-    ) -> Vec<Vec<Attestation<E>>> {
+    ) -> Vec<Vec<(Attestation<E>, SubnetId)>> {
         let spec = &self.spec;
         let fork = &state.fork;
 
         let attesting_validators = self.get_attesting_validators(attestation_strategy);
 
+        let committee_count = state
+            .get_committee_count_at_slot(state.slot)
+            .expect("should get committee count");
+
         state
             .get_beacon_committees_at_slot(state.slot)
             .expect("should get committees")
@@ -518,17 +586,21 @@ where
 
             let message = attestation.data.signing_root(domain);
 
-            let mut agg_sig = AggregateSignature::new();
+            let mut agg_sig = AggregateSignature::infinity();
 
-            agg_sig.add(&Signature::new(
-                message.as_bytes(),
-                self.get_sk(*validator_index),
-            ));
+            agg_sig.add_assign(&self.get_sk(*validator_index).sign(message));
 
             agg_sig
         };
 
-        Some(attestation)
+        let subnet_id = SubnetId::compute_subnet_for_attestation_data::<E>(
+            &attestation.data,
+            committee_count,
+            &self.chain.spec,
+        )
+        .expect("should get subnet_id");
+
+        Some((attestation, subnet_id))
     })
     .collect()
 })
@@ -573,16 +645,16 @@ where
             .into_iter()
             .for_each(|committee_attestations| {
                 // Submit each unaggregated attestation to the chain.
-                for attestation in &committee_attestations {
+                for (attestation, subnet_id) in &committee_attestations {
                     self.chain
-                        .verify_unaggregated_attestation_for_gossip(attestation.clone())
+                        .verify_unaggregated_attestation_for_gossip(attestation.clone(), *subnet_id)
                         .expect("should not error during attestation processing")
                         .add_to_pool(&self.chain)
                         .expect("should add attestation to naive pool");
                 }
 
                 // If there are any attestations in this committee, create an aggregate.
-                if let Some(attestation) = committee_attestations.first() {
+                if let Some((attestation, _)) = committee_attestations.first() {
                     let bc = state.get_beacon_committee(attestation.data.slot, attestation.data.index)
                         .expect("should get committee");
 
@@ -604,7 +676,7 @@ where
                         selection_proof.is_aggregator(bc.committee.len(), spec).unwrap_or(false)
                     })
                    .copied()
-                    .expect(&format!(
+                    .unwrap_or_else(|| panic!(
                         "Committee {} at slot {} with {} attesting validators does not have any aggregators",
                         bc.index, state.slot, bc.committee.len()
                     ));
@@ -616,7 +688,7 @@ where
                         .get_aggregated_attestation(&attestation.data)
                         .expect("should not error whilst finding aggregate")
                         .unwrap_or_else(|| {
-                            committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, att| {
+                            committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| {
                                agg.aggregate(att);
                                agg
                            })
@@ -632,14 +704,16 @@ where
                         spec,
                     );
 
-                    self.chain
+                    let attn = self.chain
                         .verify_aggregated_attestation_for_gossip(signed_aggregate)
                         .expect("should not error during attestation processing")
                         .add_to_pool(&self.chain)
-                        .expect("should add attestation to naive aggregation pool")
-                        .add_to_fork_choice(&self.chain)
-                        .expect("should not error during attestation processing");
+                        .expect("should add attestation to naive aggregation pool");
+
+                    self.chain.apply_attestation_to_fork_choice(&attn)
+                        .expect("should add attestation to fork choice");
+
+                    self.chain.add_to_block_inclusion_pool(attn)
+                        .expect("should add attestation to op pool");
                 }
             });
     }
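Two BLS API changes recur through these hunks: `Signature::new(bytes, sk)` becomes `sk.sign(message)`, and aggregates now start from `AggregateSignature::infinity()` (the group identity) and grow via `add_assign`. A toy model of that aggregation flow; XOR stands in for real elliptic-curve group addition, purely for illustration:

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
struct Signature(u64); // stand-in for a G2 point

struct SecretKey(u64);

impl SecretKey {
    // Stand-in for the new `sk.sign(message)` shape.
    fn sign(&self, message: u64) -> Signature {
        Signature(self.0 ^ message)
    }
}

#[derive(Debug, PartialEq)]
struct AggregateSignature(u64);

impl AggregateSignature {
    /// The identity element: aggregating nothing yields this.
    fn infinity() -> Self {
        AggregateSignature(0)
    }
    /// Fold one more signature into the aggregate.
    fn add_assign(&mut self, sig: &Signature) {
        self.0 ^= sig.0;
    }
}

fn main() {
    let message = 42;
    let mut agg = AggregateSignature::infinity();
    for sk in [SecretKey(1), SecretKey(2)] {
        agg.add_assign(&sk.sign(message));
    }
    // XOR stand-in: (1 ^ 42) ^ (2 ^ 42) == 1 ^ 2 == 3.
    assert_eq!(agg, AggregateSignature(3));
}
```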
@@ -138,8 +138,9 @@ struct ValidatorPubkeyCacheFile(File);
 
 #[derive(Debug)]
 enum Error {
-    IoError(io::Error),
-    SszError(DecodeError),
+    Io(io::Error),
+    Ssz(DecodeError),
+    PubkeyDecode(bls::Error),
     /// The file read from disk does not have a contiguous list of validator public keys. The file
     /// has become corrupted.
     InconsistentIndex {
@@ -162,7 +163,7 @@ impl ValidatorPubkeyCacheFile {
             .write(true)
             .open(path)
             .map(Self)
-            .map_err(Error::IoError)
+            .map_err(Error::Io)
     }
 
     /// Opens an existing file for reading and writing.
@@ -174,7 +175,7 @@ impl ValidatorPubkeyCacheFile {
             .append(true)
             .open(path)
             .map(Self)
-            .map_err(Error::IoError)
+            .map_err(Error::Io)
     }
 
     /// Append a public key to file.
@@ -188,10 +189,9 @@ impl ValidatorPubkeyCacheFile {
     /// Creates a `ValidatorPubkeyCache` by reading and parsing the underlying file.
     pub fn into_cache(mut self) -> Result<ValidatorPubkeyCache, Error> {
         let mut bytes = vec![];
-        self.0.read_to_end(&mut bytes).map_err(Error::IoError)?;
+        self.0.read_to_end(&mut bytes).map_err(Error::Io)?;
 
-        let list: Vec<(usize, PublicKeyBytes)> =
-            Vec::from_ssz_bytes(&bytes).map_err(Error::SszError)?;
+        let list: Vec<(usize, PublicKeyBytes)> = Vec::from_ssz_bytes(&bytes).map_err(Error::Ssz)?;
 
         let mut last = None;
         let mut pubkeys = Vec::with_capacity(list.len());
@@ -201,7 +201,7 @@ impl ValidatorPubkeyCacheFile {
             let expected = last.map(|n| n + 1);
             if expected.map_or(true, |expected| index == expected) {
                 last = Some(index);
-                pubkeys.push((&pubkey).try_into().map_err(Error::SszError)?);
+                pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?);
                 indices.insert(pubkey, index);
             } else {
                 return Err(Error::InconsistentIndex {
@@ -225,7 +225,7 @@ fn append_to_file(file: &mut File, index: usize, pubkey: &PublicKeyBytes) -> Res
     index.ssz_append(&mut line);
     pubkey.ssz_append(&mut line);
 
-    file.write_all(&mut line).map_err(Error::IoError)
+    file.write_all(&line).map_err(Error::Io)
 }
 
 #[cfg(test)]
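The `IoError`/`SszError` to `Io`/`Ssz` rename follows the convention (checked by clippy's `enum_variant_names` lint) that variants should not restate the enum's name; it also reads more cleanly at `map_err` call sites. A minimal before/after illustration with a stand-in `Ssz` payload:

```rust
use std::io;

#[allow(dead_code)]
#[derive(Debug)]
enum Error {
    Io(io::Error), // was: IoError(io::Error)
    Ssz(String),   // was: SszError(DecodeError); String is a stand-in payload
}

fn read() -> Result<Vec<u8>, Error> {
    // `Error::Io` reads naturally at the call site: `.map_err(Error::Io)`.
    std::fs::read("/nonexistent").map_err(Error::Io)
}

fn main() {
    match read() {
        Err(Error::Io(e)) => println!("io error as expected: {}", e),
        Err(Error::Ssz(e)) => println!("ssz error: {}", e),
        Ok(_) => println!("unexpectedly read a file"),
    }
}
```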
@@ -7,6 +7,7 @@ use beacon_chain::{
     test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
     StateSkipConfig,
 };
+use store::config::StoreConfig;
 use tree_hash::TreeHash;
 use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot};
 
@@ -25,7 +26,11 @@ lazy_static! {
 fn produces_attestations() {
     let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4;
 
-    let harness = BeaconChainHarness::new(MainnetEthSpec, KEYPAIRS[..].to_vec());
+    let harness = BeaconChainHarness::new(
+        MainnetEthSpec,
+        KEYPAIRS[..].to_vec(),
+        StoreConfig::default(),
+    );
 
     // Skip past the genesis slot.
     harness.advance_slot();
@@ -106,7 +111,7 @@ fn produces_attestations() {
             );
             assert_eq!(
                 attestation.signature,
-                AggregateSignature::empty_signature(),
+                AggregateSignature::empty(),
                 "bad signature"
             );
             assert_eq!(data.index, index, "bad index");
@@ -8,13 +8,14 @@ use beacon_chain::{
     test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
     BeaconChain, BeaconChainTypes,
 };
+use int_to_bytes::int_to_bytes32;
 use state_processing::per_slot_processing;
-use store::Store;
+use store::config::StoreConfig;
 use tree_hash::TreeHash;
 use types::{
     test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, EthSpec, Hash256,
-    Keypair, MainnetEthSpec, SecretKey, SelectionProof, Signature, SignedAggregateAndProof,
-    SignedBeaconBlock, Unsigned,
+    Keypair, MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock,
+    SubnetId, Unsigned,
 };
 
 pub type E = MainnetEthSpec;
@@ -36,6 +37,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
         // A kind-of arbitrary number that ensures that _some_ validators are aggregators, but
         // not all.
         4,
+        StoreConfig::default(),
     );
 
     harness.advance_slot();
@@ -48,7 +50,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
 /// Also returns some info about who created it.
 fn get_valid_unaggregated_attestation<T: BeaconChainTypes>(
     chain: &BeaconChain<T>,
-) -> (Attestation<T::EthSpec>, usize, usize, SecretKey) {
+) -> (Attestation<T::EthSpec>, usize, usize, SecretKey, SubnetId) {
     let head = chain.head().expect("should get head");
     let current_slot = chain.slot().expect("should get slot");
 
@@ -77,11 +79,21 @@ fn get_valid_unaggregated_attestation<T: BeaconChainTypes>(
     )
     .expect("should sign attestation");
 
+    let subnet_id = SubnetId::compute_subnet_for_attestation_data::<E>(
+        &valid_attestation.data,
+        head.beacon_state
+            .get_committee_count_at_slot(current_slot)
+            .expect("should get committee count"),
+        &chain.spec,
+    )
+    .expect("should get subnet_id");
+
     (
         valid_attestation,
         validator_index,
         validator_committee_index,
         validator_sk,
+        subnet_id,
     )
 }
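Every attestation now carries the `SubnetId` it should be gossiped on. Below is a sketch of the computation, assuming the v0.12 spec formula and the mainnet constants (`ATTESTATION_SUBNET_COUNT = 64`, `SLOTS_PER_EPOCH = 32`); treat both the constants and the exact formula as assumptions rather than a restatement of the Lighthouse implementation:

```rust
const ATTESTATION_SUBNET_COUNT: u64 = 64; // assumed mainnet value
const SLOTS_PER_EPOCH: u64 = 32; // assumed mainnet value

// Assumed v0.12 spec formula: the subnet rotates with the slot's position
// inside the epoch and the committee's index within the slot.
fn compute_subnet_for_attestation(
    committees_per_slot: u64,
    slot: u64,
    committee_index: u64,
) -> u64 {
    let slots_since_epoch_start = slot % SLOTS_PER_EPOCH;
    let committees_since_epoch_start = committees_per_slot * slots_since_epoch_start;
    (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
}

fn main() {
    // With 4 committees per slot, slot 3, committee 2 => subnet 14.
    assert_eq!(compute_subnet_for_attestation(4, 3, 2), 14);
}
```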
@@ -193,23 +205,25 @@ fn aggregated_gossip_verification() {
         "the test requires a new epoch to avoid already-seen errors"
     );
 
-    let (valid_attestation, _attester_index, _attester_committee_index, validator_sk) =
+    let (valid_attestation, _attester_index, _attester_committee_index, validator_sk, _subnet_id) =
         get_valid_unaggregated_attestation(&harness.chain);
     let (valid_aggregate, aggregator_index, aggregator_sk) =
         get_valid_aggregated_attestation(&harness.chain, valid_attestation);
 
     macro_rules! assert_invalid {
-        ($desc: tt, $attn_getter: expr, $error: expr) => {
-            assert_eq!(
-                harness
-                    .chain
-                    .verify_aggregated_attestation_for_gossip($attn_getter)
-                    .err()
-                    .expect(&format!(
-                        "{} should error during verify_aggregated_attestation_for_gossip",
-                        $desc
-                    )),
-                $error,
+        ($desc: tt, $attn_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => {
+            assert!(
+                matches!(
+                    harness
+                        .chain
+                        .verify_aggregated_attestation_for_gossip($attn_getter)
+                        .err()
+                        .expect(&format!(
+                            "{} should error during verify_aggregated_attestation_for_gossip",
+                            $desc
+                        )),
+                    $( $error ) |+ $( if $guard )?
+                ),
                 "case: {}",
                 $desc,
             );
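Both test files swap `assert_eq!` on error values for `assert!(matches!(..))` with pattern guards, which works even when the error type lacks `PartialEq` and lets each case bind fields before checking them. A self-contained illustration with a stand-in error type:

```rust
enum AttnError {
    FutureSlot {
        attestation_slot: u64,
        latest_permissible_slot: u64,
    },
}

fn main() {
    let err = AttnError::FutureSlot {
        attestation_slot: 10,
        latest_permissible_slot: 7,
    };
    let future_slot = 10;
    let current_slot = 7;

    // Pattern + guard: bind the fields, then check them, exactly like the
    // rewritten assert_invalid! macro's `$($error:pat) |+ $(if $guard:expr)?`.
    assert!(
        matches!(
            err,
            AttnError::FutureSlot { attestation_slot, latest_permissible_slot }
                if attestation_slot == future_slot
                    && latest_permissible_slot == current_slot
        ),
        "case: attestation from future slot"
    );
}
```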
@@ -219,7 +233,7 @@
     /*
      * The following two tests ensure:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * aggregate.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (with a
      * MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. aggregate.data.slot +
@@ -235,10 +249,8 @@
             a.message.aggregate.data.slot = future_slot;
             a
         },
-        AttnError::FutureSlot {
-            attestation_slot: future_slot,
-            latest_permissible_slot: current_slot,
-        }
+        AttnError::FutureSlot { attestation_slot, latest_permissible_slot }
+        if attestation_slot == future_slot && latest_permissible_slot == current_slot
     );
 
     let early_slot = current_slot
@@ -254,17 +266,19 @@
             a
         },
         AttnError::PastSlot {
-            attestation_slot: early_slot,
+            attestation_slot,
             // Subtract an additional slot since the harness will be exactly on the start of the
             // slot and the propagation tolerance will allow an extra slot.
-            earliest_permissible_slot: current_slot - E::slots_per_epoch() - 1,
+            earliest_permissible_slot
         }
+        if attestation_slot == early_slot
+            && earliest_permissible_slot == current_slot - E::slots_per_epoch() - 1
     );
 
     /*
      * The following test ensures:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * The block being voted for (aggregate.data.beacon_block_root) passes validation.
      */
@@ -278,14 +292,36 @@
             a
         },
         AttnError::UnknownHeadBlock {
-            beacon_block_root: unknown_root
+            beacon_block_root
         }
+        if beacon_block_root == unknown_root
     );
 
+    /*
+     * The following test ensures:
+     *
+     * Spec v0.12.1
+     *
+     * The attestation has participants.
+     */
+
+    assert_invalid!(
+        "aggregate with no participants",
+        {
+            let mut a = valid_aggregate.clone();
+            let aggregation_bits = &mut a.message.aggregate.aggregation_bits;
+            aggregation_bits.difference_inplace(&aggregation_bits.clone());
+            assert!(aggregation_bits.is_zero());
+            a.message.aggregate.signature = AggregateSignature::infinity();
+            a
+        },
+        AttnError::EmptyAggregationBitfield
+    );
+
     /*
      * This test ensures:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * The aggregator signature, signed_aggregate_and_proof.signature, is valid.
      */
@@ -295,7 +331,7 @@
         {
             let mut a = valid_aggregate.clone();
 
-            a.signature = Signature::new(&[42, 42], &validator_sk);
+            a.signature = validator_sk.sign(Hash256::from_low_u64_be(42));
 
             a
         },
@@ -305,7 +341,7 @@
     /*
      * The following test ensures:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * The aggregate_and_proof.selection_proof is a valid signature of the aggregate.data.slot by
      * the validator with index aggregate_and_proof.aggregator_index.
@@ -335,7 +371,9 @@
             let mut i: u64 = 0;
             a.message.selection_proof = loop {
                 i += 1;
-                let proof: SelectionProof = Signature::new(&i.to_le_bytes(), &validator_sk).into();
+                let proof: SelectionProof = validator_sk
+                    .sign(Hash256::from_slice(&int_to_bytes32(i)))
+                    .into();
                 if proof
                     .is_aggregator(committee_len, &harness.chain.spec)
                     .unwrap()
@@ -352,7 +390,7 @@
     /*
      * The following test ensures:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * The signature of aggregate is valid.
      */
@@ -362,8 +400,8 @@
         {
             let mut a = valid_aggregate.clone();
 
-            let mut agg_sig = AggregateSignature::new();
-            agg_sig.add(&Signature::new(&[42, 42], &aggregator_sk));
+            let mut agg_sig = AggregateSignature::infinity();
+            agg_sig.add_assign(&aggregator_sk.sign(Hash256::from_low_u64_be(42)));
             a.message.aggregate.signature = agg_sig;
 
             a
@@ -379,17 +417,18 @@
             a.message.aggregator_index = too_high_index;
             a
         },
-        AttnError::ValidatorIndexTooHigh(too_high_index as usize)
+        AttnError::ValidatorIndexTooHigh(index)
+        if index == too_high_index as usize
    );
 
     /*
      * The following test ensures:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
-     * The aggregator's validator index is within the aggregate's committee -- i.e.
-     * aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate.data,
-     * aggregate.aggregation_bits).
+     * The aggregator's validator index is within the committee -- i.e.
+     * aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot,
+     * aggregate.data.index).
      */
 
     let unknown_validator = VALIDATOR_COUNT as u64;
@@ -406,14 +445,15 @@
         //
         // However the following error is triggered first:
         AttnError::AggregatorNotInCommittee {
-            aggregator_index: unknown_validator
+            aggregator_index
         }
+        if aggregator_index == unknown_validator
     );
 
     /*
      * The following test ensures:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot --
      * i.e. is_aggregator(state, aggregate.data.slot, aggregate.data.index,
@@ -423,7 +463,7 @@
     let (non_aggregator_index, non_aggregator_sk) =
         get_non_aggregator(&harness.chain, &valid_aggregate.message.aggregate);
     assert_invalid!(
-        "aggregate with from non-aggregator",
+        "aggregate from non-aggregator",
         {
             SignedAggregateAndProof::from_aggregate(
                 non_aggregator_index as u64,
@@ -436,10 +476,13 @@
             )
         },
         AttnError::InvalidSelectionProof {
-            aggregator_index: non_aggregator_index as u64
+            aggregator_index: index
         }
+        if index == non_aggregator_index as u64
    );
 
+    // NOTE: from here on, the tests are stateful, and rely on the valid attestation having been
+    // seen. A refactor to give each test case its own state might be nice at some point
     assert!(
         harness
             .chain
@@ -449,28 +492,26 @@
     );
 
     /*
-     * The following tests ensures:
-     *
-     * NOTE: this is a slight deviation from the spec, see:
-     * https://github.com/ethereum/eth2.0-specs/pull/1749
+     * The following test ensures:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
-     * The aggregate attestation defined by hash_tree_root(aggregate) has not already been seen
-     * (via aggregate gossip, within a block, or through the creation of an equivalent aggregate
-     * locally).
+     * The valid aggregate attestation defined by hash_tree_root(aggregate) has not already been
+     * seen (via aggregate gossip, within a block, or through the creation of an equivalent
+     * aggregate locally).
      */
 
     assert_invalid!(
-        "aggregate with that has already been seen",
+        "aggregate that has already been seen",
         valid_aggregate.clone(),
-        AttnError::AttestationAlreadyKnown(valid_aggregate.message.aggregate.tree_hash_root())
+        AttnError::AttestationAlreadyKnown(hash)
+        if hash == valid_aggregate.message.aggregate.tree_hash_root()
    );
 
     /*
      * The following test ensures:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * The aggregate is the first valid aggregate received for the aggregator with index
      * aggregate_and_proof.aggregator_index for the epoch aggregate.data.target.epoch.
@@ -483,7 +524,8 @@
             a.message.aggregate.data.beacon_block_root = Hash256::from_low_u64_le(42);
             a
         },
-        AttnError::AggregatorAlreadyKnown(aggregator_index as u64)
+        AttnError::AggregatorAlreadyKnown(index)
+        if index == aggregator_index as u64
     );
 }
@@ -512,31 +554,61 @@ fn unaggregated_gossip_verification() {
         "the test requires a new epoch to avoid already-seen errors"
     );
 
-    let (valid_attestation, validator_index, validator_committee_index, validator_sk) =
-        get_valid_unaggregated_attestation(&harness.chain);
+    let (
+        valid_attestation,
+        expected_validator_index,
+        validator_committee_index,
+        validator_sk,
+        subnet_id,
+    ) = get_valid_unaggregated_attestation(&harness.chain);
 
     macro_rules! assert_invalid {
-        ($desc: tt, $attn_getter: expr, $error: expr) => {
-            assert_eq!(
-                harness
-                    .chain
-                    .verify_unaggregated_attestation_for_gossip($attn_getter)
-                    .err()
-                    .expect(&format!(
-                        "{} should error during verify_unaggregated_attestation_for_gossip",
-                        $desc
-                    )),
-                $error,
+        ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => {
+            assert!(
+                matches!(
+                    harness
+                        .chain
+                        .verify_unaggregated_attestation_for_gossip($attn_getter, $subnet_getter)
+                        .err()
+                        .expect(&format!(
+                            "{} should error during verify_unaggregated_attestation_for_gossip",
+                            $desc
+                        )),
+                    $( $error ) |+ $( if $guard )?
+                ),
                 "case: {}",
                 $desc,
             );
         };
     }
 
+    /*
+     * The following test ensures:
+     *
+     * Spec v0.12.1
+     *
+     * The attestation is for the correct subnet (i.e. compute_subnet_for_attestation(state,
+     * attestation.data.slot, attestation.data.index) == subnet_id).
+     */
+    let id: u64 = subnet_id.into();
+    let invalid_subnet_id = SubnetId::new(id + 1);
+    assert_invalid!(
+        "attestation with invalid subnet id",
+        {
+            valid_attestation.clone()
+        },
+        invalid_subnet_id,
+        AttnError::InvalidSubnetId {
+            received,
+            expected,
+        }
+        if received == invalid_subnet_id && expected == subnet_id
+    );
+
     /*
      * The following two tests ensure:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a
      * MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. attestation.data.slot +
@@ -552,10 +624,12 @@
             a.data.slot = future_slot;
             a
         },
+        subnet_id,
         AttnError::FutureSlot {
-            attestation_slot: future_slot,
-            latest_permissible_slot: current_slot,
+            attestation_slot,
+            latest_permissible_slot,
         }
+        if attestation_slot == future_slot && latest_permissible_slot == current_slot
    );
 
     let early_slot = current_slot
@@ -570,18 +644,20 @@
             a.data.slot = early_slot;
             a
         },
+        subnet_id,
         AttnError::PastSlot {
-            attestation_slot: early_slot,
+            attestation_slot,
             // Subtract an additional slot since the harness will be exactly on the start of the
             // slot and the propagation tolerance will allow an extra slot.
-            earliest_permissible_slot: current_slot - E::slots_per_epoch() - 1,
+            earliest_permissible_slot,
         }
+        if attestation_slot == early_slot && earliest_permissible_slot == current_slot - E::slots_per_epoch() - 1
     );
 
     /*
      * The following two tests ensure:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * The attestation is unaggregated -- that is, it has exactly one participating validator
      * (len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1).
@@ -601,6 +677,7 @@
             );
             a
         },
+        subnet_id,
         AttnError::NotExactlyOneAggregationBitSet(0)
     );
 
@@ -613,13 +690,14 @@
                 .expect("should set second aggregation bit");
             a
         },
+        subnet_id,
         AttnError::NotExactlyOneAggregationBitSet(2)
     );
 
     /*
      * The following test ensures that:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
     * The block being voted for (attestation.data.beacon_block_root) passes validation.
     */
@@ -632,15 +710,17 @@
             a.data.beacon_block_root = unknown_root;
             a
         },
+        subnet_id,
         AttnError::UnknownHeadBlock {
-            beacon_block_root: unknown_root
+            beacon_block_root,
        }
+        if beacon_block_root == unknown_root
    );
 
     /*
      * The following test ensures that:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
     * The signature of attestation is valid.
     */
@@ -650,27 +730,25 @@
         {
             let mut a = valid_attestation.clone();
 
-            let mut agg_sig = AggregateSignature::new();
-            agg_sig.add(&Signature::new(&[42, 42], &validator_sk));
+            let mut agg_sig = AggregateSignature::infinity();
+            agg_sig.add_assign(&validator_sk.sign(Hash256::from_low_u64_be(42)));
             a.signature = agg_sig;
 
             a
         },
+        subnet_id,
         AttnError::InvalidSignature
     );
 
-    assert!(
-        harness
-            .chain
-            .verify_unaggregated_attestation_for_gossip(valid_attestation.clone())
-            .is_ok(),
-        "valid attestation should be verified"
-    );
+    harness
+        .chain
+        .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), subnet_id)
+        .expect("valid attestation should be verified");
 
     /*
      * The following test ensures that:
      *
-     * Spec v0.11.2
+     * Spec v0.12.1
      *
      * There has been no other valid attestation seen on an attestation subnet that has an
@@ -680,242 +758,12 @@
     assert_invalid!(
         "attestation that has already been seen",
         valid_attestation.clone(),
+        subnet_id,
         AttnError::PriorAttestationKnown {
-            validator_index: validator_index as u64,
-            epoch: current_epoch
+            validator_index,
+            epoch,
        }
    );
 }
/// Tests the verification conditions for an unaggregated attestation on the gossip network.
|
||||
#[test]
|
||||
fn fork_choice_verification() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
let chain = &harness.chain;

// Extend the chain out a few epochs so we have some chain depth to play with.
harness.extend_chain(
MainnetEthSpec::slots_per_epoch() as usize * 3 - 1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);

// Advance into a slot where there have not been blocks or attestations produced.
harness.advance_slot();

// We're going to produce the attestations at the first slot of the epoch.
let (valid_attestation, _validator_index, _validator_committee_index, _validator_sk) =
get_valid_unaggregated_attestation(&harness.chain);

// Extend the chain two more blocks, but without any attestations so we don't trigger the
// "already seen" caches.
//
// Because of this, the attestation we're dealing with was made one slot prior to the current
// slot. This allows us to test the `AttestsToFutureBlock` condition.
harness.extend_chain(
2,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::SomeValidators(vec![]),
);

let current_slot = chain.slot().expect("should get slot");
let current_epoch = chain.epoch().expect("should get epoch");

let attestation = harness
.chain
.verify_unaggregated_attestation_for_gossip(valid_attestation.clone())
.expect("precondition: should gossip verify attestation");

macro_rules! assert_invalid {
($desc: tt, $attn_getter: expr, $error: expr) => {
assert_eq!(
harness
.chain
.apply_attestation_to_fork_choice(&$attn_getter)
.err()
.expect(&format!(
"{} should error during apply_attestation_to_fork_choice",
$desc
)),
$error,
"case: {}",
$desc,
);
};
}

assert_invalid!(
"attestation without any aggregation bits set",
{
let mut a = attestation.clone();
a.__indexed_attestation_mut().attesting_indices = vec![].into();
a
},
AttnError::EmptyAggregationBitfield
);

/*
* The following two tests ensure that:
*
* Spec v0.11.2
*
* assert target.epoch in [current_epoch, previous_epoch]
*/

let future_epoch = current_epoch + 1;
assert_invalid!(
"attestation from future epoch",
{
let mut a = attestation.clone();
a.__indexed_attestation_mut().data.target.epoch = future_epoch;
a
},
AttnError::FutureEpoch {
attestation_epoch: future_epoch,
current_epoch
}
);

assert!(
current_epoch > 1,
"precondition: must be able to have a past epoch"
);

let past_epoch = current_epoch - 2;
assert_invalid!(
"attestation from past epoch",
{
let mut a = attestation.clone();
a.__indexed_attestation_mut().data.target.epoch = past_epoch;
a
},
AttnError::PastEpoch {
attestation_epoch: past_epoch,
current_epoch
}
);

/*
* This test ensures that:
*
* Spec v0.11.2
*
* assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
*/

assert_invalid!(
"attestation with bad target epoch",
{
let mut a = attestation.clone();

let indexed = a.__indexed_attestation_mut();
indexed.data.target.epoch = indexed.data.slot.epoch(E::slots_per_epoch()) - 1;
a
},
AttnError::BadTargetEpoch
);

/*
* This test ensures that:
*
* Spec v0.11.2
*
* Attestations must be for a known block. If target block is unknown, delay consideration
* until the block is found
*
* assert target.root in store.blocks
*/

let unknown_root = Hash256::from_low_u64_le(42);
assert_invalid!(
"attestation with unknown target root",
{
let mut a = attestation.clone();

let indexed = a.__indexed_attestation_mut();
indexed.data.target.root = unknown_root;
a
},
AttnError::UnknownTargetRoot(unknown_root)
);

// NOTE: we're not testing an assert from the spec:
//
// `assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch)`
//
// I think this check is redundant and I've raised an issue here:
//
// https://github.com/ethereum/eth2.0-specs/pull/1755

/*
* This test asserts that:
*
* Spec v0.11.2
*
* # Attestations must be for a known block. If block is unknown, delay consideration until the
* block is found
*
* assert attestation.data.beacon_block_root in store.blocks
*/

assert_invalid!(
"attestation with unknown beacon block root",
{
let mut a = attestation.clone();

let indexed = a.__indexed_attestation_mut();
indexed.data.beacon_block_root = unknown_root;
a
},
AttnError::UnknownHeadBlock {
beacon_block_root: unknown_root
}
);

let future_block = harness
.chain
.block_at_slot(current_slot)
.expect("should not error getting block")
.expect("should find block at current slot");
assert_invalid!(
"attestation to future block",
{
let mut a = attestation.clone();

let indexed = a.__indexed_attestation_mut();

assert!(
future_block.slot() > indexed.data.slot,
"precondition: the attestation must attest to the future"
);

indexed.data.beacon_block_root = future_block.canonical_root();
a
},
AttnError::AttestsToFutureBlock {
block: current_slot,
attestation: current_slot - 1
}
);

// Note: we're not checking the "attestations can only affect the fork choice of subsequent
// slots" part of the spec, we do this upstream.

assert!(
harness
.chain
.apply_attestation_to_fork_choice(&attestation.clone())
.is_ok(),
"should verify valid attestation"
);

// There's nothing stopping fork choice from accepting the same attestation twice.
assert!(
harness
.chain
.apply_attestation_to_fork_choice(&attestation)
.is_ok(),
"should verify valid attestation a second time"
if validator_index == expected_validator_index as u64 && epoch == current_epoch
);
}

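A pattern that recurs throughout this diff is replacing `assert_eq!` on error values with `assert!(matches!(..))` plus an `if` guard. A minimal, self-contained sketch of the idiom follows; the `Error` enum is a hypothetical stand-in for `AttnError`, not the real type. `matches!` checks the variant structurally and the guard checks the fields, which works even when the error type does not implement `PartialEq`.

// Hypothetical error enum standing in for the real `AttnError`.
#[derive(Debug)]
enum Error {
    PriorAttestationKnown { validator_index: u64, epoch: u64 },
}

fn main() {
    let err = Error::PriorAttestationKnown { validator_index: 7, epoch: 3 };
    let (expected_validator_index, current_epoch) = (7u64, 3u64);

    // The variant is matched structurally; the `if` guard checks field values.
    // No `PartialEq` impl is required on `Error`.
    assert!(
        matches!(
            err,
            Error::PriorAttestationKnown { validator_index, epoch }
            if validator_index == expected_validator_index && epoch == current_epoch
        ),
        "attestation that has already been seen"
    );
}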
@@ -931,7 +779,7 @@ fn attestation_that_skips_epochs() {
harness.extend_chain(
MainnetEthSpec::slots_per_epoch() as usize * 3 + 1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
AttestationStrategy::SomeValidators(vec![]),
);

let current_slot = chain.slot().expect("should get slot");
@@ -952,7 +800,7 @@ fn attestation_that_skips_epochs() {
per_slot_processing(&mut state, None, &harness.spec).expect("should process slot");
}

let attestation = harness
let (attestation, subnet_id) = harness
.get_unaggregated_attestations(
&AttestationStrategy::AllValidators,
&state,
@@ -969,7 +817,7 @@ fn attestation_that_skips_epochs() {
let block_slot = harness
.chain
.store
.get::<SignedBeaconBlock<E>>(&block_root)
.get_item::<SignedBeaconBlock<E>>(&block_root)
.expect("should not error getting block")
.expect("should find attestation block")
.message
@@ -980,11 +828,8 @@ fn attestation_that_skips_epochs() {
"the attestation must skip more than two epochs"
);

assert!(
harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation)
.is_ok(),
"should gossip verify attestation that skips slots"
);
harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
.expect("should gossip verify attestation that skips slots");
}

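As the hunks above show, gossip verification now takes the subnet an attestation arrived on, so callers thread `(attestation, subnet_id)` pairs through instead of a bare attestation. A hedged, self-contained sketch of the call shape; the types and the committee-to-subnet mapping below are hypothetical stand-ins, not Lighthouse's actual implementation.

#[derive(Clone, Copy, Debug, PartialEq)]
struct SubnetId(u64);

#[derive(Clone, Debug)]
struct Attestation {
    committee_index: u64,
}

#[derive(Debug)]
enum AttnError {
    InvalidSubnetId { received: SubnetId, expected: SubnetId },
}

// Toy mapping from committee index to subnet; the real computation also
// involves the slot and the number of committees per slot.
fn verify_for_gossip(a: &Attestation, subnet_id: SubnetId) -> Result<(), AttnError> {
    let expected = SubnetId(a.committee_index % 64);
    if subnet_id == expected {
        Ok(())
    } else {
        Err(AttnError::InvalidSubnetId { received: subnet_id, expected })
    }
}

fn main() {
    let (attestation, subnet_id) = (Attestation { committee_index: 5 }, SubnetId(5));
    verify_for_gossip(&attestation, subnet_id).expect("valid attestation should be verified");
}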
@@ -7,6 +7,7 @@ use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
BeaconSnapshot, BlockError,
};
use store::config::StoreConfig;
use types::{
test_utils::generate_deterministic_keypair, AggregateSignature, AttestationData,
AttesterSlashing, Checkpoint, Deposit, DepositData, Epoch, EthSpec, Hash256,
@@ -47,7 +48,11 @@ fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
}

fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
let harness = BeaconChainHarness::new(MainnetEthSpec, KEYPAIRS[0..validator_count].to_vec());
let harness = BeaconChainHarness::new(
MainnetEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
);

harness.advance_slot();

@@ -63,13 +68,13 @@ fn chain_segment_blocks() -> Vec<SignedBeaconBlock<E>> {

fn junk_signature() -> Signature {
let kp = generate_deterministic_keypair(VALIDATOR_COUNT);
let message = &[42, 42];
Signature::new(message, &kp.sk)
let message = Hash256::from_slice(&[42; 32]);
kp.sk.sign(message)
}

fn junk_aggregate_signature() -> AggregateSignature {
let mut agg_sig = AggregateSignature::new();
agg_sig.add(&junk_signature());
let mut agg_sig = AggregateSignature::empty();
agg_sig.add_assign(&junk_signature());
agg_sig
}

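The `junk_signature` hunk reflects a BLS API change: signatures are now produced over a 32-byte `Hash256` message via `SecretKey::sign`, and aggregates start from `AggregateSignature::empty()` and grow via `add_assign`. A hedged sketch of those call shapes using std-only mock types (the real code uses Lighthouse's bls crate, which these mocks only imitate):

// Mock stand-ins for the bls crate's types, just to show the call shapes.
#[derive(Clone, Copy)]
struct Hash256([u8; 32]);

impl Hash256 {
    fn from_slice(bytes: &[u8]) -> Self {
        let mut buf = [0u8; 32];
        buf.copy_from_slice(bytes);
        Hash256(buf)
    }
}

struct SecretKey;

#[derive(Debug)]
struct Signature(Vec<u8>);

impl SecretKey {
    // New-style API: sign a fixed-size hash rather than an arbitrary byte slice.
    fn sign(&self, message: Hash256) -> Signature {
        Signature(message.0.to_vec()) // placeholder "signature"
    }
}

struct AggregateSignature(Vec<Signature>);

impl AggregateSignature {
    fn empty() -> Self {
        AggregateSignature(Vec::new())
    }
    fn add_assign(&mut self, sig: &Signature) {
        self.0.push(Signature(sig.0.clone()));
    }
}

fn main() {
    let sk = SecretKey;
    let message = Hash256::from_slice(&[42; 32]);
    let mut agg = AggregateSignature::empty();
    agg.add_assign(&sk.sign(message));
}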
@@ -121,13 +126,13 @@ fn chain_segment_full_segment() {
harness
.chain
.process_chain_segment(vec![])
.to_block_error()
.into_block_error()
.expect("should import empty chain segment");

harness
.chain
.process_chain_segment(blocks.clone())
.to_block_error()
.into_block_error()
.expect("should import chain segment");

harness.chain.fork_choice().expect("should run fork choice");
@@ -158,7 +163,7 @@ fn chain_segment_varying_chunk_size() {
harness
.chain
.process_chain_segment(chunk.to_vec())
.to_block_error()
.into_block_error()
.expect(&format!(
"should import chain segment of len {}",
chunk_size
@@ -193,12 +198,14 @@ fn chain_segment_non_linear_parent_roots() {
let mut blocks = chain_segment_blocks();
blocks.remove(2);

assert_eq!(
harness
.chain
.process_chain_segment(blocks.clone())
.to_block_error(),
Err(BlockError::NonLinearParentRoots),
assert!(
matches!(
harness
.chain
.process_chain_segment(blocks.clone())
.into_block_error(),
Err(BlockError::NonLinearParentRoots)
),
"should not import chain with missing parent"
);

@@ -208,12 +215,14 @@ fn chain_segment_non_linear_parent_roots() {
let mut blocks = chain_segment_blocks();
blocks[3].message.parent_root = Hash256::zero();

assert_eq!(
harness
.chain
.process_chain_segment(blocks.clone())
.to_block_error(),
Err(BlockError::NonLinearParentRoots),
assert!(
matches!(
harness
.chain
.process_chain_segment(blocks.clone())
.into_block_error(),
Err(BlockError::NonLinearParentRoots)
),
"should not import chain with a broken parent root link"
);
}
@@ -233,12 +242,14 @@ fn chain_segment_non_linear_slots() {
let mut blocks = chain_segment_blocks();
blocks[3].message.slot = Slot::new(0);

assert_eq!(
harness
.chain
.process_chain_segment(blocks.clone())
.to_block_error(),
Err(BlockError::NonLinearSlots),
assert!(
matches!(
harness
.chain
.process_chain_segment(blocks.clone())
.into_block_error(),
Err(BlockError::NonLinearSlots)
),
"should not import chain with a parent that has a lower slot than its child"
);

@@ -249,12 +260,14 @@ fn chain_segment_non_linear_slots() {
let mut blocks = chain_segment_blocks();
blocks[3].message.slot = blocks[2].message.slot;

assert_eq!(
harness
.chain
.process_chain_segment(blocks.clone())
.to_block_error(),
Err(BlockError::NonLinearSlots),
assert!(
matches!(
harness
.chain
.process_chain_segment(blocks.clone())
.into_block_error(),
Err(BlockError::NonLinearSlots)
),
"should not import chain with a parent that has an equal slot to its child"
);
}
@@ -279,7 +292,7 @@ fn invalid_signatures() {
harness
.chain
.process_chain_segment(ancestor_blocks)
.to_block_error()
.into_block_error()
.expect("should import all blocks prior to the one being tested");

// For the given snapshots, test the following:
@@ -297,19 +310,26 @@ fn invalid_signatures() {
.collect();

// Ensure the block will be rejected if imported in a chain segment.
assert_eq!(
harness.chain.process_chain_segment(blocks).to_block_error(),
Err(BlockError::InvalidSignature),
assert!(
matches!(
harness
.chain
.process_chain_segment(blocks)
.into_block_error(),
Err(BlockError::InvalidSignature)
),
"should not import chain segment with an invalid {} signature",
item
);

// Ensure the block will be rejected if imported on its own (without gossip checking).
assert_eq!(
harness
.chain
.process_block(snapshots[block_index].beacon_block.clone()),
Err(BlockError::InvalidSignature),
assert!(
matches!(
harness
.chain
.process_block(snapshots[block_index].beacon_block.clone()),
Err(BlockError::InvalidSignature)
),
"should not import individual block with an invalid {} signature",
item
);
@@ -332,17 +352,24 @@ fn invalid_signatures() {
.map(|snapshot| snapshot.beacon_block.clone())
.collect();
// Ensure the block will be rejected if imported in a chain segment.
assert_eq!(
harness.chain.process_chain_segment(blocks).to_block_error(),
Err(BlockError::InvalidSignature),
assert!(
matches!(
harness
.chain
.process_chain_segment(blocks)
.into_block_error(),
Err(BlockError::InvalidSignature)
),
"should not import chain segment with an invalid gossip signature",
);
// Ensure the block will be rejected if imported on its own (without gossip checking).
assert_eq!(
harness
.chain
.process_block(snapshots[block_index].beacon_block.clone()),
Err(BlockError::InvalidSignature),
assert!(
matches!(
harness
.chain
.process_block(snapshots[block_index].beacon_block.clone()),
Err(BlockError::InvalidSignature)
),
"should not import individual block with an invalid gossip signature",
);

@@ -467,8 +494,13 @@ fn invalid_signatures() {
.map(|snapshot| snapshot.beacon_block.clone())
.collect();
assert!(
harness.chain.process_chain_segment(blocks).to_block_error()
!= Err(BlockError::InvalidSignature),
!matches!(
harness
.chain
.process_chain_segment(blocks)
.into_block_error(),
Err(BlockError::InvalidSignature)
),
"should not throw an invalid signature error for a bad deposit signature"
);

@@ -535,7 +567,7 @@ fn block_gossip_verification() {
/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block is not from a future slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) --
* i.e. validate that signed_beacon_block.message.slot <= current_slot (a client MAY queue
@@ -543,21 +575,24 @@ fn block_gossip_verification() {
*/

let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
let block_slot = block.message.slot + 1;
block.message.slot = block_slot;
assert_eq!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
BlockError::FutureSlot {
present_slot: block_slot - 1,
block_slot
},
let expected_block_slot = block.message.slot + 1;
block.message.slot = expected_block_slot;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
BlockError::FutureSlot {
present_slot,
block_slot,
}
if present_slot == expected_block_slot - 1 && block_slot == expected_block_slot
),
"should not import a block with a future slot"
);

/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block is from a slot greater than the latest finalized slot -- i.e. validate that
* signed_beacon_block.message.slot >
@@ -567,27 +602,30 @@ fn block_gossip_verification() {
*/

let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
let finalized_slot = harness
let expected_finalized_slot = harness
.chain
.head_info()
.expect("should get head info")
.finalized_checkpoint
.epoch
.start_slot(E::slots_per_epoch());
block.message.slot = finalized_slot;
assert_eq!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
BlockError::WouldRevertFinalizedSlot {
block_slot: finalized_slot,
finalized_slot
},
block.message.slot = expected_finalized_slot;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
BlockError::WouldRevertFinalizedSlot {
block_slot,
finalized_slot,
}
if block_slot == expected_finalized_slot && finalized_slot == expected_finalized_slot
),
"should not import a block with a finalized slot"
);

/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The proposer signature, signed_beacon_block.signature, is valid with respect to the
* proposer_index pubkey.
@@ -595,16 +633,18 @@ fn block_gossip_verification() {

let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
block.signature = junk_signature();
assert_eq!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
BlockError::ProposalSignatureInvalid,
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
BlockError::ProposalSignatureInvalid
),
"should not import a block with an invalid proposal signature"
);

/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block is proposed by the expected proposer_index for the block's slot in the context of
* the current shuffling (defined by parent_root/slot). If the proposer_index cannot
@@ -625,21 +665,27 @@ fn block_gossip_verification() {
harness.chain.genesis_validators_root,
&harness.chain.spec,
);
assert_eq!(
unwrap_err(harness.chain.verify_block_for_gossip(block.clone())),
BlockError::IncorrectBlockProposer {
block: other_proposer,
local_shuffling: expected_proposer
},
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block.clone())),
BlockError::IncorrectBlockProposer {
block,
local_shuffling,
}
if block == other_proposer && local_shuffling == expected_proposer
),
"should not import a block with the wrong proposer index"
);
// Check to ensure that we registered this as a valid block from this proposer.
assert_eq!(
unwrap_err(harness.chain.verify_block_for_gossip(block.clone())),
BlockError::RepeatProposal {
proposer: other_proposer,
slot: block.message.slot
},
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block.clone())),
BlockError::RepeatProposal {
proposer,
slot,
}
if proposer == other_proposer && slot == block.message.slot
),
"should register any valid signature against the proposer, even if the block failed later verification"
);

@@ -652,23 +698,26 @@ fn block_gossip_verification() {
/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block is the first block with valid signature received for the proposer for the slot,
* signed_beacon_block.message.slot.
*/

let block = CHAIN_SEGMENT[block_index].beacon_block.clone();
assert_eq!(
harness
.chain
.verify_block_for_gossip(block.clone())
.err()
.expect("should error when processing known block"),
BlockError::RepeatProposal {
proposer: block.message.proposer_index,
slot: block.message.slot,
},
assert!(
matches!(
harness
.chain
.verify_block_for_gossip(block.clone())
.err()
.expect("should error when processing known block"),
BlockError::RepeatProposal {
proposer,
slot,
}
if proposer == block.message.proposer_index && slot == block.message.slot
),
"the second proposal by this validator should be rejected"
);
}

275
beacon_node/beacon_chain/tests/op_verification.rs
Normal file
@@ -0,0 +1,275 @@
//! Tests for gossip verification of voluntary exits, proposer slashings and attester slashings.

#![cfg(not(debug_assertions))]

#[macro_use]
extern crate lazy_static;

use beacon_chain::observed_operations::ObservationOutcome;
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
};
use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc;
use store::{LevelDB, StoreConfig};
use tempfile::{tempdir, TempDir};
use types::test_utils::{
AttesterSlashingTestTask, ProposerSlashingTestTask, TestingAttesterSlashingBuilder,
TestingProposerSlashingBuilder, TestingVoluntaryExitBuilder,
};
use types::*;

pub const VALIDATOR_COUNT: usize = 24;

lazy_static! {
/// A cached set of keys.
static ref KEYPAIRS: Vec<Keypair> =
types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}

type E = MinimalEthSpec;
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
type HotColdDB = store::HotColdDB<E, LevelDB<E>, LevelDB<E>>;

fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
let spec = E::default_spec();
let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db");
let config = StoreConfig::default();
let log = NullLoggerBuilder.build().expect("logger should build");
Arc::new(
HotColdDB::open(&hot_path, &cold_path, config, spec, log)
.expect("disk store should initialize"),
)
}

fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {
let harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
store,
KEYPAIRS[0..validator_count].to_vec(),
);
harness.advance_slot();
harness
}

#[test]
fn voluntary_exit() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;

harness.extend_chain(
(E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);

let head_info = harness.chain.head_info().unwrap();

let make_exit = |validator_index: usize, exit_epoch: u64| {
TestingVoluntaryExitBuilder::new(Epoch::new(exit_epoch), validator_index as u64).build(
&KEYPAIRS[validator_index].sk,
&head_info.fork,
head_info.genesis_validators_root,
spec,
)
};

let validator_index1 = VALIDATOR_COUNT - 1;
let validator_index2 = VALIDATOR_COUNT - 2;

let exit1 = make_exit(validator_index1, spec.shard_committee_period);

// First verification should show it to be fresh.
assert!(matches!(
harness
.chain
.verify_voluntary_exit_for_gossip(exit1.clone())
.unwrap(),
ObservationOutcome::New(_)
));

// Second should not.
assert!(matches!(
harness
.chain
.verify_voluntary_exit_for_gossip(exit1.clone()),
Ok(ObservationOutcome::AlreadyKnown)
));

// A different exit for the same validator should also be detected as a duplicate.
let exit2 = make_exit(validator_index1, spec.shard_committee_period + 1);
assert!(matches!(
harness.chain.verify_voluntary_exit_for_gossip(exit2),
Ok(ObservationOutcome::AlreadyKnown)
));

// Exit for a different validator should be fine.
let exit3 = make_exit(validator_index2, spec.shard_committee_period);
assert!(matches!(
harness
.chain
.verify_voluntary_exit_for_gossip(exit3)
.unwrap(),
ObservationOutcome::New(_)
));
}

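The `voluntary_exit` test above asserts per-validator dedup: a second exit from the same validator is `AlreadyKnown` even when the exit epoch differs. A hedged, in-memory analogue of that rule follows; the set-based logic is an assumption for illustration, not the actual `observed_operations` implementation.

use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum ObservationOutcome {
    New,
    AlreadyKnown,
}

#[derive(Default)]
struct ObservedVoluntaryExits {
    seen_validators: HashSet<u64>,
}

impl ObservedVoluntaryExits {
    // Any second exit from the same validator is a duplicate, even if the
    // exit itself differs (matching the behaviour the test asserts).
    fn observe(&mut self, validator_index: u64) -> ObservationOutcome {
        if self.seen_validators.insert(validator_index) {
            ObservationOutcome::New
        } else {
            ObservationOutcome::AlreadyKnown
        }
    }
}

fn main() {
    let mut observed = ObservedVoluntaryExits::default();
    assert_eq!(observed.observe(23), ObservationOutcome::New);
    assert_eq!(observed.observe(23), ObservationOutcome::AlreadyKnown); // different exit, same validator
    assert_eq!(observed.observe(22), ObservationOutcome::New);
}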
#[test]
fn proposer_slashing() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;

let head_info = harness.chain.head_info().unwrap();

let validator_index1 = VALIDATOR_COUNT - 1;
let validator_index2 = VALIDATOR_COUNT - 2;

let make_slashing = |validator_index: usize| {
TestingProposerSlashingBuilder::double_vote::<E>(
ProposerSlashingTestTask::Valid,
validator_index as u64,
&KEYPAIRS[validator_index].sk,
&head_info.fork,
head_info.genesis_validators_root,
spec,
)
};

let slashing1 = make_slashing(validator_index1);

// First slashing for this proposer should be allowed.
assert!(matches!(
harness
.chain
.verify_proposer_slashing_for_gossip(slashing1.clone())
.unwrap(),
ObservationOutcome::New(_)
));
// Duplicate slashing should be detected.
assert!(matches!(
harness
.chain
.verify_proposer_slashing_for_gossip(slashing1.clone())
.unwrap(),
ObservationOutcome::AlreadyKnown
));

// Different slashing for the same index should be rejected
let slashing2 = ProposerSlashing {
signed_header_1: slashing1.signed_header_2,
signed_header_2: slashing1.signed_header_1,
};
assert!(matches!(
harness
.chain
.verify_proposer_slashing_for_gossip(slashing2)
.unwrap(),
ObservationOutcome::AlreadyKnown
));

// Proposer slashing for a different index should be accepted
let slashing3 = make_slashing(validator_index2);
assert!(matches!(
harness
.chain
.verify_proposer_slashing_for_gossip(slashing3)
.unwrap(),
ObservationOutcome::New(_)
));
}

#[test]
fn attester_slashing() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;

let head_info = harness.chain.head_info().unwrap();

// First third of the validators
let first_third = (0..VALIDATOR_COUNT as u64 / 3).collect::<Vec<_>>();
// First half of the validators
let first_half = (0..VALIDATOR_COUNT as u64 / 2).collect::<Vec<_>>();
// Last third of the validators
let last_third = (2 * VALIDATOR_COUNT as u64 / 3..VALIDATOR_COUNT as u64).collect::<Vec<_>>();
// Last half of the validators
let second_half = (VALIDATOR_COUNT as u64 / 2..VALIDATOR_COUNT as u64).collect::<Vec<_>>();

let signer = |idx: u64, message: &[u8]| {
KEYPAIRS[idx as usize]
.sk
.sign(Hash256::from_slice(&message))
};

let make_slashing = |validators| {
TestingAttesterSlashingBuilder::double_vote::<_, E>(
AttesterSlashingTestTask::Valid,
validators,
signer,
&head_info.fork,
head_info.genesis_validators_root,
spec,
)
};

// Slashing for first third of validators should be accepted.
let slashing1 = make_slashing(&first_third);
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing1.clone())
.unwrap(),
ObservationOutcome::New(_)
));

// Overlapping slashing for first half of validators should also be accepted.
let slashing2 = make_slashing(&first_half);
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing2.clone())
.unwrap(),
ObservationOutcome::New(_)
));

// Repeating slashing1 or slashing2 should be rejected
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing1.clone())
.unwrap(),
ObservationOutcome::AlreadyKnown
));
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing2.clone())
.unwrap(),
ObservationOutcome::AlreadyKnown
));

// Slashing for last half of validators should be accepted (distinct from all existing)
let slashing3 = make_slashing(&second_half);
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing3)
.unwrap(),
ObservationOutcome::New(_)
));
// Slashing for last third (contained in last half) should be rejected.
let slashing4 = make_slashing(&last_third);
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing4)
.unwrap(),
ObservationOutcome::AlreadyKnown
));
}
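The `attester_slashing` test implies subset semantics: a slashing is `AlreadyKnown` when every index it would slash is already covered by previously observed slashings (the last third is contained in the last half), while partially overlapping sets still count as `New` (first half vs. first third). A hedged sketch of that rule with a plain `HashSet`; this is an assumed model for illustration, not the real implementation.

use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum ObservationOutcome {
    New,
    AlreadyKnown,
}

#[derive(Default)]
struct ObservedAttesterSlashings {
    seen_indices: HashSet<u64>,
}

impl ObservedAttesterSlashings {
    fn observe(&mut self, attesting_indices: &[u64]) -> ObservationOutcome {
        // If the slashing adds no newly slashable index, it is redundant.
        if attesting_indices.iter().all(|i| self.seen_indices.contains(i)) {
            ObservationOutcome::AlreadyKnown
        } else {
            self.seen_indices.extend(attesting_indices.iter().copied());
            ObservationOutcome::New
        }
    }
}

fn main() {
    let mut observed = ObservedAttesterSlashings::default();
    assert_eq!(observed.observe(&[0, 1, 2]), ObservationOutcome::New); // first third
    assert_eq!(observed.observe(&[0, 1, 2, 3]), ObservationOutcome::New); // overlapping first half
    assert_eq!(observed.observe(&[0, 1, 2]), ObservationOutcome::AlreadyKnown); // repeat
    assert_eq!(observed.observe(&[3]), ObservationOutcome::AlreadyKnown); // subset of seen
}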
@@ -9,7 +9,7 @@ use beacon_chain::{
};
use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc;
use store::{DiskStore, StoreConfig};
use store::{HotColdDB, LevelDB, StoreConfig};
use tempfile::{tempdir, TempDir};
use types::{EthSpec, Keypair, MinimalEthSpec};

@@ -23,14 +23,14 @@ lazy_static! {
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}

fn get_store(db_path: &TempDir) -> Arc<DiskStore<E>> {
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
let spec = E::default_spec();
let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db");
let config = StoreConfig::default();
let log = NullLoggerBuilder.build().expect("logger should build");
Arc::new(
DiskStore::open(&hot_path, &cold_path, config, spec, log)
HotColdDB::open(&hot_path, &cold_path, config, spec, log)
.expect("disk store should initialize"),
)
}
@@ -143,14 +143,18 @@ fn finalizes_after_resuming_from_db() {
fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b: &BeaconChain<T>) {
assert_eq!(a.spec, b.spec, "spec should be equal");
assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal");
assert_eq!(a.head(), b.head(), "head() should be equal");
assert_eq!(
a.head().unwrap(),
b.head().unwrap(),
"head() should be equal"
);
assert_eq!(a.heads(), b.heads(), "heads() should be equal");
assert_eq!(
a.genesis_block_root, b.genesis_block_root,
"genesis_block_root should be equal"
);
assert!(
a.fork_choice == b.fork_choice,
*a.fork_choice.read() == *b.fork_choice.read(),
"fork_choice should be equal"
);
}

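These hunks rename `DiskStore` to `HotColdDB<E, Hot, Cold>`: a database split at a "split slot", with recent data in the hot store and finalized data in the cold (freezer) store, both LevelDB-backed here. A hedged, self-contained sketch of the routing idea only; the field names and lookup logic below are assumptions, not the actual store code.

use std::collections::HashMap;

type Slot = u64;
type StateRoot = [u8; 32];
type State = String; // placeholder payload

struct HotColdDb {
    split_slot: Slot,
    hot: HashMap<StateRoot, State>,
    cold: HashMap<Slot, State>, // frozen states keyed by slot in this toy model
}

impl HotColdDb {
    fn get_state(&self, root: &StateRoot, slot: Slot) -> Option<&State> {
        if slot < self.split_slot {
            // Finalized ("cold") states live in the freezer.
            self.cold.get(&slot)
        } else {
            // Recent ("hot") states are loaded by state root.
            self.hot.get(root)
        }
    }
}

fn main() {
    let mut db = HotColdDb { split_slot: 32, hot: HashMap::new(), cold: HashMap::new() };
    db.cold.insert(31, "frozen state".to_string());
    db.hot.insert([0xab; 32], "hot state".to_string());
    assert_eq!(db.get_state(&[0xab; 32], 40), Some(&"hot state".to_string()));
    assert_eq!(db.get_state(&[0; 32], 31), Some(&"frozen state".to_string()));
}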
@@ -3,6 +3,11 @@
#[macro_use]
extern crate lazy_static;

#[macro_use]
extern crate slog;
extern crate slog_term;

use crate::slog::Drain;
use beacon_chain::attestation_verification::Error as AttnError;
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
@@ -10,13 +15,12 @@ use beacon_chain::test_utils::{
use beacon_chain::BeaconSnapshot;
use beacon_chain::StateSkipConfig;
use rand::Rng;
use sloggers::{null::NullLoggerBuilder, Build};
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
use store::{
iter::{BlockRootsIterator, StateRootsIterator},
DiskStore, Store, StoreConfig,
HotColdDB, LevelDB, StoreConfig,
};
use tempfile::{tempdir, TempDir};
use tree_hash::TreeHash;
@@ -35,19 +39,26 @@ lazy_static! {
type E = MinimalEthSpec;
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;

fn get_store(db_path: &TempDir) -> Arc<DiskStore<E>> {
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
let spec = MinimalEthSpec::default_spec();
let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db");
let config = StoreConfig::default();
let log = NullLoggerBuilder.build().expect("logger should build");

let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
let drain = slog_term::FullFormat::new(decorator).build();
let log = slog::Logger::root(std::sync::Mutex::new(drain).fuse(), o!());

Arc::new(
DiskStore::open(&hot_path, &cold_path, config, spec, log)
HotColdDB::open(&hot_path, &cold_path, config, spec, log)
.expect("disk store should initialize"),
)
}

fn get_harness(store: Arc<DiskStore<E>>, validator_count: usize) -> TestHarness {
fn get_harness(
store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
validator_count: usize,
) -> TestHarness {
let harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
store,
@@ -290,7 +301,7 @@ fn epoch_boundary_state_attestation_processing() {

let mut checked_pre_fin = false;

for attestation in late_attestations.into_iter().flatten() {
for (attestation, subnet_id) in late_attestations.into_iter().flatten() {
// load_epoch_boundary_state is idempotent!
let block_root = attestation.data.beacon_block_root;
let block = store.get_block(&block_root).unwrap().expect("block exists");
@@ -314,24 +325,25 @@ fn epoch_boundary_state_attestation_processing() {

let res = harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation.clone());
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id);

let current_slot = harness.chain.slot().expect("should get slot");
let attestation_slot = attestation.data.slot;
let expected_attestation_slot = attestation.data.slot;
// Extra -1 to handle gossip clock disparity.
let earliest_permissible_slot = current_slot - E::slots_per_epoch() - 1;
let expected_earliest_permissible_slot = current_slot - E::slots_per_epoch() - 1;

if attestation_slot <= finalized_epoch.start_slot(E::slots_per_epoch())
|| attestation_slot < earliest_permissible_slot
if expected_attestation_slot <= finalized_epoch.start_slot(E::slots_per_epoch())
|| expected_attestation_slot < expected_earliest_permissible_slot
{
checked_pre_fin = true;
assert_eq!(
assert!(matches!(
res.err().unwrap(),
AttnError::PastSlot {
attestation_slot,
earliest_permissible_slot,
}
);
if attestation_slot == expected_attestation_slot && earliest_permissible_slot == expected_earliest_permissible_slot
));
} else {
res.expect("should have verified attestation");
}
@@ -391,30 +403,32 @@ fn delete_blocks_and_states() {
.expect("faulty head state exists");

let states_to_delete = StateRootsIterator::new(store.clone(), &faulty_head_state)
.map(Result::unwrap)
.take_while(|(_, slot)| *slot > unforked_blocks)
.collect::<Vec<_>>();

// Delete faulty fork
// Attempting to load those states should find them unavailable
for (state_root, slot) in &states_to_delete {
assert_eq!(store.delete_state(state_root, *slot), Ok(()));
assert_eq!(store.get_state(state_root, Some(*slot)), Ok(None));
store.delete_state(state_root, *slot).unwrap();
assert_eq!(store.get_state(state_root, Some(*slot)).unwrap(), None);
}

// Double-deleting should also be OK (deleting non-existent things is fine)
for (state_root, slot) in &states_to_delete {
assert_eq!(store.delete_state(state_root, *slot), Ok(()));
store.delete_state(state_root, *slot).unwrap();
}

// Deleting the blocks from the fork should remove them completely
let blocks_to_delete = BlockRootsIterator::new(store.clone(), &faulty_head_state)
.map(Result::unwrap)
// Extra +1 here accounts for the skipped slot that started this fork
.take_while(|(_, slot)| *slot > unforked_blocks + 1)
.collect::<Vec<_>>();

for (block_root, _) in blocks_to_delete {
assert_eq!(store.delete_block(&block_root), Ok(()));
assert_eq!(store.get_block(&block_root), Ok(None));
store.delete_block(&block_root).unwrap();
assert_eq!(store.get_block(&block_root).unwrap(), None);
}

// Deleting frozen states should do nothing
@@ -423,10 +437,11 @@ fn delete_blocks_and_states() {
.chain
.rev_iter_state_roots()
.expect("rev iter ok")
.map(Result::unwrap)
.filter(|(_, slot)| *slot < split_slot);

for (state_root, slot) in finalized_states {
assert_eq!(store.delete_state(&state_root, slot), Ok(()));
store.delete_state(&state_root, slot).unwrap();
}

// After all that, the chain dump should still be OK
@@ -658,11 +673,12 @@ fn check_shuffling_compatible(
let previous_pivot_slot =
(head_state.previous_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch());

for (block_root, slot) in harness
for maybe_tuple in harness
.chain
.rev_iter_block_roots_from(head_block_root)
.unwrap()
{
let (block_root, slot) = maybe_tuple.unwrap();
// Shuffling is compatible targeting the current epoch,
// iff slot is greater than or equal to the current epoch pivot block
assert_eq!(
@@ -1304,8 +1320,8 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) {
);
}

/// Check that the DiskStore's split_slot is equal to the start slot of the last finalized epoch.
fn check_split_slot(harness: &TestHarness, store: Arc<DiskStore<E>>) {
/// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch.
fn check_split_slot(harness: &TestHarness, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) {
let split_slot = store.get_split_slot();
assert_eq!(
harness
@@ -1356,13 +1372,15 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
.collect::<Vec<_>>();

let head = harness.chain.head().expect("should get head");
let mut forward_block_roots = Store::forwards_block_roots_iterator(
let mut forward_block_roots = HotColdDB::forwards_block_roots_iterator(
harness.chain.store.clone(),
Slot::new(0),
head.beacon_state,
head.beacon_block_root,
&harness.spec,
)
.unwrap()
.map(Result::unwrap)
.collect::<Vec<_>>();

// Drop the block roots for skipped slots.
@@ -1386,6 +1404,7 @@ fn check_iterators(harness: &TestHarness) {
.rev_iter_state_roots()
.expect("should get iter")
.last()
.map(Result::unwrap)
.map(|(_, slot)| slot),
Some(Slot::new(0))
);
@@ -1395,6 +1414,7 @@ fn check_iterators(harness: &TestHarness) {
.rev_iter_block_roots()
.expect("should get iter")
.last()
.map(Result::unwrap)
.map(|(_, slot)| slot),
Some(Slot::new(0))
);

@@ -13,7 +13,7 @@ use operation_pool::PersistedOperationPool;
use state_processing::{
per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError,
};
use store::Store;
use store::config::StoreConfig;
use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot};

// Should ideally be divisible by 3.
@@ -25,7 +25,11 @@ lazy_static! {
}

fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<MinimalEthSpec>> {
let harness = BeaconChainHarness::new(MinimalEthSpec, KEYPAIRS[0..validator_count].to_vec());
let harness = BeaconChainHarness::new(
MinimalEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
);

harness.advance_slot();

@@ -73,11 +77,13 @@ fn iterators() {
.chain
.rev_iter_block_roots()
.expect("should get iter")
.map(Result::unwrap)
.collect();
let state_roots: Vec<(Hash256, Slot)> = harness
.chain
.rev_iter_state_roots()
.expect("should get iter")
.map(Result::unwrap)
.collect();

assert_eq!(
@@ -348,16 +354,14 @@ fn roundtrip_operation_pool() {
.persist_op_pool()
.expect("should persist op pool");

let head_state = harness.chain.head().expect("should get head").beacon_state;

let key = Hash256::from_slice(&OP_POOL_DB_KEY);
let restored_op_pool = harness
.chain
.store
.get::<PersistedOperationPool<MinimalEthSpec>>(&key)
.get_item::<PersistedOperationPool<MinimalEthSpec>>(&key)
.expect("should read db")
.expect("should find op pool")
.into_operation_pool(&head_state, &harness.spec);
.into_operation_pool();

assert_eq!(harness.chain.op_pool, restored_op_pool);
}
@@ -375,7 +379,13 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() {
);

let state = &harness.chain.head().expect("should get head").beacon_state;
let fork_choice = &harness.chain.fork_choice;
let mut fork_choice = harness.chain.fork_choice.write();

// Move forward a slot so all queued attestations can be processed.
harness.advance_slot();
fork_choice
.update_time(harness.chain.slot().unwrap())
.unwrap();

let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT)
.into_iter()
@@ -397,7 +407,7 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() {
assert_eq!(
latest_message.unwrap().1,
slot.epoch(MinimalEthSpec::slots_per_epoch()),
"Latest message slot for {} should be equal to slot {}.",
"Latest message epoch for {} should be equal to epoch {}.",
validator,
slot
)
@@ -447,23 +457,25 @@ fn attestations_with_increasing_slots() {
harness.advance_slot();
}

for attestation in attestations.into_iter().flatten() {
for (attestation, subnet_id) in attestations.into_iter().flatten() {
let res = harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation.clone());
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id);

let current_slot = harness.chain.slot().expect("should get slot");
let attestation_slot = attestation.data.slot;
let earliest_permissible_slot = current_slot - MinimalEthSpec::slots_per_epoch() - 1;
let expected_attestation_slot = attestation.data.slot;
let expected_earliest_permissible_slot =
current_slot - MinimalEthSpec::slots_per_epoch() - 1;

if attestation_slot < earliest_permissible_slot {
assert_eq!(
if expected_attestation_slot < expected_earliest_permissible_slot {
assert!(matches!(
res.err().unwrap(),
AttnError::PastSlot {
attestation_slot,
earliest_permissible_slot,
}
)
if attestation_slot == expected_attestation_slot && earliest_permissible_slot == expected_earliest_permissible_slot
))
} else {
res.expect("should process attestation");
}
@@ -483,7 +495,13 @@ fn unaggregated_attestations_added_to_fork_choice_all_updated() {
);

let state = &harness.chain.head().expect("should get head").beacon_state;
let fork_choice = &harness.chain.fork_choice;
let mut fork_choice = harness.chain.fork_choice.write();

// Move forward a slot so all queued attestations can be processed.
harness.advance_slot();
fork_choice
.update_time(harness.chain.slot().unwrap())
.unwrap();

let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
let slots: Vec<Slot> = validators
@@ -558,19 +576,22 @@ fn run_skip_slot_test(skip_slots: u64) {
);

assert_eq!(
harness_b.chain.process_block(
harness_a
.chain
.head()
.expect("should get head")
.beacon_block
.clone(),
),
Ok(harness_a
harness_b
.chain
.process_block(
harness_a
.chain
.head()
.expect("should get head")
.beacon_block
.clone(),
)
.unwrap(),
harness_a
.chain
.head()
.expect("should get head")
.beacon_block_root)
.beacon_block_root
);

harness_b

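A recurring change above is `&harness.chain.fork_choice` becoming `harness.chain.fork_choice.write()`: fork choice now sits behind an `RwLock`, so tests take an explicit write guard before mutating it. A sketch of the guard pattern using std's `RwLock` (the real code appears to use `parking_lot`, whose guards are not wrapped in `Result`):

use std::sync::RwLock;

struct ForkChoice {
    time: u64,
}

impl ForkChoice {
    fn update_time(&mut self, current_slot: u64) {
        self.time = current_slot;
    }
}

fn main() {
    let fork_choice = RwLock::new(ForkChoice { time: 0 });

    // Take a write guard for the mutation, then drop it promptly so readers
    // (e.g. block processing) are not blocked longer than necessary.
    {
        let mut guard = fork_choice.write().unwrap();
        guard.update_time(42);
    }

    assert_eq!(fork_choice.read().unwrap().time, 42);
}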
@@ -1,7 +1,7 @@
[package]
name = "client"
version = "0.1.2"
authors = ["Age Manning <Age@AgeManning.com>"]
version = "0.2.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"

[dev-dependencies]
@@ -13,22 +13,22 @@ beacon_chain = { path = "../beacon_chain" }
store = { path = "../store" }
network = { path = "../network" }
timer = { path = "../timer" }
eth2-libp2p = { path = "../eth2-libp2p" }
eth2_libp2p = { path = "../eth2_libp2p" }
rest_api = { path = "../rest_api" }
parking_lot = "0.10.2"
parking_lot = "0.11.0"
websocket_server = { path = "../websocket_server" }
prometheus = "0.8.0"
types = { path = "../../eth2/types" }
prometheus = "0.9.0"
types = { path = "../../consensus/types" }
tree_hash = "0.1.0"
eth2_config = { path = "../../eth2/utils/eth2_config" }
slot_clock = { path = "../../eth2/utils/slot_clock" }
eth2_config = { path = "../../common/eth2_config" }
slot_clock = { path = "../../common/slot_clock" }
serde = "1.0.110"
serde_derive = "1.0.110"
error-chain = "0.12.2"
serde_yaml = "0.8.11"
slog = { version = "2.5.2", features = ["max_level_trace"] }
slog-async = "2.5.0"
tokio = "0.2.20"
tokio = "0.2.21"
dirs = "2.0.2"
futures = "0.3.5"
reqwest = "0.10.4"
@@ -38,4 +38,6 @@ genesis = { path = "../genesis" }
environment = { path = "../../lighthouse/environment" }
eth2_ssz = "0.1.2"
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
time = "0.2.16"
bus = "2.2.3"

@@ -1,28 +1,35 @@
use crate::config::{ClientGenesis, Config as ClientConfig};
use crate::notifier::spawn_notifier;
use crate::Client;
use beacon_chain::events::TeeEventHandler;
use beacon_chain::{
builder::{BeaconChainBuilder, Witness},
eth1_chain::{CachingEth1Backend, Eth1Chain},
migrate::{BackgroundMigrator, Migrate, NullMigrator},
migrate::{BackgroundMigrator, Migrate},
slot_clock::{SlotClock, SystemTimeSlotClock},
store::{DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig},
store::{HotColdDB, ItemStore, LevelDB, StoreConfig},
BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler,
};
use bus::Bus;
use environment::RuntimeContext;
use eth1::{Config as Eth1Config, Service as Eth1Service};
use eth2_config::Eth2Config;
use eth2_libp2p::NetworkGlobals;
use genesis::{interop_genesis_state, Eth1GenesisService};
use network::{NetworkConfig, NetworkMessage, NetworkService};
use parking_lot::Mutex;
use slog::info;
use ssz::Decode;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use timer::spawn_timer;
use tokio::sync::mpsc::UnboundedSender;
use types::{test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec};
use types::{
test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec,
SignedBeaconBlockHash,
};
use websocket_server::{Config as WebSocketConfig, WebSocketSender};

/// Interval between polling the eth1 node for genesis information.
@@ -43,14 +50,14 @@ pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000;
/// `self.memory_store(..)` has been called.
pub struct ClientBuilder<T: BeaconChainTypes> {
slot_clock: Option<T::SlotClock>,
store: Option<Arc<T::Store>>,
#[allow(clippy::type_complexity)]
store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
store_migrator: Option<T::StoreMigrator>,
runtime_context: Option<RuntimeContext<T::EthSpec>>,
chain_spec: Option<ChainSpec>,
beacon_chain_builder: Option<BeaconChainBuilder<T>>,
beacon_chain: Option<Arc<BeaconChain<T>>>,
eth1_service: Option<Eth1Service>,
exit_channels: Vec<tokio::sync::oneshot::Sender<()>>,
event_handler: Option<T::EventHandler>,
network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
network_send: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>,
@@ -59,17 +66,26 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
eth_spec_instance: T::EthSpec,
}

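The `ClientBuilder` hunks drop the `TStore` parameter from `Witness` and add `THotStore`/`TColdStore`, matching the `HotColdDB` split: the builder now holds `Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>` directly. A minimal sketch of the general pattern of threading two store parameters through a zero-sized witness type; all names here are illustrative, not Lighthouse's:

use std::marker::PhantomData;
use std::sync::Arc;

trait ItemStore {}

struct LevelDb;
impl ItemStore for LevelDb {}

// Zero-sized "witness" binding the concrete type parameters together.
struct Witness<THot, TCold>(PhantomData<(THot, TCold)>);

struct HotColdDb<THot: ItemStore, TCold: ItemStore> {
    hot: THot,
    cold: TCold,
}

struct ClientBuilder<THot: ItemStore, TCold: ItemStore> {
    store: Option<Arc<HotColdDb<THot, TCold>>>,
    _witness: PhantomData<Witness<THot, TCold>>,
}

fn main() {
    let builder: ClientBuilder<LevelDb, LevelDb> = ClientBuilder {
        store: Some(Arc::new(HotColdDb { hot: LevelDb, cold: LevelDb })),
        _witness: PhantomData,
    };
    assert!(builder.store.is_some());
}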
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
ClientBuilder<
|
||||
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
|
||||
Witness<
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: Migrate<TStore, TEthSpec>,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
|
||||
TSlotClock: SlotClock + Clone + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
{
|
||||
/// Instantiates a new, empty builder.
|
||||
///
|
||||
@@ -84,7 +100,6 @@ where
|
||||
beacon_chain_builder: None,
|
||||
beacon_chain: None,
|
||||
eth1_service: None,
|
||||
exit_channels: vec![],
|
||||
event_handler: None,
|
||||
network_globals: None,
|
||||
network_send: None,
|
||||
@@ -120,6 +135,7 @@ where
|
||||
let eth_spec_instance = self.eth_spec_instance.clone();
|
||||
let data_dir = config.data_dir.clone();
|
||||
let disabled_forks = config.disabled_forks.clone();
|
||||
let graffiti = config.graffiti;
|
||||
|
||||
let store =
|
||||
store.ok_or_else(|| "beacon_chain_start_method requires a store".to_string())?;
|
||||
@@ -132,12 +148,13 @@ where
|
||||
.ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?;
|
||||
|
||||
let builder = BeaconChainBuilder::new(eth_spec_instance)
|
||||
.logger(context.log.clone())
|
||||
.logger(context.log().clone())
|
||||
.store(store)
|
||||
.store_migrator(store_migrator)
|
||||
.data_dir(data_dir)
|
||||
.custom_spec(spec.clone())
|
||||
.disabled_forks(disabled_forks);
|
||||
.disabled_forks(disabled_forks)
|
||||
.graffiti(graffiti);
|
||||
|
||||
let chain_exists = builder
|
||||
.store_contains_beacon_chain()
|
||||
@@ -150,7 +167,7 @@ where
|
||||
// Alternatively, if there's a beacon chain in the database then always resume
|
||||
// using it.
|
||||
let client_genesis = if client_genesis == ClientGenesis::FromStore && !chain_exists {
|
||||
info!(context.log, "Defaulting to deposit contract genesis");
|
||||
info!(context.log(), "Defaulting to deposit contract genesis");
|
||||
|
||||
ClientGenesis::DepositContract
|
||||
} else if chain_exists {
|
||||
@@ -172,7 +189,7 @@ where
|
||||
genesis_state_bytes,
|
||||
} => {
|
||||
info!(
|
||||
context.log,
|
||||
context.log(),
|
||||
"Starting from known genesis state";
|
||||
);
|
||||
|
||||
@@ -183,14 +200,18 @@ where
|
||||
}
|
||||
ClientGenesis::DepositContract => {
|
||||
info!(
|
||||
context.log,
|
||||
context.log(),
|
||||
"Waiting for eth2 genesis from eth1";
|
||||
"eth1_endpoint" => &config.eth1.endpoint,
|
||||
"contract_deploy_block" => config.eth1.deposit_contract_deploy_block,
|
||||
"deposit_contract" => &config.eth1.deposit_contract_address
|
||||
);
|
||||
|
||||
let genesis_service = Eth1GenesisService::new(config.eth1, context.log.clone());
|
||||
let genesis_service = Eth1GenesisService::new(
|
||||
config.eth1,
|
||||
context.log().clone(),
|
||||
context.eth2_config().spec.clone(),
|
||||
);
|
||||
|
||||
let genesis_state = genesis_service
|
||||
.wait_for_genesis_state(
|
||||
@@ -223,19 +244,18 @@ where
|
||||
.ok_or_else(|| "network requires a runtime_context")?
|
||||
.clone();
|
||||
|
||||
let (network_globals, network_send, network_exit) =
|
||||
NetworkService::start(beacon_chain, config, &context.runtime_handle, context.log)
|
||||
let (network_globals, network_send) =
|
||||
NetworkService::start(beacon_chain, config, context.executor)
|
||||
.map_err(|e| format!("Failed to start network: {:?}", e))?;
|
||||
|
||||
self.network_globals = Some(network_globals);
|
||||
self.network_send = Some(network_send);
|
||||
self.exit_channels.push(network_exit);
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Immediately starts the timer service.
|
||||
fn timer(mut self) -> Result<Self, String> {
|
||||
fn timer(self) -> Result<Self, String> {
|
||||
let context = self
|
||||
.runtime_context
|
||||
.as_ref()
|
||||
@@ -251,13 +271,9 @@ where
|
||||
.ok_or_else(|| "node timer requires a chain spec".to_string())?
|
||||
.milliseconds_per_slot;
|
||||
|
||||
let timer_exit = context
|
||||
.runtime_handle
|
||||
.enter(|| timer::spawn(beacon_chain, milliseconds_per_slot))
|
||||
spawn_timer(context.executor, beacon_chain, milliseconds_per_slot)
|
||||
.map_err(|e| format!("Unable to start node timer: {}", e))?;
|
||||
|
||||
self.exit_channels.push(timer_exit);
|
||||
|
||||
Ok(self)
|
||||
}
@@ -266,6 +282,7 @@ where
mut self,
client_config: &ClientConfig,
eth2_config: &Eth2Config,
events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
) -> Result<Self, String> {
let beacon_chain = self
.beacon_chain
@@ -290,32 +307,29 @@ where
network_chan: network_send,
};

let log = context.log.clone();
let (exit_channel, listening_addr) = context.runtime_handle.enter(|| {
rest_api::start_server(
&client_config.rest_api,
beacon_chain,
network_info,
client_config
.create_db_path()
.map_err(|_| "unable to read data dir")?,
client_config
.create_freezer_db_path()
.map_err(|_| "unable to read freezer DB dir")?,
eth2_config.clone(),
log,
)
.map_err(|e| format!("Failed to start HTTP API: {:?}", e))
})?;
let listening_addr = rest_api::start_server(
context.executor,
&client_config.rest_api,
beacon_chain,
network_info,
client_config
.create_db_path()
.map_err(|_| "unable to read data dir")?,
client_config
.create_freezer_db_path()
.map_err(|_| "unable to read freezer DB dir")?,
eth2_config.clone(),
events,
)
.map_err(|e| format!("Failed to start HTTP API: {:?}", e))?;

self.exit_channels.push(exit_channel);
self.http_listen_addr = Some(listening_addr);

Ok(self)
}

/// Immediately starts the service that periodically logs information each slot.
pub fn notifier(mut self) -> Result<Self, String> {
pub fn notifier(self) -> Result<Self, String> {
let context = self
.runtime_context
.as_ref()
@@ -335,19 +349,13 @@ where
.ok_or_else(|| "slot_notifier requires a chain spec".to_string())?
.milliseconds_per_slot;

let exit_channel = context
.runtime_handle
.enter(|| {
spawn_notifier(
beacon_chain,
network_globals,
milliseconds_per_slot,
context.log.clone(),
)
})
.map_err(|e| format!("Unable to start slot notifier: {}", e))?;

self.exit_channels.push(exit_channel);
spawn_notifier(
context.executor,
beacon_chain,
network_globals,
milliseconds_per_slot,
)
.map_err(|e| format!("Unable to start slot notifier: {}", e))?;

Ok(self)
}
@@ -358,29 +366,46 @@ where
/// If type inference errors are being raised, see the comment on the definition of `Self`.
pub fn build(
self,
) -> Client<Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>>
{
) -> Client<
Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
> {
Client {
beacon_chain: self.beacon_chain,
network_globals: self.network_globals,
http_listen_addr: self.http_listen_addr,
websocket_listen_addr: self.websocket_listen_addr,
_exit_channels: self.exit_channels,
}
}
}

impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
ClientBuilder<
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: Migrate<TStore, TEthSpec>,
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TSlotClock: SlotClock + Clone + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{
/// Consumes the internal `BeaconChainBuilder`, attaching the resulting `BeaconChain` to self.
pub fn build_beacon_chain(mut self) -> Result<Self, String> {
@@ -396,8 +421,6 @@ where
.clone()
.ok_or_else(|| "beacon_chain requires a slot clock")?,
)
.reduced_tree_fork_choice()
.map_err(|e| format!("Failed to init fork choice: {}", e))?
.build()
.map_err(|e| format!("Failed to build beacon chain: {}", e))?;

@@ -410,74 +433,74 @@ where
}
}

impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>
ClientBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
WebSocketSender<TEthSpec>,
TeeEventHandler<TEthSpec>,
THotStore,
TColdStore,
>,
>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: Migrate<TStore, TEthSpec>,
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{
#[allow(clippy::type_complexity)]
/// Specifies that the `BeaconChain` should publish events using the WebSocket server.
pub fn websocket_event_handler(mut self, config: WebSocketConfig) -> Result<Self, String> {
pub fn tee_event_handler(
mut self,
config: WebSocketConfig,
) -> Result<(Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>), String> {
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "websocket_event_handler requires a runtime_context")?
.ok_or_else(|| "tee_event_handler requires a runtime_context")?
.service_context("ws".into());

let (sender, exit_channel, listening_addr): (
WebSocketSender<TEthSpec>,
Option<_>,
Option<_>,
) = if config.enabled {
let (sender, exit, listening_addr) = context
.runtime_handle
.enter(|| websocket_server::start_server(&config, &context.log))?;
(sender, Some(exit), Some(listening_addr))
let log = context.log().clone();
let (sender, listening_addr): (WebSocketSender<TEthSpec>, Option<_>) = if config.enabled {
let (sender, listening_addr) =
websocket_server::start_server(context.executor, &config)?;
(sender, Some(listening_addr))
} else {
(WebSocketSender::dummy(), None, None)
(WebSocketSender::dummy(), None)
};

if let Some(channel) = exit_channel {
self.exit_channels.push(channel);
}
self.event_handler = Some(sender);
self.websocket_listen_addr = listening_addr;

Ok(self)
let (tee_event_handler, bus) = TeeEventHandler::new(log, sender)?;
self.event_handler = Some(tee_event_handler);
Ok((self, bus))
}
}
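
The renamed `tee_event_handler` now fans every beacon chain event out to two consumers: the WebSocket sender and an in-process `Bus` that is handed back to the caller (and ultimately into the REST API). A standalone sketch of the fan-out idea, using hypothetical closure-based sinks:

// Build one handler that forwards each event to both sinks.
fn tee<T: Clone>(ws: impl Fn(T), bus: impl Fn(T)) -> impl Fn(T) {
    move |event: T| {
        ws(event.clone());
        bus(event);
    }
}

fn main() {
    let handler = tee(
        |e| println!("websocket: {}", e),
        |e| println!("bus: {}", e),
    );
    handler("signed_block_hash_0xabc"); // both sinks observe the event
}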
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
DiskStore<TEthSpec>,
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
LevelDB<TEthSpec>,
LevelDB<TEthSpec>,
>,
>
where
TSlotClock: SlotClock + 'static,
TStoreMigrator: Migrate<DiskStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, DiskStore<TEthSpec>> + 'static,
TStoreMigrator: Migrate<TEthSpec, LevelDB<TEthSpec>, LevelDB<TEthSpec>> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
{
/// Specifies that the `Client` should use a `DiskStore` database.
/// Specifies that the `Client` should use a `HotColdDB` database.
pub fn disk_store(
mut self,
hot_path: &Path,
@@ -494,84 +517,32 @@ where
.clone()
.ok_or_else(|| "disk_store requires a chain spec".to_string())?;

let store = DiskStore::open(hot_path, cold_path, config, spec, context.log)
let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log().clone())
.map_err(|e| format!("Unable to open database: {:?}", e))?;
self.store = Some(Arc::new(store));
Ok(self)
}
}

impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
ClientBuilder<
Witness<
SimpleDiskStore<TEthSpec>,
TStoreMigrator,
BackgroundMigrator<TEthSpec, THotStore, TColdStore>,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
>
where
TSlotClock: SlotClock + 'static,
TStoreMigrator: Migrate<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, SimpleDiskStore<TEthSpec>> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
{
/// Specifies that the `Client` should use a `DiskStore` database.
pub fn simple_disk_store(mut self, path: &Path) -> Result<Self, String> {
let store =
SimpleDiskStore::open(path).map_err(|e| format!("Unable to open database: {:?}", e))?;
self.store = Some(Arc::new(store));
Ok(self)
}
}

impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
MemoryStore<TEthSpec>,
NullMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
>
where
TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, MemoryStore<TEthSpec>> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
{
/// Specifies that the `Client` should use a `MemoryStore` database.
///
/// Also sets the `store_migrator` to the `NullMigrator`, as that's the only viable choice.
pub fn memory_store(mut self) -> Self {
let store = MemoryStore::open();
self.store = Some(Arc::new(store));
self.store_migrator = Some(NullMigrator);
self
}
}

impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
DiskStore<TEthSpec>,
BackgroundMigrator<TEthSpec>,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
>
where
TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, DiskStore<TEthSpec>> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{
pub fn background_migrator(mut self) -> Result<Self, String> {
let context = self
@@ -582,28 +553,30 @@ where
let store = self.store.clone().ok_or_else(|| {
"background_migrator requires the store to be initialized".to_string()
})?;
self.store_migrator = Some(BackgroundMigrator::new(store, context.log.clone()));
self.store_migrator = Some(BackgroundMigrator::new(store, context.log().clone()));
Ok(self)
}
}

impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
impl<TStoreMigrator, TSlotClock, TEthSpec, TEventHandler, THotStore, TColdStore>
ClientBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
CachingEth1Backend<TEthSpec, TStore>,
CachingEth1Backend<TEthSpec>,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: Migrate<TStore, TEthSpec>,
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TSlotClock: SlotClock + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{
/// Specifies that the `BeaconChain` should cache eth1 blocks/logs from a remote eth1 node
/// (e.g., Parity/Geth) and refer to that cache when collecting deposits or eth1 votes during
@@ -617,10 +590,10 @@ where
let beacon_chain_builder = self
.beacon_chain_builder
.ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?;
let store = self
.store
let spec = self
.chain_spec
.clone()
.ok_or_else(|| "caching_eth1_backend requires a store".to_string())?;
.ok_or_else(|| "caching_eth1_backend requires a chain spec".to_string())?;

let backend = if let Some(eth1_service_from_genesis) = self.eth1_service {
eth1_service_from_genesis.update_config(config)?;
@@ -635,7 +608,7 @@ where
// adding earlier blocks too.
eth1_service_from_genesis.drop_block_cache();

CachingEth1Backend::from_service(eth1_service_from_genesis, store)
CachingEth1Backend::from_service(eth1_service_from_genesis)
} else {
beacon_chain_builder
.get_persisted_eth1_backend()?
@@ -643,26 +616,24 @@ where
Eth1Chain::from_ssz_container(
&persisted,
config.clone(),
store.clone(),
&context.log,
&context.log().clone(),
spec.clone(),
)
.map(|chain| chain.into_backend())
})
.unwrap_or_else(|| {
Ok(CachingEth1Backend::new(config, context.log.clone(), store))
Ok(CachingEth1Backend::new(
config,
context.log().clone(),
spec.clone(),
))
})?
};

self.eth1_service = None;

let exit = {
let (tx, rx) = tokio::sync::oneshot::channel();
self.exit_channels.push(tx);
rx
};

// Starts the service that connects to an eth1 node and periodically updates caches.
context.runtime_handle.enter(|| backend.start(exit));
backend.start(context.executor);

self.beacon_chain_builder = Some(beacon_chain_builder.eth1_backend(Some(backend)));

@@ -700,16 +671,25 @@ where
}
}

impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
impl<TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
ClientBuilder<
Witness<TStore, TStoreMigrator, SystemTimeSlotClock, TEth1Backend, TEthSpec, TEventHandler>,
Witness<
TStoreMigrator,
SystemTimeSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: Migrate<TStore, TEthSpec>,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{
/// Specifies that the slot clock should read the time from the computer's system clock.
pub fn system_time_slot_clock(mut self) -> Result<Self, String> {

@@ -2,6 +2,7 @@ use network::NetworkConfig;
use serde_derive::{Deserialize, Serialize};
use std::fs;
use std::path::PathBuf;
use types::Graffiti;

pub const DEFAULT_DATADIR: &str = ".lighthouse";

@@ -45,7 +46,6 @@ pub struct Config {
pub db_name: String,
/// Path where the freezer database will be located.
pub freezer_db_path: Option<PathBuf>,
pub testnet_dir: Option<PathBuf>,
pub log_file: PathBuf,
pub spec_constants: String,
/// If true, the node will use co-ordinated junk for eth1 values.
@@ -55,6 +55,8 @@ pub struct Config {
pub sync_eth1_chain: bool,
/// A list of hard-coded forks that will be disabled.
pub disabled_forks: Vec<String>,
/// Graffiti to be inserted every time we create a block.
pub graffiti: Graffiti,
#[serde(skip)]
/// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined
/// via the CLI at runtime, instead of from a configuration file saved to disk.
@@ -72,7 +74,6 @@ impl Default for Config {
data_dir: PathBuf::from(DEFAULT_DATADIR),
db_name: "chain_db".to_string(),
freezer_db_path: None,
testnet_dir: None,
log_file: PathBuf::from(""),
genesis: <_>::default(),
store: <_>::default(),
@@ -84,6 +85,7 @@ impl Default for Config {
sync_eth1_chain: false,
eth1: <_>::default(),
disabled_forks: Vec::new(),
graffiti: Graffiti::default(),
}
}
}
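
Note the `#[serde(skip)]` attribute on the `genesis` field above: it guarantees the genesis choice never round-trips through an on-disk config file and must come from the CLI each run. A minimal demonstration of that serde behaviour, with hypothetical stand-in types (assumes the `serde` and `toml` crates):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct Config {
    db_name: String,
    #[serde(skip)]
    genesis: String, // stands in for the real genesis state enum
}

fn main() {
    let cfg = Config {
        db_name: "chain_db".into(),
        genesis: "from-cli".into(),
    };
    let on_disk = toml::to_string(&cfg).unwrap();
    assert!(!on_disk.contains("from-cli")); // skipped during serialization

    let reloaded: Config = toml::from_str(&on_disk).unwrap();
    assert_eq!(reloaded.genesis, ""); // repopulated from Default on load
}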
@@ -1,5 +1,3 @@
use network;

use error_chain::error_chain;

error_chain! {

@@ -25,8 +25,6 @@ pub struct Client<T: BeaconChainTypes> {
network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
http_listen_addr: Option<SocketAddr>,
websocket_listen_addr: Option<SocketAddr>,
/// Exit channels will complete/error when dropped, causing each service to exit gracefully.
_exit_channels: Vec<tokio::sync::oneshot::Sender<()>>,
}

impl<T: BeaconChainTypes> Client<T> {

@@ -7,29 +7,26 @@ use slog::{debug, error, info, warn};
use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::time::delay_for;
use types::{EthSpec, Slot};

/// Create a warning log whenever the peer count is at or below this value.
pub const WARN_PEER_COUNT: usize = 1;

const SECS_PER_MINUTE: f64 = 60.0;
const SECS_PER_HOUR: f64 = 3600.0;
const SECS_PER_DAY: f64 = 86400.0; // non-leap
const SECS_PER_WEEK: f64 = 604_800.0; // non-leap
const DAYS_PER_WEEK: f64 = 7.0;
const HOURS_PER_DAY: f64 = 24.0;
const MINUTES_PER_HOUR: f64 = 60.0;
const DAYS_PER_WEEK: i64 = 7;
const HOURS_PER_DAY: i64 = 24;
const MINUTES_PER_HOUR: i64 = 60;

/// The number of historical observations that should be used to determine the average sync time.
const SPEEDO_OBSERVATIONS: usize = 4;

/// Spawns a notifier service which periodically logs information about the node.
pub fn spawn_notifier<T: BeaconChainTypes>(
executor: environment::TaskExecutor,
beacon_chain: Arc<BeaconChain<T>>,
network: Arc<NetworkGlobals<T::EthSpec>>,
milliseconds_per_slot: u64,
log: slog::Logger,
) -> Result<tokio::sync::oneshot::Sender<()>, String> {
) -> Result<(), String> {
let slot_duration = Duration::from_millis(milliseconds_per_slot);
let duration_to_next_slot = beacon_chain
.slot_clock
@@ -43,10 +40,30 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
let interval_duration = slot_duration;

let speedo = Mutex::new(Speedo::default());
let log = executor.log().clone();
let mut interval = tokio::time::interval_at(start_instant, interval_duration);

let interval_future = async move {
while let Some(_) = interval.next().await {
// Perform pre-genesis logging.
loop {
match beacon_chain.slot_clock.duration_to_next_slot() {
// If the duration to the next slot is greater than the slot duration, then we are
// waiting for genesis.
Some(next_slot) if next_slot > slot_duration => {
info!(
log,
"Waiting for genesis";
"peers" => peer_count_pretty(network.connected_peers()),
"wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)),
);
delay_for(slot_duration).await;
}
_ => break,
}
}

// Perform post-genesis logging.
while interval.next().await.is_some() {
let connected_peer_count = network.connected_peers();
let sync_state = network.sync_state();

@@ -113,45 +130,41 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
"speed" => sync_speed_pretty(speedo.slots_per_second()),
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)),
);
} else {
if sync_state.is_synced() {
let block_info = if current_slot > head_slot {
format!(" … empty")
} else {
format!("{}", head_root)
};
info!(
log,
"Synced";
"peers" => peer_count_pretty(connected_peer_count),
"finalized_root" => format!("{}", finalized_root),
"finalized_epoch" => finalized_epoch,
"epoch" => current_epoch,
"block" => block_info,
"slot" => current_slot,
);
} else if sync_state.is_synced() {
let block_info = if current_slot > head_slot {
" … empty".to_string()
} else {
info!(
log,
"Searching for peers";
"peers" => peer_count_pretty(connected_peer_count),
"finalized_root" => format!("{}", finalized_root),
"finalized_epoch" => finalized_epoch,
"head_slot" => head_slot,
"current_slot" => current_slot,
);
}
head_root.to_string()
};
info!(
log,
"Synced";
"peers" => peer_count_pretty(connected_peer_count),
"finalized_root" => format!("{}", finalized_root),
"finalized_epoch" => finalized_epoch,
"epoch" => current_epoch,
"block" => block_info,
"slot" => current_slot,
);
} else {
info!(
log,
"Searching for peers";
"peers" => peer_count_pretty(connected_peer_count),
"finalized_root" => format!("{}", finalized_root),
"finalized_epoch" => finalized_epoch,
"head_slot" => head_slot,
"current_slot" => current_slot,
);
}
}
Ok::<(), ()>(())
};

let (exit_signal, exit) = tokio::sync::oneshot::channel();

// run the notifier on the current executor
tokio::spawn(futures::future::select(Box::pin(interval_future), exit));
executor.spawn(interval_future.unwrap_or_else(|_| ()), "notifier");

Ok(exit_signal)
Ok(())
}
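
The rewritten notifier above splits into a pre-genesis wait loop and the per-slot logging loop. The wait relies on one property of the slot clock: only before genesis can the reported time until the next slot exceed a full slot duration. A standalone check of that heuristic, with plain `Duration`s standing in for the slot clock:

use std::time::Duration;

// Before genesis the clock counts down to slot 0, so the remaining time can be
// arbitrarily large; after genesis it is always strictly less than one slot.
fn waiting_for_genesis(duration_to_next_slot: Duration, slot_duration: Duration) -> bool {
    duration_to_next_slot > slot_duration
}

fn main() {
    let slot = Duration::from_secs(12);
    assert!(waiting_for_genesis(Duration::from_secs(3600), slot)); // genesis in an hour
    assert!(!waiting_for_genesis(Duration::from_secs(7), slot)); // mid-slot, post-genesis
}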

/// Returns the peer count, returning something helpful if it's `usize::max_value` (effectively a
@@ -200,31 +213,21 @@ fn seconds_pretty(secs: f64) -> String {
return "--".into();
}

let weeks = secs / SECS_PER_WEEK;
let days = secs / SECS_PER_DAY;
let hours = secs / SECS_PER_HOUR;
let minutes = secs / SECS_PER_MINUTE;
let d = time::Duration::seconds_f64(secs);

if weeks.floor() > 0.0 {
format!(
"{:.0} weeks {:.0} days",
weeks,
(days % DAYS_PER_WEEK).round()
)
} else if days.floor() > 0.0 {
format!(
"{:.0} days {:.0} hrs",
days,
(hours % HOURS_PER_DAY).round()
)
} else if hours.floor() > 0.0 {
format!(
"{:.0} hrs {:.0} mins",
hours,
(minutes % MINUTES_PER_HOUR).round()
)
let weeks = d.whole_weeks();
let days = d.whole_days();
let hours = d.whole_hours();
let minutes = d.whole_minutes();

if weeks > 0 {
format!("{:.0} weeks {:.0} days", weeks, days % DAYS_PER_WEEK)
} else if days > 0 {
format!("{:.0} days {:.0} hrs", days, hours % HOURS_PER_DAY)
} else if hours > 0 {
format!("{:.0} hrs {:.0} mins", hours, minutes % MINUTES_PER_HOUR)
} else {
format!("{:.0} mins", minutes.round())
format!("{:.0} mins", minutes)
}
}
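
The `seconds_pretty` rewrite above swaps hand-rolled `f64` division for the `time` crate's truncating `whole_*` accessors, so the `.round()` calls disappear and each component truncates instead. A quick sanity check of that behaviour, assuming the `time` 0.2 API implied by `seconds_f64`:

fn main() {
    // Nine days: one whole week plus two whole days.
    let d = time::Duration::seconds_f64(9.0 * 86_400.0);
    assert_eq!(d.whole_weeks(), 1);
    assert_eq!(d.whole_days() % 7, 2);

    // Ninety minutes: one whole hour plus thirty whole minutes.
    let d = time::Duration::seconds_f64(90.0 * 60.0);
    assert_eq!(d.whole_hours(), 1);
    assert_eq!(d.whole_minutes() % 60, 30);
}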

@@ -1,14 +1,13 @@
[package]
name = "eth1"
version = "0.1.2"
version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

[dev-dependencies]
eth1_test_rig = { path = "../../tests/eth1_test_rig" }
environment = { path = "../../lighthouse/environment" }
eth1_test_rig = { path = "../../testing/eth1_test_rig" }
toml = "0.5.6"
web3 = "0.10.0"
web3 = "0.11.0"
sloggers = "1.0.0"

[dependencies]
@@ -17,16 +16,17 @@ futures = { version = "0.3.5", features = ["compat"] }
serde_json = "1.0.52"
serde = { version = "1.0.110", features = ["derive"] }
hex = "0.4.2"
types = { path = "../../eth2/types"}
merkle_proof = { path = "../../eth2/utils/merkle_proof"}
types = { path = "../../consensus/types"}
merkle_proof = { path = "../../consensus/merkle_proof"}
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
tree_hash = "0.1.0"
eth2_hashing = "0.1.0"
parking_lot = "0.10.2"
parking_lot = "0.11.0"
slog = "2.5.2"
tokio = { version = "0.2.20", features = ["full"] }
state_processing = { path = "../../eth2/state_processing" }
tokio = { version = "0.2.21", features = ["full"] }
state_processing = { path = "../../consensus/state_processing" }
libflate = "1.0.0"
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics"}
lighthouse_metrics = { path = "../../common/lighthouse_metrics"}
lazy_static = "1.4.0"
environment = { path = "../../lighthouse/environment" }

@@ -243,36 +243,38 @@ impl DepositCache {
}
}

/// Gets the deposit count at block height = block_number.
/// Returns the number of deposits with valid signatures that have been observed up to and
/// including the block at `block_number`.
///
/// Fetches the `DepositLog` that was emitted at or just before `block_number`
/// and returns the deposit count as `index + 1`.
///
/// Returns `None` if block number queried is 0 or less than deposit_contract_deployed block.
pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option<u64> {
// Contract cannot be deployed in 0'th block
if block_number == 0 {
return None;
}
if block_number < self.deposit_contract_deploy_block {
return None;
}
// Return 0 if block_num queried is before first deposit
if let Some(first_deposit) = self.logs.first() {
if first_deposit.block_number > block_number {
return Some(0);
}
}
let index = self
.logs
.binary_search_by(|deposit| deposit.block_number.cmp(&block_number));
match index {
Ok(index) => self.logs.get(index).map(|x| x.index + 1),
Err(next) => Some(
/// Returns `None` if the `block_number` is zero or prior to contract deployment.
pub fn get_valid_signature_count(&self, block_number: u64) -> Option<usize> {
if block_number == 0 || block_number < self.deposit_contract_deploy_block {
None
} else {
Some(
self.logs
.get(next.saturating_sub(1))
.map_or(0, |x| x.index + 1),
),
.iter()
.take_while(|deposit| deposit.block_number <= block_number)
.filter(|deposit| deposit.signature_is_valid)
.count(),
)
}
}

/// Returns the number of deposits that have been observed up to and
/// including the block at `block_number`.
///
/// Returns `None` if the `block_number` is zero or prior to contract deployment.
pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option<u64> {
if block_number == 0 || block_number < self.deposit_contract_deploy_block {
None
} else {
Some(
self.logs
.iter()
.take_while(|deposit| deposit.block_number <= block_number)
.count() as u64,
)
}
}
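
Both cache queries above share one shape: `self.logs` is ordered by block number, so a `take_while` prefix scan bounds the query at `block_number`, and the signature variant additionally filters on the validity bit computed at parse time. A standalone illustration with simplified stand-in types:

struct Log {
    block_number: u64,
    signature_is_valid: bool,
}

// Mirrors `get_valid_signature_count`: prefix-scan the sorted logs, then keep
// only deposits whose BLS signature verified when the log was ingested.
fn valid_signature_count(logs: &[Log], block_number: u64) -> usize {
    logs.iter()
        .take_while(|log| log.block_number <= block_number)
        .filter(|log| log.signature_is_valid)
        .count()
}

fn main() {
    let logs = vec![
        Log { block_number: 5, signature_is_valid: true },
        Log { block_number: 6, signature_is_valid: false },
        Log { block_number: 9, signature_is_valid: true },
    ];
    assert_eq!(valid_signature_count(&logs, 6), 1); // block 6's signature failed
    assert_eq!(valid_signature_count(&logs, 9), 2);
}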

@@ -291,15 +293,18 @@ pub mod tests {
use super::*;
use crate::deposit_log::tests::EXAMPLE_LOG;
use crate::http::Log;
use types::{EthSpec, MainnetEthSpec};

pub const TREE_DEPTH: usize = 32;

fn example_log() -> DepositLog {
let spec = MainnetEthSpec::default_spec();

let log = Log {
block_number: 42,
data: EXAMPLE_LOG.to_vec(),
};
DepositLog::from_log(&log).expect("should decode log")
DepositLog::from_log(&log, &spec).expect("should decode log")
}

#[test]

@@ -1,7 +1,10 @@
use super::http::Log;
use ssz::Decode;
use ssz_derive::{Decode, Encode};
use types::{DepositData, Hash256, PublicKeyBytes, SignatureBytes};
use state_processing::per_block_processing::signature_sets::{
deposit_pubkey_signature_message, deposit_signature_set,
};
use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes};

/// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The
/// event bytes are formatted according to the Ethereum ABI.
@@ -24,11 +27,13 @@ pub struct DepositLog {
pub block_number: u64,
/// The index included with the deposit log.
pub index: u64,
/// True if the signature is valid.
pub signature_is_valid: bool,
}

impl DepositLog {
/// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`.
pub fn from_log(log: &Log) -> Result<Self, String> {
pub fn from_log(log: &Log, spec: &ChainSpec) -> Result<Self, String> {
let bytes = &log.data;

let pubkey = bytes
@@ -58,10 +63,14 @@ impl DepositLog {
.map_err(|e| format!("Invalid signature ssz: {:?}", e))?,
};

let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec)
.map_or(false, |msg| deposit_signature_set(&msg).verify());

Ok(DepositLog {
deposit_data,
block_number: log.block_number,
index: u64::from_ssz_bytes(index).map_err(|e| format!("Invalid index ssz: {:?}", e))?,
signature_is_valid,
})
}
}
@@ -70,6 +79,7 @@ impl DepositLog {
pub mod tests {
use super::*;
use crate::http::Log;
use types::{EthSpec, MainnetEthSpec};

/// The data from a deposit event, using the v0.8.3 version of the deposit contract.
pub const EXAMPLE_LOG: &[u8] = &[
@@ -103,6 +113,6 @@ pub mod tests {
block_number: 42,
data: EXAMPLE_LOG.to_vec(),
};
DepositLog::from_log(&log).expect("should decode log");
DepositLog::from_log(&log, &MainnetEthSpec::default_spec()).expect("should decode log");
}
}

@@ -6,6 +6,7 @@ use crate::{
use parking_lot::RwLock;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use types::ChainSpec;

#[derive(Default)]
pub struct DepositUpdater {
@@ -28,6 +29,7 @@ pub struct Inner {
pub block_cache: RwLock<BlockCache>,
pub deposit_cache: RwLock<DepositUpdater>,
pub config: RwLock<Config>,
pub spec: ChainSpec,
}

impl Inner {
@@ -47,10 +49,15 @@ impl Inner {
}

/// Recover `Inner` given byte representation of eth1 deposit and block caches.
pub fn from_bytes(bytes: &[u8], config: Config) -> Result<Self, String> {
pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result<Self, String> {
let ssz_cache = SszEth1Cache::from_ssz_bytes(bytes)
.map_err(|e| format!("Ssz decoding error: {:?}", e))?;
Ok(ssz_cache.to_inner(config)?)
Ok(ssz_cache.to_inner(config, spec)?)
}

/// Returns a reference to the specification.
pub fn spec(&self) -> &ChainSpec {
&self.spec
}
}

@@ -72,7 +79,7 @@ impl SszEth1Cache {
}
}

pub fn to_inner(&self, config: Config) -> Result<Inner, String> {
pub fn to_inner(&self, config: Config, spec: ChainSpec) -> Result<Inner, String> {
Ok(Inner {
block_cache: RwLock::new(self.block_cache.clone()),
deposit_cache: RwLock::new(DepositUpdater {
@@ -80,6 +87,7 @@ impl SszEth1Cache {
last_processed_block: self.last_processed_block,
}),
config: RwLock::new(config),
spec,
})
}
}

@@ -14,6 +14,7 @@ use std::ops::{Range, RangeInclusive};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::{interval_at, Duration, Instant};
use types::ChainSpec;

const STANDARD_TIMEOUT_MILLIS: u64 = 15_000;

@@ -130,14 +131,15 @@ pub struct Service {

impl Service {
/// Creates a new service. Does not attempt to connect to the eth1 node.
pub fn new(config: Config, log: Logger) -> Self {
pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Self {
Self {
inner: Arc::new(Inner {
block_cache: <_>::default(),
deposit_cache: RwLock::new(DepositUpdater::new(
config.deposit_contract_deploy_block,
)),
config: RwLock::new(config),
..Inner::default()
spec,
}),
log,
}
@@ -149,8 +151,13 @@ impl Service {
}

/// Recover the deposit and block caches from encoded bytes.
pub fn from_bytes(bytes: &[u8], config: Config, log: Logger) -> Result<Self, String> {
let inner = Inner::from_bytes(bytes, config)?;
pub fn from_bytes(
bytes: &[u8],
config: Config,
log: Logger,
spec: ChainSpec,
) -> Result<Self, String> {
let inner = Inner::from_bytes(bytes, config, spec)?;
Ok(Self {
inner: Arc::new(inner),
log,
@@ -167,6 +174,13 @@ impl Service {
&self.inner.deposit_cache
}

/// Removes all blocks from the cache, except for the latest block.
///
/// We don't remove the latest blocks so we don't lose track of the latest block.
pub fn clear_block_cache(&self) {
self.inner.block_cache.write().truncate(1)
}

/// Drop the block cache, replacing it with an empty one.
pub fn drop_block_cache(&self) {
*(self.inner.block_cache.write()) = BlockCache::default();
@@ -187,6 +201,14 @@ impl Service {
self.inner.block_cache.read().lowest_block_number()
}

/// Returns the highest block that is present in both the deposit and block caches.
pub fn highest_safe_block(&self) -> Option<u64> {
let block_cache = self.blocks().read().highest_block_number()?;
let deposit_cache = self.deposits().read().last_processed_block?;

Some(std::cmp::min(block_cache, deposit_cache))
}
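
`highest_safe_block` is simply the minimum of the two cache heads, with `?` short-circuiting to `None` when either cache is still empty. A standalone sketch with assumed `Option`-returning heads:

fn highest_safe_block(block_head: Option<u64>, deposit_head: Option<u64>) -> Option<u64> {
    // A block is only "safe" once both caches have processed it.
    Some(std::cmp::min(block_head?, deposit_head?))
}

fn main() {
    assert_eq!(highest_safe_block(Some(120), Some(95)), Some(95));
    assert_eq!(highest_safe_block(Some(120), None), None); // deposit cache empty
}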

/// Returns the number of currently cached blocks.
pub fn block_cache_len(&self) -> usize {
self.blocks().read().len()
@@ -197,6 +219,34 @@ impl Service {
self.deposits().read().cache.len()
}

/// Returns the number of deposits with valid signatures that have been observed.
pub fn get_valid_signature_count(&self) -> Option<usize> {
self.deposits()
.read()
.cache
.get_valid_signature_count(self.highest_safe_block()?)
}

/// Returns the number of deposits with valid signatures that have been observed, without
/// respecting the `highest_safe_block`.
pub fn get_raw_valid_signature_count(&self) -> Option<usize> {
let deposits = self.deposits().read();
deposits
.cache
.get_valid_signature_count(deposits.cache.latest_block_number()?)
}

/// Returns the number of deposits with valid signatures that have been observed up to and
/// including the block at `block_number`.
///
/// Returns `None` if the `block_number` is zero or prior to contract deployment.
pub fn get_valid_signature_count_at_block(&self, block_number: u64) -> Option<usize> {
self.deposits()
.read()
.cache
.get_valid_signature_count(block_number)
}

/// Read the service's configuration.
pub fn config(&self) -> RwLockReadGuard<Config> {
self.inner.config.read()
@@ -239,32 +289,34 @@ impl Service {
///
/// Emits logs for debugging and errors.
pub async fn update(
service: Self,
&self,
) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> {
let update_deposit_cache = async {
let outcome = Service::update_deposit_cache(service.clone())
let outcome = self
.update_deposit_cache()
.await
.map_err(|e| format!("Failed to update eth1 cache: {:?}", e))?;

trace!(
service.log,
self.log,
"Updated eth1 deposit cache";
"cached_deposits" => service.inner.deposit_cache.read().cache.len(),
"cached_deposits" => self.inner.deposit_cache.read().cache.len(),
"logs_imported" => outcome.logs_imported,
"last_processed_eth1_block" => service.inner.deposit_cache.read().last_processed_block,
"last_processed_eth1_block" => self.inner.deposit_cache.read().last_processed_block,
);
Ok(outcome)
};

let update_block_cache = async {
let outcome = Service::update_block_cache(service.clone())
let outcome = self
.update_block_cache()
.await
.map_err(|e| format!("Failed to update eth1 cache: {:?}", e))?;

trace!(
service.log,
self.log,
"Updated eth1 block cache";
"cached_blocks" => service.inner.block_cache.read().len(),
"cached_blocks" => self.inner.block_cache.read().len(),
"blocks_imported" => outcome.blocks_imported,
"head_block" => outcome.head_block_number,
);
@@ -283,35 +335,31 @@ impl Service {
/// - Err(_) if there is an error.
///
/// Emits logs for debugging and errors.
pub fn auto_update(service: Self, exit: tokio::sync::oneshot::Receiver<()>) {
let update_interval = Duration::from_millis(service.config().auto_update_interval_millis);
pub fn auto_update(self, handle: environment::TaskExecutor) {
let update_interval = Duration::from_millis(self.config().auto_update_interval_millis);

let mut interval = interval_at(Instant::now(), update_interval);

let update_future = async move {
while interval.next().await.is_some() {
Service::do_update(service.clone(), update_interval)
.await
.ok();
self.do_update(update_interval).await.ok();
}
};

let future = futures::future::select(Box::pin(update_future), exit);

tokio::task::spawn(future);
handle.spawn(update_future, "eth1");
}
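
With shutdown owned by the executor, `auto_update` above collapses to a plain interval loop moved into the spawned task, with no `futures::future::select` against an exit channel. A minimal runnable sketch of that loop shape (tokio-era `interval` API; the `do_update` call is stubbed with a print):

use std::time::Duration;

#[tokio::main]
async fn main() {
    let mut interval = tokio::time::interval(Duration::from_millis(10));
    for attempt in 0..3 {
        interval.tick().await;
        // Stands in for `self.do_update(update_interval).await.ok();`.
        println!("eth1 cache update attempt {}", attempt);
    }
}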

async fn do_update(service: Self, update_interval: Duration) -> Result<(), ()> {
let update_result = Service::update(service.clone()).await;
async fn do_update(&self, update_interval: Duration) -> Result<(), ()> {
let update_result = self.update().await;
match update_result {
Err(e) => error!(
service.log,
self.log,
"Failed to update eth1 cache";
"retry_millis" => update_interval.as_millis(),
"error" => e,
),
Ok((deposit, block)) => debug!(
service.log,
self.log,
"Updated eth1 cache";
"retry_millis" => update_interval.as_millis(),
"blocks" => format!("{:?}", block),
@@ -333,23 +381,23 @@ impl Service {
/// - Err(_) if there is an error.
///
/// Emits logs for debugging and errors.
pub async fn update_deposit_cache(service: Self) -> Result<DepositCacheUpdateOutcome, Error> {
let endpoint = service.config().endpoint.clone();
let follow_distance = service.config().follow_distance;
let deposit_contract_address = service.config().deposit_contract_address.clone();
pub async fn update_deposit_cache(&self) -> Result<DepositCacheUpdateOutcome, Error> {
let endpoint = self.config().endpoint.clone();
let follow_distance = self.config().follow_distance;
let deposit_contract_address = self.config().deposit_contract_address.clone();

let blocks_per_log_query = service.config().blocks_per_log_query;
let max_log_requests_per_update = service
let blocks_per_log_query = self.config().blocks_per_log_query;
let max_log_requests_per_update = self
.config()
.max_log_requests_per_update
.unwrap_or_else(usize::max_value);

let next_required_block = service
let next_required_block = self
.deposits()
.read()
.last_processed_block
.map(|n| n + 1)
.unwrap_or_else(|| service.config().deposit_contract_deploy_block);
.unwrap_or_else(|| self.config().deposit_contract_deploy_block);

let range = get_new_block_numbers(&endpoint, next_required_block, follow_distance).await?;

@@ -393,13 +441,15 @@ impl Service {

let mut logs_imported = 0;
for (block_range, log_chunk) in logs.iter() {
let mut cache = service.deposits().write();
let mut cache = self.deposits().write();
log_chunk
.into_iter()
.iter()
.map(|raw_log| {
DepositLog::from_log(&raw_log).map_err(|error| Error::FailedToParseDepositLog {
block_range: block_range.clone(),
error,
DepositLog::from_log(&raw_log, self.inner.spec()).map_err(|error| {
Error::FailedToParseDepositLog {
block_range: block_range.clone(),
error,
}
})
})
// Return early if any of the logs cannot be parsed.
@@ -437,18 +487,18 @@ impl Service {

if logs_imported > 0 {
info!(
service.log,
self.log,
"Imported deposit log(s)";
"latest_block" => service.inner.deposit_cache.read().cache.latest_block_number(),
"total" => service.deposit_cache_len(),
"latest_block" => self.inner.deposit_cache.read().cache.latest_block_number(),
"total" => self.deposit_cache_len(),
"new" => logs_imported
);
} else {
debug!(
service.log,
self.log,
"No new deposits found";
"latest_block" => service.inner.deposit_cache.read().cache.latest_block_number(),
"total_deposits" => service.deposit_cache_len(),
"latest_block" => self.inner.deposit_cache.read().cache.latest_block_number(),
"total_deposits" => self.deposit_cache_len(),
);
}

@@ -466,23 +516,23 @@ impl Service {
/// - Err(_) if there is an error.
///
/// Emits logs for debugging and errors.
pub async fn update_block_cache(service: Self) -> Result<BlockCacheUpdateOutcome, Error> {
let block_cache_truncation = service.config().block_cache_truncation;
let max_blocks_per_update = service
pub async fn update_block_cache(&self) -> Result<BlockCacheUpdateOutcome, Error> {
let block_cache_truncation = self.config().block_cache_truncation;
let max_blocks_per_update = self
.config()
.max_blocks_per_update
.unwrap_or_else(usize::max_value);

let next_required_block = service
let next_required_block = self
.inner
.block_cache
.read()
.highest_block_number()
.map(|n| n + 1)
.unwrap_or_else(|| service.config().lowest_cached_block_number);
.unwrap_or_else(|| self.config().lowest_cached_block_number);

let endpoint = service.config().endpoint.clone();
let follow_distance = service.config().follow_distance;
let endpoint = self.config().endpoint.clone();
let follow_distance = self.config().follow_distance;

let range = get_new_block_numbers(&endpoint, next_required_block, follow_distance).await?;
// Map the range of required blocks into a Vec.
@@ -504,7 +554,7 @@ impl Service {
// If the range of required blocks is larger than `max_size`, drop all
// existing blocks and download `max_size` count of blocks.
let first_block = range.end() - max_size;
(*service.inner.block_cache.write()) = BlockCache::default();
(*self.inner.block_cache.write()) = BlockCache::default();
(first_block..=*range.end()).collect::<Vec<u64>>()
} else {
range.collect::<Vec<u64>>()
@@ -515,7 +565,7 @@ impl Service {
};
// Download the range of blocks and sequentially import them into the cache.
// Last processed block in deposit cache
let latest_in_cache = service
let latest_in_cache = self
.inner
.deposit_cache
.read()
@@ -535,7 +585,7 @@ impl Service {
|mut block_numbers| async {
match block_numbers.next() {
Some(block_number) => {
match download_eth1_block(service.inner.clone(), block_number).await {
match download_eth1_block(self.inner.clone(), block_number).await {
Ok(eth1_block) => Ok(Some((eth1_block, block_numbers))),
Err(e) => Err(e),
}
@@ -549,8 +599,7 @@ impl Service {

let mut blocks_imported = 0;
for eth1_block in eth1_blocks {
service
.inner
self.inner
.block_cache
.write()
.insert_root_or_child(eth1_block)
@@ -558,12 +607,11 @@ impl Service {

metrics::set_gauge(
&metrics::BLOCK_CACHE_LEN,
service.inner.block_cache.read().len() as i64,
self.inner.block_cache.read().len() as i64,
);
metrics::set_gauge(
&metrics::LATEST_CACHED_BLOCK_TIMESTAMP,
service
.inner
self.inner
.block_cache
.read()
.latest_block_timestamp()
@@ -574,44 +622,14 @@ impl Service {
}

// Prune the block cache, preventing it from growing too large.
service.inner.prune_blocks();
self.inner.prune_blocks();

metrics::set_gauge(
&metrics::BLOCK_CACHE_LEN,
service.inner.block_cache.read().len() as i64,
self.inner.block_cache.read().len() as i64,
);

let block_cache = service.inner.block_cache.read();
let latest_block_mins = block_cache
.latest_block_timestamp()
.and_then(|timestamp| {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.ok()
.and_then(|now| now.checked_sub(Duration::from_secs(timestamp)))
})
.map(|duration| format!("{} mins", duration.as_secs() / 60))
.unwrap_or_else(|| "n/a".into());

if blocks_imported > 0 {
info!(
service.log,
"Imported eth1 block(s)";
"latest_block_age" => latest_block_mins,
"latest_block" => block_cache.highest_block_number(),
"total_cached_blocks" => block_cache.len(),
"new" => blocks_imported
);
} else {
debug!(
service.log,
"No new eth1 blocks imported";
"latest_block" => block_cache.highest_block_number(),
"cached_blocks" => block_cache.len(),
);
}

let block_cache = service.inner.block_cache.read();
let block_cache = self.inner.block_cache.read();
let latest_block_mins = block_cache
.latest_block_timestamp()
.and_then(|timestamp| {
@@ -625,7 +643,7 @@ impl Service {

if blocks_imported > 0 {
debug!(
service.log,
self.log,
"Imported eth1 block(s)";
"latest_block_age" => latest_block_mins,
"latest_block" => block_cache.highest_block_number(),
@@ -634,7 +652,7 @@ impl Service {
);
} else {
debug!(
service.log,
self.log,
"No new eth1 blocks imported";
"latest_block" => block_cache.highest_block_number(),
"cached_blocks" => block_cache.len(),
@@ -643,7 +661,7 @@ impl Service {

Ok(BlockCacheUpdateOutcome {
blocks_imported,
head_block_number: service.inner.block_cache.read().highest_block_number(),
head_block_number: self.inner.block_cache.read().highest_block_number(),
})
}
}
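
The pervasive `service: Self` to `&self` change in this file works because `Service` is a thin handle over `Arc<Inner>`: methods can borrow, and concurrent callers share the same inner caches without a `service.clone()` at every call site. A runnable sketch of the pattern (assumes the `tokio` and `futures` crates):

use std::sync::Arc;

#[derive(Clone)]
struct Service {
    inner: Arc<String>, // stands in for the shared, lock-protected caches
}

impl Service {
    // Borrowing `&self` is enough; the `Arc` keeps the state alive and shared.
    async fn update(&self) -> usize {
        self.inner.len()
    }
}

#[tokio::main]
async fn main() {
    let service = Service { inner: Arc::new("caches".into()) };
    // Two concurrent updates borrow the same service, mirroring the
    // `futures::try_join!(service.update_deposit_cache(), ...)` call sites.
    let (a, b) = futures::join!(service.update(), service.update());
    assert_eq!(a, b);
}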
|
||||
|
||||
@@ -44,7 +44,7 @@ fn random_deposit_data() -> DepositData {
|
||||
pubkey: keypair.pk.into(),
|
||||
withdrawal_credentials: Hash256::zero(),
|
||||
amount: 32_000_000_000,
|
||||
signature: Signature::empty_signature().into(),
|
||||
signature: Signature::empty().into(),
|
||||
};
|
||||
|
||||
deposit.signature = deposit.create_signature(&keypair.sk, &MainnetEthSpec::default_spec());
|
||||
@@ -99,6 +99,7 @@ async fn get_block_number(web3: &Web3<Http>) -> u64 {
|
||||
|
||||
mod eth1_cache {
|
||||
use super::*;
|
||||
use types::{EthSpec, MainnetEthSpec};
|
||||
|
||||
#[tokio::test]
|
||||
async fn simple_scenario() {
|
||||
@@ -122,6 +123,7 @@ mod eth1_cache {
|
||||
..Config::default()
|
||||
},
|
||||
log.clone(),
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
// Create some blocks and then consume them, performing the test `rounds` times.
|
||||
@@ -143,14 +145,17 @@ mod eth1_cache {
|
||||
eth1.ganache.evm_mine().await.expect("should mine block");
|
||||
}
|
||||
|
||||
Service::update_deposit_cache(service.clone())
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should update deposit cache");
|
||||
Service::update_block_cache(service.clone())
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should update block cache");
|
||||
|
||||
Service::update_block_cache(service.clone())
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should update cache when nothing has changed");
|
||||
|
||||
@@ -194,6 +199,7 @@ mod eth1_cache {
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
let blocks = cache_len * 2;
|
||||
@@ -202,10 +208,12 @@ mod eth1_cache {
|
||||
eth1.ganache.evm_mine().await.expect("should mine block")
|
||||
}
|
||||
|
||||
Service::update_deposit_cache(service.clone())
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should update deposit cache");
|
||||
Service::update_block_cache(service.clone())
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should update block cache");
|
||||
|
||||
@@ -240,16 +248,19 @@ mod eth1_cache {
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
for _ in 0..4u8 {
|
||||
for _ in 0..cache_len / 2 {
|
||||
eth1.ganache.evm_mine().await.expect("should mine block")
|
||||
}
|
||||
Service::update_deposit_cache(service.clone())
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should update deposit cache");
|
||||
Service::update_block_cache(service.clone())
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should update block cache");
|
||||
}
|
||||
@@ -282,21 +293,19 @@ mod eth1_cache {
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
for _ in 0..n {
|
||||
eth1.ganache.evm_mine().await.expect("should mine block")
|
||||
}
|
||||
futures::try_join!(
|
||||
Service::update_deposit_cache(service.clone()),
|
||||
Service::update_deposit_cache(service.clone())
|
||||
service.update_deposit_cache(),
|
||||
service.update_deposit_cache()
|
||||
)
|
||||
.expect("should perform two simultaneous updates of deposit cache");
|
||||
futures::try_join!(
|
||||
Service::update_block_cache(service.clone()),
|
||||
Service::update_block_cache(service.clone())
|
||||
)
|
||||
.expect("should perform two simultaneous updates of block cache");
|
||||
futures::try_join!(service.update_block_cache(), service.update_block_cache())
|
||||
.expect("should perform two simultaneous updates of block cache");
|
||||
|
||||
assert!(service.block_cache_len() >= n, "should grow the cache");
|
||||
}
|
||||
@@ -328,6 +337,7 @@ mod deposit_tree {
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
for round in 0..3 {
|
||||
@@ -340,11 +350,13 @@ mod deposit_tree {
|
||||
.expect("should perform a deposit");
|
||||
}
|
||||
|
||||
Service::update_deposit_cache(service.clone())
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should perform update");
|
||||
|
||||
Service::update_deposit_cache(service.clone())
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should perform update when nothing has changed");
|
||||
|
||||
@@ -401,6 +413,7 @@ mod deposit_tree {
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||
@@ -413,8 +426,8 @@ mod deposit_tree {
|
||||
}
|
||||
|
||||
futures::try_join!(
|
||||
Service::update_deposit_cache(service.clone()),
|
||||
Service::update_deposit_cache(service.clone())
|
||||
service.update_deposit_cache(),
|
||||
service.update_deposit_cache()
|
||||
)
|
||||
.expect("should perform two updates concurrently");
|
||||
|
||||
@@ -425,6 +438,8 @@ mod deposit_tree {
|
||||
async fn cache_consistency() {
|
||||
let n = 8;
|
||||
|
||||
let spec = &MainnetEthSpec::default_spec();
|
||||
|
||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
@@ -462,7 +477,7 @@ mod deposit_tree {
|
||||
let logs: Vec<_> = blocking_deposit_logs(ð1, 0..block_number)
|
||||
.await
|
||||
.iter()
|
||||
.map(|raw| DepositLog::from_log(raw).expect("should parse deposit log"))
|
||||
.map(|raw| DepositLog::from_log(raw, spec).expect("should parse deposit log"))
|
||||
.inspect(|log| {
|
||||
tree.insert_log(log.clone())
|
||||
.expect("should add consecutive logs")
|
||||
@@ -639,6 +654,7 @@ mod fast {
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
let n = 10;
|
||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||
@@ -651,7 +667,8 @@ mod fast {
|
||||
eth1.ganache.evm_mine().await.expect("should mine block");
|
||||
}
|
||||
|
||||
Service::update_deposit_cache(service.clone())
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should perform update");
|
||||
|
||||
@@ -708,7 +725,7 @@ mod persist {
|
||||
block_cache_truncation: None,
|
||||
..Config::default()
|
||||
};
|
||||
let service = Service::new(config.clone(), log.clone());
|
||||
let service = Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec());
|
||||
let n = 10;
|
||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||
for deposit in &deposits {
|
||||
@@ -718,7 +735,8 @@ mod persist {
|
||||
.expect("should perform a deposit");
|
||||
}
|
||||
|
||||
Service::update_deposit_cache(service.clone())
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should perform update");
|
||||
|
||||
@@ -729,7 +747,8 @@ mod persist {
|
||||
|
||||
let deposit_count = service.deposit_cache_len();
|
||||
|
||||
Service::update_block_cache(service.clone())
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should perform update");
|
||||
|
||||
@@ -745,7 +764,8 @@ mod persist {
|
||||
// Drop service and recover from bytes
|
||||
drop(service);
|
||||
|
||||
let recovered_service = Service::from_bytes(ð1_bytes, config, log).unwrap();
|
||||
let recovered_service =
|
||||
Service::from_bytes(ð1_bytes, config, log, MainnetEthSpec::default_spec()).unwrap();
|
||||
assert_eq!(
|
||||
recovered_service.block_cache_len(),
|
||||
block_count,
|
||||
|
||||
@@ -1,51 +0,0 @@
[package]
name = "eth2-libp2p"
version = "0.1.2"
authors = ["Age Manning <Age@AgeManning.com>"]
edition = "2018"

[dependencies]
hex = "0.4.2"
types = { path = "../../eth2/types" }
hashset_delay = { path = "../../eth2/utils/hashset_delay" }
eth2_ssz_types = { path = "../../eth2/utils/ssz_types" }
serde = { version = "1.0.110", features = ["derive"] }
serde_derive = "1.0.110"
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
slog = { version = "2.5.2", features = ["max_level_trace"] }
version = { path = "../version" }
tokio = { version = "0.2.20", features = ["time"] }
futures = "0.3.5"
error-chain = "0.12.2"
dirs = "2.0.2"
fnv = "1.0.6"
unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "latest-codecs", features = ["codec"] }
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
smallvec = "1.4.0"
lru = "0.4.3"
parking_lot = "0.10.2"
sha2 = "0.8.1"
base64 = "0.12.1"
snap = "1.0.0"
void = "1.0.2"
tokio-io-timeout = "0.4.0"
tokio-util = { version = "0.3.1", features = ["codec", "compat"] }
# Patched for quick updates
discv5 = { git = "https://github.com/sigp/discv5", rev = "7b3bd40591b62b8c002ffdb85de008aa9f82e2e5" }
tiny-keccak = "2.0.2"
libp2p-tcp = { version = "0.18.0", default-features = false, features = ["tokio"] }

[dependencies.libp2p]
version = "0.18.1"
default-features = false
features = ["websocket", "identify", "mplex", "yamux", "noise", "secio", "gossipsub", "dns"]


[dev-dependencies]
tokio = { version = "0.2.20", features = ["full"] }
slog-stdlog = "4.0.0"
slog-term = "2.5.0"
slog-async = "2.5.0"
tempdir = "0.3.7"
@@ -1,564 +0,0 @@
use crate::discovery::{enr::Eth2Enr, Discovery};
use crate::peer_manager::{PeerManager, PeerManagerEvent};
use crate::rpc::*;
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
use crate::{error, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash};
use discv5::Discv5Event;
use futures::prelude::*;
use libp2p::{
    core::identity::Keypair,
    gossipsub::{Gossipsub, GossipsubEvent, MessageId},
    identify::{Identify, IdentifyEvent},
    swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters},
    NetworkBehaviour, PeerId,
};
use lru::LruCache;
use slog::{crit, debug, o};
use std::{
    marker::PhantomData,
    sync::Arc,
    task::{Context, Poll},
};
use types::{EnrForkId, EthSpec, SubnetId};

const MAX_IDENTIFY_ADDRESSES: usize = 10;

/// Builds the network behaviour that manages the core protocols of eth2.
/// This core behaviour is managed by `Behaviour` which adds peer management to all core
/// behaviours.
#[derive(NetworkBehaviour)]
#[behaviour(out_event = "BehaviourEvent<TSpec>", poll_method = "poll")]
pub struct Behaviour<TSpec: EthSpec> {
    /// The routing pub-sub mechanism for eth2.
    gossipsub: Gossipsub,
    /// The Eth2 RPC specified in the wire-0 protocol.
    eth2_rpc: RPC<TSpec>,
    /// Keep regular connection to peers and disconnect if absent.
    // TODO: Using id for initial interop. This will be removed by mainnet.
    /// Provides IP addresses and peer information.
    identify: Identify,
    /// Discovery behaviour.
    discovery: Discovery<TSpec>,
    /// The peer manager that keeps track of peers' reputations and status.
    #[behaviour(ignore)]
    peer_manager: PeerManager<TSpec>,
    /// The events generated by this behaviour, to be consumed in the swarm poll.
    #[behaviour(ignore)]
    events: Vec<BehaviourEvent<TSpec>>,
    /// The current meta data of the node, used to respond to pings and metadata requests.
    #[behaviour(ignore)]
    meta_data: MetaData<TSpec>,
    /// A cache of recently seen gossip messages. This is used to filter out any possible
    /// duplicates that may still be seen over gossipsub.
    #[behaviour(ignore)]
    // TODO: Remove this
    seen_gossip_messages: LruCache<MessageId, ()>,
    /// A collection of variables accessible outside the network service.
    #[behaviour(ignore)]
    network_globals: Arc<NetworkGlobals<TSpec>>,
    #[behaviour(ignore)]
    /// Keeps track of the current EnrForkId for upgrading gossipsub topics.
    // NOTE: This can be accessed via the network_globals ENR. However we keep it here for quick
    // lookups on every gossipsub message sent.
    enr_fork_id: EnrForkId,
    #[behaviour(ignore)]
    /// Logger for behaviour actions.
    log: slog::Logger,
}

/// Implements the combined behaviour for the libp2p service.
impl<TSpec: EthSpec> Behaviour<TSpec> {
    pub fn new(
        local_key: &Keypair,
        net_conf: &NetworkConfig,
        network_globals: Arc<NetworkGlobals<TSpec>>,
        log: &slog::Logger,
    ) -> error::Result<Self> {
        let local_peer_id = local_key.public().into_peer_id();
        let behaviour_log = log.new(o!());

        let identify = Identify::new(
            "lighthouse/libp2p".into(),
            version::version(),
            local_key.public(),
        );

        let enr_fork_id = network_globals
            .local_enr
            .read()
            .eth2()
            .expect("Local ENR must have a fork id");

        let attnets = network_globals
            .local_enr
            .read()
            .bitfield::<TSpec>()
            .expect("Local ENR must have subnet bitfield");

        let meta_data = MetaData {
            seq_number: 1,
            attnets,
        };

        Ok(Behaviour {
            eth2_rpc: RPC::new(log.clone()),
            gossipsub: Gossipsub::new(local_peer_id, net_conf.gs_config.clone()),
            discovery: Discovery::new(local_key, net_conf, network_globals.clone(), log)?,
            identify,
            peer_manager: PeerManager::new(network_globals.clone(), log),
            events: Vec::new(),
            seen_gossip_messages: LruCache::new(100_000),
            meta_data,
            network_globals,
            enr_fork_id,
            log: behaviour_log,
        })
    }

    /// Obtain a reference to the discovery protocol.
    pub fn discovery(&self) -> &Discovery<TSpec> {
        &self.discovery
    }

    /// Obtain a reference to the gossipsub protocol.
    pub fn gs(&self) -> &Gossipsub {
        &self.gossipsub
    }

    /* Pubsub behaviour functions */

    /// Subscribes to a gossipsub topic kind, letting the network service determine the
    /// encoding and fork version.
    pub fn subscribe_kind(&mut self, kind: GossipKind) -> bool {
        let gossip_topic = GossipTopic::new(
            kind,
            GossipEncoding::default(),
            self.enr_fork_id.fork_digest,
        );
        self.subscribe(gossip_topic)
    }

    /// Unsubscribes from a gossipsub topic kind, letting the network service determine the
    /// encoding and fork version.
    pub fn unsubscribe_kind(&mut self, kind: GossipKind) -> bool {
        let gossip_topic = GossipTopic::new(
            kind,
            GossipEncoding::default(),
            self.enr_fork_id.fork_digest,
        );
        self.unsubscribe(gossip_topic)
    }

    /// Subscribes to a specific subnet id.
    pub fn subscribe_to_subnet(&mut self, subnet_id: SubnetId) -> bool {
        let topic = GossipTopic::new(
            subnet_id.into(),
            GossipEncoding::default(),
            self.enr_fork_id.fork_digest,
        );
        self.subscribe(topic)
    }

    /// Unsubscribes from a specific subnet id.
    pub fn unsubscribe_from_subnet(&mut self, subnet_id: SubnetId) -> bool {
        let topic = GossipTopic::new(
            subnet_id.into(),
            GossipEncoding::default(),
            self.enr_fork_id.fork_digest,
        );
        self.unsubscribe(topic)
    }

    /// Subscribes to a gossipsub topic.
    fn subscribe(&mut self, topic: GossipTopic) -> bool {
        // update the network globals
        self.network_globals
            .gossipsub_subscriptions
            .write()
            .insert(topic.clone());

        let topic_str: String = topic.clone().into();
        debug!(self.log, "Subscribed to topic"; "topic" => topic_str);
        self.gossipsub.subscribe(topic.into())
    }

    /// Unsubscribes from a gossipsub topic.
    fn unsubscribe(&mut self, topic: GossipTopic) -> bool {
        // update the network globals
        self.network_globals
            .gossipsub_subscriptions
            .write()
            .remove(&topic);
        // unsubscribe from the topic
        self.gossipsub.unsubscribe(topic.into())
    }

    /// Publishes a list of messages on the pubsub (gossipsub) behaviour, choosing the encoding.
    pub fn publish(&mut self, messages: Vec<PubsubMessage<TSpec>>) {
        for message in messages {
            for topic in message.topics(GossipEncoding::default(), self.enr_fork_id.fork_digest) {
                match message.encode(GossipEncoding::default()) {
                    Ok(message_data) => {
                        self.gossipsub.publish(&topic.into(), message_data);
                    }
                    Err(e) => crit!(self.log, "Could not publish message"; "error" => e),
                }
            }
        }
    }

    /// Forwards a message that is waiting in gossipsub's mcache. Messages are only propagated
    /// once validated by the beacon chain.
    pub fn propagate_message(&mut self, propagation_source: &PeerId, message_id: MessageId) {
        self.gossipsub
            .propagate_message(&message_id, propagation_source);
    }

    /* Eth2 RPC behaviour functions */

    /// Sends an RPC request/response via the RPC protocol.
    pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent<TSpec>) {
        self.eth2_rpc.send_rpc(peer_id, rpc_event);
    }

    /* Discovery / Peer management functions */

    /// Notify discovery that the peer has been banned.
    pub fn peer_banned(&mut self, peer_id: PeerId) {
        self.discovery.peer_banned(peer_id);
    }

    /// Notify discovery that the peer has been unbanned.
    pub fn peer_unbanned(&mut self, peer_id: &PeerId) {
        self.discovery.peer_unbanned(peer_id);
    }

    /// Returns an iterator over all enr entries in the DHT.
    pub fn enr_entries(&mut self) -> impl Iterator<Item = &Enr> {
        self.discovery.enr_entries()
    }

    /// Add an ENR to the routing table of the discovery mechanism.
    pub fn add_enr(&mut self, enr: Enr) {
        self.discovery.add_enr(enr);
    }

    /// Updates a subnet value in the ENR bitfield.
    ///
    /// The `value` is `true` if a subnet is being added and `false` otherwise.
    pub fn update_enr_subnet(&mut self, subnet_id: SubnetId, value: bool) {
        if let Err(e) = self.discovery.update_enr_bitfield(subnet_id, value) {
            crit!(self.log, "Could not update ENR bitfield"; "error" => e);
        }
        // update the local meta data which informs our peers of the update during PINGS
        self.update_metadata();
    }

    /// A request to search for peers connected to a long-lived subnet.
    pub fn peers_request(&mut self, subnet_id: SubnetId) {
        self.discovery.peers_request(subnet_id);
    }

    /// Updates the local ENR's "eth2" field with the latest EnrForkId.
    pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) {
        self.discovery.update_eth2_enr(enr_fork_id.clone());

        // unsubscribe from all gossip topics and re-subscribe to their new fork counterparts
        let subscribed_topics = self
            .network_globals
            .gossipsub_subscriptions
            .read()
            .iter()
            .cloned()
            .collect::<Vec<GossipTopic>>();

        // unsubscribe from all topics
        for topic in &subscribed_topics {
            self.unsubscribe(topic.clone());
        }

        // re-subscribe, modifying the fork version
        for mut topic in subscribed_topics {
            *topic.digest() = enr_fork_id.fork_digest;
            self.subscribe(topic);
        }

        // update the local reference
        self.enr_fork_id = enr_fork_id;
    }

    /* Private internal functions */

    /// Updates the current meta data of the node.
    fn update_metadata(&mut self) {
        self.meta_data.seq_number += 1;
        self.meta_data.attnets = self
            .discovery
            .local_enr()
            .bitfield::<TSpec>()
            .expect("Local discovery must have bitfield");
    }

    /// Sends a PING/PONG request/response to a peer.
    fn send_ping(&mut self, id: RequestId, peer_id: PeerId, is_request: bool) {
        let ping = crate::rpc::methods::Ping {
            data: self.meta_data.seq_number,
        };

        let event = if is_request {
            debug!(self.log, "Sending Ping"; "request_id" => id, "peer_id" => peer_id.to_string());
            RPCEvent::Request(id, RPCRequest::Ping(ping))
        } else {
            debug!(self.log, "Sending Pong"; "request_id" => id, "peer_id" => peer_id.to_string());
            RPCEvent::Response(id, RPCCodedResponse::Success(RPCResponse::Pong(ping)))
        };
        self.send_rpc(peer_id, event);
    }

    /// Sends a METADATA request to a peer.
    fn send_meta_data_request(&mut self, peer_id: PeerId) {
        let metadata_request =
            RPCEvent::Request(RequestId::from(0usize), RPCRequest::MetaData(PhantomData));
        self.send_rpc(peer_id, metadata_request);
    }

    /// Sends a METADATA response to a peer.
    fn send_meta_data_response(&mut self, id: RequestId, peer_id: PeerId) {
        let metadata_response = RPCEvent::Response(
            id,
            RPCCodedResponse::Success(RPCResponse::MetaData(self.meta_data.clone())),
        );
        self.send_rpc(peer_id, metadata_response);
    }

    /// Returns a reference to the peer manager to allow the swarm to notify the manager of peer
    /// status.
    pub fn peer_manager(&mut self) -> &mut PeerManager<TSpec> {
        &mut self.peer_manager
    }

    /* Addressed in the new behaviour. Connections are now maintained at the swarm level.
    /// Notifies the behaviour that a peer has connected.
    pub fn notify_peer_connect(&mut self, peer_id: PeerId, endpoint: ConnectedPoint) {
        match endpoint {
            ConnectedPoint::Dialer { .. } => self.peer_manager.connect_outgoing(&peer_id),
            ConnectedPoint::Listener { .. } => self.peer_manager.connect_ingoing(&peer_id),
        };

        // Find ENR info about a peer if possible.
        if let Some(enr) = self.discovery.enr_of_peer(&peer_id) {
            let bitfield = match enr.bitfield::<TSpec>() {
                Ok(v) => v,
                Err(e) => {
                    warn!(self.log, "Peer has invalid ENR bitfield";
                        "peer_id" => format!("{}", peer_id),
                        "error" => format!("{:?}", e));
                    return;
                }
            };

            // use this as a baseline, until we get the actual meta-data
            let meta_data = MetaData {
                seq_number: 0,
                attnets: bitfield,
            };
            // TODO: Shift to the peer manager
            self.network_globals
                .peers
                .write()
                .add_metadata(&peer_id, meta_data);
        }
    }
    */
}

// Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour
impl<TSpec: EthSpec> NetworkBehaviourEventProcess<GossipsubEvent> for Behaviour<TSpec> {
    fn inject_event(&mut self, event: GossipsubEvent) {
        match event {
            GossipsubEvent::Message(propagation_source, id, gs_msg) => {
                // Note: We are keeping track here of the peer that sent us the message, not the
                // peer that originally published the message.
                if self.seen_gossip_messages.put(id.clone(), ()).is_none() {
                    match PubsubMessage::decode(&gs_msg.topics, &gs_msg.data) {
                        Err(e) => {
                            debug!(self.log, "Could not decode gossipsub message"; "error" => format!("{}", e))
                        }
                        Ok(msg) => {
                            // if this message isn't a duplicate, notify the network
                            self.events.push(BehaviourEvent::PubsubMessage {
                                id,
                                source: propagation_source,
                                topics: gs_msg.topics,
                                message: msg,
                            });
                        }
                    }
                } else {
                    match PubsubMessage::<TSpec>::decode(&gs_msg.topics, &gs_msg.data) {
                        Err(e) => {
                            debug!(self.log, "Could not decode gossipsub message"; "error" => format!("{}", e))
                        }
                        Ok(msg) => {
                            debug!(self.log, "A duplicate gossipsub message was received"; "message_source" => format!("{}", gs_msg.source), "propagated_peer" => format!("{}", propagation_source), "message" => format!("{}", msg));
                        }
                    }
                }
            }
            GossipsubEvent::Subscribed { peer_id, topic } => {
                self.events
                    .push(BehaviourEvent::PeerSubscribed(peer_id, topic));
            }
            GossipsubEvent::Unsubscribed { .. } => {}
        }
    }
}

impl<TSpec: EthSpec> NetworkBehaviourEventProcess<RPCMessage<TSpec>> for Behaviour<TSpec> {
    fn inject_event(&mut self, message: RPCMessage<TSpec>) {
        let peer_id = message.peer_id;
        // The METADATA and PING RPC responses are handled within the behaviour and not
        // propagated.
        // TODO: Improve the RPC types to better handle this logic discrepancy
        match message.event {
            RPCEvent::Request(id, RPCRequest::Ping(ping)) => {
                // inform the peer manager and send the response
                self.peer_manager.ping_request(&peer_id, ping.data);
                // send a ping response
                self.send_ping(id, peer_id, false);
            }
            RPCEvent::Request(id, RPCRequest::MetaData(_)) => {
                // send the requested meta-data
                self.send_meta_data_response(id, peer_id);
            }
            RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::Pong(ping))) => {
                self.peer_manager.pong_response(&peer_id, ping.data);
            }
            RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::MetaData(meta_data))) => {
                self.peer_manager.meta_data_response(&peer_id, meta_data);
            }
            RPCEvent::Request(_, RPCRequest::Status(_))
            | RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::Status(_))) => {
                // inform the peer manager that we have received a status from a peer
                self.peer_manager.peer_statusd(&peer_id);
                // propagate the STATUS message upwards
                self.events
                    .push(BehaviourEvent::RPC(peer_id, message.event));
            }
            RPCEvent::Error(_, protocol, ref err) => {
                self.peer_manager.handle_rpc_error(&peer_id, protocol, err);
                self.events
                    .push(BehaviourEvent::RPC(peer_id, message.event));
            }
            _ => {
                // propagate all other RPC messages upwards
                self.events
                    .push(BehaviourEvent::RPC(peer_id, message.event))
            }
        }
    }
}

impl<TSpec: EthSpec> Behaviour<TSpec> {
    /// Consumes the events list when polled.
    fn poll<TBehaviourIn>(
        &mut self,
        cx: &mut Context,
        _: &mut impl PollParameters,
    ) -> Poll<NetworkBehaviourAction<TBehaviourIn, BehaviourEvent<TSpec>>> {
        // check the peer manager for events
        loop {
            match self.peer_manager.poll_next_unpin(cx) {
                Poll::Ready(Some(event)) => match event {
                    PeerManagerEvent::Status(peer_id) => {
                        // it's time to status. We don't keep a beacon chain reference here, so we
                        // inform the network to send a status to this peer
                        return Poll::Ready(NetworkBehaviourAction::GenerateEvent(
                            BehaviourEvent::StatusPeer(peer_id),
                        ));
                    }
                    PeerManagerEvent::Ping(peer_id) => {
                        // send a ping request to this peer
                        self.send_ping(RequestId::from(0usize), peer_id, true);
                    }
                    PeerManagerEvent::MetaData(peer_id) => {
                        self.send_meta_data_request(peer_id);
                    }
                    PeerManagerEvent::_DisconnectPeer(_peer_id) => {
                        //TODO: Implement
                    }
                    PeerManagerEvent::_BanPeer(_peer_id) => {
                        //TODO: Implement
                    }
                },
                Poll::Pending => break,
                Poll::Ready(None) => break, // peer manager ended
            }
        }

        if !self.events.is_empty() {
            return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0)));
        }

        Poll::Pending
    }
}

impl<TSpec: EthSpec> NetworkBehaviourEventProcess<IdentifyEvent> for Behaviour<TSpec> {
    fn inject_event(&mut self, event: IdentifyEvent) {
        match event {
            IdentifyEvent::Received {
                peer_id,
                mut info,
                observed_addr,
            } => {
                if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES {
                    debug!(
                        self.log,
                        "More than 10 addresses have been identified, truncating"
                    );
                    info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES);
                }
                // send peer info to the peer manager.
                self.peer_manager.identify(&peer_id, &info);

                debug!(self.log, "Identified Peer"; "peer" => format!("{}", peer_id),
                    "protocol_version" => info.protocol_version,
                    "agent_version" => info.agent_version,
                    "listening_addresses" => format!("{:?}", info.listen_addrs),
                    "observed_address" => format!("{:?}", observed_addr),
                    "protocols" => format!("{:?}", info.protocols)
                );
            }
            IdentifyEvent::Sent { .. } => {}
            IdentifyEvent::Error { .. } => {}
        }
    }
}

impl<TSpec: EthSpec> NetworkBehaviourEventProcess<Discv5Event> for Behaviour<TSpec> {
    fn inject_event(&mut self, _event: Discv5Event) {
        // discv5 has no events to inject
    }
}

/// The types of events that can be obtained from polling the behaviour.
#[derive(Debug)]
pub enum BehaviourEvent<TSpec: EthSpec> {
    /// A received RPC event and the peer that it was received from.
    RPC(PeerId, RPCEvent<TSpec>),
    PubsubMessage {
        /// The gossipsub message id. Used when propagating blocks after validation.
        id: MessageId,
        /// The peer from which we received this message, not the peer that published it.
        source: PeerId,
        /// The topics that this message was sent on.
        topics: Vec<TopicHash>,
        /// The message itself.
        message: PubsubMessage<TSpec>,
    },
    /// A peer subscribed to the given topic.
    PeerSubscribed(PeerId, TopicHash),
    /// Inform the network to send a Status to this peer.
    StatusPeer(PeerId),
}
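The removed `Behaviour` methods above all funnel through the private `subscribe`/`unsubscribe` pair: a `GossipTopic` is built from a kind (or subnet id), the default `GossipEncoding`, and the current `enr_fork_id.fork_digest`, so every subscription is automatically fork-aware. A hedged sketch of how a caller might have driven this API (the `behaviour` value, `subnet_id`, `new_enr_fork_id` and the exact `GossipKind` variant are assumptions, not taken from this diff):

    // behaviour: Behaviour<MainnetEthSpec> (hypothetical instance)
    behaviour.subscribe_kind(GossipKind::BeaconBlock); // encoding and fork digest chosen internally
    behaviour.subscribe_to_subnet(subnet_id);          // per-subnet attestation topic
    behaviour.update_fork_version(new_enr_fork_id);    // unsubscribes and re-subscribes on the new digest
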
@@ -1,535 +0,0 @@
//! This manages the discovery and management of peers.
pub(crate) mod enr;
pub mod enr_ext;

// Allow external use of the lighthouse ENR builder
pub use enr::{build_enr, CombinedKey, Keypair};
pub use enr_ext::{CombinedKeyExt, EnrExt};

use crate::metrics;
use crate::{error, Enr, NetworkConfig, NetworkGlobals};
use discv5::{enr::NodeId, Discv5, Discv5Event};
use enr::{Eth2Enr, BITFIELD_ENR_KEY, ETH2_ENR_KEY};
use futures::prelude::*;
use libp2p::core::{connection::ConnectionId, Multiaddr, PeerId};
use libp2p::multiaddr::Protocol;
use libp2p::swarm::{
    protocols_handler::DummyProtocolsHandler, DialPeerCondition, NetworkBehaviour,
    NetworkBehaviourAction, PollParameters, ProtocolsHandler,
};
use lru::LruCache;
use slog::{crit, debug, info, warn};
use ssz::{Decode, Encode};
use ssz_types::BitVector;
use std::{
    collections::{HashSet, VecDeque},
    net::SocketAddr,
    path::Path,
    sync::Arc,
    task::{Context, Poll},
    time::Duration,
};
use tokio::time::{delay_until, Delay, Instant};
use types::{EnrForkId, EthSpec, SubnetId};

/// Maximum seconds before searching for extra peers.
const MAX_TIME_BETWEEN_PEER_SEARCHES: u64 = 120;
/// Initial delay between peer searches.
const INITIAL_SEARCH_DELAY: u64 = 5;
/// Local ENR storage filename.
pub const ENR_FILENAME: &str = "enr.dat";
/// Number of peers we'd like to have connected to a given long-lived subnet.
const TARGET_SUBNET_PEERS: u64 = 3;

/// Lighthouse discovery behaviour. This provides peer management and discovery using the Discv5
/// libp2p protocol.
pub struct Discovery<TSpec: EthSpec> {
    /// Events to be processed by the behaviour.
    events: VecDeque<NetworkBehaviourAction<void::Void, Discv5Event>>,

    /// A collection of seen live ENRs for quick lookup and to map peer-ids to ENRs.
    cached_enrs: LruCache<PeerId, Enr>,

    /// The currently banned peers.
    banned_peers: HashSet<PeerId>,

    /// The target number of connected peers on the libp2p interface.
    max_peers: usize,

    /// The directory where the ENR is stored.
    enr_dir: String,

    /// The delay between peer discovery searches.
    peer_discovery_delay: Delay,

    /// Tracks the last discovery delay. The delay is doubled each round until the max
    /// time is reached.
    past_discovery_delay: u64,

    /// The TCP port for libp2p. Used to convert an updated IP address to a multiaddr. Note: This
    /// assumes that the external TCP port is the same as the internal TCP port if behind a NAT.
    //TODO: Improve NAT handling; limit the above restriction
    tcp_port: u16,

    /// The discovery behaviour used to discover new peers.
    discovery: Discv5,

    /// A collection of network constants that can be read from other threads.
    network_globals: Arc<NetworkGlobals<TSpec>>,

    /// Logger for the discovery behaviour.
    log: slog::Logger,
}

impl<TSpec: EthSpec> Discovery<TSpec> {
    pub fn new(
        local_key: &Keypair,
        config: &NetworkConfig,
        network_globals: Arc<NetworkGlobals<TSpec>>,
        log: &slog::Logger,
    ) -> error::Result<Self> {
        let log = log.clone();

        let enr_dir = match config.network_dir.to_str() {
            Some(path) => String::from(path),
            None => String::from(""),
        };

        let local_enr = network_globals.local_enr.read().clone();

        info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id" => format!("{}", local_enr.node_id()), "ip" => format!("{:?}", local_enr.ip()), "udp" => format!("{:?}", local_enr.udp()), "tcp" => format!("{:?}", local_enr.tcp()));

        let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port);

        // convert the keypair into an ENR key
        let enr_key: CombinedKey = CombinedKey::from_libp2p(&local_key)?;

        let mut discovery = Discv5::new(
            local_enr,
            enr_key,
            config.discv5_config.clone(),
            listen_socket,
        )
        .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?;

        // Add bootnodes to routing table
        for bootnode_enr in config.boot_nodes.clone() {
            debug!(
                log,
                "Adding node to routing table";
                "node_id" => format!("{}", bootnode_enr.node_id()),
                "peer_id" => format!("{}", bootnode_enr.peer_id()),
                "ip" => format!("{:?}", bootnode_enr.ip()),
                "udp" => format!("{:?}", bootnode_enr.udp()),
                "tcp" => format!("{:?}", bootnode_enr.tcp())
            );
            let _ = discovery.add_enr(bootnode_enr).map_err(|e| {
                warn!(
                    log,
                    "Could not add peer to the local routing table";
                    "error" => format!("{}", e)
                )
            });
        }

        Ok(Self {
            events: VecDeque::with_capacity(16),
            cached_enrs: LruCache::new(50),
            banned_peers: HashSet::new(),
            max_peers: config.max_peers,
            peer_discovery_delay: delay_until(Instant::now()),
            past_discovery_delay: INITIAL_SEARCH_DELAY,
            tcp_port: config.libp2p_port,
            discovery,
            network_globals,
            log,
            enr_dir,
        })
    }

    /// Returns the node's local ENR.
    pub fn local_enr(&self) -> &Enr {
        self.discovery.local_enr()
    }

    /// Manually search for peers. This restarts the discovery round, sparking multiple rapid
    /// queries.
    pub fn discover_peers(&mut self) {
        self.past_discovery_delay = INITIAL_SEARCH_DELAY;
        self.find_peers();
    }

    /// Add an ENR to the routing table of the discovery mechanism.
    pub fn add_enr(&mut self, enr: Enr) {
        // add the enr to seen caches
        self.cached_enrs.put(enr.peer_id(), enr.clone());

        let _ = self.discovery.add_enr(enr).map_err(|e| {
            warn!(
                self.log,
                "Could not add peer to the local routing table";
                "error" => format!("{}", e)
            )
        });
    }

    /// The peer has been banned. Add this peer to the banned list to prevent any future
    /// re-connections.
    // TODO: Remove the peer from the DHT if present
    pub fn peer_banned(&mut self, peer_id: PeerId) {
        self.banned_peers.insert(peer_id);
    }

    pub fn peer_unbanned(&mut self, peer_id: &PeerId) {
        self.banned_peers.remove(peer_id);
    }

    /// Returns an iterator over all enr entries in the DHT.
    pub fn enr_entries(&mut self) -> impl Iterator<Item = &Enr> {
        self.discovery.enr_entries()
    }

    /// Returns the ENR of a known peer if it exists.
    pub fn enr_of_peer(&mut self, peer_id: &PeerId) -> Option<Enr> {
        // first search the local cache
        if let Some(enr) = self.cached_enrs.get(peer_id) {
            return Some(enr.clone());
        }
        // not in the local cache, look in the routing table
        if let Ok(_node_id) = enr_ext::peer_id_to_node_id(peer_id) {
            // TODO: Need to update discv5
            // self.discovery.find_enr(&node_id)
            return None;
        } else {
            return None;
        }
    }

    /// Adds/Removes a subnet from the ENR bitfield.
    pub fn update_enr_bitfield(&mut self, subnet_id: SubnetId, value: bool) -> Result<(), String> {
        let id = *subnet_id as usize;

        let local_enr = self.discovery.local_enr();
        let mut current_bitfield = local_enr.bitfield::<TSpec>()?;

        if id >= current_bitfield.len() {
            return Err(format!(
                "Subnet id: {} is outside the ENR bitfield length: {}",
                id,
                current_bitfield.len()
            ));
        }

        if current_bitfield
            .get(id)
            .map_err(|_| String::from("Subnet ID out of bounds"))?
            == value
        {
            return Err(format!(
                "Subnet id: {} in the local ENR already has the value: {}",
                id, value
            ));
        }

        // set the subnet bitfield in the ENR
        current_bitfield
            .set(id, value)
            .map_err(|_| String::from("Subnet ID out of bounds, could not set subnet ID"))?;

        // insert the bitfield into the ENR record
        let _ = self
            .discovery
            .enr_insert(BITFIELD_ENR_KEY, current_bitfield.as_ssz_bytes());

        // replace the global version
        *self.network_globals.local_enr.write() = self.discovery.local_enr().clone();
        Ok(())
    }

    /// Updates the `eth2` field of our local ENR.
    pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId) {
        // to avoid having a reference to the spec constant, for the logging we assume
        // FAR_FUTURE_EPOCH is u64::max_value()
        let next_fork_epoch_log = if enr_fork_id.next_fork_epoch == u64::max_value() {
            String::from("No other fork")
        } else {
            format!("{:?}", enr_fork_id.next_fork_epoch)
        };

        info!(self.log, "Updating the ENR fork version";
            "fork_digest" => format!("{:?}", enr_fork_id.fork_digest),
            "next_fork_version" => format!("{:?}", enr_fork_id.next_fork_version),
            "next_fork_epoch" => next_fork_epoch_log,
        );

        let _ = self
            .discovery
            .enr_insert(ETH2_ENR_KEY.into(), enr_fork_id.as_ssz_bytes())
            .map_err(|e| {
                warn!(
                    self.log,
                    "Could not update eth2 ENR field";
                    "error" => format!("{:?}", e)
                )
            });

        // replace the global version with the discovery version
        *self.network_globals.local_enr.write() = self.discovery.local_enr().clone();
    }

    /// A request to find peers on a given subnet.
    // TODO: This logic should be improved with added sophistication in peer management
    // This currently checks for currently connected peers and if we don't have
    // PEERS_WANTED_BEFORE_DISCOVERY connected to a given subnet we search for more.
    pub fn peers_request(&mut self, subnet_id: SubnetId) {
        let peers_on_subnet = self
            .network_globals
            .peers
            .read()
            .peers_on_subnet(subnet_id)
            .count() as u64;

        if peers_on_subnet < TARGET_SUBNET_PEERS {
            let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet;
            debug!(self.log, "Searching for peers for subnet";
                "subnet_id" => *subnet_id,
                "connected_peers_on_subnet" => peers_on_subnet,
                "target_subnet_peers" => TARGET_SUBNET_PEERS,
                "peers_to_find" => target_peers
            );

            let log_clone = self.log.clone();

            let subnet_predicate = move |enr: &Enr| {
                if let Some(bitfield_bytes) = enr.get(BITFIELD_ENR_KEY) {
                    let bitfield = match BitVector::<TSpec::SubnetBitfieldLength>::from_ssz_bytes(
                        bitfield_bytes,
                    ) {
                        Ok(v) => v,
                        Err(e) => {
                            warn!(log_clone, "Could not decode ENR bitfield for peer"; "peer_id" => format!("{}", enr.peer_id()), "error" => format!("{:?}", e));
                            return false;
                        }
                    };

                    return bitfield.get(*subnet_id as usize).unwrap_or_else(|_| {
                        debug!(log_clone, "Peer found but not on desired subnet"; "peer_id" => format!("{}", enr.peer_id()));
                        false
                    });
                }
                false
            };

            // start the query
            self.start_query(subnet_predicate, target_peers as usize);
        } else {
            debug!(self.log, "Discovery ignored";
                "reason" => "Already connected to desired peers",
                "connected_peers_on_subnet" => peers_on_subnet,
                "target_subnet_peers" => TARGET_SUBNET_PEERS,
            );
        }
    }

    /* Internal Functions */

    /// Runs a standard query to search for more peers.
    ///
    /// This searches for the standard kademlia bucket size (16) of peers.
    fn find_peers(&mut self) {
        debug!(self.log, "Searching for peers");
        self.start_query(|_| true, 16);
    }

    /// Search for a specified number of new peers using the underlying discovery mechanism.
    ///
    /// This can optionally search for peers matching a given predicate. Regardless of the
    /// predicate given, this will only search for peers on the same enr_fork_id as specified
    /// in the local ENR.
    fn start_query<F>(&mut self, enr_predicate: F, num_nodes: usize)
    where
        F: Fn(&Enr) -> bool + Send + 'static + Clone,
    {
        // pick a random NodeId
        let random_node = NodeId::random();

        let enr_fork_id = match self.local_enr().eth2() {
            Ok(v) => v,
            Err(e) => {
                crit!(self.log, "Local ENR has no fork id"; "error" => e);
                return;
            }
        };
        // predicate for finding nodes with a matching fork
        let eth2_fork_predicate = move |enr: &Enr| enr.eth2() == Ok(enr_fork_id.clone());
        let predicate = move |enr: &Enr| eth2_fork_predicate(enr) && enr_predicate(enr);

        // general predicate
        self.discovery
            .find_enr_predicate(random_node, predicate, num_nodes);
    }
}

// Build a dummy NetworkBehaviour around the discv5 server
impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
    type ProtocolsHandler = DummyProtocolsHandler;
    type OutEvent = Discv5Event;

    fn new_handler(&mut self) -> Self::ProtocolsHandler {
        DummyProtocolsHandler::default()
    }

    fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
        if let Some(enr) = self.enr_of_peer(peer_id) {
            // ENRs may have multiple Multiaddrs. The multiaddr associated with the UDP
            // port is removed, as it is assumed to be associated with the discv5 protocol (and
            // therefore irrelevant for other libp2p components).
            let mut out_list = enr.multiaddr();
            out_list.retain(|addr| {
                addr.iter()
                    .find(|v| match v {
                        Protocol::Udp(_) => true,
                        _ => false,
                    })
                    .is_none()
            });

            out_list
        } else {
            // PeerId is not known
            Vec::new()
        }
    }

    // ignore libp2p connections/streams
    fn inject_connected(&mut self, _: &PeerId) {}

    // ignore libp2p connections/streams
    fn inject_disconnected(&mut self, _: &PeerId) {}

    // no libp2p discv5 events - events originate from the session_service.
    fn inject_event(
        &mut self,
        _: PeerId,
        _: ConnectionId,
        _event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
    ) {
        void::unreachable(_event)
    }

    fn poll(
        &mut self,
        cx: &mut Context,
        _: &mut impl PollParameters,
    ) -> Poll<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
            Self::OutEvent,
        >,
    > {
        // search for peers if it is time
        loop {
            match self.peer_discovery_delay.poll_unpin(cx) {
                Poll::Ready(_) => {
                    if self.network_globals.connected_peers() < self.max_peers {
                        self.find_peers();
                    }
                    // Set to maximum, and update to earlier, once we get our results back.
                    self.peer_discovery_delay.reset(
                        Instant::now() + Duration::from_secs(MAX_TIME_BETWEEN_PEER_SEARCHES),
                    );
                }
                Poll::Pending => break,
            }
        }

        // Poll discovery
        loop {
            match self.discovery.poll_next_unpin(cx) {
                Poll::Ready(Some(event)) => {
                    match event {
                        Discv5Event::Discovered(_enr) => {
                            // peers that get discovered during a query but are not contactable or
                            // don't match a predicate can end up here. For debugging purposes we
                            // log these to see if we are unnecessarily dropping discovered peers
                            /*
                            if enr.eth2() == self.local_enr().eth2() {
                                trace!(self.log, "Peer found in process of query"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket());
                            } else {
                                // this is a temporary warning for debugging the DHT
                                warn!(self.log, "Found peer during discovery not on correct fork"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket());
                            }
                            */
                        }
                        Discv5Event::SocketUpdated(socket) => {
                            info!(self.log, "Address updated"; "ip" => format!("{}", socket.ip()), "udp_port" => format!("{}", socket.port()));
                            metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT);
                            let mut address = Multiaddr::from(socket.ip());
                            address.push(Protocol::Tcp(self.tcp_port));
                            let enr = self.discovery.local_enr();
                            enr::save_enr_to_disk(Path::new(&self.enr_dir), enr, &self.log);

                            return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr {
                                address,
                            });
                        }
                        Discv5Event::FindNodeResult { closer_peers, .. } => {
                            debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len());
                            // update the time until the next query
                            if self.past_discovery_delay < MAX_TIME_BETWEEN_PEER_SEARCHES {
                                self.past_discovery_delay *= 2;
                            }
                            // cap the back-off at the maximum search interval
                            let delay = std::cmp::min(
                                self.past_discovery_delay,
                                MAX_TIME_BETWEEN_PEER_SEARCHES,
                            );
                            self.peer_discovery_delay
                                .reset(Instant::now() + Duration::from_secs(delay));

                            for enr in closer_peers {
                                // cache known peers
                                let peer_id = enr.peer_id();
                                self.cached_enrs.put(enr.peer_id(), enr);

                                // if we need more peers, attempt a connection
                                if self.network_globals.connected_or_dialing_peers()
                                    < self.max_peers
                                    && !self
                                        .network_globals
                                        .peers
                                        .read()
                                        .is_connected_or_dialing(&peer_id)
                                    && !self.banned_peers.contains(&peer_id)
                                {
                                    // TODO: Debugging only
                                    // NOTE: The peer manager will get updated by the global swarm.
                                    let connection_status = self
                                        .network_globals
                                        .peers
                                        .read()
                                        .connection_status(&peer_id);
                                    debug!(self.log, "Connecting to discovered peer"; "peer_id" => peer_id.to_string(), "status" => format!("{:?}", connection_status));
                                    self.events.push_back(NetworkBehaviourAction::DialPeer {
                                        peer_id,
                                        condition: DialPeerCondition::Disconnected,
                                    });
                                }
                            }
                        }
                        _ => {}
                    }
                }
                // discv5 does not output any other NetworkBehaviourAction
                Poll::Ready(_) => {}
                Poll::Pending => break,
            }
        }

        // process any queued events
        if let Some(event) = self.events.pop_front() {
            return Poll::Ready(event);
        }

        Poll::Pending
    }
}
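The poll loop above implements a doubling back-off between discovery queries: `past_discovery_delay` starts at `INITIAL_SEARCH_DELAY` (5s), doubles after each completed `FindNodeResult`, and is capped at `MAX_TIME_BETWEEN_PEER_SEARCHES` (120s). A standalone sketch of the resulting schedule, illustrative only and using the constants from this file:

    // Back-off schedule: roughly 5, 10, 20, 40, 80, 120, 120, ... seconds.
    let mut delay: u64 = 5; // INITIAL_SEARCH_DELAY
    for _ in 0..7 {
        println!("next discovery query in {}s", delay.min(120));
        if delay < 120 {
            delay *= 2; // doubled each round, capped at MAX_TIME_BETWEEN_PEER_SEARCHES
        }
    }
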
@@ -1,20 +0,0 @@
pub use lighthouse_metrics::*;

lazy_static! {
    pub static ref ADDRESS_UPDATE_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_address_update_total",
        "Count of libp2p socket update events (when our view of our IP address has changed)"
    );
    pub static ref PEERS_CONNECTED: Result<IntGauge> = try_create_int_gauge(
        "libp2p_peer_connected_peers_total",
        "Count of libp2p peers currently connected"
    );
    pub static ref PEER_CONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_peer_connect_event_total",
        "Count of libp2p peer connect events (not the current number of connected peers)"
    );
    pub static ref PEER_DISCONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_peer_disconnect_event_total",
        "Count of libp2p peer disconnect events"
    );
}
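These metrics are lazily initialised and wrapped in `Result`, so call sites pass the metric by reference and let the helper ignore registration failures. Typical call sites, as they appear elsewhere in this diff (the `connected_peers` variable here is a hypothetical placeholder):

    metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
    metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers as i64);
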
@@ -1,512 +0,0 @@
//! Implementation of Lighthouse's peer management system.

pub use self::peerdb::*;
use crate::metrics;
use crate::rpc::{MetaData, Protocol, RPCError, RPCResponseErrorCode};
use crate::{NetworkGlobals, PeerId};
use futures::prelude::*;
use futures::Stream;
use hashset_delay::HashSetDelay;
use libp2p::identify::IdentifyInfo;
use slog::{crit, debug, error, warn};
use smallvec::SmallVec;
use std::convert::TryInto;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use types::EthSpec;

pub mod client;
mod peer_info;
mod peer_sync_status;
mod peerdb;

pub use peer_info::{PeerConnectionStatus::*, PeerInfo};
pub use peer_sync_status::{PeerSyncStatus, SyncInfo};
/// The minimum reputation before a peer is disconnected.
// Most likely this needs tweaking.
const _MIN_REP_BEFORE_BAN: Rep = 10;
/// The time in seconds between re-statusing peers.
const STATUS_INTERVAL: u64 = 300;
/// The time in seconds between PING events. We do not send a ping if the other peer has PING'd us
/// within this time frame (seconds).
const PING_INTERVAL: u64 = 30;

/// The main struct that handles peers' reputations and connection status.
pub struct PeerManager<TSpec: EthSpec> {
    /// Storage of network globals to access the `PeerDB`.
    network_globals: Arc<NetworkGlobals<TSpec>>,
    /// A queue of events that the `PeerManager` is waiting to produce.
    events: SmallVec<[PeerManagerEvent; 5]>,
    /// A collection of peers waiting to be Ping'd.
    ping_peers: HashSetDelay<PeerId>,
    /// A collection of peers waiting to be Status'd.
    status_peers: HashSetDelay<PeerId>,
    /// Last updated moment.
    _last_updated: Instant,
    /// The logger associated with the `PeerManager`.
    log: slog::Logger,
}

/// A collection of actions a peer can perform which will adjust its reputation.
/// Each variant has an associated reputation change.
// To easily assess the behaviour of reputation changes the number of variants should stay low, and
// somewhat generic.
pub enum PeerAction {
    /// We should not communicate more with this peer.
    /// This action will cause the peer to get banned.
    Fatal,
    /// An error occurred with this peer but it is not necessarily malicious.
    /// We have a high tolerance for these actions: several occurrences are needed for a peer to
    /// get kicked.
    /// NOTE: ~15 occurrences will get the peer banned.
    HighToleranceError,
    /// An error occurred with this peer but it is not necessarily malicious.
    /// We have a medium tolerance for these actions: several occurrences are needed for a peer to
    /// get kicked.
    /// NOTE: ~10 occurrences will get the peer banned.
    MidToleranceError,
    /// This peer's action is not malicious but will not be tolerated. A few occurrences will cause
    /// the peer to get kicked.
    /// NOTE: ~5 occurrences will get the peer banned.
    LowToleranceError,
    /// Received an expected message.
    _ValidMessage,
}

impl PeerAction {
    fn rep_change(&self) -> RepChange {
        match self {
            PeerAction::Fatal => RepChange::worst(),
            PeerAction::LowToleranceError => RepChange::bad(60),
            PeerAction::MidToleranceError => RepChange::bad(25),
            PeerAction::HighToleranceError => RepChange::bad(15),
            PeerAction::_ValidMessage => RepChange::good(20),
        }
    }
}

/// The events that the `PeerManager` outputs (requests).
pub enum PeerManagerEvent {
    /// Sends a STATUS to a peer.
    Status(PeerId),
    /// Sends a PING to a peer.
    Ping(PeerId),
    /// Request METADATA from a peer.
    MetaData(PeerId),
    /// The peer should be disconnected.
    _DisconnectPeer(PeerId),
    /// The peer should be disconnected and banned.
    _BanPeer(PeerId),
}

impl<TSpec: EthSpec> PeerManager<TSpec> {
    pub fn new(network_globals: Arc<NetworkGlobals<TSpec>>, log: &slog::Logger) -> Self {
        PeerManager {
            network_globals,
            events: SmallVec::new(),
            _last_updated: Instant::now(),
            ping_peers: HashSetDelay::new(Duration::from_secs(PING_INTERVAL)),
            status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)),
            log: log.clone(),
        }
    }

    /* Publicly accessible functions */

    /// A ping request has been received.
    // NOTE: The behaviour responds with a PONG automatically
    // TODO: Update last seen
    pub fn ping_request(&mut self, peer_id: &PeerId, seq: u64) {
        if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) {
            // received a ping
            // reset the to-ping timer for this peer
            debug!(self.log, "Received a ping request"; "peer_id" => peer_id.to_string(), "seq_no" => seq);
            self.ping_peers.insert(peer_id.clone());

            // if the ping's sequence number is newer than the one we know, request the peer's
            // updated metadata.
            if let Some(meta_data) = &peer_info.meta_data {
                if meta_data.seq_number < seq {
                    debug!(self.log, "Requesting new metadata from peer";
                        "peer_id" => peer_id.to_string(), "known_seq_no" => meta_data.seq_number, "ping_seq_no" => seq);
                    self.events
                        .push(PeerManagerEvent::MetaData(peer_id.clone()));
                }
            } else {
                // if we don't know the meta-data, request it
                debug!(self.log, "Requesting first metadata from peer";
                    "peer_id" => peer_id.to_string());
                self.events
                    .push(PeerManagerEvent::MetaData(peer_id.clone()));
            }
        } else {
            crit!(self.log, "Received a PING from an unknown peer";
                "peer_id" => peer_id.to_string());
        }
    }

    /// A PONG has been returned from a peer.
    // TODO: Update last seen
    pub fn pong_response(&mut self, peer_id: &PeerId, seq: u64) {
        if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) {
            // received a pong

            // if the pong's sequence number is newer than the one we know, request the peer's
            // updated metadata.
            if let Some(meta_data) = &peer_info.meta_data {
                if meta_data.seq_number < seq {
                    debug!(self.log, "Requesting new metadata from peer";
                        "peer_id" => peer_id.to_string(), "known_seq_no" => meta_data.seq_number, "pong_seq_no" => seq);
                    self.events
                        .push(PeerManagerEvent::MetaData(peer_id.clone()));
                }
            } else {
                // if we don't know the meta-data, request it
                debug!(self.log, "Requesting first metadata from peer";
                    "peer_id" => peer_id.to_string());
                self.events
                    .push(PeerManagerEvent::MetaData(peer_id.clone()));
            }
        } else {
            crit!(self.log, "Received a PONG from an unknown peer"; "peer_id" => peer_id.to_string());
        }
    }

    /// Received a metadata response from a peer.
    // TODO: Update last seen
    pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData<TSpec>) {
        if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
            if let Some(known_meta_data) = &peer_info.meta_data {
                if known_meta_data.seq_number < meta_data.seq_number {
                    debug!(self.log, "Updating peer's metadata";
                        "peer_id" => peer_id.to_string(), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
                    peer_info.meta_data = Some(meta_data);
                } else {
                    debug!(self.log, "Received old metadata";
                        "peer_id" => peer_id.to_string(), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
                }
            } else {
                // we have no meta-data for this peer, update it
                debug!(self.log, "Obtained peer's metadata";
                    "peer_id" => peer_id.to_string(), "new_seq_no" => meta_data.seq_number);
                peer_info.meta_data = Some(meta_data);
            }
        } else {
            crit!(self.log, "Received METADATA from an unknown peer";
                "peer_id" => peer_id.to_string());
        }
    }

    /// A STATUS message has been received from a peer. This resets the status timer.
    pub fn peer_statusd(&mut self, peer_id: &PeerId) {
        self.status_peers.insert(peer_id.clone());
    }

    /// Updates the state of the peer as disconnected.
    pub fn notify_disconnect(&mut self, peer_id: &PeerId) {
        //self.update_reputations();
        self.network_globals.peers.write().disconnect(peer_id);

        // remove the ping and status timers for the peer
        self.ping_peers.remove(peer_id);
        self.status_peers.remove(peer_id);
        metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT);
        metrics::set_gauge(
            &metrics::PEERS_CONNECTED,
            self.network_globals.connected_peers() as i64,
        );
    }

    /// Sets a peer as connected as long as their reputation allows it.
    /// Informs whether the peer was accepted.
    pub fn connect_ingoing(&mut self, peer_id: &PeerId) -> bool {
        self.connect_peer(peer_id, ConnectingType::IngoingConnected)
    }

    /// Sets a peer as connected as long as their reputation allows it.
    /// Informs whether the peer was accepted.
    pub fn connect_outgoing(&mut self, peer_id: &PeerId) -> bool {
        self.connect_peer(peer_id, ConnectingType::OutgoingConnected)
    }

    /// Updates the database, noting that a peer is being dialed.
    pub fn dialing_peer(&mut self, peer_id: &PeerId) -> bool {
        self.connect_peer(peer_id, ConnectingType::Dialing)
    }

    /// Reports a peer for some action.
    ///
    /// If the peer doesn't exist, log a warning and insert defaults.
    pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction) {
        //TODO: Check these. There are double disconnects for example
        // self.update_reputations();
        self.network_globals
            .peers
            .write()
            .add_reputation(peer_id, action.rep_change());
        // self.update_reputations();
    }

    /// Updates `PeerInfo` with `identify` information.
    pub fn identify(&mut self, peer_id: &PeerId, info: &IdentifyInfo) {
        if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
            peer_info.client = client::Client::from_identify_info(info);
            peer_info.listening_addresses = info.listen_addrs.clone();
        } else {
            crit!(self.log, "Received an Identify response from an unknown peer"; "peer_id" => peer_id.to_string());
        }
    }

    pub fn handle_rpc_error(&mut self, peer_id: &PeerId, protocol: Protocol, err: &RPCError) {
        let client = self
            .network_globals
            .peers
            .read()
            .peer_info(peer_id)
            .map(|info| info.client.clone())
            .unwrap_or_default();
        debug!(self.log, "RPCError"; "protocol" => protocol.to_string(), "err" => err.to_string(), "client" => client.to_string());

        // Map this error to a `PeerAction` (if any)
        let peer_action = match err {
            RPCError::IncompleteStream => {
                // They closed early; this could mean a poor connection
                PeerAction::MidToleranceError
            }
            RPCError::InternalError(_reason) => {
                // Our fault. Do nothing
                return;
            }
            RPCError::InvalidData => {
                // The peer is not complying with the protocol. This is considered a malicious action
                PeerAction::Fatal
            }
            RPCError::IoError(_e) => {
                // this could be their fault or ours, so we tolerate this
                PeerAction::HighToleranceError
            }
            RPCError::ErrorResponse(code) => match code {
                RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError,
                RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError,
                RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError,
            },
            RPCError::SSZDecodeError(_) => PeerAction::Fatal,
            RPCError::UnsupportedProtocol => {
                // Not supporting a protocol shouldn't be considered a malicious action, but
                // it is an action that in some cases will make the peer unfit to continue
                // communicating.
                // TODO: To avoid punishing a peer repeatedly for not supporting a protocol, this
                // information could be stored and used to prevent sending requests for the given
                // protocol to this peer. Similarly, to avoid blacklisting a peer for a protocol
                // forever, if stored this information should expire.
                match protocol {
                    Protocol::Ping => PeerAction::Fatal,
                    Protocol::BlocksByRange => return,
                    Protocol::BlocksByRoot => return,
                    Protocol::Goodbye => return,
                    Protocol::MetaData => PeerAction::LowToleranceError,
                    Protocol::Status => PeerAction::LowToleranceError,
                }
            }
            RPCError::StreamTimeout => match protocol {
                Protocol::Ping => PeerAction::LowToleranceError,
                Protocol::BlocksByRange => PeerAction::MidToleranceError,
                Protocol::BlocksByRoot => PeerAction::MidToleranceError,
                Protocol::Goodbye => return,
                Protocol::MetaData => return,
                Protocol::Status => return,
            },
            RPCError::NegotiationTimeout => PeerAction::HighToleranceError,
        };

        self.report_peer(peer_id, peer_action);
    }

    /* Internal functions */

    /// Registers a peer as connected. The `connection` parameter determines whether we dialed
    /// the peer or the peer connected to us.
    ///
    /// This is called by `connect_ingoing` and `connect_outgoing`.
    ///
    /// This informs whether the peer was accepted into the db or not.
    // TODO: Drop peers if over the max_peer limit
    fn connect_peer(&mut self, peer_id: &PeerId, connection: ConnectingType) -> bool {
        // TODO: remove after timed updates
        //self.update_reputations();

        {
            let mut peerdb = self.network_globals.peers.write();
            if peerdb.connection_status(peer_id).map(|c| c.is_banned()) == Some(true) {
                // don't connect if the peer is banned
                // TODO: Handle this case. If the peer is banned this shouldn't be reached. It
                // will put our connection/disconnection out of sync with libp2p
                // return false;
            }

            match connection {
                ConnectingType::Dialing => peerdb.dialing_peer(peer_id),
                ConnectingType::IngoingConnected => peerdb.connect_ingoing(peer_id),
                ConnectingType::OutgoingConnected => peerdb.connect_outgoing(peer_id),
}
|
||||
}
|
||||
|
||||
// start a ping and status timer for the peer
|
||||
self.ping_peers.insert(peer_id.clone());
|
||||
self.status_peers.insert(peer_id.clone());
|
||||
|
||||
// increment prometheus metrics
|
||||
metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
|
||||
metrics::set_gauge(
|
||||
&metrics::PEERS_CONNECTED,
|
||||
self.network_globals.connected_peers() as i64,
|
||||
);
|
||||
|
||||
true
|
||||
}
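
    // Illustrative call sites (assumed; they mirror the doc comment above): the peer
    // manager's connection handlers would register peers roughly as
    //   self.connect_peer(&peer_id, ConnectingType::IngoingConnected);  // a peer dialed us
    //   self.connect_peer(&peer_id, ConnectingType::OutgoingConnected); // we dialed a peer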

    /// Notifies the peer manager that this peer is being dialed.
    pub fn _dialing_peer(&mut self, peer_id: &PeerId) {
        self.network_globals.peers.write().dialing_peer(peer_id);
    }

    /// Updates the reputation of known peers according to their connection
    /// status and the time that has passed.
    ///
    /// **Disconnected peers** get a 1 rep hit for every hour they stay disconnected.
    /// **Banned peers** get a 1 rep gain for every hour, to slowly allow them back again.
    ///
    /// A banned (disconnected) peer that gets its rep above (below) `_MIN_REP_BEFORE_BAN`
    /// is from then on considered a disconnected (banned) peer.
    fn _update_reputations(&mut self) {
        // avoid locking the peerdb too often
        // TODO: call this on a timer
        if self._last_updated.elapsed().as_secs() < 30 {
            return;
        }

        let now = Instant::now();

        // Check for peers that get banned, unbanned and that should be disconnected
        let mut ban_queue = Vec::new();
        let mut unban_queue = Vec::new();

        /* Check how long peers have been in this state and update their reputations if needed */
        let mut pdb = self.network_globals.peers.write();

        for (id, info) in pdb._peers_mut() {
            // Update reputations
            match info.connection_status {
                Connected { .. } => {
                    // Connected peers gain reputation by sending useful messages
                }
                Disconnected { since } | Banned { since } => {
                    // For disconnected peers, lower their reputation by 1 for every hour they
                    // stay disconnected. This helps us slowly forget disconnected peers.
                    // In the same way, slowly allow banned peers back again.
                    let dc_hours = now
                        .checked_duration_since(since)
                        .unwrap_or_else(|| Duration::from_secs(0))
                        .as_secs()
                        / 3600;
                    let last_dc_hours = self
                        ._last_updated
                        .checked_duration_since(since)
                        .unwrap_or_else(|| Duration::from_secs(0))
                        .as_secs()
                        / 3600;
                    if dc_hours > last_dc_hours {
                        // this should be 1 most of the time
                        let rep_dif = (dc_hours - last_dc_hours)
                            .try_into()
                            .unwrap_or(Rep::max_value());

                        info.reputation = if info.connection_status.is_banned() {
                            info.reputation.saturating_add(rep_dif)
                        } else {
                            info.reputation.saturating_sub(rep_dif)
                        };
                    }
                }
                Dialing { since } => {
                    // A peer shouldn't be dialing for more than 2 minutes
                    if since.elapsed().as_secs() > 120 {
                        warn!(self.log, "Peer has been dialing for too long"; "peer_id" => id.to_string());
                        // TODO: decide how to handle this
                    }
                }
                Unknown => {} //TODO: Handle this case
            }
            // Check if the peer gets banned or unbanned and if it should be disconnected
            if info.reputation < _MIN_REP_BEFORE_BAN && !info.connection_status.is_banned() {
                // This peer gets banned. Check if we should request disconnection
                ban_queue.push(id.clone());
            } else if info.reputation >= _MIN_REP_BEFORE_BAN && info.connection_status.is_banned() {
                // This peer gets unbanned
                unban_queue.push(id.clone());
            }
        }

        for id in ban_queue {
            pdb.ban(&id);

            self.events.push(PeerManagerEvent::_BanPeer(id.clone()));
        }

        for id in unban_queue {
            pdb.disconnect(&id);
        }

        self._last_updated = Instant::now();
    }
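
    // Worked example of the decay arithmetic above (illustrative numbers): a peer
    // disconnected 7300s ago has dc_hours = 7300 / 3600 = 2; if the previous update ran
    // 3700s after the disconnect then last_dc_hours = 3700 / 3600 = 1, so the peer
    // loses rep_dif = 2 - 1 = 1 reputation (or gains it back, if banned).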
}

impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
    type Item = PeerManagerEvent;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // poll the timeouts for pings and statuses
        loop {
            match self.ping_peers.poll_next_unpin(cx) {
                Poll::Ready(Some(Ok(peer_id))) => {
                    self.ping_peers.insert(peer_id.clone());
                    self.events.push(PeerManagerEvent::Ping(peer_id));
                }
                Poll::Ready(Some(Err(e))) => {
                    error!(self.log, "Failed to check for peers to ping"; "error" => format!("{}",e))
                }
                Poll::Ready(None) | Poll::Pending => break,
            }
        }

        loop {
            match self.status_peers.poll_next_unpin(cx) {
                Poll::Ready(Some(Ok(peer_id))) => {
                    self.status_peers.insert(peer_id.clone());
                    self.events.push(PeerManagerEvent::Status(peer_id))
                }
                Poll::Ready(Some(Err(e))) => {
                    error!(self.log, "Failed to check for peers to status"; "error" => format!("{}",e))
                }
                Poll::Ready(None) | Poll::Pending => break,
            }
        }

        if !self.events.is_empty() {
            return Poll::Ready(Some(self.events.remove(0)));
        } else {
            self.events.shrink_to_fit();
        }

        Poll::Pending
    }
}

enum ConnectingType {
    /// We are in the process of dialing this peer.
    Dialing,
    /// A peer has dialed us.
    IngoingConnected,
    /// We have successfully dialed a peer.
    OutgoingConnected,
}

@@ -1,254 +0,0 @@
use crate::rpc::methods::*;
use crate::rpc::{
    codec::base::OutboundCodec,
    protocol::{Encoding, Protocol, ProtocolId, RPCError, Version},
};
use crate::rpc::{ErrorMessage, RPCCodedResponse, RPCRequest, RPCResponse};
use libp2p::bytes::{BufMut, Bytes, BytesMut};
use ssz::{Decode, Encode};
use std::marker::PhantomData;
use tokio_util::codec::{Decoder, Encoder};
use types::{EthSpec, SignedBeaconBlock};
use unsigned_varint::codec::UviBytes;

/* Inbound Codec */

pub struct SSZInboundCodec<TSpec: EthSpec> {
    inner: UviBytes,
    protocol: ProtocolId,
    phantom: PhantomData<TSpec>,
}

impl<TSpec: EthSpec> SSZInboundCodec<TSpec> {
    pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self {
        let mut uvi_codec = UviBytes::default();
        uvi_codec.set_max_len(max_packet_size);

        // this encoding only applies to ssz.
        debug_assert_eq!(protocol.encoding, Encoding::SSZ);

        SSZInboundCodec {
            inner: uvi_codec,
            protocol,
            phantom: PhantomData,
        }
    }
}
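
// Framing note (inferred from the `UviBytes` usage above): every message on the wire
// is `<unsigned-varint length> || <ssz payload>`. For example, an 8-byte SSZ payload
// is framed as the single length byte 0x08 followed by the 8 payload bytes; frames
// whose declared length exceeds `max_packet_size` are rejected by the codec.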

// Encoder for inbound streams: Encodes RPC Responses sent to peers.
impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZInboundCodec<TSpec> {
    type Error = RPCError;

    fn encode(
        &mut self,
        item: RPCCodedResponse<TSpec>,
        dst: &mut BytesMut,
    ) -> Result<(), Self::Error> {
        let bytes = match item {
            RPCCodedResponse::Success(resp) => match resp {
                RPCResponse::Status(res) => res.as_ssz_bytes(),
                RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(),
                RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(),
                RPCResponse::Pong(res) => res.data.as_ssz_bytes(),
                RPCResponse::MetaData(res) => res.as_ssz_bytes(),
            },
            RPCCodedResponse::InvalidRequest(err) => err.as_ssz_bytes(),
            RPCCodedResponse::ServerError(err) => err.as_ssz_bytes(),
            RPCCodedResponse::Unknown(err) => err.as_ssz_bytes(),
            RPCCodedResponse::StreamTermination(_) => {
                unreachable!("Code error - attempting to encode a stream termination")
            }
        };
        if !bytes.is_empty() {
            // length-prefix and return
            return self
                .inner
                .encode(Bytes::from(bytes), dst)
                .map_err(RPCError::from);
        } else {
            // payload is empty, add a 0-byte length prefix
            dst.reserve(1);
            dst.put_u8(0);
        }
        Ok(())
    }
}

// Decoder for inbound streams: Decodes RPC requests from peers
impl<TSpec: EthSpec> Decoder for SSZInboundCodec<TSpec> {
    type Item = RPCRequest<TSpec>;
    type Error = RPCError;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        match self.inner.decode(src).map_err(RPCError::from) {
            Ok(Some(packet)) => match self.protocol.message_name {
                Protocol::Status => match self.protocol.version {
                    Version::V1 => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes(
                        &packet,
                    )?))),
                },
                Protocol::Goodbye => match self.protocol.version {
                    Version::V1 => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(
                        &packet,
                    )?))),
                },
                Protocol::BlocksByRange => match self.protocol.version {
                    Version::V1 => Ok(Some(RPCRequest::BlocksByRange(
                        BlocksByRangeRequest::from_ssz_bytes(&packet)?,
                    ))),
                },
                Protocol::BlocksByRoot => match self.protocol.version {
                    Version::V1 => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest {
                        block_roots: Vec::from_ssz_bytes(&packet)?,
                    }))),
                },
                Protocol::Ping => match self.protocol.version {
                    Version::V1 => Ok(Some(RPCRequest::Ping(Ping {
                        data: u64::from_ssz_bytes(&packet)?,
                    }))),
                },
                Protocol::MetaData => match self.protocol.version {
                    Version::V1 => {
                        if !packet.is_empty() {
                            Err(RPCError::InvalidData)
                        } else {
                            Ok(Some(RPCRequest::MetaData(PhantomData)))
                        }
                    }
                },
            },
            Ok(None) => Ok(None),
            Err(e) => Err(e),
        }
    }
}

/* Outbound Codec: Codec for initiating RPC requests */

pub struct SSZOutboundCodec<TSpec: EthSpec> {
    inner: UviBytes,
    protocol: ProtocolId,
    phantom: PhantomData<TSpec>,
}

impl<TSpec: EthSpec> SSZOutboundCodec<TSpec> {
    pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self {
        let mut uvi_codec = UviBytes::default();
        uvi_codec.set_max_len(max_packet_size);

        // this encoding only applies to ssz.
        debug_assert_eq!(protocol.encoding, Encoding::SSZ);

        SSZOutboundCodec {
            inner: uvi_codec,
            protocol,
            phantom: PhantomData,
        }
    }
}

// Encoder for outbound streams: Encodes RPC Requests to peers
impl<TSpec: EthSpec> Encoder<RPCRequest<TSpec>> for SSZOutboundCodec<TSpec> {
    type Error = RPCError;

    fn encode(&mut self, item: RPCRequest<TSpec>, dst: &mut BytesMut) -> Result<(), Self::Error> {
        let bytes = match item {
            RPCRequest::Status(req) => req.as_ssz_bytes(),
            RPCRequest::Goodbye(req) => req.as_ssz_bytes(),
            RPCRequest::BlocksByRange(req) => req.as_ssz_bytes(),
            RPCRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(),
            RPCRequest::Ping(req) => req.as_ssz_bytes(),
            RPCRequest::MetaData(_) => return Ok(()), // no metadata to encode
        };
        // length-prefix
        self.inner
            .encode(libp2p::bytes::Bytes::from(bytes), dst)
            .map_err(RPCError::from)
    }
}

// Decoder for outbound streams: Decodes RPC responses from peers.
//
// The majority of the decoding has now been pushed upstream due to the changing specification.
// We prefer to decode blocks and attestations with extra knowledge about the chain to perform
// faster verification checks before decoding entire blocks/attestations.
impl<TSpec: EthSpec> Decoder for SSZOutboundCodec<TSpec> {
    type Item = RPCResponse<TSpec>;
    type Error = RPCError;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        if src.len() == 1 && src[0] == 0_u8 {
            // the object is empty; clear the buffer and handle the empty object per protocol
            src.clear();
            match self.protocol.message_name {
                Protocol::Status => match self.protocol.version {
                    Version::V1 => Err(RPCError::IncompleteStream), // cannot have an empty STATUS message. The stream has terminated unexpectedly
                },
                Protocol::Goodbye => Err(RPCError::InvalidData),
                Protocol::BlocksByRange => match self.protocol.version {
                    Version::V1 => Err(RPCError::IncompleteStream), // cannot have an empty block message.
                },
                Protocol::BlocksByRoot => match self.protocol.version {
                    Version::V1 => Err(RPCError::IncompleteStream), // cannot have an empty block message.
                },
                Protocol::Ping => match self.protocol.version {
                    Version::V1 => Err(RPCError::IncompleteStream), // cannot have an empty PING message.
                },
                Protocol::MetaData => match self.protocol.version {
                    Version::V1 => Err(RPCError::IncompleteStream), // cannot have an empty METADATA message.
                },
            }
        } else {
            match self.inner.decode(src).map_err(RPCError::from) {
                Ok(Some(mut packet)) => {
                    // take the bytes from the buffer
                    let raw_bytes = packet.split();

                    match self.protocol.message_name {
                        Protocol::Status => match self.protocol.version {
                            Version::V1 => Ok(Some(RPCResponse::Status(
                                StatusMessage::from_ssz_bytes(&raw_bytes)?,
                            ))),
                        },
                        Protocol::Goodbye => Err(RPCError::InvalidData),
                        Protocol::BlocksByRange => match self.protocol.version {
                            Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new(
                                SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?,
                            )))),
                        },
                        Protocol::BlocksByRoot => match self.protocol.version {
                            Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
                                SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?,
                            )))),
                        },
                        Protocol::Ping => match self.protocol.version {
                            Version::V1 => Ok(Some(RPCResponse::Pong(Ping {
                                data: u64::from_ssz_bytes(&raw_bytes)?,
                            }))),
                        },
                        Protocol::MetaData => match self.protocol.version {
                            Version::V1 => Ok(Some(RPCResponse::MetaData(
                                MetaData::from_ssz_bytes(&raw_bytes)?,
                            ))),
                        },
                    }
                }
                Ok(None) => Ok(None), // waiting for more bytes
                Err(e) => Err(e),
            }
        }
    }
}

impl<TSpec: EthSpec> OutboundCodec<RPCRequest<TSpec>> for SSZOutboundCodec<TSpec> {
    type ErrorType = ErrorMessage;

    fn decode_error(&mut self, src: &mut BytesMut) -> Result<Option<Self::ErrorType>, RPCError> {
        match self.inner.decode(src).map_err(RPCError::from) {
            Ok(Some(packet)) => Ok(Some(ErrorMessage::from_ssz_bytes(&packet)?)),
            Ok(None) => Ok(None),
            Err(e) => Err(e),
        }
    }
}

@@ -1,951 +0,0 @@
#![allow(clippy::type_complexity)]
#![allow(clippy::cognitive_complexity)]

use super::methods::{ErrorMessage, RPCCodedResponse, RequestId, ResponseTermination};
use super::protocol::{Protocol, RPCError, RPCProtocol, RPCRequest};
use super::RPCEvent;
use crate::rpc::protocol::{InboundFramed, OutboundFramed};
use fnv::FnvHashMap;
use futures::prelude::*;
use libp2p::core::upgrade::{
    InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError,
};
use libp2p::swarm::protocols_handler::{
    KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
};
use libp2p::swarm::NegotiatedSubstream;
use slog::{crit, debug, error, trace, warn};
use smallvec::SmallVec;
use std::{
    collections::hash_map::Entry,
    pin::Pin,
    task::{Context, Poll},
    time::{Duration, Instant},
};
use tokio::time::{delay_queue, DelayQueue};
use types::EthSpec;

//TODO: Implement check_timeout() on the substream types

/// The time (in seconds) before a substream that is awaiting a response from the user times out.
pub const RESPONSE_TIMEOUT: u64 = 10;

/// The number of times to retry an outbound upgrade in the case of IO errors.
const IO_ERROR_RETRIES: u8 = 3;

/// Inbound requests are given a sequential `RequestId` to keep track of. All inbound streams are
/// identified by their substream ID which is identical to the RPC Id.
type InboundRequestId = RequestId;
/// Outbound requests are associated with an id that is given by the application that sent the
/// request.
type OutboundRequestId = RequestId;

/// Implementation of `ProtocolsHandler` for the RPC protocol.
pub struct RPCHandler<TSpec>
where
    TSpec: EthSpec,
{
    /// The upgrade for inbound substreams.
    listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>>,

    /// Errors that have occurred and are queued to be reported to the application.
    pending_error: Vec<(RequestId, Protocol, RPCError)>,

    /// Queue of events to produce in `poll()`.
    events_out: SmallVec<[RPCEvent<TSpec>; 4]>,

    /// Queue of outbound substreams to open.
    dial_queue: SmallVec<[(RequestId, RPCRequest<TSpec>); 4]>,

    /// Current number of concurrent outbound substreams being opened.
    dial_negotiated: u32,

    /// Current inbound substreams awaiting processing.
    inbound_substreams: FnvHashMap<
        InboundRequestId,
        (
            InboundSubstreamState<TSpec>,
            Option<delay_queue::Key>,
            Protocol,
        ),
    >,

    /// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout.
    inbound_substreams_delay: DelayQueue<InboundRequestId>,

    /// Map of outbound substreams that need to be driven to completion. The `RequestId` is
    /// maintained by the application sending the request.
    outbound_substreams:
        FnvHashMap<OutboundRequestId, (OutboundSubstreamState<TSpec>, delay_queue::Key, Protocol)>,

    /// Outbound substream `DelayQueue` which keeps track of when an outbound substream will timeout.
    outbound_substreams_delay: DelayQueue<OutboundRequestId>,

    /// Map of outbound items that are queued as the stream processes them.
    queued_outbound_items: FnvHashMap<RequestId, Vec<RPCCodedResponse<TSpec>>>,

    /// Sequential ID for waiting substreams. For inbound substreams, this is also the inbound request ID.
    current_inbound_substream_id: RequestId,

    /// Maximum number of concurrent outbound substreams being opened. Value is never modified.
    max_dial_negotiated: u32,

    /// Value to return from `connection_keep_alive`.
    keep_alive: KeepAlive,

    /// After the given duration has elapsed, an inactive connection will shutdown.
    inactive_timeout: Duration,

    /// Try to negotiate the outbound upgrade a few times if there is an IO error before reporting the request as failed.
    /// This keeps track of the number of attempts.
    outbound_io_error_retries: u8,

    /// Logger for handling RPC streams
    log: slog::Logger,
}

pub enum InboundSubstreamState<TSpec>
where
    TSpec: EthSpec,
{
    /// A response has been sent, pending writing.
    ResponsePendingSend {
        /// The substream used to send the response
        substream: InboundFramed<NegotiatedSubstream, TSpec>,
        /// The message that is attempting to be sent.
        message: RPCCodedResponse<TSpec>,
        /// Whether a stream termination is requested. If true the stream will be closed after
        /// this send. Otherwise it will transition to an idle state until a stream termination is
        /// requested or a timeout is reached.
        closing: bool,
    },
    /// A response has been sent, pending flush.
    ResponsePendingFlush {
        /// The substream used to send the response
        substream: InboundFramed<NegotiatedSubstream, TSpec>,
        /// Whether a stream termination is requested. If true the stream will be closed after
        /// this send. Otherwise it will transition to an idle state until a stream termination is
        /// requested or a timeout is reached.
        closing: bool,
    },
    /// The response stream is idle and awaiting input from the application to send more chunked
    /// responses.
    ResponseIdle(InboundFramed<NegotiatedSubstream, TSpec>),
    /// The substream is attempting to shutdown.
    Closing(InboundFramed<NegotiatedSubstream, TSpec>),
    /// Temporary state during processing
    Poisoned,
}
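
// Illustrative state flow (a reading of the variants above, not part of the original
// docs): an inbound substream cycles ResponseIdle -> ResponsePendingSend ->
// ResponsePendingFlush -> ResponseIdle while chunked responses are queued, and moves
// to Closing once a termination is sent or a timeout fires; Poisoned only exists
// transiently inside `std::mem::replace` swaps and should never be observed.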

/// State of an outbound substream. Either waiting for a response, or in the process of sending.
pub enum OutboundSubstreamState<TSpec: EthSpec> {
    /// A request has been sent, and we are awaiting a response. This future is driven in the
    /// handler because GOODBYE requests can be handled and responses dropped instantly.
    RequestPendingResponse {
        /// The framed negotiated substream.
        substream: OutboundFramed<NegotiatedSubstream, TSpec>,
        /// Keeps track of the actual request sent.
        request: RPCRequest<TSpec>,
    },
    /// Closing an outbound substream.
    Closing(OutboundFramed<NegotiatedSubstream, TSpec>),
    /// Temporary state during processing
    Poisoned,
}

impl<TSpec> InboundSubstreamState<TSpec>
where
    TSpec: EthSpec,
{
    /// Moves the substream state to closing and informs the connected peer. The
    /// `outbound_queue` must be given as a parameter so that stream termination messages can be
    /// added to the outbound queue.
    pub fn close(&mut self, outbound_queue: &mut Vec<RPCCodedResponse<TSpec>>) {
        // When terminating a stream, report the stream termination to the requesting user via
        // an RPC error
        let error = RPCCodedResponse::ServerError(ErrorMessage {
            error_message: b"Request timed out".to_vec(),
        });

        // The stream termination type is irrelevant, this will terminate the
        // stream
        let stream_termination =
            RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange);

        match std::mem::replace(self, InboundSubstreamState::Poisoned) {
            // if we are busy awaiting a send/flush add the termination to the queue
            InboundSubstreamState::ResponsePendingSend {
                substream,
                message,
                closing,
            } => {
                if !closing {
                    outbound_queue.push(error);
                    outbound_queue.push(stream_termination);
                }
                // if the stream is closing after the send, allow it to finish

                *self = InboundSubstreamState::ResponsePendingSend {
                    substream,
                    message,
                    closing,
                }
            }
            // if we are busy awaiting a send/flush add the termination to the queue
            InboundSubstreamState::ResponsePendingFlush { substream, closing } => {
                if !closing {
                    outbound_queue.push(error);
                    outbound_queue.push(stream_termination);
                }
                // if the stream is closing after the send, allow it to finish
                *self = InboundSubstreamState::ResponsePendingFlush { substream, closing }
            }
            InboundSubstreamState::ResponseIdle(substream) => {
                *self = InboundSubstreamState::ResponsePendingSend {
                    substream,
                    message: error,
                    closing: true,
                };
            }
            InboundSubstreamState::Closing(substream) => {
                // let the stream close
                *self = InboundSubstreamState::Closing(substream);
            }
            InboundSubstreamState::Poisoned => {
                unreachable!("Coding error: Timeout poisoned substream")
            }
        };
    }
}

impl<TSpec> RPCHandler<TSpec>
where
    TSpec: EthSpec,
{
    pub fn new(
        listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>>,
        inactive_timeout: Duration,
        log: &slog::Logger,
    ) -> Self {
        RPCHandler {
            listen_protocol,
            pending_error: Vec::new(),
            events_out: SmallVec::new(),
            dial_queue: SmallVec::new(),
            dial_negotiated: 0,
            queued_outbound_items: FnvHashMap::default(),
            inbound_substreams: FnvHashMap::default(),
            outbound_substreams: FnvHashMap::default(),
            inbound_substreams_delay: DelayQueue::new(),
            outbound_substreams_delay: DelayQueue::new(),
            current_inbound_substream_id: 1,
            max_dial_negotiated: 8,
            keep_alive: KeepAlive::Yes,
            inactive_timeout,
            outbound_io_error_retries: 0,
            log: log.clone(),
        }
    }

    /// Returns the number of pending requests.
    pub fn pending_requests(&self) -> u32 {
        self.dial_negotiated + self.dial_queue.len() as u32
    }

    /// Returns a reference to the listen protocol configuration.
    ///
    /// > **Note**: If you modify the protocol, modifications will only apply to future inbound
    /// > substreams, not the ones already being negotiated.
    pub fn listen_protocol_ref(&self) -> &SubstreamProtocol<RPCProtocol<TSpec>> {
        &self.listen_protocol
    }

    /// Returns a mutable reference to the listen protocol configuration.
    ///
    /// > **Note**: If you modify the protocol, modifications will only apply to future inbound
    /// > substreams, not the ones already being negotiated.
    pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol<RPCProtocol<TSpec>> {
        &mut self.listen_protocol
    }

    /// Opens an outbound substream with a request.
    pub fn send_request(&mut self, id: RequestId, req: RPCRequest<TSpec>) {
        self.keep_alive = KeepAlive::Yes;

        self.dial_queue.push((id, req));
    }
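
    // Illustrative usage (assumed values, not part of the original docs):
    //   handler.send_request(RequestId::from(1usize), RPCRequest::Ping(Ping { data: 0 }));
    // The request is only queued here; the outbound substream is actually negotiated
    // later, in `poll()`, once `dial_negotiated < max_dial_negotiated`.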
}

impl<TSpec> ProtocolsHandler for RPCHandler<TSpec>
where
    TSpec: EthSpec,
{
    type InEvent = RPCEvent<TSpec>;
    type OutEvent = RPCEvent<TSpec>;
    type Error = RPCError;
    type InboundProtocol = RPCProtocol<TSpec>;
    type OutboundProtocol = RPCRequest<TSpec>;
    type OutboundOpenInfo = (RequestId, RPCRequest<TSpec>); // Keep track of the id and the request

    fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol> {
        self.listen_protocol.clone()
    }

    fn inject_fully_negotiated_inbound(
        &mut self,
        substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
    ) {
        // update the keep alive timeout if there are no more remaining outbound streams
        if let KeepAlive::Until(_) = self.keep_alive {
            self.keep_alive = KeepAlive::Until(Instant::now() + self.inactive_timeout);
        }

        let (req, substream) = substream;
        // drop the stream and return a 0 id for goodbye "requests"
        if let r @ RPCRequest::Goodbye(_) = req {
            self.events_out.push(RPCEvent::Request(0, r));
            return;
        }

        // New inbound request. Store the stream and tag the output.
        let delay_key = self.inbound_substreams_delay.insert(
            self.current_inbound_substream_id,
            Duration::from_secs(RESPONSE_TIMEOUT),
        );
        let awaiting_stream = InboundSubstreamState::ResponseIdle(substream);
        self.inbound_substreams.insert(
            self.current_inbound_substream_id,
            (awaiting_stream, Some(delay_key), req.protocol()),
        );

        self.events_out
            .push(RPCEvent::Request(self.current_inbound_substream_id, req));
        self.current_inbound_substream_id += 1;
    }

    fn inject_fully_negotiated_outbound(
        &mut self,
        out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
        request_info: Self::OutboundOpenInfo,
    ) {
        self.dial_negotiated -= 1;

        if self.dial_negotiated == 0
            && self.dial_queue.is_empty()
            && self.outbound_substreams.is_empty()
        {
            self.keep_alive = KeepAlive::Until(Instant::now() + self.inactive_timeout);
        } else {
            self.keep_alive = KeepAlive::Yes;
        }

        // add the stream to substreams if we expect a response, otherwise drop the stream.
        let (mut id, request) = request_info;
        if request.expect_response() {
            // outbound requests can be sent from various aspects of lighthouse which don't
            // track request ids. In the future these will be flagged as None; currently they
            // are flagged as 0. These can overlap. In this case, we pick the highest request
            // id available.
            if id == 0 && self.outbound_substreams.get(&id).is_some() {
                // we have a duplicate outbound request with no id; pick one that will not collide
                let mut new_id = std::usize::MAX;
                while self.outbound_substreams.get(&new_id).is_some() {
                    // would underflow (and panic) if every outbound substream id were taken
                    new_id -= 1;
                }
                trace!(self.log, "New outbound stream id created"; "id" => new_id);
                id = RequestId::from(new_id);
            }

            // new outbound request. Store the stream and tag the output.
            let delay_key = self
                .outbound_substreams_delay
                .insert(id, Duration::from_secs(RESPONSE_TIMEOUT));
            let protocol = request.protocol();
            let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
                substream: out,
                request,
            };
            if self
                .outbound_substreams
                .insert(id, (awaiting_stream, delay_key, protocol))
                .is_some()
            {
                crit!(self.log, "Duplicate outbound substream id"; "id" => format!("{:?}", id));
            }
        }
    }
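
    // Worked example of the id fallback above (illustrative): if two id-0 requests are
    // in flight, the second is re-tagged starting from usize::MAX and counting down to
    // the first free id, keeping the keys of `outbound_substreams` unique.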

    // NOTE: If the substream has closed due to inactivity, or the substream is in the
    // wrong state, a response will fail silently.
    fn inject_event(&mut self, rpc_event: Self::InEvent) {
        match rpc_event {
            RPCEvent::Request(id, req) => self.send_request(id, req),
            RPCEvent::Response(rpc_id, response) => {
                // Variables indicating if the response is an error response or a multi-part
                // response
                let res_is_error = response.is_error();
                let res_is_multiple = response.multiple_responses();

                // check if the stream matching the response still exists
                match self.inbound_substreams.get_mut(&rpc_id) {
                    Some((substream_state, _, protocol)) => {
                        match std::mem::replace(substream_state, InboundSubstreamState::Poisoned) {
                            InboundSubstreamState::ResponseIdle(substream) => {
                                // close the stream if there is no response
                                match response {
                                    RPCCodedResponse::StreamTermination(_) => {
                                        //trace!(self.log, "Stream termination sent. Ending the stream");
                                        *substream_state =
                                            InboundSubstreamState::Closing(substream);
                                    }
                                    _ => {
                                        if let Some(error_code) = response.error_code() {
                                            self.pending_error.push((
                                                rpc_id,
                                                *protocol,
                                                RPCError::ErrorResponse(error_code),
                                            ));
                                        }
                                        // send the response
                                        // if it's a single rpc request or an error, close the stream after
                                        *substream_state =
                                            InboundSubstreamState::ResponsePendingSend {
                                                substream,
                                                message: response,
                                                closing: !res_is_multiple | res_is_error, // close if an error or we are not expecting more responses
                                            };
                                    }
                                }
                            }
                            InboundSubstreamState::ResponsePendingSend {
                                substream,
                                message,
                                closing,
                            } if res_is_multiple => {
                                // the stream is in use, add the request to a pending queue
                                self.queued_outbound_items
                                    .entry(rpc_id)
                                    .or_insert_with(Vec::new)
                                    .push(response);

                                // return the state
                                *substream_state = InboundSubstreamState::ResponsePendingSend {
                                    substream,
                                    message,
                                    closing,
                                };
                            }
                            InboundSubstreamState::ResponsePendingFlush { substream, closing }
                                if res_is_multiple =>
                            {
                                // the stream is in use, add the request to a pending queue
                                self.queued_outbound_items
                                    .entry(rpc_id)
                                    .or_insert_with(Vec::new)
                                    .push(response);

                                // return the state
                                *substream_state = InboundSubstreamState::ResponsePendingFlush {
                                    substream,
                                    closing,
                                };
                            }
                            InboundSubstreamState::Closing(substream) => {
                                *substream_state = InboundSubstreamState::Closing(substream);
                                debug!(self.log, "Response not sent. Stream is closing"; "response" => format!("{}",response));
                            }
                            InboundSubstreamState::ResponsePendingSend {
                                substream,
                                message,
                                ..
                            } => {
                                *substream_state = InboundSubstreamState::ResponsePendingSend {
                                    substream,
                                    message,
                                    closing: true,
                                };
                                error!(self.log, "Attempted sending multiple responses to a single response request");
                            }
                            InboundSubstreamState::ResponsePendingFlush { substream, .. } => {
                                *substream_state = InboundSubstreamState::ResponsePendingFlush {
                                    substream,
                                    closing: true,
                                };
                                error!(self.log, "Attempted sending multiple responses to a single response request");
                            }
                            InboundSubstreamState::Poisoned => {
                                crit!(self.log, "Poisoned inbound substream");
                                unreachable!("Coding error: Poisoned substream");
                            }
                        }
                    }
                    None => {
                        warn!(self.log, "Stream has expired. Response not sent"; "response" => response.to_string(), "id" => rpc_id);
                    }
                };
            }
            // We do not send errors as responses
            RPCEvent::Error(..) => {}
        }
    }

    fn inject_dial_upgrade_error(
        &mut self,
        request_info: Self::OutboundOpenInfo,
        error: ProtocolsHandlerUpgrErr<
            <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
        >,
    ) {
        let (id, req) = request_info;
        if let ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(RPCError::IoError(_))) = error {
            self.outbound_io_error_retries += 1;
            if self.outbound_io_error_retries < IO_ERROR_RETRIES {
                self.send_request(id, req);
                return;
            }
        }

        self.outbound_io_error_retries = 0;
        // map the error
        let rpc_error = match error {
            ProtocolsHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"),
            ProtocolsHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout,
            ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e,
            ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => {
                RPCError::UnsupportedProtocol
            }
            ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(
                NegotiationError::ProtocolError(e),
            )) => match e {
                ProtocolError::IoError(io_err) => RPCError::IoError(io_err.to_string()),
                ProtocolError::InvalidProtocol => {
                    RPCError::InternalError("Protocol was deemed invalid")
                }
                ProtocolError::InvalidMessage | ProtocolError::TooManyProtocols => {
                    // Peer is sending invalid data during the negotiation phase, not
                    // participating in the protocol
                    RPCError::InvalidData
                }
            },
        };
        self.pending_error.push((id, req.protocol(), rpc_error));
    }

    fn connection_keep_alive(&self) -> KeepAlive {
        self.keep_alive
    }

    fn poll(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<
        ProtocolsHandlerEvent<
            Self::OutboundProtocol,
            Self::OutboundOpenInfo,
            Self::OutEvent,
            Self::Error,
        >,
    > {
        if !self.pending_error.is_empty() {
            let (id, protocol, err) = self.pending_error.remove(0);
            return Poll::Ready(ProtocolsHandlerEvent::Custom(RPCEvent::Error(
                id, protocol, err,
            )));
        }

        // return any events that need to be reported
        if !self.events_out.is_empty() {
            return Poll::Ready(ProtocolsHandlerEvent::Custom(self.events_out.remove(0)));
        } else {
            self.events_out.shrink_to_fit();
        }

        // purge expired inbound substreams and send an error
        loop {
            match self.inbound_substreams_delay.poll_next_unpin(cx) {
                Poll::Ready(Some(Ok(stream_id))) => {
                    // handle a stream timeout for various states
                    if let Some((substream_state, delay_key, _)) =
                        self.inbound_substreams.get_mut(stream_id.get_ref())
                    {
                        // the delay has been removed
                        *delay_key = None;

                        let outbound_queue = self
                            .queued_outbound_items
                            .entry(stream_id.into_inner())
                            .or_insert_with(Vec::new);
                        substream_state.close(outbound_queue);
                    }
                }
                Poll::Ready(Some(Err(e))) => {
                    warn!(self.log, "Inbound substream poll failed"; "error" => format!("{:?}", e));
                    // drops the peer if we cannot read the delay queue
                    return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
                        "Could not poll inbound stream timer",
                    )));
                }
                Poll::Pending | Poll::Ready(None) => break,
            }
        }

        // purge expired outbound substreams
        loop {
            match self.outbound_substreams_delay.poll_next_unpin(cx) {
                Poll::Ready(Some(Ok(stream_id))) => {
                    if let Some((_id, _stream, protocol)) =
                        self.outbound_substreams.remove(stream_id.get_ref())
                    {
                        // notify the user
                        return Poll::Ready(ProtocolsHandlerEvent::Custom(RPCEvent::Error(
                            *stream_id.get_ref(),
                            protocol,
                            RPCError::StreamTimeout,
                        )));
                    } else {
                        crit!(self.log, "timed out substream not in the books"; "stream_id" => stream_id.get_ref());
                    }
                }
                Poll::Ready(Some(Err(e))) => {
                    warn!(self.log, "Outbound substream poll failed"; "error" => format!("{:?}", e));
                    return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
                        "Could not poll outbound stream timer",
                    )));
                }
                Poll::Pending | Poll::Ready(None) => break,
            }
        }

        // drive inbound streams that need to be processed
        for request_id in self.inbound_substreams.keys().copied().collect::<Vec<_>>() {
            // Drain all queued items until all messages have been processed for this stream
            // TODO Improve this code logic
            let mut new_items_to_send = true;
            while new_items_to_send {
                new_items_to_send = false;
                match self.inbound_substreams.entry(request_id) {
                    Entry::Occupied(mut entry) => {
                        match std::mem::replace(
                            &mut entry.get_mut().0,
                            InboundSubstreamState::Poisoned,
                        ) {
                            InboundSubstreamState::ResponsePendingSend {
                                mut substream,
                                message,
                                closing,
                            } => {
                                match Sink::poll_ready(Pin::new(&mut substream), cx) {
                                    Poll::Ready(Ok(())) => {
                                        // stream is ready to send data
                                        match Sink::start_send(Pin::new(&mut substream), message) {
                                            Ok(()) => {
                                                // await flush
                                                entry.get_mut().0 =
                                                    InboundSubstreamState::ResponsePendingFlush {
                                                        substream,
                                                        closing,
                                                    }
                                            }
                                            Err(e) => {
                                                // error with sending in the codec
                                                warn!(self.log, "Error sending RPC message"; "error" => e.to_string());
                                                // keep connection with the peer and return the
                                                // stream to awaiting response if this message
                                                // wasn't closing the stream
                                                // TODO: Duplicate code
                                                if closing {
                                                    entry.get_mut().0 =
                                                        InboundSubstreamState::Closing(substream)
                                                } else {
                                                    // check for queued chunks and update the stream
                                                    entry.get_mut().0 = apply_queued_responses(
                                                        substream,
                                                        &mut self
                                                            .queued_outbound_items
                                                            .get_mut(&request_id),
                                                        &mut new_items_to_send,
                                                    );
                                                }
                                            }
                                        }
                                    }
                                    Poll::Ready(Err(e)) => {
                                        error!(self.log, "Inbound substream error while sending RPC message: {:?}", e);
                                        entry.remove();
                                        return Poll::Ready(ProtocolsHandlerEvent::Close(e));
                                    }
                                    Poll::Pending => {
                                        // the stream is not yet ready, continue waiting
                                        entry.get_mut().0 =
                                            InboundSubstreamState::ResponsePendingSend {
                                                substream,
                                                message,
                                                closing,
                                            };
                                    }
                                }
                            }
                            InboundSubstreamState::ResponsePendingFlush {
                                mut substream,
                                closing,
                            } => {
                                match Sink::poll_flush(Pin::new(&mut substream), cx) {
                                    Poll::Ready(Ok(())) => {
                                        // finished flushing
                                        // TODO: Duplicate code
                                        if closing {
                                            entry.get_mut().0 =
                                                InboundSubstreamState::Closing(substream)
                                        } else {
                                            // check for queued chunks and update the stream
                                            entry.get_mut().0 = apply_queued_responses(
                                                substream,
                                                &mut self
                                                    .queued_outbound_items
                                                    .get_mut(&request_id),
                                                &mut new_items_to_send,
                                            );
                                        }
                                    }
                                    Poll::Ready(Err(e)) => {
                                        // error during flush
                                        trace!(self.log, "Error flushing RPC message"; "error" => e.to_string());
                                        // we drop the stream on error and inform the user, remove
                                        // any pending requests
                                        // TODO: Duplicate code
                                        if let Some(delay_key) = &entry.get().1 {
                                            self.inbound_substreams_delay.remove(delay_key);
                                        }
                                        self.queued_outbound_items.remove(&request_id);
                                        entry.remove();

                                        if self.outbound_substreams.is_empty()
                                            && self.inbound_substreams.is_empty()
                                        {
                                            self.keep_alive = KeepAlive::Until(
                                                Instant::now() + self.inactive_timeout,
                                            );
                                        }
                                    }
                                    Poll::Pending => {
                                        entry.get_mut().0 =
                                            InboundSubstreamState::ResponsePendingFlush {
                                                substream,
                                                closing,
                                            };
                                    }
                                }
                            }
                            InboundSubstreamState::ResponseIdle(substream) => {
                                entry.get_mut().0 = apply_queued_responses(
                                    substream,
                                    &mut self.queued_outbound_items.get_mut(&request_id),
                                    &mut new_items_to_send,
                                );
                            }
                            InboundSubstreamState::Closing(mut substream) => {
                                match Sink::poll_close(Pin::new(&mut substream), cx) {
                                    Poll::Ready(Ok(())) => {
                                        if let Some(delay_key) = &entry.get().1 {
                                            self.inbound_substreams_delay.remove(delay_key);
                                        }
                                        self.queued_outbound_items.remove(&request_id);
                                        entry.remove();

                                        if self.outbound_substreams.is_empty()
                                            && self.inbound_substreams.is_empty()
                                        {
                                            self.keep_alive = KeepAlive::Until(
                                                Instant::now() + self.inactive_timeout,
                                            );
                                        }
                                    } // drop the stream
                                    Poll::Ready(Err(e)) => {
                                        error!(self.log, "Error closing inbound stream"; "error" => e.to_string());
                                        // drop the stream anyway
                                        // TODO: Duplicate code
                                        if let Some(delay_key) = &entry.get().1 {
                                            self.inbound_substreams_delay.remove(delay_key);
                                        }
                                        self.queued_outbound_items.remove(&request_id);
                                        entry.remove();

                                        if self.outbound_substreams.is_empty()
                                            && self.inbound_substreams.is_empty()
                                        {
                                            self.keep_alive = KeepAlive::Until(
                                                Instant::now() + self.inactive_timeout,
                                            );
                                        }
                                    }
                                    Poll::Pending => {
                                        entry.get_mut().0 =
                                            InboundSubstreamState::Closing(substream);
                                    }
                                }
                            }
                            InboundSubstreamState::Poisoned => {
                                crit!(self.log, "Poisoned inbound substream");
                                unreachable!("Coding Error: Inbound substream is poisoned");
                            }
                        }
                    }
                    Entry::Vacant(_) => unreachable!(),
                }
            }
        }

        // drive outbound streams that need to be processed
        for request_id in self.outbound_substreams.keys().copied().collect::<Vec<_>>() {
            match self.outbound_substreams.entry(request_id) {
                Entry::Occupied(mut entry) => {
                    match std::mem::replace(
                        &mut entry.get_mut().0,
                        OutboundSubstreamState::Poisoned,
                    ) {
                        OutboundSubstreamState::RequestPendingResponse {
                            mut substream,
                            request,
                        } => match substream.poll_next_unpin(cx) {
                            Poll::Ready(Some(Ok(response))) => {
                                if request.multiple_responses() && !response.is_error() {
                                    entry.get_mut().0 =
                                        OutboundSubstreamState::RequestPendingResponse {
                                            substream,
                                            request,
                                        };
                                    let delay_key = &entry.get().1;
                                    self.outbound_substreams_delay
                                        .reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT));
                                } else {
                                    // either this is a single response request or we received an
                                    // error
                                    //trace!(self.log, "Closing single stream request");
                                    // only expect a single response, close the stream
                                    entry.get_mut().0 = OutboundSubstreamState::Closing(substream);
                                }

                                return Poll::Ready(ProtocolsHandlerEvent::Custom(
                                    RPCEvent::Response(request_id, response),
                                ));
                            }
                            Poll::Ready(None) => {
                                // stream closed
                                // if we expected multiple streams send a stream termination,
                                // else report the stream terminating only.
                                //trace!(self.log, "RPC Response - stream closed by remote");
                                // drop the stream
                                let delay_key = &entry.get().1;
                                self.outbound_substreams_delay.remove(delay_key);
                                entry.remove_entry();
                                // notify the application error
                                if request.multiple_responses() {
                                    // return an end of stream result
                                    return Poll::Ready(ProtocolsHandlerEvent::Custom(
                                        RPCEvent::Response(
                                            request_id,
                                            RPCCodedResponse::StreamTermination(
                                                request.stream_termination(),
                                            ),
                                        ),
                                    ));
                                } // else we return an error, stream should not have closed early.
                                return Poll::Ready(ProtocolsHandlerEvent::Custom(
                                    RPCEvent::Error(
                                        request_id,
                                        request.protocol(),
                                        RPCError::IncompleteStream,
                                    ),
                                ));
                            }
                            Poll::Pending => {
                                entry.get_mut().0 =
                                    OutboundSubstreamState::RequestPendingResponse {
                                        substream,
                                        request,
                                    }
                            }
                            Poll::Ready(Some(Err(e))) => {
                                // drop the stream
                                let delay_key = &entry.get().1;
                                self.outbound_substreams_delay.remove(delay_key);
                                let protocol = entry.get().2;
                                entry.remove_entry();
                                return Poll::Ready(ProtocolsHandlerEvent::Custom(
                                    RPCEvent::Error(request_id, protocol, e),
                                ));
                            }
                        },
                        OutboundSubstreamState::Closing(mut substream) => {
                            match Sink::poll_close(Pin::new(&mut substream), cx) {
                                // TODO: check if this is supposed to be a stream
                                Poll::Ready(_) => {
                                    // drop the stream - including if there is an error
                                    let delay_key = &entry.get().1;
                                    self.outbound_substreams_delay.remove(delay_key);
                                    entry.remove_entry();

                                    if self.outbound_substreams.is_empty()
                                        && self.inbound_substreams.is_empty()
                                    {
                                        self.keep_alive = KeepAlive::Until(
                                            Instant::now() + self.inactive_timeout,
                                        );
                                    }
                                }
                                Poll::Pending => {
                                    entry.get_mut().0 = OutboundSubstreamState::Closing(substream);
                                }
                            }
                        }
                        OutboundSubstreamState::Poisoned => {
                            crit!(self.log, "Poisoned outbound substream");
                            unreachable!("Coding Error: Outbound substream is poisoned")
                        }
                    }
                }
                Entry::Vacant(_) => unreachable!(),
            }
        }

        // establish outbound substreams
        if !self.dial_queue.is_empty() && self.dial_negotiated < self.max_dial_negotiated {
            self.dial_negotiated += 1;
            let (id, req) = self.dial_queue.remove(0);
            self.dial_queue.shrink_to_fit();
            return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                protocol: SubstreamProtocol::new(req.clone()),
                info: (id, req),
            });
        }
        Poll::Pending
    }
}

// Check for new items to send to the peer and update the underlying stream
fn apply_queued_responses<TSpec: EthSpec>(
    substream: InboundFramed<NegotiatedSubstream, TSpec>,
    queued_outbound_items: &mut Option<&mut Vec<RPCCodedResponse<TSpec>>>,
    new_items_to_send: &mut bool,
) -> InboundSubstreamState<TSpec> {
    match queued_outbound_items {
        Some(ref mut queue) if !queue.is_empty() => {
            *new_items_to_send = true;
            // we have queued items
            match queue.remove(0) {
                RPCCodedResponse::StreamTermination(_) => {
                    // close the stream if this is a stream termination
                    InboundSubstreamState::Closing(substream)
                }
                chunk => InboundSubstreamState::ResponsePendingSend {
                    substream,
                    message: chunk,
                    closing: false,
                },
            }
        }
        _ => {
            // no items queued; set the stream to idle
            InboundSubstreamState::ResponseIdle(substream)
        }
    }
}

@@ -1,188 +0,0 @@
//! The Ethereum 2.0 Wire Protocol
//!
//! This protocol is a purpose-built Ethereum 2.0 libp2p protocol. Its role is to facilitate
//! direct peer-to-peer communication, primarily for sending/receiving chain information for
//! syncing.

use handler::RPCHandler;
use libp2p::core::{connection::ConnectionId, ConnectedPoint};
use libp2p::swarm::{
    protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler,
    PollParameters, SubstreamProtocol,
};
use libp2p::{Multiaddr, PeerId};
pub use methods::{
    ErrorMessage, MetaData, RPCCodedResponse, RPCResponse, RPCResponseErrorCode, RequestId,
    ResponseTermination, StatusMessage,
};
pub use protocol::{Protocol, RPCError, RPCProtocol, RPCRequest};
use slog::{debug, o};
use std::marker::PhantomData;
use std::task::{Context, Poll};
use std::time::Duration;
use types::EthSpec;

pub(crate) mod codec;
mod handler;
pub mod methods;
mod protocol;

/// The return type used in the behaviour and the resultant event from the protocols handler.
#[derive(Debug, Clone)]
pub enum RPCEvent<T: EthSpec> {
    /// An inbound/outbound request for RPC protocol. The first parameter is a sequential
    /// id which tracks an awaiting substream for the response.
    Request(RequestId, RPCRequest<T>),
    /// A response that is being sent or has been received from the RPC protocol. The first
    /// parameter is the id that was sent with the corresponding request; the second is a
    /// single chunk of a response.
    Response(RequestId, RPCCodedResponse<T>),
    /// An Error occurred.
    Error(RequestId, Protocol, RPCError),
}

/// Messages sent to the user from the RPC protocol.
pub struct RPCMessage<TSpec: EthSpec> {
    /// The peer that sent the message.
    pub peer_id: PeerId,
    /// The message that was sent.
    pub event: RPCEvent<TSpec>,
}

impl<T: EthSpec> RPCEvent<T> {
    pub fn id(&self) -> usize {
        match *self {
            RPCEvent::Request(id, _) => id,
            RPCEvent::Response(id, _) => id,
            RPCEvent::Error(id, _, _) => id,
        }
    }
}

impl<T: EthSpec> std::fmt::Display for RPCEvent<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            RPCEvent::Request(id, req) => write!(f, "RPC Request(id: {}, {})", id, req),
            RPCEvent::Response(id, res) => write!(f, "RPC Response(id: {}, {})", id, res),
            RPCEvent::Error(id, prot, err) => write!(
                f,
                "RPC Error(id: {}, protocol: {:?} error: {:?})",
                id, prot, err
            ),
        }
    }
}

/// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
/// logic.
pub struct RPC<TSpec: EthSpec> {
    /// Queue of events to be processed.
    events: Vec<NetworkBehaviourAction<RPCEvent<TSpec>, RPCMessage<TSpec>>>,
    /// Slog logger for RPC behaviour.
    log: slog::Logger,
}

impl<TSpec: EthSpec> RPC<TSpec> {
    pub fn new(log: slog::Logger) -> Self {
        let log = log.new(o!("service" => "libp2p_rpc"));
        RPC {
            events: Vec::new(),
            log,
        }
    }

    /// Submits an RPC request.
    ///
    /// The peer must be connected for this to succeed.
    pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent<TSpec>) {
        self.events.push(NetworkBehaviourAction::NotifyHandler {
            peer_id,
            handler: NotifyHandler::Any,
            event: rpc_event,
        });
    }
}
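
// Illustrative usage of `send_rpc` (assumed values; it mirrors the metadata request
// sent from `inject_connected` below):
//   rpc.send_rpc(peer_id, RPCEvent::Request(RequestId::from(1usize),
//       RPCRequest::MetaData(PhantomData)));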

impl<TSpec> NetworkBehaviour for RPC<TSpec>
where
    TSpec: EthSpec,
{
    type ProtocolsHandler = RPCHandler<TSpec>;
    type OutEvent = RPCMessage<TSpec>;

    fn new_handler(&mut self) -> Self::ProtocolsHandler {
        RPCHandler::new(
            SubstreamProtocol::new(RPCProtocol {
                phantom: PhantomData,
            }),
            Duration::from_secs(30),
            &self.log,
        )
    }

    // handled by discovery
    fn addresses_of_peer(&mut self, _peer_id: &PeerId) -> Vec<Multiaddr> {
        Vec::new()
    }

    // Use connection established/closed instead of these currently
    fn inject_connected(&mut self, peer_id: &PeerId) {
        // find the peer's meta-data
        debug!(self.log, "Requesting new peer's metadata"; "peer_id" => format!("{}",peer_id));
        let rpc_event =
            RPCEvent::Request(RequestId::from(0usize), RPCRequest::MetaData(PhantomData));
        self.events.push(NetworkBehaviourAction::NotifyHandler {
            peer_id: peer_id.clone(),
            handler: NotifyHandler::Any,
            event: rpc_event,
        });
    }

    fn inject_disconnected(&mut self, _peer_id: &PeerId) {}

    fn inject_connection_established(
        &mut self,
        _peer_id: &PeerId,
        _: &ConnectionId,
        _connected_point: &ConnectedPoint,
    ) {
    }

    fn inject_connection_closed(
        &mut self,
        _peer_id: &PeerId,
        _: &ConnectionId,
        _connected_point: &ConnectedPoint,
    ) {
    }

    fn inject_event(
        &mut self,
        source: PeerId,
        _: ConnectionId,
        event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
    ) {
        // send the event to the user
        self.events
            .push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
                peer_id: source,
                event,
            }));
    }

    fn poll(
        &mut self,
        _cx: &mut Context,
        _: &mut impl PollParameters,
    ) -> Poll<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
            Self::OutEvent,
        >,
    > {
        if !self.events.is_empty() {
            return Poll::Ready(self.events.remove(0));
        }
        Poll::Pending
    }
}

@@ -1,181 +0,0 @@
#![cfg(test)]
use crate::behaviour::Behaviour;
use crate::multiaddr::Protocol;
use ::types::{EnrForkId, MinimalEthSpec};
use eth2_libp2p::discovery::{build_enr, CombinedKey, CombinedKeyExt};
use eth2_libp2p::*;
use futures::prelude::*;
use libp2p::core::identity::Keypair;
use libp2p::{
    core,
    core::{muxing::StreamMuxerBox, transport::boxed::Boxed},
    secio,
    swarm::{SwarmBuilder, SwarmEvent},
    PeerId, Swarm, Transport,
};
use slog::{crit, debug, info, Level};
use std::io::{Error, ErrorKind};
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;

type TSpec = MinimalEthSpec;

mod common;

type Libp2pBehaviour = Behaviour<TSpec>;

/// Build and return an eth2_libp2p Swarm with only secio support.
fn build_secio_swarm(
    config: &NetworkConfig,
    log: slog::Logger,
) -> error::Result<Swarm<Libp2pBehaviour>> {
    let local_keypair = Keypair::generate_secp256k1();
    let local_peer_id = PeerId::from(local_keypair.public());
    let enr_key = CombinedKey::from_libp2p(&local_keypair).unwrap();

    let enr = build_enr::<TSpec>(&enr_key, config, EnrForkId::default()).unwrap();
    let network_globals = Arc::new(NetworkGlobals::new(
        enr,
        config.libp2p_port,
        config.discovery_port,
        &log,
    ));

    let mut swarm = {
        // Set up the transport - tcp/ws with secio and mplex/yamux
        let transport = build_secio_transport(local_keypair.clone());
        // Lighthouse network behaviour
        let behaviour = Behaviour::new(&local_keypair, config, network_globals.clone(), &log)?;
        // requires a tokio runtime
        struct Executor(tokio::runtime::Handle);
        impl libp2p::core::Executor for Executor {
            fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
                self.0.spawn(f);
            }
        }
        SwarmBuilder::new(transport, behaviour, local_peer_id.clone())
            .executor(Box::new(Executor(tokio::runtime::Handle::current())))
            .build()
    };

    // listen on the specified address
    let listen_multiaddr = {
        let mut m = Multiaddr::from(config.listen_address);
        m.push(Protocol::Tcp(config.libp2p_port));
        m
    };

    match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) {
        Ok(_) => {
            let mut log_address = listen_multiaddr;
            log_address.push(Protocol::P2p(local_peer_id.clone().into()));
            info!(log, "Listening established"; "address" => format!("{}", log_address));
        }
        Err(err) => {
            crit!(
                log,
                "Unable to listen on libp2p address";
                "error" => format!("{:?}", err),
                "listen_multiaddr" => format!("{}", listen_multiaddr),
            );
            return Err("Libp2p was unable to listen on the given listen address.".into());
        }
    };

    // helper closure for dialing peers
    let mut dial_addr = |multiaddr: &Multiaddr| {
        match Swarm::dial_addr(&mut swarm, multiaddr.clone()) {
            Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => format!("{}", multiaddr)),
            Err(err) => debug!(
                log,
                "Could not connect to peer"; "address" => format!("{}", multiaddr), "error" => format!("{:?}", err)
            ),
        };
    };

    // attempt to connect to any specified boot-nodes
    for bootnode_enr in &config.boot_nodes {
        for multiaddr in &bootnode_enr.multiaddr() {
            // ignore udp multiaddr if it exists
            let components = multiaddr.iter().collect::<Vec<_>>();
            if let Protocol::Udp(_) = components[1] {
                continue;
            }
            dial_addr(multiaddr);
        }
    }
    Ok(swarm)
}

/// Build a simple TCP transport with secio, mplex/yamux.
fn build_secio_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox), Error> {
    let transport = libp2p_tcp::TokioTcpConfig::new().nodelay(true);
    transport
        .upgrade(core::upgrade::Version::V1)
        .authenticate(secio::SecioConfig::new(local_private_key))
        .multiplex(core::upgrade::SelectUpgrade::new(
            libp2p::yamux::Config::default(),
            libp2p::mplex::MplexConfig::new(),
        ))
        .map(|(peer, muxer), _| (peer, core::muxing::StreamMuxerBox::new(muxer)))
        .timeout(Duration::from_secs(20))
        .map_err(|err| Error::new(ErrorKind::Other, err))
        .boxed()
}
|
||||
|
||||
/// Test if the encryption falls back to secio if noise isn't available
|
||||
#[tokio::test]
|
||||
async fn test_secio_noise_fallback() {
|
||||
// set up the logging. The level and enabled logging or not
|
||||
let log_level = Level::Trace;
|
||||
let enable_logging = false;
|
||||
|
||||
let log = common::build_log(log_level, enable_logging);
|
||||
|
||||
let port = common::unused_port("tcp").unwrap();
|
||||
let noisy_config = common::build_config(port, vec![], None);
|
||||
let mut noisy_node = Service::new(&noisy_config, EnrForkId::default(), &log)
|
||||
.expect("should build a libp2p instance")
|
||||
.1;
|
||||
|
||||
let port = common::unused_port("tcp").unwrap();
|
||||
let secio_config = common::build_config(port, vec![common::get_enr(&noisy_node)], None);
|
||||
|
||||
// Building a custom Libp2pService from outside the crate isn't possible because of
|
||||
// private fields in the Libp2pService struct. A swarm is good enough for testing
|
||||
// compatibility with secio.
|
||||
let mut secio_swarm =
|
||||
build_secio_swarm(&secio_config, log.clone()).expect("should build a secio swarm");
|
||||
|
||||
let secio_log = log.clone();
|
||||
|
||||
let noisy_future = async {
|
||||
loop {
|
||||
noisy_node.next_event().await;
|
||||
}
|
||||
};
|
||||
|
||||
let secio_future = async {
|
||||
loop {
|
||||
match secio_swarm.next_event().await {
|
||||
SwarmEvent::ConnectionEstablished { peer_id, .. } => {
|
||||
// secio node negotiated a secio transport with
|
||||
// the noise compatible node
|
||||
info!(secio_log, "Connected to peer {}", peer_id);
|
||||
return;
|
||||
}
|
||||
_ => {} // Ignore all other events
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
tokio::select! {
|
||||
_ = noisy_future => {}
|
||||
_ = secio_future => {}
|
||||
_ = tokio::time::delay_for(Duration::from_millis(800)) => {
|
||||
panic!("Future timed out");
|
||||
}
|
||||
}
|
||||
}
|
||||
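
// A self-contained sketch of the race-against-a-timer pattern the test above drives
// with `tokio::select!`: whichever future completes first wins. Here, futures 0.3
// primitives (a dependency of this crate) stand in for the swarm futures and for
// `delay_for`; the "stuck peer" and "timeout" names are hypothetical.
use futures::executor::block_on;
use futures::future::{self, Either};

fn main() {
    let stuck_peer = future::pending::<()>(); // never resolves, like a hung connection
    let timeout = future::ready(()); // stands in for tokio::time::delay_for(..)
    match block_on(future::select(Box::pin(stuck_peer), Box::pin(timeout))) {
        Either::Left(_) => println!("peer future finished first"),
        Either::Right(_) => println!("timed out, where the test would panic"),
    }
}
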
@@ -1,541 +0,0 @@
#![cfg(test)]
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::*;
use eth2_libp2p::{BehaviourEvent, Libp2pEvent, RPCEvent};
use slog::{debug, warn, Level};
use std::time::Duration;
use tokio::time::delay_for;
use types::{
    BeaconBlock, Epoch, EthSpec, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot,
};

mod common;

type E = MinimalEthSpec;

#[tokio::test]
// Tests the STATUS RPC message
async fn test_status_rpc() {
    // set up the logging. The level and whether logging is enabled
    let log_level = Level::Debug;
    let enable_logging = false;

    let log = common::build_log(log_level, enable_logging);

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log).await;

    // Dummy STATUS RPC request
    let rpc_request = RPCRequest::Status(StatusMessage {
        fork_digest: [0; 4],
        finalized_root: Hash256::from_low_u64_be(0),
        finalized_epoch: Epoch::new(1),
        head_root: Hash256::from_low_u64_be(0),
        head_slot: Slot::new(1),
    });

    // Dummy STATUS RPC response
    let rpc_response = RPCResponse::Status(StatusMessage {
        fork_digest: [0; 4],
        finalized_root: Hash256::from_low_u64_be(0),
        finalized_epoch: Epoch::new(1),
        head_root: Hash256::from_low_u64_be(0),
        head_slot: Slot::new(1),
    });

    // build the sender future
    let sender_future = async {
        loop {
            match sender.next_event().await {
                Libp2pEvent::PeerConnected { peer_id, .. } => {
                    // Send a STATUS message
                    debug!(log, "Sending RPC");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone()));
                }
                Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event {
                    // Should receive the RPC response
                    RPCEvent::Response(id, response @ RPCCodedResponse::Success(_)) => {
                        if id == 10 {
                            debug!(log, "Sender Received");
                            let response = {
                                match response {
                                    RPCCodedResponse::Success(r) => r,
                                    _ => unreachable!(),
                                }
                            };
                            assert_eq!(response, rpc_response.clone());
                            debug!(log, "Sender Completed");
                            return;
                        }
                    }
                    _ => {} // Ignore other RPC messages
                },
                _ => {}
            }
        }
    };

    // build the receiver future
    let receiver_future = async {
        loop {
            match receiver.next_event().await {
                Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => {
                    match event {
                        // Should receive sent RPC request
                        RPCEvent::Request(id, request) => {
                            if request == rpc_request {
                                // send the response
                                debug!(log, "Receiver Received");
                                receiver.swarm.send_rpc(
                                    peer_id,
                                    RPCEvent::Response(
                                        id,
                                        RPCCodedResponse::Success(rpc_response.clone()),
                                    ),
                                );
                            }
                        }
                        _ => {} // Ignore other RPC requests
                    }
                }
                _ => {} // Ignore other events
            }
        }
    };

    tokio::select! {
        _ = sender_future => {}
        _ = receiver_future => {}
        _ = delay_for(Duration::from_millis(800)) => {
            panic!("Future timed out");
        }
    }
}

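// A std-only sketch of the id matching done above: the sender tags each request
// with an id (10 here) and only accepts a response carrying the same id back.
// The map of pending requests and its contents are hypothetical placeholders.
use std::collections::HashMap;

fn main() {
    let mut pending: HashMap<u64, &str> = HashMap::new();
    pending.insert(10, "status-request");

    // A response arrives tagged with id 10; it resolves the matching request.
    let (response_id, body) = (10u64, "status-response");
    match pending.remove(&response_id) {
        Some(request) => println!("{} answered by {}", request, body),
        None => println!("unsolicited response, ignored"),
    }
}
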
#[tokio::test]
// Tests a streamed BlocksByRange RPC Message
async fn test_blocks_by_range_chunked_rpc() {
    // set up the logging. The level and whether logging is enabled
    let log_level = Level::Trace;
    let enable_logging = false;

    let messages_to_send = 10;

    let log = common::build_log(log_level, enable_logging);

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log).await;

    // BlocksByRange Request
    let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest {
        start_slot: 0,
        count: messages_to_send,
        step: 0,
    });

    // BlocksByRange Response
    let spec = E::default_spec();
    let empty_block = BeaconBlock::empty(&spec);
    let empty_signed = SignedBeaconBlock {
        message: empty_block,
        signature: Signature::empty_signature(),
    };
    let rpc_response = RPCResponse::BlocksByRange(Box::new(empty_signed));

    // keep count of the number of messages received
    let mut messages_received = 0;
    // build the sender future
    let sender_future = async {
        loop {
            match sender.next_event().await {
                Libp2pEvent::PeerConnected { peer_id, .. } => {
                    // Send a BlocksByRange request
                    debug!(log, "Sending RPC");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone()));
                }
                Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event {
                    // Should receive the RPC response
                    RPCEvent::Response(id, response) => {
                        if id == 10 {
                            warn!(log, "Sender received a response");
                            match response {
                                RPCCodedResponse::Success(res) => {
                                    assert_eq!(res, rpc_response.clone());
                                    messages_received += 1;
                                    warn!(log, "Chunk received");
                                }
                                RPCCodedResponse::StreamTermination(_) => {
                                    // should be exactly `messages_to_send` messages before terminating
                                    assert_eq!(messages_received, messages_to_send);
                                    // end the test
                                    return;
                                }
                                _ => panic!("Invalid RPC received"),
                            }
                        }
                    }
                    _ => {} // Ignore other RPC messages
                },
                _ => {} // Ignore other behaviour events
            }
        }
    };

    // build the receiver future
    let receiver_future = async {
        loop {
            match receiver.next_event().await {
                Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => {
                    match event {
                        // Should receive sent RPC request
                        RPCEvent::Request(id, request) => {
                            if request == rpc_request {
                                // send the response
                                warn!(log, "Receiver got request");

                                for _ in 1..=messages_to_send {
                                    receiver.swarm.send_rpc(
                                        peer_id.clone(),
                                        RPCEvent::Response(
                                            id,
                                            RPCCodedResponse::Success(rpc_response.clone()),
                                        ),
                                    );
                                }
                                // send the stream termination
                                receiver.swarm.send_rpc(
                                    peer_id,
                                    RPCEvent::Response(
                                        id,
                                        RPCCodedResponse::StreamTermination(
                                            ResponseTermination::BlocksByRange,
                                        ),
                                    ),
                                );
                            }
                        }
                        _ => {} // Ignore other events
                    }
                }
                _ => {} // Ignore other events
            }
        }
    };

    tokio::select! {
        _ = sender_future => {}
        _ = receiver_future => {}
        _ = delay_for(Duration::from_millis(800)) => {
            panic!("Future timed out");
        }
    }
}

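// A std-only sketch of the sender's receive loop above: count `Success` chunks
// until a `StreamTermination` arrives, then assert the expected total. `Chunk`
// is a hypothetical stand-in for `RPCCodedResponse`.
enum Chunk {
    Success(u64),
    StreamTermination,
}

fn main() {
    let messages_to_send = 10;
    let stream = (0..messages_to_send)
        .map(Chunk::Success)
        .chain(std::iter::once(Chunk::StreamTermination));

    let mut messages_received = 0;
    for chunk in stream {
        match chunk {
            Chunk::Success(_) => messages_received += 1,
            Chunk::StreamTermination => break,
        }
    }
    assert_eq!(messages_received, messages_to_send);
}
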
#[tokio::test]
// Tests an empty response to a BlocksByRange RPC Message
async fn test_blocks_by_range_single_empty_rpc() {
    // set up the logging. The level and whether logging is enabled
    let log_level = Level::Trace;
    let enable_logging = false;

    let log = common::build_log(log_level, enable_logging);

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log).await;

    // BlocksByRange Request
    let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest {
        start_slot: 0,
        count: 10,
        step: 0,
    });

    // BlocksByRange Response
    let spec = E::default_spec();
    let empty_block = BeaconBlock::empty(&spec);
    let empty_signed = SignedBeaconBlock {
        message: empty_block,
        signature: Signature::empty_signature(),
    };
    let rpc_response = RPCResponse::BlocksByRange(Box::new(empty_signed));

    let messages_to_send = 1;

    // keep count of the number of messages received
    let mut messages_received = 0;
    // build the sender future
    let sender_future = async {
        loop {
            match sender.next_event().await {
                Libp2pEvent::PeerConnected { peer_id, .. } => {
                    // Send a BlocksByRange request
                    debug!(log, "Sending RPC");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone()));
                }
                Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event {
                    // Should receive the RPC response
                    RPCEvent::Response(id, response) => {
                        if id == 10 {
                            warn!(log, "Sender received a response");
                            match response {
                                RPCCodedResponse::Success(res) => {
                                    assert_eq!(res, rpc_response.clone());
                                    messages_received += 1;
                                    warn!(log, "Chunk received");
                                }
                                RPCCodedResponse::StreamTermination(_) => {
                                    // should be exactly one message (`messages_to_send`) before terminating
                                    assert_eq!(messages_received, messages_to_send);
                                    // end the test
                                    return;
                                }
                                _ => panic!("Invalid RPC received"),
                            }
                        }
                    }
                    _ => {} // Ignore other RPC messages
                },
                _ => {} // Ignore other behaviour events
            }
        }
    };

    // build the receiver future
    let receiver_future = async {
        loop {
            match receiver.next_event().await {
                Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => {
                    match event {
                        // Should receive sent RPC request
                        RPCEvent::Request(id, request) => {
                            if request == rpc_request {
                                // send the response
                                warn!(log, "Receiver got request");

                                for _ in 1..=messages_to_send {
                                    receiver.swarm.send_rpc(
                                        peer_id.clone(),
                                        RPCEvent::Response(
                                            id,
                                            RPCCodedResponse::Success(rpc_response.clone()),
                                        ),
                                    );
                                }
                                // send the stream termination
                                receiver.swarm.send_rpc(
                                    peer_id,
                                    RPCEvent::Response(
                                        id,
                                        RPCCodedResponse::StreamTermination(
                                            ResponseTermination::BlocksByRange,
                                        ),
                                    ),
                                );
                            }
                        }
                        _ => {} // Ignore other events
                    }
                }
                _ => {} // Ignore other events
            }
        }
    };
    tokio::select! {
        _ = sender_future => {}
        _ = receiver_future => {}
        _ = delay_for(Duration::from_millis(800)) => {
            panic!("Future timed out");
        }
    }
}

#[tokio::test]
// Tests a streamed, chunked BlocksByRoot RPC Message.
// The size of the response is a full `BeaconBlock`,
// which is greater than the Snappy frame size. Hence, this test
// serves to test the snappy framing format as well.
async fn test_blocks_by_root_chunked_rpc() {
    // set up the logging. The level and whether logging is enabled
    let log_level = Level::Debug;
    let enable_logging = false;

    let messages_to_send = 3;

    let log = common::build_log(log_level, enable_logging);
    let spec = E::default_spec();

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log).await;

    // BlocksByRoot Request
    let rpc_request = RPCRequest::BlocksByRoot(BlocksByRootRequest {
        block_roots: vec![Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0)],
    });

    // BlocksByRoot Response
    let full_block = BeaconBlock::full(&spec);
    let signed_full_block = SignedBeaconBlock {
        message: full_block,
        signature: Signature::empty_signature(),
    };
    let rpc_response = RPCResponse::BlocksByRoot(Box::new(signed_full_block));

    // keep count of the number of messages received
    let mut messages_received = 0;
    // build the sender future
    let sender_future = async {
        loop {
            match sender.next_event().await {
                Libp2pEvent::PeerConnected { peer_id, .. } => {
                    // Send a BlocksByRoot request
                    debug!(log, "Sending RPC");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone()));
                }
                Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event {
                    // Should receive the RPC response
                    RPCEvent::Response(id, response) => {
                        if id == 10 {
                            debug!(log, "Sender received a response");
                            match response {
                                RPCCodedResponse::Success(res) => {
                                    assert_eq!(res, rpc_response.clone());
                                    messages_received += 1;
                                    debug!(log, "Chunk received");
                                }
                                RPCCodedResponse::StreamTermination(_) => {
                                    // should be exactly `messages_to_send`
                                    assert_eq!(messages_received, messages_to_send);
                                    // end the test
                                    return;
                                }
                                _ => {} // Ignore other RPC messages
                            }
                        }
                    }
                    _ => {} // Ignore other RPC messages
                },
                _ => {} // Ignore other behaviour events
            }
        }
    };

    // build the receiver future
    let receiver_future = async {
        loop {
            match receiver.next_event().await {
                Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => {
                    match event {
                        // Should receive sent RPC request
                        RPCEvent::Request(id, request) => {
                            if request == rpc_request {
                                // send the response
                                debug!(log, "Receiver got request");

                                for _ in 1..=messages_to_send {
                                    receiver.swarm.send_rpc(
                                        peer_id.clone(),
                                        RPCEvent::Response(
                                            id,
                                            RPCCodedResponse::Success(rpc_response.clone()),
                                        ),
                                    );
                                    debug!(log, "Sending message");
                                }
                                // send the stream termination
                                receiver.swarm.send_rpc(
                                    peer_id,
                                    RPCEvent::Response(
                                        id,
                                        RPCCodedResponse::StreamTermination(
                                            ResponseTermination::BlocksByRoot,
                                        ),
                                    ),
                                );
                                debug!(log, "Sending stream termination");
                            }
                        }
                        _ => {} // Ignore other events
                    }
                }
                _ => {} // Ignore other events
            }
        }
    };
    tokio::select! {
        _ = sender_future => {}
        _ = receiver_future => {}
        _ = delay_for(Duration::from_millis(1000)) => {
            panic!("Future timed out");
        }
    }
}

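// The test above notes that a full-block response exceeds a single Snappy frame,
// exercising the framing format. A hedged sketch of such a roundtrip with the
// `snap` crate (version 1.0, as pinned in this crate's Cargo.toml): frame-encode
// a payload larger than one 64 KiB frame, then decode it back.
use std::io::{Read, Write};

fn main() -> std::io::Result<()> {
    let payload = vec![7u8; 100_000]; // larger than one 64 KiB snappy frame

    let mut compressed = Vec::new();
    {
        let mut encoder = snap::write::FrameEncoder::new(&mut compressed);
        encoder.write_all(&payload)?;
        encoder.flush()?; // write out any buffered frame before dropping
    }

    let mut decoded = Vec::new();
    snap::read::FrameDecoder::new(&compressed[..]).read_to_end(&mut decoded)?;
    assert_eq!(decoded, payload);
    Ok(())
}
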
#[tokio::test]
// Tests a Goodbye RPC message
async fn test_goodbye_rpc() {
    // set up the logging. The level and whether logging is enabled
    let log_level = Level::Trace;
    let enable_logging = false;

    let log = common::build_log(log_level, enable_logging);

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log).await;

    // Goodbye Request
    let rpc_request = RPCRequest::Goodbye(GoodbyeReason::ClientShutdown);

    // build the sender future
    let sender_future = async {
        loop {
            match sender.next_event().await {
                Libp2pEvent::PeerConnected { peer_id, .. } => {
                    // Send a Goodbye message
                    debug!(log, "Sending RPC");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone()));
                }
                _ => {} // Ignore other RPC messages
            }
        }
    };

    // build the receiver future
    let receiver_future = async {
        loop {
            match receiver.next_event().await {
                Libp2pEvent::Behaviour(BehaviourEvent::RPC(_peer_id, event)) => {
                    match event {
                        // Should receive sent RPC request
                        RPCEvent::Request(id, request) => {
                            if request == rpc_request {
                                assert_eq!(id, 0);
                                assert_eq!(rpc_request.clone(), request); // receives the goodbye. Nothing left to do
                                return;
                            }
                        }
                        _ => {} // Ignore other events
                    }
                }
                _ => {} // Ignore other events
            }
        }
    };

    tokio::select! {
        _ = sender_future => {}
        _ = receiver_future => {}
        _ = delay_for(Duration::from_millis(1000)) => {
            panic!("Future timed out");
        }
    }
}
beacon_node/eth2_libp2p/Cargo.toml (new file, 57 lines)
@@ -0,0 +1,57 @@
[package]
name = "eth2_libp2p"
version = "0.2.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"

[dependencies]
hex = "0.4.2"
types = { path = "../../consensus/types" }
hashset_delay = { path = "../../common/hashset_delay" }
eth2_ssz_types = { path = "../../consensus/ssz_types" }
serde = { version = "1.0.110", features = ["derive"] }
serde_derive = "1.0.110"
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
slog = { version = "2.5.2", features = ["max_level_trace"] }
lighthouse_version = { path = "../../common/lighthouse_version" }
tokio = { version = "0.2.21", features = ["time", "macros"] }
futures = "0.3.5"
error-chain = "0.12.2"
dirs = "2.0.2"
fnv = "1.0.7"
unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "latest-codecs", features = ["codec"] }
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
smallvec = "1.4.1"
lru = "0.5.1"
parking_lot = "0.11.0"
sha2 = "0.9.1"
base64 = "0.12.1"
snap = "1.0.0"
void = "1.0.2"
tokio-io-timeout = "0.4.0"
tokio-util = { version = "0.3.1", features = ["codec", "compat"] }
discv5 = { version = "0.1.0-alpha.7", features = ["libp2p"] }
tiny-keccak = "2.0.2"
environment = { path = "../../lighthouse/environment" }
# TODO: Remove rand crate for mainnet
rand = "0.7.3"

[dependencies.libp2p]
#version = "0.19.1"
git = "https://github.com/sigp/rust-libp2p"
rev = "f1b660a1a96c1b6198cd62062e75d357893faf16"
default-features = false
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "secio", "tcp-tokio"]

[dev-dependencies]
tokio = { version = "0.2.21", features = ["full"] }
slog-stdlog = "4.0.0"
slog-term = "2.5.0"
slog-async = "2.5.0"
tempdir = "0.3.7"
exit-future = "0.2.0"

[features]
libp2p-websocket = []
beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs (new file, 365 lines)
@@ -0,0 +1,365 @@
use crate::rpc::*;
use libp2p::{
    core::either::{EitherError, EitherOutput},
    core::upgrade::{EitherUpgrade, InboundUpgrade, OutboundUpgrade, SelectUpgrade, UpgradeError},
    gossipsub::Gossipsub,
    identify::Identify,
    swarm::{
        protocols_handler::{
            KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
        },
        NegotiatedSubstream, NetworkBehaviour, ProtocolsHandler,
    },
};
use std::task::{Context, Poll};
use types::EthSpec;

/* Auxiliary types for simplicity */
type GossipHandler = <Gossipsub as NetworkBehaviour>::ProtocolsHandler;
type RPCHandler<TSpec> = <RPC<TSpec> as NetworkBehaviour>::ProtocolsHandler;
type IdentifyHandler = <Identify as NetworkBehaviour>::ProtocolsHandler;

/// Handler that combines Lighthouse's behaviours' handlers in a delegating manner.
pub(super) struct DelegatingHandler<TSpec: EthSpec> {
    /// Handler for the Gossipsub protocol.
    gossip_handler: GossipHandler,
    /// Handler for the RPC protocol.
    rpc_handler: RPCHandler<TSpec>,
    /// Handler for the Identify protocol.
    identify_handler: IdentifyHandler,
}

impl<TSpec: EthSpec> DelegatingHandler<TSpec> {
    pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC<TSpec>, identify: &mut Identify) -> Self {
        DelegatingHandler {
            gossip_handler: gossipsub.new_handler(),
            rpc_handler: rpc.new_handler(),
            identify_handler: identify.new_handler(),
        }
    }

    /// Gives mutable access to the rpc handler.
    pub fn rpc_mut(&mut self) -> &mut RPCHandler<TSpec> {
        &mut self.rpc_handler
    }

    /// Gives access to the rpc handler.
    pub fn rpc(&self) -> &RPCHandler<TSpec> {
        &self.rpc_handler
    }

    /// Gives access to identify's handler.
    pub fn identify(&self) -> &IdentifyHandler {
        &self.identify_handler
    }
}

// TODO: this can all be created with macros

/// Wrapper around the `ProtocolsHandler::InEvent` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
#[derive(Debug, Clone)]
pub enum DelegateIn<TSpec: EthSpec> {
    Gossipsub(<GossipHandler as ProtocolsHandler>::InEvent),
    RPC(<RPCHandler<TSpec> as ProtocolsHandler>::InEvent),
    Identify(<IdentifyHandler as ProtocolsHandler>::InEvent),
}

/// Wrapper around the `ProtocolsHandler::OutEvent` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
pub enum DelegateOut<TSpec: EthSpec> {
    Gossipsub(<GossipHandler as ProtocolsHandler>::OutEvent),
    RPC(<RPCHandler<TSpec> as ProtocolsHandler>::OutEvent),
    Identify(Box<<IdentifyHandler as ProtocolsHandler>::OutEvent>),
}

/// Wrapper around the `ProtocolsHandler::Error` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
#[derive(Debug)]
pub enum DelegateError<TSpec: EthSpec> {
    Gossipsub(<GossipHandler as ProtocolsHandler>::Error),
    RPC(<RPCHandler<TSpec> as ProtocolsHandler>::Error),
    Identify(<IdentifyHandler as ProtocolsHandler>::Error),
    Disconnected,
}

impl<TSpec: EthSpec> std::error::Error for DelegateError<TSpec> {}

impl<TSpec: EthSpec> std::fmt::Display for DelegateError<TSpec> {
    fn fmt(
        &self,
        formatter: &mut std::fmt::Formatter<'_>,
    ) -> std::result::Result<(), std::fmt::Error> {
        match self {
            DelegateError::Gossipsub(err) => err.fmt(formatter),
            DelegateError::RPC(err) => err.fmt(formatter),
            DelegateError::Identify(err) => err.fmt(formatter),
            DelegateError::Disconnected => write!(formatter, "Disconnected"),
        }
    }
}

pub type DelegateInProto<TSpec> = SelectUpgrade<
    <GossipHandler as ProtocolsHandler>::InboundProtocol,
    SelectUpgrade<
        <RPCHandler<TSpec> as ProtocolsHandler>::InboundProtocol,
        <IdentifyHandler as ProtocolsHandler>::InboundProtocol,
    >,
>;

pub type DelegateOutProto<TSpec> = EitherUpgrade<
    <GossipHandler as ProtocolsHandler>::OutboundProtocol,
    EitherUpgrade<
        <RPCHandler<TSpec> as ProtocolsHandler>::OutboundProtocol,
        <IdentifyHandler as ProtocolsHandler>::OutboundProtocol,
    >,
>;

// TODO: probably make this an enum
pub type DelegateOutInfo<TSpec> = EitherOutput<
    <GossipHandler as ProtocolsHandler>::OutboundOpenInfo,
    EitherOutput<
        <RPCHandler<TSpec> as ProtocolsHandler>::OutboundOpenInfo,
        <IdentifyHandler as ProtocolsHandler>::OutboundOpenInfo,
    >,
>;

impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
    type InEvent = DelegateIn<TSpec>;
    type OutEvent = DelegateOut<TSpec>;
    type Error = DelegateError<TSpec>;
    type InboundProtocol = DelegateInProto<TSpec>;
    type OutboundProtocol = DelegateOutProto<TSpec>;
    type OutboundOpenInfo = DelegateOutInfo<TSpec>;

    fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol> {
        let gossip_proto = self.gossip_handler.listen_protocol();
        let rpc_proto = self.rpc_handler.listen_protocol();
        let identify_proto = self.identify_handler.listen_protocol();

        let timeout = *gossip_proto
            .timeout()
            .max(rpc_proto.timeout())
            .max(identify_proto.timeout());

        let select = SelectUpgrade::new(
            gossip_proto.into_upgrade().1,
            SelectUpgrade::new(rpc_proto.into_upgrade().1, identify_proto.into_upgrade().1),
        );

        SubstreamProtocol::new(select).with_timeout(timeout)
    }

    fn inject_fully_negotiated_inbound(
        &mut self,
        out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
    ) {
        match out {
            // Gossipsub
            EitherOutput::First(out) => self.gossip_handler.inject_fully_negotiated_inbound(out),
            // RPC
            EitherOutput::Second(EitherOutput::First(out)) => {
                self.rpc_handler.inject_fully_negotiated_inbound(out)
            }
            // Identify
            EitherOutput::Second(EitherOutput::Second(out)) => {
                self.identify_handler.inject_fully_negotiated_inbound(out)
            }
        }
    }

    fn inject_fully_negotiated_outbound(
        &mut self,
        protocol: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
        info: Self::OutboundOpenInfo,
    ) {
        match (protocol, info) {
            // Gossipsub
            (EitherOutput::First(protocol), EitherOutput::First(info)) => self
                .gossip_handler
                .inject_fully_negotiated_outbound(protocol, info),
            // RPC
            (
                EitherOutput::Second(EitherOutput::First(protocol)),
                EitherOutput::Second(EitherOutput::First(info)),
            ) => self
                .rpc_handler
                .inject_fully_negotiated_outbound(protocol, info),
            // Identify
            (
                EitherOutput::Second(EitherOutput::Second(protocol)),
                EitherOutput::Second(EitherOutput::Second(())),
            ) => self
                .identify_handler
                .inject_fully_negotiated_outbound(protocol, ()),
            // Reaching here means we got a protocol and info for different behaviours
            _ => unreachable!("output and protocol don't match"),
        }
    }

    fn inject_event(&mut self, event: Self::InEvent) {
        match event {
            DelegateIn::Gossipsub(ev) => self.gossip_handler.inject_event(ev),
            DelegateIn::RPC(ev) => self.rpc_handler.inject_event(ev),
            DelegateIn::Identify(()) => self.identify_handler.inject_event(()),
        }
    }

    fn inject_dial_upgrade_error(
        &mut self,
        info: Self::OutboundOpenInfo,
        error: ProtocolsHandlerUpgrErr<
            <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
        >,
    ) {
        // TODO: find how to clean up
        match info {
            // Gossipsub
            EitherOutput::First(info) => match error {
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
                    self.gossip_handler.inject_dial_upgrade_error(
                        info,
                        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
                    )
                }
                ProtocolsHandlerUpgrErr::Timer => self
                    .gossip_handler
                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer),
                ProtocolsHandlerUpgrErr::Timeout => self
                    .gossip_handler
                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout),
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))) => {
                    self.gossip_handler.inject_dial_upgrade_error(
                        info,
                        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
                    )
                }
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
                    unreachable!("info and error don't match")
                }
            },
            // RPC
            EitherOutput::Second(EitherOutput::First(info)) => match error {
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
                    self.rpc_handler.inject_dial_upgrade_error(
                        info,
                        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
                    )
                }
                ProtocolsHandlerUpgrErr::Timer => self
                    .rpc_handler
                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer),
                ProtocolsHandlerUpgrErr::Timeout => self
                    .rpc_handler
                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout),
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(
                    EitherError::A(err),
                ))) => self.rpc_handler.inject_dial_upgrade_error(
                    info,
                    ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
                ),
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
                    unreachable!("info and error don't match")
                }
            },
            // Identify
            EitherOutput::Second(EitherOutput::Second(())) => match error {
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
                    self.identify_handler.inject_dial_upgrade_error(
                        (),
                        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
                    )
                }
                ProtocolsHandlerUpgrErr::Timer => self
                    .identify_handler
                    .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer),
                ProtocolsHandlerUpgrErr::Timeout => self
                    .identify_handler
                    .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout),
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(
                    EitherError::B(err),
                ))) => self.identify_handler.inject_dial_upgrade_error(
                    (),
                    ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
                ),
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
                    unreachable!("info and error don't match")
                }
            },
        }
    }

    fn connection_keep_alive(&self) -> KeepAlive {
        self.gossip_handler
            .connection_keep_alive()
            .max(self.rpc_handler.connection_keep_alive())
            .max(self.identify_handler.connection_keep_alive())
    }

    #[allow(clippy::type_complexity)]
    fn poll(
        &mut self,
        cx: &mut Context,
    ) -> Poll<
        ProtocolsHandlerEvent<
            Self::OutboundProtocol,
            Self::OutboundOpenInfo,
            Self::OutEvent,
            Self::Error,
        >,
    > {
        match self.gossip_handler.poll(cx) {
            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Gossipsub(event)));
            }
            Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Gossipsub(
                    event,
                )));
            }
            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info }) => {
                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                    protocol: protocol.map_upgrade(EitherUpgrade::A),
                    info: EitherOutput::First(info),
                });
            }
            Poll::Pending => (),
        };

        match self.rpc_handler.poll(cx) {
            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::RPC(event)));
            }
            Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::RPC(event)));
            }
            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info }) => {
                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                    protocol: protocol.map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::A(u))),
                    info: EitherOutput::Second(EitherOutput::First(info)),
                });
            }
            Poll::Pending => (),
        };

        match self.identify_handler.poll(cx) {
            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Identify(
                    Box::new(event),
                )));
            }
            Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event)));
            }
            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () }) => {
                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                    protocol: protocol.map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::B(u))),
                    info: EitherOutput::Second(EitherOutput::Second(())),
                });
            }
            Poll::Pending => (),
        };

        Poll::Pending
    }
}
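
// A std-only sketch of the delegation in `poll` above: each sub-handler is polled
// in turn and the first `Ready` result is wrapped in the corresponding variant,
// mirroring `DelegateOut`. Types here are hypothetical simplifications. Note the
// ordering bias: gossipsub is checked first, so its events win ties.
use std::task::Poll;

#[derive(Debug, PartialEq)]
enum Out {
    Gossipsub(u8),
    Rpc(u8),
    Identify(u8),
}

fn poll_delegated(gossip: Poll<u8>, rpc: Poll<u8>, identify: Poll<u8>) -> Poll<Out> {
    if let Poll::Ready(ev) = gossip {
        return Poll::Ready(Out::Gossipsub(ev));
    }
    if let Poll::Ready(ev) = rpc {
        return Poll::Ready(Out::Rpc(ev));
    }
    if let Poll::Ready(ev) = identify {
        return Poll::Ready(Out::Identify(ev));
    }
    Poll::Pending
}

fn main() {
    let out = poll_delegated(Poll::Ready(1), Poll::Ready(2), Poll::Pending);
    assert_eq!(out, Poll::Ready(Out::Gossipsub(1)));
}
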
beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
use crate::rpc::*;
use delegate::DelegatingHandler;
pub(super) use delegate::{
    DelegateError, DelegateIn, DelegateInProto, DelegateOut, DelegateOutInfo, DelegateOutProto,
};
use libp2p::{
    core::upgrade::{InboundUpgrade, OutboundUpgrade},
    gossipsub::Gossipsub,
    identify::Identify,
    swarm::protocols_handler::{
        KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
    },
    swarm::{NegotiatedSubstream, ProtocolsHandler},
};
use std::task::{Context, Poll};
use types::EthSpec;

mod delegate;

/// Handler that combines Lighthouse's behaviours' handlers in a delegating manner.
pub struct BehaviourHandler<TSpec: EthSpec> {
    /// Handler combining all sub-behaviours' handlers.
    delegate: DelegatingHandler<TSpec>,
    /// Flag indicating if the handler is shutting down.
    shutting_down: bool,
}

impl<TSpec: EthSpec> BehaviourHandler<TSpec> {
    pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC<TSpec>, identify: &mut Identify) -> Self {
        BehaviourHandler {
            delegate: DelegatingHandler::new(gossipsub, rpc, identify),
            shutting_down: false,
        }
    }
}

#[derive(Clone)]
pub enum BehaviourHandlerIn<TSpec: EthSpec> {
    Delegate(DelegateIn<TSpec>),
    /// Start the shutdown process.
    Shutdown(Option<(RequestId, RPCRequest<TSpec>)>),
}

pub enum BehaviourHandlerOut<TSpec: EthSpec> {
    Delegate(Box<DelegateOut<TSpec>>),
    // TODO: replace custom with events to send
    Custom,
}

impl<TSpec: EthSpec> ProtocolsHandler for BehaviourHandler<TSpec> {
    type InEvent = BehaviourHandlerIn<TSpec>;
    type OutEvent = BehaviourHandlerOut<TSpec>;
    type Error = DelegateError<TSpec>;
    type InboundProtocol = DelegateInProto<TSpec>;
    type OutboundProtocol = DelegateOutProto<TSpec>;
    type OutboundOpenInfo = DelegateOutInfo<TSpec>;

    fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol> {
        self.delegate.listen_protocol()
    }

    fn inject_fully_negotiated_inbound(
        &mut self,
        out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
    ) {
        self.delegate.inject_fully_negotiated_inbound(out)
    }

    fn inject_fully_negotiated_outbound(
        &mut self,
        out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
        info: Self::OutboundOpenInfo,
    ) {
        self.delegate.inject_fully_negotiated_outbound(out, info)
    }

    fn inject_event(&mut self, event: Self::InEvent) {
        match event {
            BehaviourHandlerIn::Delegate(delegated_ev) => self.delegate.inject_event(delegated_ev),
            /* Events coming from the behaviour */
            BehaviourHandlerIn::Shutdown(last_message) => {
                self.shutting_down = true;
                self.delegate.rpc_mut().shutdown(last_message);
            }
        }
    }

    fn inject_dial_upgrade_error(
        &mut self,
        info: Self::OutboundOpenInfo,
        err: ProtocolsHandlerUpgrErr<
            <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
        >,
    ) {
        self.delegate.inject_dial_upgrade_error(info, err)
    }

    // We don't use the keep alive to disconnect. This is handled in the poll
    fn connection_keep_alive(&self) -> KeepAlive {
        KeepAlive::Yes
    }

    #[allow(clippy::type_complexity)]
    fn poll(
        &mut self,
        cx: &mut Context,
    ) -> Poll<
        ProtocolsHandlerEvent<
            Self::OutboundProtocol,
            Self::OutboundOpenInfo,
            Self::OutEvent,
            Self::Error,
        >,
    > {
        // Disconnect if the sub-handlers are ready.
        if self.shutting_down {
            let rpc_keep_alive = self.delegate.rpc().connection_keep_alive();
            let identify_keep_alive = self.delegate.identify().connection_keep_alive();
            if KeepAlive::No == rpc_keep_alive.max(identify_keep_alive) {
                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Disconnected));
            }
        }

        match self.delegate.poll(cx) {
            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Custom(
                    BehaviourHandlerOut::Delegate(Box::new(event)),
                ))
            }
            Poll::Ready(ProtocolsHandlerEvent::Close(err)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Close(err))
            }
            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info }) => {
                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                    protocol,
                    info,
                });
            }
            Poll::Pending => (),
        }

        Poll::Pending
    }
}
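
// A std-only sketch of the shutdown gating in `poll` above: once `shutting_down`
// is set, the connection is closed only when every sub-handler reports it no
// longer needs the connection. `KeepAlive` here is a simplified two-state
// stand-in for libp2p's type.
#[derive(Clone, Copy, PartialEq)]
enum KeepAlive {
    Yes,
    No,
}

struct Handler {
    shutting_down: bool,
    rpc_keep_alive: KeepAlive,
    identify_keep_alive: KeepAlive,
}

impl Handler {
    fn should_close(&self) -> bool {
        self.shutting_down
            && self.rpc_keep_alive == KeepAlive::No
            && self.identify_keep_alive == KeepAlive::No
    }
}

fn main() {
    let mut handler = Handler {
        shutting_down: true,
        rpc_keep_alive: KeepAlive::Yes, // an RPC response is still in flight
        identify_keep_alive: KeepAlive::No,
    };
    assert!(!handler.should_close());
    handler.rpc_keep_alive = KeepAlive::No;
    assert!(handler.should_close());
}
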
beacon_node/eth2_libp2p/src/behaviour/mod.rs (new file, 1036 lines)
File diff suppressed because it is too large
@@ -1,7 +1,9 @@
use crate::types::GossipKind;
use crate::Enr;
use discv5::{Discv5Config, Discv5ConfigBuilder};
use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId};
use libp2p::gossipsub::{
    GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId, ValidationMode,
};
use libp2p::Multiaddr;
use serde_derive::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
@@ -37,13 +39,7 @@ pub struct Config {
    pub enr_tcp_port: Option<u16>,

    /// Target number of connected peers.
    pub max_peers: usize,

    /// A secp256k1 secret key, as bytes in ASCII-encoded hex.
    ///
    /// With or without `0x` prefix.
    #[serde(skip)]
    pub secret_key_hex: Option<String>,
    pub target_peers: usize,

    /// Gossipsub configuration parameters.
    #[serde(skip)]
@@ -62,13 +58,11 @@
    /// Client version
    pub client_version: String,

    /// Disables the discovery protocol from starting.
    pub disable_discovery: bool,

    /// List of extra topics to initially subscribe to as strings.
    pub topics: Vec<GossipKind>,

    /// Introduces randomization in network propagation of messages. This should only be set for
    /// testing purposes and will likely be removed in future versions.
    // TODO: Remove this functionality for mainnet
    pub propagation_percentage: Option<u8>,
}

impl Default for Config {
@@ -90,7 +84,7 @@ impl Default for Config {
        // The function used to generate a gossipsub message id
        // We use base64(SHA256(data)) for content addressing
        let gossip_message_id = |message: &GossipsubMessage| {
            MessageId(base64::encode_config(
            MessageId::from(base64::encode_config(
                &Sha256::digest(&message.data),
                base64::URL_SAFE_NO_PAD,
            ))
@@ -101,22 +95,32 @@
        // parameter.
        let gs_config = GossipsubConfigBuilder::new()
            .max_transmit_size(GOSSIP_MAX_SIZE)
            .heartbeat_interval(Duration::from_secs(1))
            .manual_propagation() // require validation before propagation
            .no_source_id()
            .heartbeat_interval(Duration::from_millis(700))
            .mesh_n(6)
            .mesh_n_low(5)
            .mesh_n_high(12)
            .gossip_lazy(6)
            .fanout_ttl(Duration::from_secs(60))
            .history_length(6)
            .history_gossip(3)
            .validate_messages() // require validation before propagation
            .validation_mode(ValidationMode::Permissive)
            // prevent duplicates for 550 heartbeats (700 ms * 550) = 385 secs
            .duplicate_cache_time(Duration::from_secs(385))
            .message_id_fn(gossip_message_id)
            .build();

        // discv5 configuration
        let discv5_config = Discv5ConfigBuilder::new()
            .enable_packet_filter()
            .session_cache_capacity(1000)
            .request_timeout(Duration::from_secs(4))
            .request_retries(2)
            .enr_update(true) // update IP based on PONG responses
            .enr_peer_update_min(2) // prevents NATs; should be raised for mainnet
            .request_retries(1)
            .enr_peer_update_min(10)
            .query_parallelism(5)
            .query_timeout(Duration::from_secs(60))
            .query_timeout(Duration::from_secs(30))
            .query_peer_timeout(Duration::from_secs(2))
            .ip_limit(false) // limits /24 IPs in buckets. Enable for mainnet
            .ip_limit() // limits /24 IPs in buckets.
            .ping_interval(Duration::from_secs(300))
            .build();

@@ -129,15 +133,14 @@
            enr_address: None,
            enr_udp_port: None,
            enr_tcp_port: None,
            max_peers: 50,
            secret_key_hex: None,
            target_peers: 50,
            gs_config,
            discv5_config,
            boot_nodes: vec![],
            libp2p_nodes: vec![],
            client_version: version::version(),
            client_version: lighthouse_version::version_with_platform(),
            disable_discovery: false,
            topics,
            propagation_percentage: None,
        }
    }
}
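
// A sketch of the content-addressed gossipsub message id configured above:
// base64url(SHA256(data)) without padding, using the sha2 (0.9) and base64
// (0.12) crates pinned in this crate's Cargo.toml.
use sha2::{Digest, Sha256};

fn gossip_message_id(data: &[u8]) -> String {
    base64::encode_config(&Sha256::digest(data), base64::URL_SAFE_NO_PAD)
}

fn main() {
    // The same bytes always map to the same id, so duplicates can be detected
    // regardless of which peer propagated them.
    assert_eq!(gossip_message_id(b"hello"), gossip_message_id(b"hello"));
}
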
@@ -1,12 +1,12 @@
//! Helper functions and an extension trait for Ethereum 2 ENRs.

pub use discv5::enr::{self, CombinedKey, EnrBuilder};
pub use libp2p::core::identity::Keypair;

use super::enr_ext::CombinedKeyExt;
use super::ENR_FILENAME;
use crate::types::{Enr, EnrBitfield};
use crate::CombinedKeyExt;
use crate::NetworkConfig;
use libp2p::core::identity::Keypair;
use slog::{debug, warn};
use ssz::{Decode, Encode};
use ssz_types::BitVector;
@@ -17,9 +17,9 @@ use std::str::FromStr;
use types::{EnrForkId, EthSpec};

/// The ENR field specifying the fork id.
pub const ETH2_ENR_KEY: &'static str = "eth2";
pub const ETH2_ENR_KEY: &str = "eth2";
/// The ENR field specifying the subnet bitfield.
pub const BITFIELD_ENR_KEY: &'static str = "attnets";
pub const BITFIELD_ENR_KEY: &str = "attnets";

/// Extension trait for ENRs within Eth2.
pub trait Eth2Enr {
@@ -12,6 +12,12 @@ pub trait EnrExt {
    /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`.
    /// The vector remains empty if these fields are not defined.
    fn multiaddr(&self) -> Vec<Multiaddr>;

    /// Returns the multiaddr with the `PeerId` prepended.
    fn multiaddr_p2p(&self) -> Vec<Multiaddr>;

    /// Returns any multiaddrs that contain the TCP protocol.
    fn multiaddr_tcp(&self) -> Vec<Multiaddr>;
}

/// Extend ENR CombinedPublicKey for libp2p types.
@@ -34,8 +40,6 @@ impl EnrExt for Enr {

    /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`.
    /// The vector remains empty if these fields are not defined.
    ///
    /// Note: Only available with the `libp2p` feature flag.
    fn multiaddr(&self) -> Vec<Multiaddr> {
        let mut multiaddrs: Vec<Multiaddr> = Vec::new();
        if let Some(ip) = self.ip() {
@@ -66,6 +70,67 @@ impl EnrExt for Enr {
        }
        multiaddrs
    }

    /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`.
    /// The vector remains empty if these fields are not defined.
    ///
    /// This also prepends the `PeerId` into each multiaddr with the `P2p` protocol.
    fn multiaddr_p2p(&self) -> Vec<Multiaddr> {
        let peer_id = self.peer_id();
        let mut multiaddrs: Vec<Multiaddr> = Vec::new();
        if let Some(ip) = self.ip() {
            if let Some(udp) = self.udp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Udp(udp));
                multiaddr.push(Protocol::P2p(peer_id.clone().into()));
                multiaddrs.push(multiaddr);
            }

            if let Some(tcp) = self.tcp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Tcp(tcp));
                multiaddr.push(Protocol::P2p(peer_id.clone().into()));
                multiaddrs.push(multiaddr);
            }
        }
        if let Some(ip6) = self.ip6() {
            if let Some(udp6) = self.udp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Udp(udp6));
                multiaddr.push(Protocol::P2p(peer_id.clone().into()));
                multiaddrs.push(multiaddr);
            }

            if let Some(tcp6) = self.tcp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Tcp(tcp6));
                multiaddr.push(Protocol::P2p(peer_id.into()));
                multiaddrs.push(multiaddr);
            }
        }
        multiaddrs
    }

    /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`.
    /// The vector remains empty if these fields are not defined.
    fn multiaddr_tcp(&self) -> Vec<Multiaddr> {
        let mut multiaddrs: Vec<Multiaddr> = Vec::new();
        if let Some(ip) = self.ip() {
            if let Some(tcp) = self.tcp() {
                let mut multiaddr: Multiaddr = ip.into();
                multiaddr.push(Protocol::Tcp(tcp));
                multiaddrs.push(multiaddr);
            }
        }
        if let Some(ip6) = self.ip6() {
            if let Some(tcp6) = self.tcp6() {
                let mut multiaddr: Multiaddr = ip6.into();
                multiaddr.push(Protocol::Tcp(tcp6));
                multiaddrs.push(multiaddr);
            }
        }
        multiaddrs
    }
}

impl CombinedKeyPublicExt for CombinedPublicKey {
@@ -132,7 +197,7 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, String> {
            let mut hasher = Keccak::v256();
            hasher.update(&uncompressed_key_bytes);
            hasher.finalize(&mut output);
            return Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"));
            Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
        }
        PublicKey::Ed25519(pk) => {
            let uncompressed_key_bytes = pk.encode();
@@ -140,9 +205,9 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, String> {
            let mut hasher = Keccak::v256();
            hasher.update(&uncompressed_key_bytes);
            hasher.finalize(&mut output);
            return Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"));
            Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
        }
        _ => return Err("Unsupported public key".into()),
        _ => Err("Unsupported public key".into()),
    }
}

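// A sketch of the hashing step in `peer_id_to_node_id` above: discv5 node ids
// are the Keccak-256 digest of the peer's uncompressed public key, computed
// here with the tiny-keccak (2.0) crate from this crate's dependencies. The
// key bytes are hypothetical placeholders.
use tiny_keccak::{Hasher, Keccak};

fn main() {
    let uncompressed_key_bytes = [4u8; 65]; // placeholder uncompressed secp256k1 key
    let mut output = [0u8; 32];
    let mut hasher = Keccak::v256();
    hasher.update(&uncompressed_key_bytes);
    hasher.finalize(&mut output);
    println!("node id bytes: {:02x?}", &output[..8]);
}
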
beacon_node/eth2_libp2p/src/discovery/mod.rs (new file, 759 lines)
@@ -0,0 +1,759 @@
|
||||
///! This manages the discovery and management of peers.
|
||||
pub(crate) mod enr;
|
||||
pub mod enr_ext;
|
||||
|
||||
// Allow external use of the lighthouse ENR builder
|
||||
pub use enr::{build_enr, CombinedKey, Eth2Enr};
|
||||
pub use enr_ext::{CombinedKeyExt, EnrExt};
|
||||
pub use libp2p::core::identity::Keypair;
|
||||
|
||||
use crate::metrics;
|
||||
use crate::{error, Enr, NetworkConfig, NetworkGlobals};
|
||||
use discv5::{enr::NodeId, Discv5, Discv5Event};
|
||||
use enr::{BITFIELD_ENR_KEY, ETH2_ENR_KEY};
|
||||
use futures::prelude::*;
|
||||
use futures::stream::FuturesUnordered;
|
||||
use libp2p::core::PeerId;
|
||||
use lru::LruCache;
|
||||
use slog::{crit, debug, info, warn};
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_types::BitVector;
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
net::SocketAddr,
|
||||
path::Path,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tokio::sync::mpsc;
|
||||
use types::{EnrForkId, EthSpec, SubnetId};
|
||||
|
||||
mod subnet_predicate;
|
||||
use subnet_predicate::subnet_predicate;
|
||||
|
||||
/// Local ENR storage filename.
|
||||
pub const ENR_FILENAME: &str = "enr.dat";
|
||||
/// Target number of peers we'd like to have connected to a given long-lived subnet.
|
||||
const TARGET_SUBNET_PEERS: usize = 3;
|
||||
/// Target number of peers to search for given a grouped subnet query.
|
||||
const TARGET_PEERS_FOR_GROUPED_QUERY: usize = 6;
|
||||
/// Number of times to attempt a discovery request.
|
||||
const MAX_DISCOVERY_RETRY: usize = 3;
|
||||
/// The maximum number of concurrent discovery queries.
|
||||
const MAX_CONCURRENT_QUERIES: usize = 2;
|
||||
/// The max number of subnets to search for in a single subnet discovery query.
|
||||
const MAX_SUBNETS_IN_QUERY: usize = 3;
|
||||
/// The number of closest peers to search for when doing a regular peer search.
|
||||
///
|
||||
/// We could reduce this constant to speed up queries however at the cost of security. It will
|
||||
/// make it easier to peers to eclipse this node. Kademlia suggests a value of 16.
|
||||
const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16;
|
||||
/// The threshold for updating `min_ttl` on a connected peer.
|
||||
const DURATION_DIFFERENCE: Duration = Duration::from_millis(1);
|
||||
|
||||
/// The events emitted by polling discovery.
|
||||
pub enum DiscoveryEvent {
|
||||
/// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl`
|
||||
/// of the peer if it is specified.
|
||||
QueryResult(HashMap<PeerId, Option<Instant>>),
|
||||
/// This indicates that our local UDP socketaddr has been updated and we should inform libp2p.
|
||||
SocketUpdated(SocketAddr),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
struct SubnetQuery {
    subnet_id: SubnetId,
    min_ttl: Option<Instant>,
    retries: usize,
}

#[derive(Debug, Clone, PartialEq)]
enum QueryType {
    /// We are searching for subnet peers.
    Subnet(SubnetQuery),
    /// We are searching for more peers without ENR or time constraints.
    FindPeers,
}

#[derive(Debug, Clone, PartialEq)]
enum GroupedQueryType {
    /// We are searching for peers on one of a few subnets.
    Subnet(Vec<SubnetQuery>),
    /// We are searching for more peers without ENR or time constraints.
    FindPeers,
}

impl QueryType {
    /// Returns true if this query has expired. A subnet query with no `min_ttl` is treated as
    /// already expired.
    pub fn expired(&self) -> bool {
        match self {
            Self::FindPeers => false,
            Self::Subnet(subnet_query) => {
                if let Some(ttl) = subnet_query.min_ttl {
                    ttl < Instant::now()
                } else {
                    true
                }
            }
        }
    }
}
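
// A minimal sketch (not part of the original change) illustrating the expiry rules
// above: `FindPeers` never expires, and a subnet query with no `min_ttl` is treated
// as expired immediately. `SubnetId::new` is assumed to be available from `types`.
#[allow(dead_code)]
fn expiry_rules_example() {
    let no_ttl = QueryType::Subnet(SubnetQuery {
        subnet_id: SubnetId::new(0),
        min_ttl: None,
        retries: 0,
    });
    // Without a `min_ttl`, the query is dropped on the next queue sanitization.
    assert!(no_ttl.expired());
    // A plain peer search is never removed by sanitization.
    assert!(!QueryType::FindPeers.expired());
}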

/// The result of a query.
struct QueryResult(GroupedQueryType, Result<Vec<Enr>, discv5::QueryError>);

// Awaiting the event stream future
enum EventStream {
    /// Awaiting an event stream to be generated. This is required due to the poll-based nature
    /// of `Discovery`.
    Awaiting(
        Pin<
            Box<
                dyn Future<Output = Result<mpsc::Receiver<Discv5Event>, discv5::Discv5Error>>
                    + Send,
            >,
        >,
    ),
    /// The future has completed.
    Present(mpsc::Receiver<Discv5Event>),
    // The future has failed or discv5 has been disabled. There are no events from discv5.
    InActive,
}

/// The main discovery service. This can be disabled via CLI arguments. When disabled the
/// underlying processes are not started, but this struct still maintains our current ENR.
pub struct Discovery<TSpec: EthSpec> {
    /// A collection of seen live ENRs for quick lookup and to map peer IDs to ENRs.
    cached_enrs: LruCache<PeerId, Enr>,

    /// The directory where the ENR is stored.
    enr_dir: String,

    /// The handle for the underlying discv5 server.
    ///
    /// This is behind a reference counter to allow for futures to be spawned and polled with a
    /// static lifetime.
    discv5: Discv5,

    /// A collection of network constants that can be read from other threads.
    network_globals: Arc<NetworkGlobals<TSpec>>,

    /// Indicates if we are actively searching for peers. We only allow a single FindPeers query at
    /// a time, regardless of the query concurrency.
    find_peer_active: bool,

    /// A queue of discovery queries to be processed.
    queued_queries: VecDeque<QueryType>,

    /// Active discovery queries.
    active_queries: FuturesUnordered<std::pin::Pin<Box<dyn Future<Output = QueryResult> + Send>>>,

    /// The discv5 event stream.
    event_stream: EventStream,

    /// Indicates if the discovery service has been started. When the service is disabled, this is
    /// always false.
    started: bool,

    /// Logger for the discovery behaviour.
    log: slog::Logger,
}

impl<TSpec: EthSpec> Discovery<TSpec> {
    /// NOTE: Creating discovery requires running within a tokio execution environment.
    pub fn new(
        local_key: &Keypair,
        config: &NetworkConfig,
        network_globals: Arc<NetworkGlobals<TSpec>>,
        log: &slog::Logger,
    ) -> error::Result<Self> {
        let log = log.clone();

        let enr_dir = match config.network_dir.to_str() {
            Some(path) => String::from(path),
            None => String::from(""),
        };

        let local_enr = network_globals.local_enr.read().clone();

        info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> format!("{}",local_enr.node_id()), "ip" => format!("{:?}", local_enr.ip()), "udp"=> format!("{:?}", local_enr.udp()), "tcp" => format!("{:?}", local_enr.tcp()));

        let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port);

        // convert the keypair into an ENR key
        let enr_key: CombinedKey = CombinedKey::from_libp2p(&local_key)?;

        let mut discv5 = Discv5::new(local_enr, enr_key, config.discv5_config.clone())
            .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?;

        // Add bootnodes to routing table
        for bootnode_enr in config.boot_nodes.clone() {
            debug!(
                log,
                "Adding node to routing table";
                "node_id" => format!("{}", bootnode_enr.node_id()),
                "peer_id" => format!("{}", bootnode_enr.peer_id()),
                "ip" => format!("{:?}", bootnode_enr.ip()),
                "udp" => format!("{:?}", bootnode_enr.udp()),
                "tcp" => format!("{:?}", bootnode_enr.tcp())
            );
            let _ = discv5.add_enr(bootnode_enr).map_err(|e| {
                debug!(
                    log,
                    "Could not add peer to the local routing table";
                    "error" => e.to_string()
                )
            });
        }

        // Start the discv5 service and obtain an event stream
        let event_stream = if !config.disable_discovery {
            discv5.start(listen_socket);
            debug!(log, "Discovery service started");
            EventStream::Awaiting(Box::pin(discv5.event_stream()))
        } else {
            EventStream::InActive
        };

        Ok(Self {
            cached_enrs: LruCache::new(50),
            network_globals,
            find_peer_active: false,
            queued_queries: VecDeque::with_capacity(10),
            active_queries: FuturesUnordered::new(),
            discv5,
            event_stream,
            started: !config.disable_discovery,
            log,
            enr_dir,
        })
    }

    /// Returns the node's local ENR.
    pub fn local_enr(&self) -> Enr {
        self.discv5.local_enr()
    }

    /// This adds a new `FindPeers` query to the queue if one doesn't already exist.
    pub fn discover_peers(&mut self) {
        // If the discv5 service isn't running or we are in the process of a query, don't bother queuing a new one.
        if !self.started || self.find_peer_active {
            return;
        }

        // If there is not already a `FindPeers` query queued, add one.
        let query = QueryType::FindPeers;
        if !self.queued_queries.contains(&query) {
            debug!(self.log, "Queuing a peer discovery request");
            self.queued_queries.push_back(query);
            // update the metrics
            metrics::set_gauge(&metrics::DISCOVERY_QUEUE, self.queued_queries.len() as i64);
        }
    }

    /// Processes a request to search for more peers on a subnet.
    pub fn discover_subnet_peers(&mut self, subnet_id: SubnetId, min_ttl: Option<Instant>) {
        // If the discv5 service isn't running, ignore queries
        if !self.started {
            return;
        }
        self.add_subnet_query(subnet_id, min_ttl, 0);
    }

    /// Add an ENR to the routing table of the discovery mechanism.
    pub fn add_enr(&mut self, enr: Enr) {
        // add the enr to seen caches
        self.cached_enrs.put(enr.peer_id(), enr.clone());

        if let Err(e) = self.discv5.add_enr(enr) {
            debug!(
                self.log,
                "Could not add peer to the local routing table";
                "error" => e.to_string()
            )
        }
    }

    /// Returns all of the ENR entries in the DHT.
    pub fn table_entries_enr(&mut self) -> Vec<Enr> {
        self.discv5.table_entries_enr()
    }

    /// Returns the ENR of a known peer if it exists.
    pub fn enr_of_peer(&mut self, peer_id: &PeerId) -> Option<Enr> {
        // first search the local cache
        if let Some(enr) = self.cached_enrs.get(peer_id) {
            return Some(enr.clone());
        }
        // not in the local cache, look in the routing table
        if let Ok(node_id) = enr_ext::peer_id_to_node_id(peer_id) {
            self.discv5.find_enr(&node_id)
        } else {
            None
        }
    }

    /// Adds or removes a subnet from the ENR bitfield.
    pub fn update_enr_bitfield(&mut self, subnet_id: SubnetId, value: bool) -> Result<(), String> {
        let id = *subnet_id as usize;

        let local_enr = self.discv5.local_enr();
        let mut current_bitfield = local_enr.bitfield::<TSpec>()?;

        if id >= current_bitfield.len() {
            return Err(format!(
                "Subnet id: {} is outside the ENR bitfield length: {}",
                id,
                current_bitfield.len()
            ));
        }

        if current_bitfield
            .get(id)
            .map_err(|_| String::from("Subnet ID out of bounds"))?
            == value
        {
            return Err(format!(
                "Subnet id: {} in the local ENR already has the value: {}",
                id, value
            ));
        }

        // set the subnet bitfield in the ENR
        current_bitfield
            .set(id, value)
            .map_err(|_| String::from("Subnet ID out of bounds, could not set subnet ID"))?;

        // insert the bitfield into the ENR record
        let _ = self
            .discv5
            .enr_insert(BITFIELD_ENR_KEY, current_bitfield.as_ssz_bytes());

        // replace the global version
        *self.network_globals.local_enr.write() = self.discv5.local_enr();
        Ok(())
    }

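    // A minimal usage sketch (not part of the original change): advertising subnet 3
    // in the local ENR and then withdrawing it. `SubnetId::new` is assumed to be
    // available from `types`. Note that setting a bit to the value it already holds
    // returns an `Err`.
    #[allow(dead_code)]
    fn example_toggle_subnet(&mut self) -> Result<(), String> {
        self.update_enr_bitfield(SubnetId::new(3), true)?; // set the bit
        self.update_enr_bitfield(SubnetId::new(3), false) // clear it again
    }
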
    /// Updates the `eth2` field of our local ENR.
    pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId) {
        // to avoid having a reference to the spec constant, for the logging we assume
        // FAR_FUTURE_EPOCH is u64::max_value()
        let next_fork_epoch_log = if enr_fork_id.next_fork_epoch == u64::max_value() {
            String::from("No other fork")
        } else {
            format!("{:?}", enr_fork_id.next_fork_epoch)
        };

        info!(self.log, "Updating the ENR fork version";
            "fork_digest" => format!("{:?}", enr_fork_id.fork_digest),
            "next_fork_version" => format!("{:?}", enr_fork_id.next_fork_version),
            "next_fork_epoch" => next_fork_epoch_log,
        );

        let _ = self
            .discv5
            .enr_insert(ETH2_ENR_KEY, enr_fork_id.as_ssz_bytes())
            .map_err(|e| {
                warn!(
                    self.log,
                    "Could not update eth2 ENR field";
                    "error" => format!("{:?}", e)
                )
            });

        // replace the global version with discovery version
        *self.network_globals.local_enr.write() = self.discv5.local_enr();
    }

    /* Internal Functions */

    /// Adds a subnet query if one doesn't exist. If a subnet query already exists, this
    /// updates the min_ttl field.
    fn add_subnet_query(&mut self, subnet_id: SubnetId, min_ttl: Option<Instant>, retries: usize) {
        // remove the entry and complete the query if greater than the maximum search count
        if retries > MAX_DISCOVERY_RETRY {
            debug!(
                self.log,
                "Subnet peer discovery did not find sufficient peers. Reached max retry limit"
            );
            return;
        }

        // Search through any queued requests and update the timeout if a query for this subnet
        // already exists
        let mut found = false;
        for query in self.queued_queries.iter_mut() {
            if let QueryType::Subnet(ref mut subnet_query) = query {
                if subnet_query.subnet_id == subnet_id {
                    if subnet_query.min_ttl < min_ttl {
                        subnet_query.min_ttl = min_ttl;
                    }
                    // update the number of retries
                    subnet_query.retries = retries;
                    // mimic an `Iter::Find()` and short-circuit the loop
                    found = true;
                    break;
                }
            }
        }
        if !found {
            // Set up the query and add it to the queue
            let query = QueryType::Subnet(SubnetQuery {
                subnet_id,
                min_ttl,
                retries,
            });
            // update the metrics and insert into the queue.
            debug!(self.log, "Queuing subnet query"; "subnet" => *subnet_id, "retries" => retries);
            self.queued_queries.push_back(query);
            metrics::set_gauge(&metrics::DISCOVERY_QUEUE, self.queued_queries.len() as i64);
        }
    }

    /// Consume the discovery queue and initiate queries when applicable.
    ///
    /// This also sanitizes the queue, removing out-dated queries.
    fn process_queue(&mut self) {
        // Sanitize the queue, removing any out-dated subnet queries
        self.queued_queries.retain(|query| !query.expired());

        // use this to group subnet queries together for a single discovery request
        let mut subnet_queries: Vec<SubnetQuery> = Vec::new();

        // Check that we are within our query concurrency limit
        while !self.at_capacity() && !self.queued_queries.is_empty() {
            // consume and process the query queue
            match self.queued_queries.pop_front() {
                Some(QueryType::FindPeers) => {
                    // Only start a find peers query if it is the last message in the queue.
                    // We want to prioritize subnet queries, so we don't miss attestations.
                    if self.queued_queries.is_empty() {
                        // This is a regular request to find additional peers
                        debug!(self.log, "Discovery query started");
                        self.find_peer_active = true;
                        self.start_query(
                            GroupedQueryType::FindPeers,
                            FIND_NODE_QUERY_CLOSEST_PEERS,
                            |_| true,
                        );
                    } else {
                        self.queued_queries.push_back(QueryType::FindPeers);
                    }
                }
                Some(QueryType::Subnet(subnet_query)) => {
                    subnet_queries.push(subnet_query);

                    // We want to start a grouped subnet query if:
                    // 1. We've grouped MAX_SUBNETS_IN_QUERY subnets together.
                    // 2. There are no more messages in the queue.
                    // 3. There is exactly one message in the queue and it is FindPeers.
                    if subnet_queries.len() == MAX_SUBNETS_IN_QUERY
                        || self.queued_queries.is_empty()
                        || (self.queued_queries.front() == Some(&QueryType::FindPeers)
                            && self.queued_queries.len() == 1)
                    {
                        // This query is for searching for peers of a particular subnet
                        // Drain subnet_queries so we can re-use it as we continue to process the queue
                        let grouped_queries: Vec<SubnetQuery> = subnet_queries.drain(..).collect();
                        self.start_subnet_query(grouped_queries);
                    }
                }
                None => {} // Queue is empty
            }
        }
        // Update the queue metric
        metrics::set_gauge(&metrics::DISCOVERY_QUEUE, self.queued_queries.len() as i64);
    }

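    // Worked example (not part of the original change): with MAX_SUBNETS_IN_QUERY = 3,
    // a queue of [Subnet(1), Subnet(2), FindPeers] is consumed as follows: Subnet(1)
    // and Subnet(2) are popped and grouped; because the single remaining entry is
    // FindPeers (condition 3 above), the pair is dispatched as one grouped discv5
    // query. FindPeers itself only starts once it is the last entry in the queue,
    // keeping subnet searches prioritized.
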
    // Returns a boolean indicating if we are currently processing the maximum number of
    // concurrent queries or not.
    fn at_capacity(&self) -> bool {
        self.active_queries.len() >= MAX_CONCURRENT_QUERIES
    }

    /// Runs a discovery request for a given group of subnets.
    fn start_subnet_query(&mut self, subnet_queries: Vec<SubnetQuery>) {
        let mut filtered_subnet_ids: Vec<SubnetId> = Vec::new();

        // find subnet queries that are still necessary
        let filtered_subnet_queries: Vec<SubnetQuery> = subnet_queries
            .into_iter()
            .filter(|subnet_query| {
                // Determine if we have sufficient peers, which may make this discovery unnecessary.
                let peers_on_subnet = self
                    .network_globals
                    .peers
                    .read()
                    .peers_on_subnet(subnet_query.subnet_id)
                    .count();

                if peers_on_subnet > TARGET_SUBNET_PEERS {
                    debug!(self.log, "Discovery ignored";
                        "reason" => "Already connected to desired peers",
                        "connected_peers_on_subnet" => peers_on_subnet,
                        "target_subnet_peers" => TARGET_SUBNET_PEERS,
                    );
                    return false;
                }

                let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet;
                debug!(self.log, "Discovery query started for subnet";
                    "subnet_id" => *subnet_query.subnet_id,
                    "connected_peers_on_subnet" => peers_on_subnet,
                    "target_subnet_peers" => TARGET_SUBNET_PEERS,
                    "peers_to_find" => target_peers,
                    "attempt" => subnet_query.retries,
                    "min_ttl" => format!("{:?}", subnet_query.min_ttl),
                );

                filtered_subnet_ids.push(subnet_query.subnet_id);
                true
            })
            .collect();

        // Only start a discovery query if we have a subnet to look for.
        if !filtered_subnet_queries.is_empty() {
            // build the subnet predicate as a combination of the eth2_fork_predicate and the subnet predicate
            let subnet_predicate = subnet_predicate::<TSpec>(filtered_subnet_ids, &self.log);

            self.start_query(
                GroupedQueryType::Subnet(filtered_subnet_queries),
                TARGET_PEERS_FOR_GROUPED_QUERY,
                subnet_predicate,
            );
        }
    }

    /// Search for a specified number of new peers using the underlying discovery mechanism.
    ///
    /// This can optionally search for peers matching a given predicate. Regardless of the
    /// predicate given, this will only search for peers on the same enr_fork_id as specified
    /// in the local ENR.
    fn start_query(
        &mut self,
        grouped_query: GroupedQueryType,
        target_peers: usize,
        additional_predicate: impl Fn(&Enr) -> bool + Send + 'static,
    ) {
        // Make sure there are subnet queries included
        let contains_queries = match &grouped_query {
            GroupedQueryType::Subnet(queries) => !queries.is_empty(),
            GroupedQueryType::FindPeers => true,
        };

        if !contains_queries {
            debug!(
                self.log,
                "No subnets included in this request. Skipping discovery request."
            );
            return;
        }

        // Generate a random target node id.
        let random_node = NodeId::random();

        let enr_fork_id = match self.local_enr().eth2() {
            Ok(v) => v,
            Err(e) => {
                crit!(self.log, "Local ENR has no fork id"; "error" => e);
                return;
            }
        };
        // predicate for finding nodes with a matching fork
        let eth2_fork_predicate = move |enr: &Enr| enr.eth2() == Ok(enr_fork_id.clone());

        // General predicate
        let predicate: Box<dyn Fn(&Enr) -> bool + Send> =
            Box::new(move |enr: &Enr| eth2_fork_predicate(enr) && additional_predicate(enr));

        // Build the future
        let query_future = self
            .discv5
            .find_node_predicate(random_node, predicate, target_peers)
            .map(|v| QueryResult(grouped_query, v));

        // Add the future to active queries, to be executed.
        self.active_queries.push(Box::pin(query_future));
    }

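    // Predicate composition sketch (not part of the original change): every query is
    // implicitly restricted to peers on our fork, i.e. conceptually
    //
    //     combined(enr) = (enr.eth2() == Ok(local_fork_id)) && additional_predicate(enr)
    //
    // so a `FindPeers` query that passes `|_| true` still only yields fork-compatible
    // peers.
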
    /// Drives the queries, returning any results from completed queries.
    fn poll_queries(&mut self, cx: &mut Context) -> Option<HashMap<PeerId, Option<Instant>>> {
        while let Poll::Ready(Some(query_future)) = self.active_queries.poll_next_unpin(cx) {
            match query_future.0 {
                GroupedQueryType::FindPeers => {
                    self.find_peer_active = false;
                    match query_future.1 {
                        Ok(r) if r.is_empty() => {
                            debug!(self.log, "Discovery query yielded no results.");
                        }
                        Ok(r) => {
                            debug!(self.log, "Discovery query completed"; "peers_found" => r.len());
                            let mut results: HashMap<PeerId, Option<Instant>> = HashMap::new();
                            r.iter().for_each(|enr| {
                                // cache the found ENRs
                                self.cached_enrs.put(enr.peer_id(), enr.clone());
                                results.insert(enr.peer_id(), None);
                            });
                            return Some(results);
                        }
                        Err(e) => {
                            warn!(self.log, "Discovery query failed"; "error" => e.to_string());
                        }
                    }
                }
                GroupedQueryType::Subnet(queries) => {
                    let subnets_searched_for: Vec<SubnetId> =
                        queries.iter().map(|query| query.subnet_id).collect();
                    match query_future.1 {
                        Ok(r) if r.is_empty() => {
                            debug!(self.log, "Grouped subnet discovery query yielded no results."; "subnets_searched_for" => format!("{:?}",subnets_searched_for));
                        }
                        Ok(r) => {
                            debug!(self.log, "Peer grouped subnet discovery request completed"; "peers_found" => r.len(), "subnets_searched_for" => format!("{:?}",subnets_searched_for));

                            let mut mapped_results: HashMap<PeerId, Option<Instant>> =
                                HashMap::new();

                            // cache the found ENRs
                            for enr in r.iter().cloned() {
                                self.cached_enrs.put(enr.peer_id(), enr);
                            }

                            // Map each subnet query's min_ttl to the set of ENRs returned for that subnet.
                            queries.iter().for_each(|query| {
                                // A subnet query has completed. Add back to the queue, incrementing retries.
                                self.add_subnet_query(
                                    query.subnet_id,
                                    query.min_ttl,
                                    query.retries + 1,
                                );

                                // Check the specific subnet against the enr
                                let subnet_predicate =
                                    subnet_predicate::<TSpec>(vec![query.subnet_id], &self.log);

                                r.iter()
                                    .filter(|enr| subnet_predicate(enr))
                                    .map(|enr| enr.peer_id())
                                    .for_each(|peer_id| {
                                        let other_min_ttl = mapped_results.get_mut(&peer_id);

                                        // map peer IDs to the min_ttl furthest in the future
                                        match (query.min_ttl, other_min_ttl) {
                                            // update the mapping if the min_ttl is greater
                                            (
                                                Some(min_ttl_instant),
                                                Some(Some(other_min_ttl_instant)),
                                            ) => {
                                                if min_ttl_instant.saturating_duration_since(
                                                    *other_min_ttl_instant,
                                                ) > DURATION_DIFFERENCE
                                                {
                                                    *other_min_ttl_instant = min_ttl_instant;
                                                }
                                            }
                                            // update the mapping if we have a specified min_ttl
                                            (Some(min_ttl), Some(None)) => {
                                                mapped_results.insert(peer_id, Some(min_ttl));
                                            }
                                            // first seen min_ttl for this enr
                                            (Some(min_ttl), None) => {
                                                mapped_results.insert(peer_id, Some(min_ttl));
                                            }
                                            // first time seeing this enr, with no min_ttl specified
                                            (None, None) => {
                                                mapped_results.insert(peer_id, None);
                                            }
                                            (None, Some(Some(_))) => {} // Don't replace the existing specific min_ttl
                                            (None, Some(None)) => {} // No-op because this is a duplicate
                                        }
                                    });
                            });

                            if mapped_results.is_empty() {
                                return None;
                            } else {
                                return Some(mapped_results);
                            }
                        }
                        Err(e) => {
                            warn!(self.log,"Grouped subnet discovery query failed"; "subnets_searched_for" => format!("{:?}",subnets_searched_for), "error" => e.to_string());
                        }
                    }
                }
            }
        }
        None
    }

    // Main execution loop to be driven by the peer manager.
    pub fn poll(&mut self, cx: &mut Context) -> Poll<DiscoveryEvent> {
        if !self.started {
            return Poll::Pending;
        }

        // Process the query queue
        self.process_queue();

        // Drive the queries and return any results from completed queries
        if let Some(results) = self.poll_queries(cx) {
            // return the result to the peer manager
            return Poll::Ready(DiscoveryEvent::QueryResult(results));
        }

        // Process the server event stream
        match self.event_stream {
            EventStream::Awaiting(ref mut fut) => {
                // Still awaiting the event stream, poll it
                if let Poll::Ready(event_stream) = fut.poll_unpin(cx) {
                    match event_stream {
                        Ok(stream) => self.event_stream = EventStream::Present(stream),
                        Err(e) => {
                            slog::crit!(self.log, "Discv5 event stream failed"; "error" => e.to_string());
                            self.event_stream = EventStream::InActive;
                        }
                    }
                }
            }
            EventStream::InActive => {} // ignore checking the stream
            EventStream::Present(ref mut stream) => {
                while let Ok(event) = stream.try_recv() {
                    match event {
                        // We filter out unwanted discv5 events here and only propagate useful results to
                        // the peer manager.
                        Discv5Event::Discovered(_enr) => {
                            // Peers that get discovered during a query but are not contactable or
                            // don't match a predicate can end up here. For debugging purposes we
                            // log these to see if we are unnecessarily dropping discovered peers
                            /*
                            if enr.eth2() == self.local_enr().eth2() {
                                trace!(self.log, "Peer found in process of query"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket());
                            } else {
                                // this is a temporary warning for debugging the DHT
                                warn!(self.log, "Found peer during discovery not on correct fork"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket());
                            }
                            */
                        }
                        Discv5Event::SocketUpdated(socket) => {
                            info!(self.log, "Address updated"; "ip" => format!("{}",socket.ip()), "udp_port" => format!("{}", socket.port()));
                            metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT);
                            // Discv5 will have updated our local ENR. We save the updated version
                            // to disk.
                            let enr = self.discv5.local_enr();
                            enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log);
                            // update network globals
                            *self.network_globals.local_enr.write() = enr;
                            return Poll::Ready(DiscoveryEvent::SocketUpdated(socket));
                        }
                        _ => {} // Ignore all other discv5 server events
                    }
                }
            }
        }
        Poll::Pending
    }
}
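
// Driving sketch (not part of the original change): the peer manager owns a
// `Discovery` and drives it from its own `poll`, roughly:
//
//     while let Poll::Ready(event) = discovery.poll(cx) {
//         match event {
//             DiscoveryEvent::QueryResult(peers) => { /* dial suitable peers */ }
//             DiscoveryEvent::SocketUpdated(addr) => { /* report to libp2p */ }
//         }
//     }
//
// See `PeerManager::peers_discovered` and `PeerManager::socket_updated` further down.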
51 beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs Normal file
@@ -0,0 +1,51 @@
//! The subnet predicate used for searching for a particular subnet.
use super::*;
use std::ops::Deref;

/// Returns the predicate for a given subnet.
pub fn subnet_predicate<TSpec>(
    subnet_ids: Vec<SubnetId>,
    log: &slog::Logger,
) -> impl Fn(&Enr) -> bool + Send
where
    TSpec: EthSpec,
{
    let log_clone = log.clone();

    move |enr: &Enr| {
        if let Some(bitfield_bytes) = enr.get(BITFIELD_ENR_KEY) {
            let bitfield = match BitVector::<TSpec::SubnetBitfieldLength>::from_ssz_bytes(
                bitfield_bytes,
            ) {
                Ok(v) => v,
                Err(e) => {
                    warn!(log_clone, "Could not decode ENR bitfield for peer"; "peer_id" => format!("{}", enr.peer_id()), "error" => format!("{:?}", e));
                    return false;
                }
            };

            let matches: Vec<&SubnetId> = subnet_ids
                .iter()
                .filter(|id| bitfield.get(**id.deref() as usize).unwrap_or(false))
                .collect();

            if matches.is_empty() {
                debug!(
                    log_clone,
                    "Peer found but not on any of the desired subnets";
                    "peer_id" => format!("{}", enr.peer_id())
                );
                return false;
            } else {
                debug!(
                    log_clone,
                    "Peer found on desired subnet(s)";
                    "peer_id" => format!("{}", enr.peer_id()),
                    "subnets" => format!("{:?}", matches.as_slice())
                );
                return true;
            }
        }
        false
    }
}
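
// Usage sketch (not part of the original change): filtering the local routing table
// for peers on subnets 1 or 2, assuming `discovery` is a `Discovery<TSpec>` and
// `SubnetId::new` is available from `types`:
//
//     let predicate = subnet_predicate::<TSpec>(vec![SubnetId::new(1), SubnetId::new(2)], &log);
//     let matching: Vec<Enr> = discovery
//         .table_entries_enr()
//         .into_iter()
//         .filter(|enr| predicate(enr))
//         .collect();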
@@ -15,12 +15,15 @@ mod service
 pub mod types;
 
 pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage};
-pub use behaviour::BehaviourEvent;
+pub use behaviour::{BehaviourEvent, PeerRequestId, Request, Response};
 pub use config::Config as NetworkConfig;
-pub use discovery::enr_ext::{CombinedKeyExt, EnrExt};
+pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr};
+pub use discv5;
 pub use libp2p::gossipsub::{MessageId, Topic, TopicHash};
 pub use libp2p::{core::ConnectedPoint, PeerId, Swarm};
 pub use libp2p::{multiaddr, Multiaddr};
-pub use peer_manager::{client::Client, PeerDB, PeerInfo, PeerSyncStatus, SyncInfo};
-pub use rpc::RPCEvent;
+pub use metrics::scrape_discovery_metrics;
+pub use peer_manager::{
+    client::Client, score::PeerAction, PeerDB, PeerInfo, PeerSyncStatus, SyncInfo,
+};
 pub use service::{Libp2pEvent, Service, NETWORK_KEY_FILENAME};
64 beacon_node/eth2_libp2p/src/metrics.rs Normal file
@@ -0,0 +1,64 @@
pub use lighthouse_metrics::*;

lazy_static! {
    pub static ref ADDRESS_UPDATE_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_address_update_total",
        "Count of libp2p socket updated events (when our view of our IP address has changed)"
    );
    pub static ref PEERS_CONNECTED: Result<IntGauge> = try_create_int_gauge(
        "libp2p_peer_connected_peers_total",
        "Count of libp2p peers currently connected"
    );
    pub static ref PEER_CONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_peer_connect_event_total",
        "Count of libp2p peer connect events (not the current number of connected peers)"
    );
    pub static ref PEER_DISCONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_peer_disconnect_event_total",
        "Count of libp2p peer disconnect events"
    );
    pub static ref DISCOVERY_QUEUE: Result<IntGauge> = try_create_int_gauge(
        "discovery_queue_size",
        "The number of discovery queries awaiting execution"
    );
    pub static ref DISCOVERY_REQS: Result<Gauge> = try_create_float_gauge(
        "discovery_requests",
        "The number of unsolicited discovery requests per second"
    );
    pub static ref DISCOVERY_SESSIONS: Result<IntGauge> = try_create_int_gauge(
        "discovery_sessions",
        "The number of active discovery sessions with peers"
    );
    pub static ref DISCOVERY_REQS_IP: Result<GaugeVec> = try_create_float_gauge_vec(
        "discovery_reqs_per_ip",
        "Unsolicited discovery requests per IP per second",
        &["Addresses"]
    );
    pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_COUNT: Result<IntGaugeVec> = try_create_int_gauge_vec(
        "gossipsub_peers_per_topic_count",
        "Peers subscribed per topic",
        &["topic_hash"]
    );
}

pub fn scrape_discovery_metrics() {
    let metrics = discv5::metrics::Metrics::from(discv5::Discv5::raw_metrics());

    set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second);

    set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64);

    let process_gauge_vec = |gauge: &Result<GaugeVec>, metrics: discv5::metrics::Metrics| {
        if let Ok(gauge_vec) = gauge {
            gauge_vec.reset();
            for (ip, value) in metrics.requests_per_ip_per_second.iter() {
                if let Ok(metric) = gauge_vec.get_metric_with_label_values(&[&format!("{:?}", ip)])
                {
                    metric.set(*value);
                }
            }
        }
    };

    process_gauge_vec(&DISCOVERY_REQS_IP, metrics);
}
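
// Usage note (not part of the original change): `scrape_discovery_metrics` is
// intended to be called just before the Prometheus registry is gathered (e.g. by
// the HTTP metrics endpoint), so the discv5 gauges reflect up-to-date values:
//
//     scrape_discovery_metrics();
//     // ... then encode and serve `lighthouse_metrics::gather()`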
@@ -131,6 +131,20 @@ fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String
             let unknown = String::from("unknown");
             (kind, unknown.clone(), unknown)
         }
+        Some("Prysm") => {
+            let kind = ClientKind::Prysm;
+            let mut version = String::from("unknown");
+            let mut os_version = version.clone();
+            if agent_split.next().is_some() {
+                if let Some(agent_version) = agent_split.next() {
+                    version = agent_version.into();
+                    if let Some(agent_os_version) = agent_split.next() {
+                        os_version = agent_os_version.into();
+                    }
+                }
+            }
+            (kind, version, os_version)
+        }
         Some("nim-libp2p") => {
             let kind = ClientKind::Nimbus;
             let mut version = String::from("unknown");
793 beacon_node/eth2_libp2p/src/peer_manager/mod.rs Normal file
@@ -0,0 +1,793 @@
//! Implementation of Lighthouse's peer management system.

pub use self::peerdb::*;
use crate::discovery::{Discovery, DiscoveryEvent};
use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode};
use crate::{error, metrics};
use crate::{EnrExt, NetworkConfig, NetworkGlobals, PeerId};
use futures::prelude::*;
use futures::Stream;
use hashset_delay::HashSetDelay;
use libp2p::core::multiaddr::Protocol as MProtocol;
use libp2p::identify::IdentifyInfo;
use slog::{crit, debug, error};
use smallvec::SmallVec;
use std::{
    net::SocketAddr,
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
    time::{Duration, Instant},
};
use types::{EthSpec, SubnetId};

pub use libp2p::core::{identity::Keypair, Multiaddr};

pub mod client;
mod peer_info;
mod peer_sync_status;
mod peerdb;
pub(crate) mod score;

pub use peer_info::{PeerConnectionStatus::*, PeerInfo};
pub use peer_sync_status::{PeerSyncStatus, SyncInfo};
use score::{PeerAction, ScoreState};
use std::collections::HashMap;
/// The time in seconds between re-statusing peers.
const STATUS_INTERVAL: u64 = 300;
/// The time in seconds between PING events. We do not send a ping if the other peer has PING'd
/// us within this time frame (seconds).
const PING_INTERVAL: u64 = 30;

/// The heartbeat performs regular updates such as updating reputations and performing discovery
/// requests. This defines the interval in seconds.
const HEARTBEAT_INTERVAL: u64 = 30;

/// A fraction of `PeerManager::target_peers` that we allow to connect to us in excess of
/// `PeerManager::target_peers`. For clarity, if `PeerManager::target_peers` is 50 and
/// PEER_EXCESS_FACTOR = 0.1 we allow 10% more nodes, i.e. 55.
const PEER_EXCESS_FACTOR: f32 = 0.1;

/// The main struct that handles peers' reputations and connection statuses.
pub struct PeerManager<TSpec: EthSpec> {
    /// Storage of network globals to access the `PeerDB`.
    network_globals: Arc<NetworkGlobals<TSpec>>,
    /// A queue of events that the `PeerManager` is waiting to produce.
    events: SmallVec<[PeerManagerEvent; 16]>,
    /// A collection of peers awaiting to be Ping'd.
    ping_peers: HashSetDelay<PeerId>,
    /// A collection of peers awaiting to be Status'd.
    status_peers: HashSetDelay<PeerId>,
    /// The target number of peers we would like to connect to.
    target_peers: usize,
    /// The maximum number of peers we allow (exceptions for subnet peers).
    max_peers: usize,
    /// The discovery service.
    discovery: Discovery<TSpec>,
    /// The heartbeat interval to perform routine maintenance.
    heartbeat: tokio::time::Interval,
    /// The logger associated with the `PeerManager`.
    log: slog::Logger,
}

/// The events that the `PeerManager` outputs (requests).
pub enum PeerManagerEvent {
    /// Dial a PeerId.
    Dial(PeerId),
    /// Inform libp2p that our external socket addr has been updated.
    SocketUpdated(Multiaddr),
    /// Sends a STATUS to a peer.
    Status(PeerId),
    /// Sends a PING to a peer.
    Ping(PeerId),
    /// Request METADATA from a peer.
    MetaData(PeerId),
    /// The peer should be disconnected.
    DisconnectPeer(PeerId, GoodbyeReason),
}

|
||||
impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
// NOTE: Must be run inside a tokio executor.
|
||||
pub fn new(
|
||||
local_key: &Keypair,
|
||||
config: &NetworkConfig,
|
||||
network_globals: Arc<NetworkGlobals<TSpec>>,
|
||||
log: &slog::Logger,
|
||||
) -> error::Result<Self> {
|
||||
// start the discovery service
|
||||
let mut discovery = Discovery::new(local_key, config, network_globals.clone(), log)?;
|
||||
|
||||
// start searching for peers
|
||||
discovery.discover_peers();
|
||||
|
||||
let heartbeat = tokio::time::interval(tokio::time::Duration::from_secs(HEARTBEAT_INTERVAL));
|
||||
|
||||
Ok(PeerManager {
|
||||
network_globals,
|
||||
events: SmallVec::new(),
|
||||
ping_peers: HashSetDelay::new(Duration::from_secs(PING_INTERVAL)),
|
||||
status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)),
|
||||
target_peers: config.target_peers,
|
||||
max_peers: (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as usize,
|
||||
discovery,
|
||||
heartbeat,
|
||||
log: log.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
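    // Worked example (not part of the original change): with `config.target_peers`
    // set to 50 and PEER_EXCESS_FACTOR = 0.1, `max_peers` is ceil(50 * 1.1) = 55,
    // i.e. up to five excess inbound peers are tolerated before
    // `peer_limit_reached` starts rejecting new connections.
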
    /* Public accessible functions */

    /// Attempts to connect to a peer.
    ///
    /// Returns true if the peer was accepted into the database.
    pub fn dial_peer(&mut self, peer_id: &PeerId) -> bool {
        self.events.push(PeerManagerEvent::Dial(peer_id.clone()));
        self.connect_peer(peer_id, ConnectingType::Dialing)
    }

    /// The application layer wants to disconnect from a peer for a particular reason.
    ///
    /// All instant disconnections are fatal and we ban the associated peer.
    ///
    /// This will send a goodbye and disconnect the peer if it is connected or dialing.
    pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason) {
        // get the peer info
        if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
            debug!(self.log, "Sending goodbye to peer"; "peer_id" => peer_id.to_string(), "reason" => reason.to_string(), "score" => info.score.to_string());
            // Goodbyes are fatal
            info.score.apply_peer_action(PeerAction::Fatal);
            if info.connection_status.is_connected_or_dialing() {
                self.events
                    .push(PeerManagerEvent::DisconnectPeer(peer_id.clone(), reason));
            }
        }
    }

    /// Reports a peer for some action.
    ///
    /// If the peer doesn't exist, log a warning and insert defaults.
    pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction) {
        // TODO: Remove duplicate code - This is duplicated in the update_peer_scores()
        // function.

        // Variables to update the PeerDb if required.
        let mut ban_peer = None;
        let mut unban_peer = None;

        if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
            let previous_state = info.score.state();
            info.score.apply_peer_action(action);
            if previous_state != info.score.state() {
                match info.score.state() {
                    ScoreState::Banned => {
                        debug!(self.log, "Peer has been banned"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string());
                        ban_peer = Some(peer_id.clone());
                        if info.connection_status.is_connected_or_dialing() {
                            self.events.push(PeerManagerEvent::DisconnectPeer(
                                peer_id.clone(),
                                GoodbyeReason::BadScore,
                            ));
                        }
                    }
                    ScoreState::Disconnected => {
                        debug!(self.log, "Peer transitioned to disconnect state"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string(), "past_state" => previous_state.to_string());
                        // disconnect the peer if it's currently connected or dialing
                        unban_peer = Some(peer_id.clone());
                        if info.connection_status.is_connected_or_dialing() {
                            self.events.push(PeerManagerEvent::DisconnectPeer(
                                peer_id.clone(),
                                GoodbyeReason::BadScore,
                            ));
                        }
                        // TODO: Update the peer manager to inform that the peer is disconnecting.
                    }
                    ScoreState::Healthy => {
                        debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string(), "past_state" => previous_state.to_string());
                        // unban the peer if it was previously banned.
                        unban_peer = Some(peer_id.clone());
                    }
                }
            } else {
                debug!(self.log, "Peer score adjusted"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string());
            }
        }

        // Update the PeerDB state.
        if let Some(peer_id) = ban_peer.take() {
            self.network_globals.peers.write().ban(&peer_id);
        } else if let Some(peer_id) = unban_peer.take() {
            self.network_globals.peers.write().unban(&peer_id);
        }
    }

    /* Discovery Requests */

    /// Provides a reference to the underlying discovery service.
    pub fn discovery(&self) -> &Discovery<TSpec> {
        &self.discovery
    }

    /// Provides a mutable reference to the underlying discovery service.
    pub fn discovery_mut(&mut self) -> &mut Discovery<TSpec> {
        &mut self.discovery
    }

    /// A request to find peers on a given subnet.
    pub fn discover_subnet_peers(&mut self, subnet_id: SubnetId, min_ttl: Option<Instant>) {
        // Extend the time to maintain peers if required.
        if let Some(min_ttl) = min_ttl {
            self.network_globals
                .peers
                .write()
                .extend_peers_on_subnet(subnet_id, min_ttl);
        }

        // request the subnet query from discovery
        self.discovery.discover_subnet_peers(subnet_id, min_ttl);
    }

    /// A STATUS message has been received from a peer. This resets the status timer.
    pub fn peer_statusd(&mut self, peer_id: &PeerId) {
        self.status_peers.insert(peer_id.clone());
    }

    /* Notifications from the Swarm */

    /// Updates the state of the peer as disconnected.
    ///
    /// This is also called when dialing a peer fails.
    pub fn notify_disconnect(&mut self, peer_id: &PeerId) {
        self.network_globals.peers.write().disconnect(peer_id);

        // remove the ping and status timer for the peer
        self.ping_peers.remove(peer_id);
        self.status_peers.remove(peer_id);
        metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT);
        metrics::set_gauge(
            &metrics::PEERS_CONNECTED,
            self.network_globals.connected_peers() as i64,
        );
    }

    /// A dial attempt has failed.
    ///
    /// NOTE: It can be the case that we are dialing a peer and during the dialing process the peer
    /// connects and the dial attempt later fails. To handle this, we only update the peer_db if
    /// the peer is not already connected.
    pub fn notify_dial_failure(&mut self, peer_id: &PeerId) {
        if !self.network_globals.peers.read().is_connected(peer_id) {
            self.notify_disconnect(peer_id);
        }
    }

    /// Sets a peer as connected as long as their reputation allows it.
    /// Informs if the peer was accepted.
    pub fn connect_ingoing(&mut self, peer_id: &PeerId) -> bool {
        self.connect_peer(peer_id, ConnectingType::IngoingConnected)
    }

    /// Sets a peer as connected as long as their reputation allows it.
    /// Informs if the peer was accepted.
    pub fn connect_outgoing(&mut self, peer_id: &PeerId) -> bool {
        self.connect_peer(peer_id, ConnectingType::OutgoingConnected)
    }

    /// Updates the database informing that a peer is being disconnected.
    pub fn _disconnecting_peer(&mut self, _peer_id: &PeerId) -> bool {
        // TODO: implement
        true
    }

    /// Reports if a peer is banned or not.
    ///
    /// This is used to determine if we should accept incoming connections.
    pub fn is_banned(&self, peer_id: &PeerId) -> bool {
        self.network_globals.peers.read().is_banned(peer_id)
    }

    /// Reports whether the peer limit is reached, in which case we stop allowing new incoming
    /// connections.
    pub fn peer_limit_reached(&self) -> bool {
        self.network_globals.connected_or_dialing_peers() >= self.max_peers
    }

    /// Updates `PeerInfo` with `identify` information.
    pub fn identify(&mut self, peer_id: &PeerId, info: &IdentifyInfo) {
        if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
            peer_info.client = client::Client::from_identify_info(info);
            peer_info.listening_addresses = info.listen_addrs.clone();
        } else {
            crit!(self.log, "Received an Identify response from an unknown peer"; "peer_id" => peer_id.to_string());
        }
    }

    /// An error has occurred in the RPC.
    ///
    /// This adjusts a peer's score based on the error.
    pub fn handle_rpc_error(&mut self, peer_id: &PeerId, protocol: Protocol, err: &RPCError) {
        let client = self.network_globals.client(peer_id);
        let score = self.network_globals.peers.read().score(peer_id);
        debug!(self.log, "RPC Error"; "protocol" => protocol.to_string(), "err" => err.to_string(), "client" => client.to_string(), "peer_id" => peer_id.to_string(), "score" => score.to_string());

        // Map this error to a `PeerAction` (if any)
        let peer_action = match err {
            RPCError::IncompleteStream => {
                // They closed early, this could mean poor connection
                PeerAction::MidToleranceError
            }
            RPCError::InternalError(_) | RPCError::HandlerRejected => {
                // Our fault. Do nothing
                return;
            }
            RPCError::InvalidData => {
                // Peer is not complying with the protocol. This is considered a malicious action
                PeerAction::Fatal
            }
            RPCError::IoError(_e) => {
                // this could be their fault or ours, so we tolerate it
                PeerAction::HighToleranceError
            }
            RPCError::ErrorResponse(code, _) => match code {
                RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError,
                RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError,
                RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError,
                RPCResponseErrorCode::RateLimited => PeerAction::LowToleranceError,
            },
            RPCError::SSZDecodeError(_) => PeerAction::Fatal,
            RPCError::UnsupportedProtocol => {
                // Not supporting a protocol shouldn't be considered a malicious action, but
                // it is an action that in some cases will make the peer unfit to continue
                // communicating.
                // TODO: To avoid punishing a peer repeatedly for not supporting a protocol, this
                // information could be stored and used to prevent sending requests for the given
                // protocol to this peer. Similarly, to avoid blacklisting a peer for a protocol
                // forever, if stored this information should expire.
                match protocol {
                    Protocol::Ping => PeerAction::Fatal,
                    Protocol::BlocksByRange => return,
                    Protocol::BlocksByRoot => return,
                    Protocol::Goodbye => return,
                    Protocol::MetaData => PeerAction::LowToleranceError,
                    Protocol::Status => PeerAction::LowToleranceError,
                }
            }
            RPCError::StreamTimeout => match protocol {
                Protocol::Ping => PeerAction::LowToleranceError,
                Protocol::BlocksByRange => PeerAction::MidToleranceError,
                Protocol::BlocksByRoot => PeerAction::MidToleranceError,
                Protocol::Goodbye => return,
                Protocol::MetaData => return,
                Protocol::Status => return,
            },
            RPCError::NegotiationTimeout => PeerAction::HighToleranceError,
            RPCError::RateLimited => match protocol {
                Protocol::Ping => PeerAction::MidToleranceError,
                Protocol::BlocksByRange => PeerAction::HighToleranceError,
                Protocol::BlocksByRoot => PeerAction::HighToleranceError,
                Protocol::Goodbye => PeerAction::LowToleranceError,
                Protocol::MetaData => PeerAction::LowToleranceError,
                Protocol::Status => PeerAction::LowToleranceError,
            },
        };

        self.report_peer(peer_id, peer_action);
    }

    /// A ping request has been received.
    // NOTE: The behaviour responds with a PONG automatically
    // TODO: Update last seen
    pub fn ping_request(&mut self, peer_id: &PeerId, seq: u64) {
        if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) {
            // received a ping
            // reset the to-ping timer for this peer
            debug!(self.log, "Received a ping request"; "peer_id" => peer_id.to_string(), "seq_no" => seq);
            self.ping_peers.insert(peer_id.clone());

            // if the sequence number is unknown, request an update of the peer's metadata.
            if let Some(meta_data) = &peer_info.meta_data {
                if meta_data.seq_number < seq {
                    debug!(self.log, "Requesting new metadata from peer";
                        "peer_id" => peer_id.to_string(), "known_seq_no" => meta_data.seq_number, "ping_seq_no" => seq);
                    self.events
                        .push(PeerManagerEvent::MetaData(peer_id.clone()));
                }
            } else {
                // if we don't know the meta-data, request it
                debug!(self.log, "Requesting first metadata from peer";
                    "peer_id" => peer_id.to_string());
                self.events
                    .push(PeerManagerEvent::MetaData(peer_id.clone()));
            }
        } else {
            crit!(self.log, "Received a PING from an unknown peer";
                "peer_id" => peer_id.to_string());
        }
    }

    /// A PONG has been returned from a peer.
    // TODO: Update last seen
    pub fn pong_response(&mut self, peer_id: &PeerId, seq: u64) {
        if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) {
            // received a pong

            // if the sequence number is unknown, request an update of the peer's metadata.
            if let Some(meta_data) = &peer_info.meta_data {
                if meta_data.seq_number < seq {
                    debug!(self.log, "Requesting new metadata from peer";
                        "peer_id" => peer_id.to_string(), "known_seq_no" => meta_data.seq_number, "pong_seq_no" => seq);
                    self.events
                        .push(PeerManagerEvent::MetaData(peer_id.clone()));
                }
            } else {
                // if we don't know the meta-data, request it
                debug!(self.log, "Requesting first metadata from peer";
                    "peer_id" => peer_id.to_string());
                self.events
                    .push(PeerManagerEvent::MetaData(peer_id.clone()));
            }
        } else {
            crit!(self.log, "Received a PONG from an unknown peer"; "peer_id" => peer_id.to_string());
        }
    }

    /// Received a metadata response from a peer.
    // TODO: Update last seen
    pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData<TSpec>) {
        if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
            if let Some(known_meta_data) = &peer_info.meta_data {
                if known_meta_data.seq_number < meta_data.seq_number {
                    debug!(self.log, "Updating peer's metadata";
                        "peer_id" => peer_id.to_string(), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
                    peer_info.meta_data = Some(meta_data);
                } else {
                    debug!(self.log, "Received old metadata";
                        "peer_id" => peer_id.to_string(), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
                }
            } else {
                // we have no meta-data for this peer, update
                debug!(self.log, "Obtained peer's metadata";
                    "peer_id" => peer_id.to_string(), "new_seq_no" => meta_data.seq_number);
                peer_info.meta_data = Some(meta_data);
            }
        } else {
            crit!(self.log, "Received METADATA from an unknown peer";
                "peer_id" => peer_id.to_string());
        }
    }

    // Handles the libp2p request to obtain multiaddrs for `PeerId`s in order to dial them.
    pub fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
        if let Some(enr) = self.discovery.enr_of_peer(peer_id) {
            // ENRs may have multiple multiaddrs. The multiaddr associated with the UDP
            // port is removed, as it is assumed to be associated with the discv5 protocol (and
            // therefore irrelevant for other libp2p components).
            let mut out_list = enr.multiaddr();
            out_list.retain(|addr| {
                addr.iter()
                    .find(|v| match v {
                        MProtocol::Udp(_) => true,
                        _ => false,
                    })
                    .is_none()
            });

            out_list
        } else {
            // PeerId is not known
            Vec::new()
        }
    }

    /* Internal functions */

    // The underlying discovery server has updated our external IP address. We send this up to
    // notify libp2p.
    fn socket_updated(&mut self, socket: SocketAddr) {
        // Build a multiaddr to report to libp2p
        let mut multiaddr = Multiaddr::from(socket.ip());
        // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling
        // should handle this.
        multiaddr.push(MProtocol::Tcp(self.network_globals.listen_port_tcp()));
        self.events.push(PeerManagerEvent::SocketUpdated(multiaddr));
    }

    /// Peers that have been returned by discovery requests are dialed here if they are suitable.
    ///
    /// NOTE: By dialing `PeerId`s and not multiaddrs, libp2p requests the multiaddr associated
    /// with a new `PeerId`, which involves a discovery routing table lookup. We could dial the
    /// multiaddr here, however this could lead to duplicate `PeerId`s etc. If the lookup
    /// proves resource constraining, we should switch to multiaddr dialling here.
    fn peers_discovered(&mut self, results: HashMap<PeerId, Option<Instant>>) {
        let mut to_dial_peers = Vec::new();

        let connected_or_dialing = self.network_globals.connected_or_dialing_peers();
        for (peer_id, min_ttl) in results {
            // we attempt a connection if this peer is a subnet peer or if the max peer count
            // is not yet filled (including dialling peers)
            if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers)
                && !self
                    .network_globals
                    .peers
                    .read()
                    .is_connected_or_dialing(&peer_id)
                && !self
                    .network_globals
                    .peers
                    .read()
                    .is_banned_or_disconnected(&peer_id)
            {
                // TODO: Update output
                // This should be updated with the peer dialing. In fact created once the peer is
                // dialed
                if let Some(min_ttl) = min_ttl {
                    self.network_globals
                        .peers
                        .write()
                        .update_min_ttl(&peer_id, min_ttl);
                }
                to_dial_peers.push(peer_id);
            }
        }
        for peer_id in to_dial_peers {
            debug!(self.log, "Dialing discovered peer"; "peer_id"=> peer_id.to_string());
            self.dial_peer(&peer_id);
        }
    }

    /// Registers a peer as connected. The `connection` parameter determines whether the peer is
    /// being dialed, or has connected to us (ingoing/outgoing).
    ///
    /// This is called by `connect_ingoing` and `connect_outgoing`.
    ///
    /// This informs if the peer was accepted into the db or not.
    fn connect_peer(&mut self, peer_id: &PeerId, connection: ConnectingType) -> bool {
        // TODO: remove after timed updates
        //self.update_reputations();

        {
            let mut peerdb = self.network_globals.peers.write();
            if peerdb.connection_status(peer_id).map(|c| c.is_banned()) == Some(true) {
                // don't connect if the peer is banned
                slog::crit!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => peer_id.to_string());
            }

            // register the connection in the peer DB according to its direction
            match connection {
                ConnectingType::Dialing => peerdb.dialing_peer(peer_id),
                ConnectingType::IngoingConnected => peerdb.connect_ingoing(peer_id),
                ConnectingType::OutgoingConnected => peerdb.connect_outgoing(peer_id),
            }
        }

        // start a ping and status timer for the peer
        self.ping_peers.insert(peer_id.clone());
        self.status_peers.insert(peer_id.clone());

        // increment prometheus metrics
        metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
        metrics::set_gauge(
            &metrics::PEERS_CONNECTED,
            self.network_globals.connected_peers() as i64,
        );

        true
    }

/// Updates the scores of known peers according to their connection
|
||||
/// status and the time that has passed.
|
||||
/// NOTE: This is experimental and will likely be adjusted
|
||||
fn update_peer_scores(&mut self) {
|
||||
/* Check how long have peers been in this state and update their reputations if needed */
|
||||
let mut pdb = self.network_globals.peers.write();
|
||||
|
||||
let mut to_ban_peers = Vec::new();
|
||||
let mut to_unban_peers = Vec::new();
|
||||
|
||||
for (peer_id, info) in pdb.peers_mut() {
|
||||
let previous_state = info.score.state();
|
||||
// Update scores
|
||||
info.score.update();
|
||||
|
||||
/* TODO: Implement logic about connection lifetimes
|
||||
match info.connection_status {
|
||||
Connected { .. } => {
|
||||
// Connected peers gain reputation by sending useful messages
|
||||
}
|
||||
Disconnected { since } | Banned { since } => {
|
||||
// For disconnected peers, lower their reputation by 1 for every hour they
|
||||
// stay disconnected. This helps us slowly forget disconnected peers.
|
||||
// In the same way, slowly allow banned peers back again.
|
||||
let dc_hours = now
|
||||
.checked_duration_since(since)
|
||||
.unwrap_or_else(|| Duration::from_secs(0))
|
||||
.as_secs()
|
||||
/ 3600;
|
||||
let last_dc_hours = self
|
||||
._last_updated
|
||||
.checked_duration_since(since)
|
||||
.unwrap_or_else(|| Duration::from_secs(0))
|
||||
.as_secs()
|
||||
/ 3600;
|
||||
if dc_hours > last_dc_hours {
|
||||
// this should be 1 most of the time
|
||||
let rep_dif = (dc_hours - last_dc_hours)
|
||||
.try_into()
|
||||
.unwrap_or(Rep::max_value());
|
||||
|
||||
info.reputation = if info.connection_status.is_banned() {
|
||||
info.reputation.saturating_add(rep_dif)
|
||||
} else {
|
||||
info.reputation.saturating_sub(rep_dif)
|
||||
};
|
||||
}
|
||||
}
|
||||
Dialing { since } => {
|
||||
// A peer shouldn't be dialing for more than 2 minutes
|
||||
if since.elapsed().as_secs() > 120 {
|
||||
warn!(self.log,"Peer has been dialing for too long"; "peer_id" => id.to_string());
|
||||
// TODO: decide how to handle this
|
||||
}
|
||||
}
|
||||
Unknown => {} //TODO: Handle this case
|
||||
}
|
||||
// Check if the peer gets banned or unbanned and if it should be disconnected
|
||||
if info.reputation < _MIN_REP_BEFORE_BAN && !info.connection_status.is_banned() {
|
||||
// This peer gets banned. Check if we should request disconnection
|
||||
ban_queue.push(id.clone());
|
||||
} else if info.reputation >= _MIN_REP_BEFORE_BAN && info.connection_status.is_banned() {
|
||||
// This peer gets unbanned
|
||||
unban_queue.push(id.clone());
|
||||
}
|
||||
*/
|
||||
|
||||
// handle score transitions
|
||||
if previous_state != info.score.state() {
|
||||
match info.score.state() {
|
||||
ScoreState::Banned => {
|
||||
debug!(self.log, "Peer has been banned"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string());
|
||||
to_ban_peers.push(peer_id.clone());
|
||||
if info.connection_status.is_connected_or_dialing() {
|
||||
self.events.push(PeerManagerEvent::DisconnectPeer(
|
||||
peer_id.clone(),
|
||||
GoodbyeReason::BadScore,
|
||||
));
|
||||
}
|
||||
}
|
||||
ScoreState::Disconnected => {
|
||||
debug!(self.log, "Peer transitioned to disconnect state"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string(), "past_state" => previous_state.to_string());
|
||||
// disconnect the peer if it's currently connected or dialing
|
||||
to_unban_peers.push(peer_id.clone());
|
||||
if info.connection_status.is_connected_or_dialing() {
|
||||
self.events.push(PeerManagerEvent::DisconnectPeer(
|
||||
peer_id.clone(),
|
||||
GoodbyeReason::BadScore,
|
||||
));
|
||||
}
|
||||
// TODO: Update peer manager to report that it's disconnecting.
|
||||
}
|
||||
ScoreState::Healthy => {
|
||||
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string(), "past_state" => previous_state.to_string());
|
||||
// unban the peer if it was previously banned.
|
||||
to_unban_peers.push(peer_id.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// process banning peers
|
||||
for peer_id in to_ban_peers {
|
||||
pdb.ban(&peer_id);
|
||||
}
|
||||
// process unbanning peers
|
||||
for peer_id in to_unban_peers {
|
||||
pdb.unban(&peer_id);
|
||||
}
|
||||
}
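
    // Note on the pattern above: `to_ban_peers` and `to_unban_peers` are collected during the
    // iteration and applied only after it ends, because `peers_mut()` holds a mutable borrow
    // of the peer map, so `pdb.ban`/`pdb.unban` cannot be called from inside the loop.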

    /// The peer manager's heartbeat maintains the peer count and peer reputations.
    ///
    /// It will request discovery queries if the peer count has not reached the desired number of
    /// peers.
    ///
    /// NOTE: Discovery will only add a new query if one isn't already queued.
    fn heartbeat(&mut self) {
        // TODO: Provide a back-off time for discovery queries. I.e. queue many initially, then
        // only perform discoveries over a larger fixed interval. Perhaps one every 6 heartbeats.
        let peer_count = self.network_globals.connected_or_dialing_peers();
        if peer_count < self.target_peers {
            // If we need more peers, queue a discovery lookup.
            debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers);
            self.discovery.discover_peers();
        }

        // Update the peers' scores.
        self.update_peer_scores();

        let connected_peer_count = self.network_globals.connected_peers();
        if connected_peer_count > self.target_peers {
            // Remove excess peers with the worst scores, but keep subnet peers.
            for (peer_id, _) in self
                .network_globals
                .peers
                .read()
                .worst_connected_peers()
                .iter()
                .filter(|(_, info)| !info.has_future_duty())
                .take(connected_peer_count - self.target_peers)
                // We only need to disconnect peers with healthy scores, since the others were
                // already disconnected in `update_peer_scores`.
                .filter(|(_, info)| info.score.state() == ScoreState::Healthy)
            {
                self.events.push(PeerManagerEvent::DisconnectPeer(
                    (*peer_id).clone(),
                    GoodbyeReason::TooManyPeers,
                ));
            }
        }
    }
}

impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
    type Item = PeerManagerEvent;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // perform the heartbeat when necessary
        while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) {
            self.heartbeat();
        }

        // handle any discovery events
        while let Poll::Ready(event) = self.discovery.poll(cx) {
            match event {
                DiscoveryEvent::SocketUpdated(socket_addr) => self.socket_updated(socket_addr),
                DiscoveryEvent::QueryResult(results) => self.peers_discovered(results),
            }
        }

        // poll the timeouts for pings and statuses
        loop {
            match self.ping_peers.poll_next_unpin(cx) {
                Poll::Ready(Some(Ok(peer_id))) => {
                    self.ping_peers.insert(peer_id.clone());
                    self.events.push(PeerManagerEvent::Ping(peer_id));
                }
                Poll::Ready(Some(Err(e))) => {
                    error!(self.log, "Failed to check for peers to ping"; "error" => e.to_string())
                }
                Poll::Ready(None) | Poll::Pending => break,
            }
        }

        // We don't want to update peers during syncing, since this may result in a new chain
        // being synced, which leads to inefficient re-downloads of blocks.
        if !self.network_globals.is_syncing() {
            loop {
                match self.status_peers.poll_next_unpin(cx) {
                    Poll::Ready(Some(Ok(peer_id))) => {
                        self.status_peers.insert(peer_id.clone());
                        self.events.push(PeerManagerEvent::Status(peer_id))
                    }
                    Poll::Ready(Some(Err(e))) => {
                        error!(self.log, "Failed to check for peers to status"; "error" => e.to_string())
                    }
                    Poll::Ready(None) | Poll::Pending => break,
                }
            }
        }

        if !self.events.is_empty() {
            return Poll::Ready(Some(self.events.remove(0)));
        } else {
            self.events.shrink_to_fit();
        }

        Poll::Pending
    }
}
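
// Illustration (not from this crate): a parent service could drain the manager's events from
// its own poll loop along these lines. The function name and the RPC/goodbye plumbing hinted
// at in the comments are assumptions; `poll_next_unpin` requires `futures::StreamExt`.
#[allow(dead_code)]
fn drain_peer_manager_events<TSpec: EthSpec>(
    manager: &mut PeerManager<TSpec>,
    cx: &mut Context<'_>,
) {
    while let Poll::Ready(Some(event)) = manager.poll_next_unpin(cx) {
        match event {
            PeerManagerEvent::Ping(_peer_id) => { /* send a PING RPC to the peer */ }
            PeerManagerEvent::Status(_peer_id) => { /* send a STATUS RPC to the peer */ }
            PeerManagerEvent::DisconnectPeer(_peer_id, _reason) => {
                /* send a GOODBYE with the reason, then drop the connection */
            }
            _ => {}
        }
    }
}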

enum ConnectingType {
    /// We are in the process of dialing this peer.
    Dialing,
    /// A peer has dialed us.
    IngoingConnected,
    /// We have successfully dialed a peer.
    OutgoingConnected,
}

@@ -1,5 +1,5 @@
 use super::client::Client;
-use super::peerdb::{Rep, DEFAULT_REPUTATION};
+use super::score::Score;
 use super::PeerSyncStatus;
 use crate::rpc::MetaData;
 use crate::Multiaddr;
@@ -18,7 +18,7 @@ pub struct PeerInfo<T: EthSpec> {
     /// The connection status of the peer
     _status: PeerStatus,
     /// The peer's reputation
-    pub reputation: Rep,
+    pub score: Score,
     /// Client managing this peer
     pub client: Client,
     /// Connection status of this peer
@@ -31,18 +31,23 @@ pub struct PeerInfo<T: EthSpec> {
     /// The ENR subnet bitfield of the peer. This may be determined after its initial
     /// connection.
     pub meta_data: Option<MetaData<T>>,
+    /// The time we would like to retain this peer. After this time, the peer is no longer
+    /// necessary.
+    #[serde(skip)]
+    pub min_ttl: Option<Instant>,
 }

 impl<TSpec: EthSpec> Default for PeerInfo<TSpec> {
     fn default() -> PeerInfo<TSpec> {
         PeerInfo {
             _status: Default::default(),
-            reputation: DEFAULT_REPUTATION,
+            score: Score::default(),
             client: Client::default(),
             connection_status: Default::default(),
             listening_addresses: vec![],
             sync_status: PeerSyncStatus::Unknown,
             meta_data: None,
+            min_ttl: None,
         }
     }
 }
@@ -58,6 +63,11 @@ impl<T: EthSpec> PeerInfo<T> {
         }
         false
     }
+
+    /// Reports if this peer has some future validator duty, in which case it is valuable to keep it.
+    pub fn has_future_duty(&self) -> bool {
+        self.min_ttl.map_or(false, |i| i >= Instant::now())
+    }
 }
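
// Example (illustrative, not part of the diff): a peer whose `min_ttl` lies in the future
// reports a pending duty, which is what spares it from the manager's excess-peer disconnects.
// `MinimalEthSpec` and the `Duration` import are assumptions here.
#[cfg(test)]
#[test]
fn sketch_future_duty() {
    use std::time::{Duration, Instant};
    use types::MinimalEthSpec;
    let mut info: PeerInfo<MinimalEthSpec> = PeerInfo::default();
    info.min_ttl = Some(Instant::now() + Duration::from_secs(60));
    assert!(info.has_future_duty());
}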

#[derive(Clone, Debug, Serialize)]
@@ -141,7 +151,7 @@ impl Default for PeerConnectionStatus {
 }

 impl PeerConnectionStatus {
-    /// Checks if the status is connected
+    /// Checks if the status is connected.
     pub fn is_connected(&self) -> bool {
         match self {
             PeerConnectionStatus::Connected { .. } => true,
@@ -149,7 +159,7 @@ impl PeerConnectionStatus {
         }
     }

-    /// Checks if the status is connected
+    /// Checks if the status is dialing.
     pub fn is_dialing(&self) -> bool {
         match self {
             PeerConnectionStatus::Dialing { .. } => true,
@@ -157,7 +167,12 @@ impl PeerConnectionStatus {
         }
     }

-    /// Checks if the status is banned
+    /// The peer is either connected or in the process of being dialed.
+    pub fn is_connected_or_dialing(&self) -> bool {
+        self.is_connected() || self.is_dialing()
+    }
+
+    /// Checks if the status is banned.
     pub fn is_banned(&self) -> bool {
         match self {
             PeerConnectionStatus::Banned { .. } => true,
@@ -165,7 +180,7 @@ impl PeerConnectionStatus {
         }
     }

-    /// Checks if the status is disconnected
+    /// Checks if the status is disconnected.
     pub fn is_disconnected(&self) -> bool {
         match self {
             Disconnected { .. } => true,
@@ -209,6 +224,13 @@ impl PeerConnectionStatus {
         };
     }

+    /// The score system has unbanned the peer. Update the connection status.
+    pub fn unban(&mut self) {
+        if let PeerConnectionStatus::Banned { since } = self {
+            *self = PeerConnectionStatus::Disconnected { since: *since }
+        }
+    }
+
     pub fn connections(&self) -> (u8, u8) {
         match self {
             Connected { n_in, n_out } => (*n_in, *n_out),
@@ -1,74 +1,48 @@
 use super::peer_info::{PeerConnectionStatus, PeerInfo};
 use super::peer_sync_status::PeerSyncStatus;
+use super::score::{Score, ScoreState};
 use crate::rpc::methods::MetaData;
 use crate::PeerId;
-use slog::{crit, debug, warn};
-use std::collections::{hash_map::Entry, HashMap};
+use rand::seq::SliceRandom;
+use slog::{crit, debug, trace, warn};
+use std::collections::HashMap;
 use std::time::Instant;
 use types::{EthSpec, SubnetId};

-/// A peer's reputation (perceived potential usefulness)
-pub type Rep = u8;
-
-/// Reputation change (positive or negative)
-pub struct RepChange {
-    is_good: bool,
-    diff: Rep,
-}
-
-/// Max number of disconnected nodes to remember
-const MAX_DC_PEERS: usize = 30;
-
-/// The default starting reputation for an unknown peer.
-pub const DEFAULT_REPUTATION: Rep = 50;
+/// Max number of disconnected nodes to remember.
+const MAX_DC_PEERS: usize = 500;
+/// The maximum number of banned nodes to remember.
+const MAX_BANNED_PEERS: usize = 1000;

 /// Storage of known peers, their reputation and information
 pub struct PeerDB<TSpec: EthSpec> {
     /// The collection of known connected peers, their status and reputation
     peers: HashMap<PeerId, PeerInfo<TSpec>>,
-    /// Tracking of number of disconnected nodes
-    n_dc: usize,
+    /// The number of disconnected nodes in the database.
+    disconnected_peers: usize,
+    /// The number of banned peers in the database.
+    banned_peers: usize,
     /// PeerDB's logger
     log: slog::Logger,
 }

-impl RepChange {
-    pub fn good(diff: Rep) -> Self {
-        RepChange { is_good: true, diff }
-    }
-    pub fn bad(diff: Rep) -> Self {
-        RepChange { is_good: false, diff }
-    }
-    pub const fn worst() -> Self {
-        RepChange {
-            is_good: false,
-            diff: Rep::max_value(),
-        }
-    }
-}
-
 impl<TSpec: EthSpec> PeerDB<TSpec> {
     pub fn new(log: &slog::Logger) -> Self {
         Self {
             log: log.clone(),
-            n_dc: 0,
+            disconnected_peers: 0,
+            banned_peers: 0,
             peers: HashMap::new(),
         }
     }

     /* Getters */

-    /// Gives the reputation of a peer, or DEFAULT_REPUTATION if it is unknown.
-    pub fn reputation(&self, peer_id: &PeerId) -> Rep {
+    /// Gives the score of a peer, or a default score if it is unknown.
+    pub fn score(&self, peer_id: &PeerId) -> Score {
         self.peers
             .get(peer_id)
-            .map_or(DEFAULT_REPUTATION, |info| info.reputation)
+            .map_or(Score::default(), |info| info.score)
     }

     /// Returns an iterator over all peers in the db.
@@ -77,7 +51,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
     }

     /// Returns an iterator over all peers in the db.
-    pub(super) fn _peers_mut(&mut self) -> impl Iterator<Item = (&PeerId, &mut PeerInfo<TSpec>)> {
+    pub(super) fn peers_mut(&mut self) -> impl Iterator<Item = (&PeerId, &mut PeerInfo<TSpec>)> {
         self.peers.iter_mut()
     }

@@ -97,8 +71,25 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
         self.peers.get_mut(peer_id)
     }

+    /// Returns if the peer is already connected.
+    pub fn is_connected(&self, peer_id: &PeerId) -> bool {
+        if let Some(PeerConnectionStatus::Connected { .. }) = self.connection_status(peer_id) {
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Returns true if we are connected to or currently dialing the peer.
+    pub fn is_connected_or_dialing(&self, peer_id: &PeerId) -> bool {
+        match self.connection_status(peer_id) {
+            Some(PeerConnectionStatus::Connected { .. })
+            | Some(PeerConnectionStatus::Dialing { .. }) => true,
+            _ => false,
+        }
+    }
     /// Returns true if the peer is synced at least to our current head.
-    pub fn peer_synced(&self, peer_id: &PeerId) -> bool {
+    pub fn is_synced(&self, peer_id: &PeerId) -> bool {
         match self.peers.get(peer_id).map(|info| &info.sync_status) {
             Some(PeerSyncStatus::Synced { .. }) => true,
             Some(_) => false,
@@ -106,6 +97,22 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
         }
     }

+    /// Returns true if the peer is banned.
+    pub fn is_banned(&self, peer_id: &PeerId) -> bool {
+        match self.peers.get(peer_id).map(|info| info.score.state()) {
+            Some(ScoreState::Banned) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if the peer is either banned or in the disconnected state.
+    pub fn is_banned_or_disconnected(&self, peer_id: &PeerId) -> bool {
+        match self.peers.get(peer_id).map(|info| info.score.state()) {
+            Some(ScoreState::Banned) | Some(ScoreState::Disconnected) => true,
+            _ => false,
+        }
+    }
+
     /// Gives the ids of all known connected peers.
     pub fn connected_peers(&self) -> impl Iterator<Item = (&PeerId, &PeerInfo<TSpec>)> {
         self.peers
@@ -170,8 +177,22 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
             .map(|(peer_id, _)| peer_id)
     }

+    /// Returns a vector of all connected peers sorted by score, beginning with the worst scores.
+    /// Ties get broken randomly.
+    pub fn worst_connected_peers(&self) -> Vec<(&PeerId, &PeerInfo<TSpec>)> {
+        let mut connected = self
+            .peers
+            .iter()
+            .filter(|(_, info)| info.connection_status.is_connected())
+            .collect::<Vec<_>>();
+
+        connected.shuffle(&mut rand::thread_rng());
+        connected.sort_by_key(|(_, info)| info.score);
+        connected
+    }
+
     /// Returns a vector containing peers (their ids and info), sorted by
-    /// reputation from highest to lowest, and filtered using `is_status`
+    /// score from highest to lowest, and filtered using `is_status`.
     pub fn best_peers_by_status<F>(&self, is_status: F) -> Vec<(&PeerId, &PeerInfo<TSpec>)>
     where
         F: Fn(&PeerConnectionStatus) -> bool,
@@ -181,8 +202,8 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
             .iter()
             .filter(|(_, info)| is_status(&info.connection_status))
             .collect::<Vec<_>>();
-        by_status.sort_by_key(|(_, info)| Rep::max_value() - info.reputation);
-        by_status
+        by_status.sort_by_key(|(_, info)| info.score);
+        by_status.into_iter().rev().collect()
     }
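
// Why `worst_connected_peers` shuffles before sorting: `sort_by_key` is stable, so entries
// with equal scores keep their freshly shuffled relative order — that is how ties are
// "broken randomly". A self-contained demonstration on plain tuples (illustrative only):
#[allow(dead_code)]
fn worst_first(mut peers: Vec<(&'static str, i32)>) -> Vec<(&'static str, i32)> {
    use rand::seq::SliceRandom;
    peers.shuffle(&mut rand::thread_rng());
    peers.sort_by_key(|&(_, score)| score); // lowest (worst) score first; ties stay shuffled
    peers
}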

    /// Returns the peer with highest reputation that satisfies `is_status`
@@ -193,7 +214,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
         self.peers
             .iter()
             .filter(|(_, info)| is_status(&info.connection_status))
-            .max_by_key(|(_, info)| info.reputation)
+            .max_by_key(|(_, info)| info.score)
             .map(|(id, _)| id)
     }

@@ -203,24 +224,6 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
             .map(|info| info.connection_status.clone())
     }

-    /// Returns if the peer is already connected.
-    pub fn is_connected(&self, peer_id: &PeerId) -> bool {
-        if let Some(PeerConnectionStatus::Connected { .. }) = self.connection_status(peer_id) {
-            true
-        } else {
-            false
-        }
-    }
-
-    /// If we are connected or currently dialing the peer returns true.
-    pub fn is_connected_or_dialing(&self, peer_id: &PeerId) -> bool {
-        match self.connection_status(peer_id) {
-            Some(PeerConnectionStatus::Connected { .. })
-            | Some(PeerConnectionStatus::Dialing { .. }) => true,
-            _ => false,
-        }
-    }
-
     /* Setters */

     /// A peer is being dialed.
@@ -228,12 +231,50 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
         let info = self.peers.entry(peer_id.clone()).or_default();

         if info.connection_status.is_disconnected() {
-            self.n_dc = self.n_dc.saturating_sub(1);
+            self.disconnected_peers = self.disconnected_peers.saturating_sub(1);
         }
+        if info.connection_status.is_banned() {
+            self.banned_peers = self.banned_peers.saturating_sub(1);
+        }
         info.connection_status = PeerConnectionStatus::Dialing {
             since: Instant::now(),
         };
-        debug!(self.log, "Peer dialing in db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
     }

+    /// Update the min_ttl of a peer.
+    pub fn update_min_ttl(&mut self, peer_id: &PeerId, min_ttl: Instant) {
+        let info = self.peers.entry(peer_id.clone()).or_default();
+
+        // only update if the ttl is longer
+        if info.min_ttl.is_none() || Some(min_ttl) > info.min_ttl {
+            info.min_ttl = Some(min_ttl);
+
+            let min_ttl_secs = min_ttl
+                .checked_duration_since(Instant::now())
+                .map(|duration| duration.as_secs())
+                .unwrap_or_else(|| 0);
+            debug!(self.log, "Updating the time a peer is required for"; "peer_id" => peer_id.to_string(), "future_min_ttl_secs" => min_ttl_secs);
+        }
+    }
+
+    /// Extends the ttl of all peers on the given subnet that have a shorter
+    /// min_ttl than what's given.
+    pub fn extend_peers_on_subnet(&mut self, subnet_id: SubnetId, min_ttl: Instant) {
+        let log = &self.log;
+        self.peers.iter_mut()
+            .filter(move |(_, info)| {
+                info.connection_status.is_connected() && info.on_subnet(subnet_id)
+            })
+            .for_each(|(peer_id, info)| {
+                if info.min_ttl.is_none() || Some(min_ttl) > info.min_ttl {
+                    info.min_ttl = Some(min_ttl);
+                }
+                let min_ttl_secs = min_ttl
+                    .checked_duration_since(Instant::now())
+                    .map(|duration| duration.as_secs())
+                    .unwrap_or_else(|| 0);
+                trace!(log, "Updating minimum duration a peer is required for"; "peer_id" => peer_id.to_string(), "min_ttl" => min_ttl_secs);
+            });
+    }
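
    // Example (illustrative; the one-minute duration and the function name are stand-ins):
    // retaining a specific peer, and every connected peer on a subnet, for another minute.
    #[allow(dead_code)]
    fn sketch_retain_subnet_peers(
        pdb: &mut PeerDB<impl EthSpec>,
        peer_id: &PeerId,
        subnet_id: SubnetId,
    ) {
        use std::time::Duration;
        let until = Instant::now() + Duration::from_secs(60);
        pdb.update_min_ttl(peer_id, until);
        pdb.extend_peers_on_subnet(subnet_id, until);
    }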

    /// Sets a peer as connected with an ingoing connection.
@@ -241,10 +282,12 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
         let info = self.peers.entry(peer_id.clone()).or_default();

         if info.connection_status.is_disconnected() {
-            self.n_dc = self.n_dc.saturating_sub(1);
+            self.disconnected_peers = self.disconnected_peers.saturating_sub(1);
         }
+        if info.connection_status.is_banned() {
+            self.banned_peers = self.banned_peers.saturating_sub(1);
+        }
         info.connection_status.connect_ingoing();
-        debug!(self.log, "Peer connected to db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
     }

     /// Sets a peer as connected with an outgoing connection.
@@ -252,58 +295,109 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
         let info = self.peers.entry(peer_id.clone()).or_default();

         if info.connection_status.is_disconnected() {
-            self.n_dc = self.n_dc.saturating_sub(1);
+            self.disconnected_peers = self.disconnected_peers.saturating_sub(1);
         }
+        if info.connection_status.is_banned() {
+            self.banned_peers = self.banned_peers.saturating_sub(1);
+        }
         info.connection_status.connect_outgoing();
-        debug!(self.log, "Peer connected to db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
     }

     /// Sets the peer as disconnected. A banned peer remains banned.
     pub fn disconnect(&mut self, peer_id: &PeerId) {
-        let log_ref = &self.log;
-        let info = self.peers.entry(peer_id.clone()).or_insert_with(|| {
-            warn!(log_ref, "Disconnecting unknown peer";
-                "peer_id" => peer_id.to_string());
-            PeerInfo::default()
-        });
-        if !info.connection_status.is_disconnected() && !info.connection_status.is_banned() {
-            info.connection_status.disconnect();
-            self.n_dc += 1;
-        }
-        debug!(self.log, "Peer disconnected from db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
-        self.shrink_to_fit();
-    }
-
-    /// Drops the peers with the lowest reputation so that the number of
-    /// disconnected peers is less than MAX_DC_PEERS
-    pub fn shrink_to_fit(&mut self) {
-        // for caution, but the difference should never be > 1
-        while self.n_dc > MAX_DC_PEERS {
-            let to_drop = self
-                .peers
-                .iter()
-                .filter(|(_, info)| info.connection_status.is_disconnected())
-                .min_by_key(|(_, info)| info.reputation)
-                .map(|(id, _)| id.clone())
-                .unwrap(); // should be safe since n_dc > MAX_DC_PEERS > 0
-            self.peers.remove(&to_drop);
-            self.n_dc = self.n_dc.saturating_sub(1);
+        // Note that it could be the case we prevent new nodes from joining. In this instance,
+        // we don't bother tracking the new node.
+        if let Some(info) = self.peers.get_mut(peer_id) {
+            if !info.connection_status.is_disconnected() && !info.connection_status.is_banned() {
+                info.connection_status.disconnect();
+                self.disconnected_peers += 1;
+            }
+            self.shrink_to_fit();
         }
     }

-    /// Sets a peer as banned
+    /// Marks a peer as banned.
     pub fn ban(&mut self, peer_id: &PeerId) {
         let log_ref = &self.log;
         let info = self.peers.entry(peer_id.clone()).or_insert_with(|| {
             warn!(log_ref, "Banning unknown peer";
                   "peer_id" => peer_id.to_string());
             PeerInfo::default()
         });

         if info.connection_status.is_disconnected() {
-            self.n_dc = self.n_dc.saturating_sub(1);
+            self.disconnected_peers = self.disconnected_peers.saturating_sub(1);
         }
+        if !info.connection_status.is_banned() {
+            info.connection_status.ban();
+            self.banned_peers += 1;
+        }
+        self.shrink_to_fit();
+    }
+
+    /// Unbans a peer.
+    pub fn unban(&mut self, peer_id: &PeerId) {
+        let log_ref = &self.log;
+        let info = self.peers.entry(peer_id.clone()).or_insert_with(|| {
+            warn!(log_ref, "Unbanning unknown peer";
+                  "peer_id" => peer_id.to_string());
+            PeerInfo::default()
+        });
+
+        if info.connection_status.is_banned() {
+            info.connection_status.unban();
+            self.banned_peers = self.banned_peers.saturating_sub(1);
+        }
+        self.shrink_to_fit();
+    }
+
+    /// Removes banned and disconnected peers from the DB if we have reached any of our limits.
-    /// Drops the peers with the lowest reputation so that the number of
-    /// disconnected peers is less than MAX_DC_PEERS
     pub fn shrink_to_fit(&mut self) {
+        // Remove excess banned peers
+        while self.banned_peers > MAX_BANNED_PEERS {
+            if let Some(to_drop) = self
+                .peers
+                .iter()
+                .filter(|(_, info)| info.connection_status.is_banned())
+                .min_by(|(_, info_a), (_, info_b)| {
+                    info_a
+                        .score
+                        .partial_cmp(&info_b.score)
+                        .unwrap_or(std::cmp::Ordering::Equal)
+                })
+                .map(|(id, _)| id.clone())
+            {
+                debug!(self.log, "Removing old banned peer"; "peer_id" => to_drop.to_string());
+                self.peers.remove(&to_drop);
+            }
+            // If there is no minimum, this is a coding error. For safety we decrease
+            // the count to avoid a potential infinite loop.
+            self.banned_peers = self.banned_peers.saturating_sub(1);
+        }
+
+        // Remove excess disconnected peers
+        while self.disconnected_peers > MAX_DC_PEERS {
+            if let Some(to_drop) = self
+                .peers
+                .iter()
+                .filter(|(_, info)| info.connection_status.is_disconnected())
+                .min_by(|(_, info_a), (_, info_b)| {
+                    info_a
+                        .score
+                        .partial_cmp(&info_b.score)
+                        .unwrap_or(std::cmp::Ordering::Equal)
+                })
+                .map(|(id, _)| id.clone())
+            {
+                debug!(self.log, "Removing old disconnected peer"; "peer_id" => to_drop.to_string());
+                self.peers.remove(&to_drop);
+            }
+            // If there is no minimum, this is a coding error. For safety we decrease
+            // the count to avoid a potential infinite loop.
+            self.disconnected_peers = self.disconnected_peers.saturating_sub(1);
+        }
-        debug!(self.log, "Peer banned"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
-        info.connection_status.ban();
     }

     /// Add the meta data of a peer.
@@ -315,16 +409,6 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
         }
     }

-    /// Sets the reputation of a peer.
-    #[allow(dead_code)]
-    pub(super) fn set_reputation(&mut self, peer_id: &PeerId, rep: Rep) {
-        if let Some(peer_info) = self.peers.get_mut(peer_id) {
-            peer_info.reputation = rep;
-        } else {
-            crit!(self.log, "Tried to modify reputation for an unknown peer"; "peer_id" => peer_id.to_string());
-        }
-    }
-
     /// Sets the syncing status of a peer.
     pub fn set_sync_status(&mut self, peer_id: &PeerId, sync_status: PeerSyncStatus) {
         if let Some(peer_info) = self.peers.get_mut(peer_id) {
@@ -333,26 +417,6 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
             crit!(self.log, "Tried to set the sync status for an unknown peer"; "peer_id" => peer_id.to_string());
         }
     }

-    /// Adds to a peer's reputation by `change`. If the reputation exceeds Rep's
-    /// upper (lower) bounds, it stays at the maximum (minimum) value.
-    pub(super) fn add_reputation(&mut self, peer_id: &PeerId, change: RepChange) {
-        let log_ref = &self.log;
-        let info = match self.peers.entry(peer_id.clone()) {
-            Entry::Vacant(_) => {
-                warn!(log_ref, "Peer is unknown, no reputation change made";
-                    "peer_id" => peer_id.to_string());
-                return;
-            }
-            Entry::Occupied(e) => e.into_mut(),
-        };
-
-        info.reputation = if change.is_good {
-            info.reputation.saturating_add(change.diff)
-        } else {
-            info.reputation.saturating_sub(change.diff)
-        };
-    }
 }

#[cfg(test)]
@@ -374,8 +438,14 @@ mod tests {
     }
 }

+    fn add_score<TSpec: EthSpec>(db: &mut PeerDB<TSpec>, peer_id: &PeerId, score: f64) {
+        if let Some(info) = db.peer_info_mut(peer_id) {
+            info.score.add(score);
+        }
+    }
+
     fn get_db() -> PeerDB<M> {
-        let log = build_log(slog::Level::Debug, true);
+        let log = build_log(slog::Level::Debug, false);
         PeerDB::new(&log)
     }

@@ -398,9 +468,9 @@ mod tests {
         // this is the only peer
         assert_eq!(pdb.peers().count(), 1);
         // the peer has the default reputation
-        assert_eq!(pdb.reputation(&random_peer), DEFAULT_REPUTATION);
+        assert_eq!(pdb.score(&random_peer).score(), Score::default().score());
         // it should be connected, and therefore not counted as disconnected
-        assert_eq!(pdb.n_dc, 0);
+        assert_eq!(pdb.disconnected_peers, 0);
         assert!(peer_info.unwrap().connection_status.is_connected());
         assert_eq!(
             peer_info.unwrap().connection_status.connections(),
@@ -408,50 +478,6 @@ mod tests {
         );
     }

-    #[test]
-    fn test_set_reputation() {
-        let mut pdb = get_db();
-        let random_peer = PeerId::random();
-        pdb.connect_ingoing(&random_peer);
-
-        let mut rep = Rep::min_value();
-        pdb.set_reputation(&random_peer, rep);
-        assert_eq!(pdb.reputation(&random_peer), rep);
-
-        rep = Rep::max_value();
-        pdb.set_reputation(&random_peer, rep);
-        assert_eq!(pdb.reputation(&random_peer), rep);
-
-        rep = Rep::max_value() / 100;
-        pdb.set_reputation(&random_peer, rep);
-        assert_eq!(pdb.reputation(&random_peer), rep);
-    }
-
-    #[test]
-    fn test_reputation_change() {
-        let mut pdb = get_db();
-
-        // 0 change does not change the reputation
-        let random_peer = PeerId::random();
-        let change = RepChange::good(0);
-        pdb.connect_ingoing(&random_peer);
-        pdb.add_reputation(&random_peer, change);
-        assert_eq!(pdb.reputation(&random_peer), DEFAULT_REPUTATION);
-
-        // overflowing change is capped
-        let random_peer = PeerId::random();
-        let change = RepChange::worst();
-        pdb.connect_ingoing(&random_peer);
-        pdb.add_reputation(&random_peer, change);
-        assert_eq!(pdb.reputation(&random_peer), Rep::min_value());
-
-        let random_peer = PeerId::random();
-        let change = RepChange::good(Rep::max_value());
-        pdb.connect_ingoing(&random_peer);
-        pdb.add_reputation(&random_peer, change);
-        assert_eq!(pdb.reputation(&random_peer), Rep::max_value());
-    }
-
     #[test]
     fn test_disconnected_are_bounded() {
         let mut pdb = get_db();
@@ -460,13 +486,30 @@ mod tests {
             let p = PeerId::random();
             pdb.connect_ingoing(&p);
         }
-        assert_eq!(pdb.n_dc, 0);
+        assert_eq!(pdb.disconnected_peers, 0);

         for p in pdb.connected_peer_ids().cloned().collect::<Vec<_>>() {
             pdb.disconnect(&p);
         }

-        assert_eq!(pdb.n_dc, MAX_DC_PEERS);
+        assert_eq!(pdb.disconnected_peers, MAX_DC_PEERS);
     }

+    #[test]
+    fn test_banned_are_bounded() {
+        let mut pdb = get_db();
+
+        for _ in 0..MAX_BANNED_PEERS + 1 {
+            let p = PeerId::random();
+            pdb.connect_ingoing(&p);
+        }
+        assert_eq!(pdb.banned_peers, 0);
+
+        for p in pdb.connected_peer_ids().cloned().collect::<Vec<_>>() {
+            pdb.ban(&p);
+        }
+
+        assert_eq!(pdb.banned_peers, MAX_BANNED_PEERS);
+    }

     #[test]
@@ -479,14 +522,16 @@ mod tests {
         pdb.connect_ingoing(&p0);
         pdb.connect_ingoing(&p1);
         pdb.connect_ingoing(&p2);
-        pdb.set_reputation(&p0, 70);
-        pdb.set_reputation(&p1, 100);
-        pdb.set_reputation(&p2, 50);
+        add_score(&mut pdb, &p0, 70.0);
+        add_score(&mut pdb, &p1, 100.0);
+        add_score(&mut pdb, &p2, 50.0);

-        let best_peers = pdb.best_peers_by_status(PeerConnectionStatus::is_connected);
-        assert!(vec![&p1, &p0, &p2]
-            .into_iter()
-            .eq(best_peers.into_iter().map(|p| p.0)));
+        let best_peers: Vec<&PeerId> = pdb
+            .best_peers_by_status(PeerConnectionStatus::is_connected)
+            .iter()
+            .map(|p| p.0)
+            .collect();
+        assert_eq!(vec![&p1, &p0, &p2], best_peers);
     }

     #[test]
@@ -499,15 +544,15 @@ mod tests {
         pdb.connect_ingoing(&p0);
         pdb.connect_ingoing(&p1);
         pdb.connect_ingoing(&p2);
-        pdb.set_reputation(&p0, 70);
-        pdb.set_reputation(&p1, 100);
-        pdb.set_reputation(&p2, 50);
+        add_score(&mut pdb, &p0, 70.0);
+        add_score(&mut pdb, &p1, 100.0);
+        add_score(&mut pdb, &p2, 50.0);

         let the_best = pdb.best_by_status(PeerConnectionStatus::is_connected);
         assert!(the_best.is_some());
         // Consistency check
         let best_peers = pdb.best_peers_by_status(PeerConnectionStatus::is_connected);
-        assert_eq!(the_best, best_peers.into_iter().map(|p| p.0).next());
+        assert_eq!(the_best, best_peers.iter().next().map(|p| p.0));
     }

     #[test]
@@ -517,26 +562,86 @@ mod tests {
         let random_peer = PeerId::random();

         pdb.connect_ingoing(&random_peer);
-        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        dbg!("1");

         pdb.connect_ingoing(&random_peer);
-        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        dbg!("1");
         pdb.disconnect(&random_peer);
-        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        dbg!("1");

         pdb.connect_outgoing(&random_peer);
-        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        dbg!("1");
         pdb.disconnect(&random_peer);
-        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        dbg!("1");

         pdb.ban(&random_peer);
-        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        dbg!("1");
         pdb.disconnect(&random_peer);
-        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        dbg!("1");

         pdb.disconnect(&random_peer);
-        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        dbg!("1");
        pdb.disconnect(&random_peer);
-        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        dbg!("1");
     }

+    #[test]
+    fn test_disconnected_ban_consistency() {
+        let mut pdb = get_db();
+
+        let random_peer = PeerId::random();
+        let random_peer1 = PeerId::random();
+        let random_peer2 = PeerId::random();
+        let random_peer3 = PeerId::random();
+
+        pdb.connect_ingoing(&random_peer);
+        pdb.connect_ingoing(&random_peer1);
+        pdb.connect_ingoing(&random_peer2);
+        pdb.connect_ingoing(&random_peer3);
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        assert_eq!(pdb.banned_peers, pdb.banned_peers().count());
+
+        pdb.connect_ingoing(&random_peer);
+        pdb.disconnect(&random_peer1);
+        pdb.ban(&random_peer2);
+        pdb.connect_ingoing(&random_peer3);
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        assert_eq!(pdb.banned_peers, pdb.banned_peers().count());
+        pdb.ban(&random_peer1);
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        assert_eq!(pdb.banned_peers, pdb.banned_peers().count());
+
+        pdb.connect_outgoing(&random_peer2);
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        assert_eq!(pdb.banned_peers, pdb.banned_peers().count());
+        pdb.ban(&random_peer3);
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        assert_eq!(pdb.banned_peers, pdb.banned_peers().count());
+
+        pdb.ban(&random_peer3);
+        pdb.connect_ingoing(&random_peer1);
+        pdb.disconnect(&random_peer2);
+        pdb.ban(&random_peer3);
+        pdb.connect_ingoing(&random_peer);
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        assert_eq!(pdb.banned_peers, pdb.banned_peers().count());
+        pdb.disconnect(&random_peer);
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        assert_eq!(pdb.banned_peers, pdb.banned_peers().count());
+
+        pdb.disconnect(&random_peer);
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+        assert_eq!(pdb.banned_peers, pdb.banned_peers().count());
+        pdb.ban(&random_peer);
+        assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
+    }
 }

beacon_node/eth2_libp2p/src/peer_manager/score.rs (new file, 253 lines)
@@ -0,0 +1,253 @@
//! This contains the scoring logic for peers.
//!
//! A peer's score is a rational number in the range [-100, 100].
//!
//! As the logic develops this documentation will advance.
//!
//! The scoring algorithms are currently experimental.
use serde::Serialize;
use std::time::Instant;

lazy_static! {
    static ref HALFLIFE_DECAY: f64 = -(2.0f64.ln()) / SCORE_HALFLIFE;
}

/// The default score for new peers.
pub(crate) const DEFAULT_SCORE: f64 = 0.0;
/// The minimum reputation before a peer is disconnected.
const MIN_SCORE_BEFORE_DISCONNECT: f64 = -20.0;
/// The minimum reputation before a peer is banned.
const MIN_SCORE_BEFORE_BAN: f64 = -50.0;
/// The maximum score a peer can obtain.
const MAX_SCORE: f64 = 100.0;
/// The minimum score a peer can obtain.
const MIN_SCORE: f64 = -100.0;
/// The halflife of a peer's score, i.e. the number of seconds it takes for the score to decay to half its value.
const SCORE_HALFLIFE: f64 = 600.0;
/// The number of seconds we ban a peer for before their score begins to decay.
const BANNED_BEFORE_DECAY: u64 = 1800;

/// A collection of actions a peer can perform which will adjust its score.
/// Each variant has an associated score change.
// To easily assess the behaviour of score changes, the number of variants should stay low and
// somewhat generic.
#[derive(Debug, Clone, Copy)]
pub enum PeerAction {
    /// We should not communicate more with this peer.
    /// This action will cause the peer to get banned.
    Fatal,
    /// This peer's action is not malicious but will not be tolerated. A few occurrences will cause
    /// the peer to get kicked.
    /// NOTE: ~5 occurrences will get the peer banned
    LowToleranceError,
    /// An error occurred with this peer but it is not necessarily malicious.
    /// We have high tolerance for these actions: several occurrences are needed for a peer to get
    /// kicked.
    /// NOTE: ~10 occurrences will get the peer banned
    MidToleranceError,
    /// An error occurred with this peer but it is not necessarily malicious.
    /// We have high tolerance for these actions: several occurrences are needed for a peer to get
    /// kicked.
    /// NOTE: ~15 occurrences will get the peer banned
    HighToleranceError,
    /// Received an expected message.
    _ValidMessage,
}

/// The expected state of the peer given the peer's score.
#[derive(Debug, PartialEq)]
pub(crate) enum ScoreState {
    /// We are content with the peer's performance. We permit connections and messages.
    Healthy,
    /// The peer should be disconnected. We allow re-connections if the peer is persistent.
    Disconnected,
    /// The peer is banned. We disallow new connections until its score has decayed into a
    /// tolerable threshold.
    Banned,
}

/// A peer's score (perceived potential usefulness).
///
/// This simplistic version consists of a global score per peer which decays to 0 over time. The
/// decay rate applies equally to positive and negative scores.
#[derive(Copy, PartialEq, Clone, Debug, Serialize)]
pub struct Score {
    /// The global score.
    // NOTE: In the future we may separate this into sub-scores involving the RPC, Gossipsub and
    // lighthouse.
    score: f64,
    /// The time the score was last updated to perform time-based adjustments such as score-decay.
    #[serde(skip)]
    last_updated: Instant,
}

impl Default for Score {
    fn default() -> Self {
        Score {
            score: DEFAULT_SCORE,
            last_updated: Instant::now(),
        }
    }
}

impl Eq for Score {}

impl PartialOrd for Score {
    fn partial_cmp(&self, other: &Score) -> Option<std::cmp::Ordering> {
        self.score
            .partial_cmp(&other.score)
            .or_else(|| self.last_updated.partial_cmp(&other.last_updated))
    }
}

impl Ord for Score {
    fn cmp(&self, other: &Score) -> std::cmp::Ordering {
        self.partial_cmp(other)
            .unwrap_or_else(|| std::cmp::Ordering::Equal)
    }
}
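
// A note on the ordering above: if either `score` is NaN, the f64 `partial_cmp` yields `None`
// and the comparison falls back to `last_updated` (whose `partial_cmp` is always `Some`);
// `cmp` additionally guards with `Equal`. So a `Vec<Score>` can be sorted safely, e.g.
// (illustrative sketch only):
#[allow(dead_code)]
fn sorted(mut scores: Vec<Score>) -> Vec<Score> {
    scores.sort(); // ascending, worst score first
    scores
}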

impl From<f64> for Score {
    fn from(f: f64) -> Self {
        Score {
            score: f,
            last_updated: Instant::now(),
        }
    }
}

impl std::fmt::Display for Score {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:.2}", self.score)
    }
}

impl std::fmt::Display for PeerAction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            PeerAction::Fatal => write!(f, "Fatal"),
            PeerAction::LowToleranceError => write!(f, "Low Tolerance Error"),
            PeerAction::MidToleranceError => write!(f, "Mid Tolerance Error"),
            PeerAction::HighToleranceError => write!(f, "High Tolerance Error"),
            PeerAction::_ValidMessage => write!(f, "Valid Message"),
        }
    }
}

impl std::fmt::Display for ScoreState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ScoreState::Healthy => write!(f, "Healthy"),
            ScoreState::Banned => write!(f, "Banned"),
            ScoreState::Disconnected => write!(f, "Disconnected"),
        }
    }
}

impl Score {
    /// Access to the underlying score.
    pub fn score(&self) -> f64 {
        self.score
    }

    /// Modifies the score based on a peer's action.
    pub fn apply_peer_action(&mut self, peer_action: PeerAction) {
        match peer_action {
            PeerAction::Fatal => self.score = MIN_SCORE, // The worst possible score
            PeerAction::LowToleranceError => self.add(-10.0),
            PeerAction::MidToleranceError => self.add(-5.0),
            PeerAction::HighToleranceError => self.add(-1.0),
            PeerAction::_ValidMessage => self.add(0.1),
        }
    }

    /// Returns the expected state of the peer given its score.
    pub(crate) fn state(&self) -> ScoreState {
        match self.score {
            x if x <= MIN_SCORE_BEFORE_BAN => ScoreState::Banned,
            x if x <= MIN_SCORE_BEFORE_DISCONNECT => ScoreState::Disconnected,
            _ => ScoreState::Healthy,
        }
    }

    /// Add an f64 to the score, abiding by the limits.
    pub fn add(&mut self, score: f64) {
        let mut new_score = self.score + score;
        if new_score > MAX_SCORE {
            new_score = MAX_SCORE;
        }
        if new_score < MIN_SCORE {
            new_score = MIN_SCORE;
        }

        self.score = new_score;
    }

    /// Applies time-based logic such as decay rates to the score.
    /// This function should be called periodically.
    pub fn update(&mut self) {
        // Apply decay logic.
        //
        // There are two distinct decay processes: one for banned peers and one for all others.
        // If the score is below the banning threshold and the duration since it was last updated
        // is shorter than the ban delay (`BANNED_BEFORE_DECAY`), we do nothing.
        let now = Instant::now();
        if self.score <= MIN_SCORE_BEFORE_BAN
            && now
                .checked_duration_since(self.last_updated)
                .map(|d| d.as_secs())
                <= Some(BANNED_BEFORE_DECAY)
        {
            // The peer is banned and still within the ban timeout. Do not update its score.
            // Update last_updated so that the decay begins correctly when ready.
            self.last_updated = now;
            return;
        }

        // Decay the current score.
        // Uses exponential decay based on a constant half life.
        if let Some(secs_since_update) = now
            .checked_duration_since(self.last_updated)
            .map(|d| d.as_secs())
        {
            // e^(-ln(2)/HL*t)
            let decay_factor = (*HALFLIFE_DECAY * secs_since_update as f64).exp();
            self.score *= decay_factor;
            self.last_updated = now;
        }
    }
}
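
// Decay sanity check (an illustrative sketch, not one of the module's tests): after exactly
// one halflife `t = SCORE_HALFLIFE`, the multiplicative factor e^(HALFLIFE_DECAY * t) is 1/2.
#[allow(dead_code)]
fn halflife_factor_is_half() {
    let factor = (*HALFLIFE_DECAY * SCORE_HALFLIFE).exp();
    assert!((factor - 0.5).abs() < 1e-12);
}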

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_reputation_change() {
        let mut score = Score::default();

        // 0 change does not change the reputation
        let change = 0.0;
        score.add(change);
        assert_eq!(score.score(), DEFAULT_SCORE);

        // underflowing change is capped
        let mut score = Score::default();
        let change = MIN_SCORE - 50.0;
        score.add(change);
        assert_eq!(score.score(), MIN_SCORE);

        // overflowing change is capped
        let mut score = Score::default();
        let change = MAX_SCORE + 50.0;
        score.add(change);
        assert_eq!(score.score(), MAX_SCORE);

        // Score adjusts
        let mut score = Score::default();
        let change = 1.32;
        score.add(change);
        assert_eq!(score.score(), DEFAULT_SCORE + change);
    }
}
@@ -1,6 +1,6 @@
 //! This handles the various supported encoding mechanism for the Eth 2.0 RPC.

-use crate::rpc::{ErrorMessage, RPCCodedResponse, RPCRequest, RPCResponse};
+use crate::rpc::{RPCCodedResponse, RPCRequest, RPCResponse};
 use libp2p::bytes::BufMut;
 use libp2p::bytes::BytesMut;
 use std::marker::PhantomData;
@@ -130,8 +130,8 @@ where
 impl<TCodec, TSpec> Decoder for BaseOutboundCodec<TCodec, TSpec>
 where
     TSpec: EthSpec,
-    TCodec: OutboundCodec<RPCRequest<TSpec>, ErrorType = ErrorMessage>
-        + Decoder<Item = RPCResponse<TSpec>>,
+    TCodec:
+        OutboundCodec<RPCRequest<TSpec>, ErrorType = String> + Decoder<Item = RPCResponse<TSpec>>,
 {
     type Item = RPCCodedResponse<TSpec>;
     type Error = <TCodec as Decoder>::Error;
@@ -174,7 +174,6 @@ where

 #[cfg(test)]
 mod tests {
-    use super::super::ssz::*;
     use super::super::ssz_snappy::*;
     use super::*;
     use crate::rpc::protocol::*;
@@ -189,29 +188,22 @@ mod tests {

     let snappy_protocol_id =
         ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy);
-    let ssz_protocol_id = ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZ);

     let mut snappy_outbound_codec =
         SSZSnappyOutboundCodec::<Spec>::new(snappy_protocol_id, 1_048_576);
-    let mut ssz_outbound_codec = SSZOutboundCodec::<Spec>::new(ssz_protocol_id, 1_048_576);

     // decode message just as a snappy message
     let snappy_decoded_message = snappy_outbound_codec.decode(&mut buf.clone());
-    // decode message just as a ssz message
-    let ssz_decoded_message = ssz_outbound_codec.decode(&mut buf.clone());

     // build codecs for entire chunk
     let mut snappy_base_outbound_codec = BaseOutboundCodec::new(snappy_outbound_codec);
-    let mut ssz_base_outbound_codec = BaseOutboundCodec::new(ssz_outbound_codec);

     // decode message as a ssz snappy chunk
     let snappy_decoded_chunk = snappy_base_outbound_codec.decode(&mut buf.clone());
-    // decode message just as a ssz chunk
-    let ssz_decoded_chunk = ssz_base_outbound_codec.decode(&mut buf.clone());

     let _ = dbg!(snappy_decoded_message);
-    let _ = dbg!(ssz_decoded_message);
     let _ = dbg!(snappy_decoded_chunk);
-    let _ = dbg!(ssz_decoded_chunk);
     }
 }
@@ -1,9 +1,7 @@
 pub(crate) mod base;
-pub(crate) mod ssz;
 pub(crate) mod ssz_snappy;

 use self::base::{BaseInboundCodec, BaseOutboundCodec};
-use self::ssz::{SSZInboundCodec, SSZOutboundCodec};
 use self::ssz_snappy::{SSZSnappyInboundCodec, SSZSnappyOutboundCodec};
 use crate::rpc::protocol::RPCError;
 use crate::rpc::{RPCCodedResponse, RPCRequest};
@@ -14,12 +12,10 @@ use types::EthSpec;
 // Known types of codecs
 pub enum InboundCodec<TSpec: EthSpec> {
     SSZSnappy(BaseInboundCodec<SSZSnappyInboundCodec<TSpec>, TSpec>),
-    SSZ(BaseInboundCodec<SSZInboundCodec<TSpec>, TSpec>),
 }

 pub enum OutboundCodec<TSpec: EthSpec> {
     SSZSnappy(BaseOutboundCodec<SSZSnappyOutboundCodec<TSpec>, TSpec>),
-    SSZ(BaseOutboundCodec<SSZOutboundCodec<TSpec>, TSpec>),
 }

 impl<T: EthSpec> Encoder<RPCCodedResponse<T>> for InboundCodec<T> {
@@ -27,7 +23,6 @@ impl<T: EthSpec> Encoder<RPCCodedResponse<T>> for InboundCodec<T> {

     fn encode(&mut self, item: RPCCodedResponse<T>, dst: &mut BytesMut) -> Result<(), Self::Error> {
         match self {
-            InboundCodec::SSZ(codec) => codec.encode(item, dst),
             InboundCodec::SSZSnappy(codec) => codec.encode(item, dst),
         }
     }
@@ -39,7 +34,6 @@ impl<TSpec: EthSpec> Decoder for InboundCodec<TSpec> {

     fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
         match self {
-            InboundCodec::SSZ(codec) => codec.decode(src),
             InboundCodec::SSZSnappy(codec) => codec.decode(src),
         }
     }
@@ -50,7 +44,6 @@ impl<TSpec: EthSpec> Encoder<RPCRequest<TSpec>> for OutboundCodec<TSpec> {

     fn encode(&mut self, item: RPCRequest<TSpec>, dst: &mut BytesMut) -> Result<(), Self::Error> {
         match self {
-            OutboundCodec::SSZ(codec) => codec.encode(item, dst),
             OutboundCodec::SSZSnappy(codec) => codec.encode(item, dst),
         }
     }
@@ -62,7 +55,6 @@ impl<T: EthSpec> Decoder for OutboundCodec<T> {

     fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
         match self {
-            OutboundCodec::SSZ(codec) => codec.decode(src),
             OutboundCodec::SSZSnappy(codec) => codec.decode(src),
         }
     }
@@ -1,13 +1,17 @@
|
||||
use crate::rpc::methods::*;
|
||||
use crate::rpc::{
|
||||
codec::base::OutboundCodec,
|
||||
protocol::{Encoding, Protocol, ProtocolId, RPCError, Version},
|
||||
protocol::{
|
||||
Encoding, Protocol, ProtocolId, RPCError, Version, BLOCKS_BY_ROOT_REQUEST_MAX,
|
||||
BLOCKS_BY_ROOT_REQUEST_MIN, SIGNED_BEACON_BLOCK_MAX, SIGNED_BEACON_BLOCK_MIN,
|
||||
},
|
||||
};
|
||||
use crate::rpc::{ErrorMessage, RPCCodedResponse, RPCRequest, RPCResponse};
|
||||
use crate::rpc::{RPCCodedResponse, RPCRequest, RPCResponse};
|
||||
use libp2p::bytes::BytesMut;
|
||||
use snap::read::FrameDecoder;
|
||||
use snap::write::FrameEncoder;
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_types::VariableList;
|
||||
use std::io::Cursor;
|
||||
use std::io::ErrorKind;
|
||||
use std::io::{Read, Write};
|
||||
@@ -60,9 +64,7 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZSnappyInboundCodec<
|
||||
RPCResponse::Pong(res) => res.data.as_ssz_bytes(),
|
||||
RPCResponse::MetaData(res) => res.as_ssz_bytes(),
|
||||
},
|
||||
RPCCodedResponse::InvalidRequest(err) => err.as_ssz_bytes(),
|
||||
            RPCCodedResponse::ServerError(err) => err.as_ssz_bytes(),
            RPCCodedResponse::Unknown(err) => err.as_ssz_bytes(),
            RPCCodedResponse::Error(_, err) => err.as_ssz_bytes(),
            RPCCodedResponse::StreamTermination(_) => {
                unreachable!("Code error - attempting to encode a stream termination")
            }
@@ -70,7 +72,7 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZSnappyInboundCodec<
        // SSZ encoded bytes should be within `max_packet_size`
        if bytes.len() > self.max_packet_size {
            return Err(RPCError::InternalError(
                "attempting to encode data > max_packet_size".into(),
                "attempting to encode data > max_packet_size",
            ));
        }
        // Inserts the length prefix of the uncompressed bytes into dst
@@ -122,33 +124,67 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
        let _read_bytes = src.split_to(n as usize);
        match self.protocol.message_name {
            Protocol::Status => match self.protocol.version {
                Version::V1 => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes(
                    &decoded_buffer,
                )?))),
                Version::V1 => {
                    if decoded_buffer.len() == <StatusMessage as Encode>::ssz_fixed_len() {
                        Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes(
                            &decoded_buffer,
                        )?)))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
            Protocol::Goodbye => match self.protocol.version {
                Version::V1 => Ok(Some(RPCRequest::Goodbye(
                    GoodbyeReason::from_ssz_bytes(&decoded_buffer)?,
                ))),
                Version::V1 => {
                    if decoded_buffer.len() == <GoodbyeReason as Encode>::ssz_fixed_len() {
                        Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(
                            &decoded_buffer,
                        )?)))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
            Protocol::BlocksByRange => match self.protocol.version {
                Version::V1 => Ok(Some(RPCRequest::BlocksByRange(
                    BlocksByRangeRequest::from_ssz_bytes(&decoded_buffer)?,
                ))),
                Version::V1 => {
                    if decoded_buffer.len()
                        == <BlocksByRangeRequest as Encode>::ssz_fixed_len()
                    {
                        Ok(Some(RPCRequest::BlocksByRange(
                            BlocksByRangeRequest::from_ssz_bytes(&decoded_buffer)?,
                        )))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
            Protocol::BlocksByRoot => match self.protocol.version {
                Version::V1 => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest {
                    block_roots: Vec::from_ssz_bytes(&decoded_buffer)?,
                }))),
                Version::V1 => {
                    if decoded_buffer.len() >= *BLOCKS_BY_ROOT_REQUEST_MIN
                        && decoded_buffer.len() <= *BLOCKS_BY_ROOT_REQUEST_MAX
                    {
                        Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest {
                            block_roots: VariableList::from_ssz_bytes(&decoded_buffer)?,
                        })))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
            Protocol::Ping => match self.protocol.version {
                Version::V1 => Ok(Some(RPCRequest::Ping(Ping::from_ssz_bytes(
                    &decoded_buffer,
                )?))),
                Version::V1 => {
                    if decoded_buffer.len() == <Ping as Encode>::ssz_fixed_len() {
                        Ok(Some(RPCRequest::Ping(Ping {
                            data: u64::from_ssz_bytes(&decoded_buffer)?,
                        })))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
            Protocol::MetaData => match self.protocol.version {
                Version::V1 => {
                    if decoded_buffer.len() > 0 {
                    if !decoded_buffer.is_empty() {
                        Err(RPCError::InvalidData)
                    } else {
                        Ok(Some(RPCRequest::MetaData(PhantomData)))
@@ -160,10 +196,8 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
            Err(e) => match e.kind() {
                // Haven't received enough bytes to decode yet
                // TODO: check if this is the only Error variant where we return `Ok(None)`
                ErrorKind::UnexpectedEof => {
                    return Ok(None);
                }
                _ => return Err(e).map_err(RPCError::from),
                ErrorKind::UnexpectedEof => Ok(None),
                _ => Err(e).map_err(RPCError::from),
            },
        }
    }
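
All of the new `V1` arms above share one guard: the decompressed buffer's length is compared against the type's known SSZ length before `from_ssz_bytes` runs, and a mismatch is rejected as `InvalidData` rather than being left for the decoder to trip over. A minimal std-only sketch of that guard, where the 84-byte `StatusMessage` size and the error enum are stand-ins assumed for illustration:

// Stand-ins for the Lighthouse types; the real check uses
// `<StatusMessage as Encode>::ssz_fixed_len()` from the ssz crate.
#[derive(Debug, PartialEq)]
enum RpcError {
    InvalidData,
}

// Assumed fixed SSZ size of a Status message: fork_digest (4) +
// finalized_root (32) + finalized_epoch (8) + head_root (32) + head_slot (8).
const STATUS_SSZ_FIXED_LEN: usize = 84;

fn validate_status_len(decoded_buffer: &[u8]) -> Result<(), RpcError> {
    // Reject payloads whose length cannot possibly be a valid encoding
    // before handing the bytes to the SSZ decoder.
    if decoded_buffer.len() == STATUS_SSZ_FIXED_LEN {
        Ok(())
    } else {
        Err(RpcError::InvalidData)
    }
}

fn main() {
    assert_eq!(validate_status_len(&[0u8; 84]), Ok(()));
    assert_eq!(validate_status_len(&[0u8; 83]), Err(RpcError::InvalidData));
}
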
@@ -267,50 +301,80 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
        let _read_bytes = src.split_to(n as usize);
        match self.protocol.message_name {
            Protocol::Status => match self.protocol.version {
                Version::V1 => Ok(Some(RPCResponse::Status(
                    StatusMessage::from_ssz_bytes(&decoded_buffer)?,
                ))),
                Version::V1 => {
                    if decoded_buffer.len() == <StatusMessage as Encode>::ssz_fixed_len() {
                        Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes(
                            &decoded_buffer,
                        )?)))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
            Protocol::Goodbye => {
                // Goodbye does not have a response
                Err(RPCError::InvalidData)
            }
            Protocol::Goodbye => Err(RPCError::InvalidData),
            Protocol::BlocksByRange => match self.protocol.version {
                Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new(
                    SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
                )))),
                Version::V1 => {
                    if decoded_buffer.len() >= *SIGNED_BEACON_BLOCK_MIN
                        && decoded_buffer.len() <= *SIGNED_BEACON_BLOCK_MAX
                    {
                        Ok(Some(RPCResponse::BlocksByRange(Box::new(
                            SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
                        ))))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
            Protocol::BlocksByRoot => match self.protocol.version {
                Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
                    SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
                )))),
                Version::V1 => {
                    if decoded_buffer.len() >= *SIGNED_BEACON_BLOCK_MIN
                        && decoded_buffer.len() <= *SIGNED_BEACON_BLOCK_MAX
                    {
                        Ok(Some(RPCResponse::BlocksByRoot(Box::new(
                            SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
                        ))))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
            Protocol::Ping => match self.protocol.version {
                Version::V1 => Ok(Some(RPCResponse::Pong(Ping {
                    data: u64::from_ssz_bytes(&decoded_buffer)?,
                }))),
                Version::V1 => {
                    if decoded_buffer.len() == <Ping as Encode>::ssz_fixed_len() {
                        Ok(Some(RPCResponse::Pong(Ping {
                            data: u64::from_ssz_bytes(&decoded_buffer)?,
                        })))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
            Protocol::MetaData => match self.protocol.version {
                Version::V1 => Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes(
                    &decoded_buffer,
                )?))),
                Version::V1 => {
                    if decoded_buffer.len() == <MetaData<TSpec> as Encode>::ssz_fixed_len()
                    {
                        Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes(
                            &decoded_buffer,
                        )?)))
                    } else {
                        Err(RPCError::InvalidData)
                    }
                }
            },
        }
    }
    Err(e) => match e.kind() {
        // Haven't received enough bytes to decode yet
        // TODO: check if this is the only Error variant where we return `Ok(None)`
        ErrorKind::UnexpectedEof => {
            return Ok(None);
        }
        _ => return Err(e).map_err(RPCError::from),
        ErrorKind::UnexpectedEof => Ok(None),
        _ => Err(e).map_err(RPCError::from),
    },
    }
    }
}

impl<TSpec: EthSpec> OutboundCodec<RPCRequest<TSpec>> for SSZSnappyOutboundCodec<TSpec> {
    type ErrorType = ErrorMessage;
    type ErrorType = String;

    fn decode_error(&mut self, src: &mut BytesMut) -> Result<Option<Self::ErrorType>, RPCError> {
        if self.len.is_none() {
@@ -337,15 +401,15 @@ impl<TSpec: EthSpec> OutboundCodec<RPCRequest<TSpec>> for SSZSnappyOutboundCodec
        let n = reader.get_ref().position();
        self.len = None;
        let _read_bytes = src.split_to(n as usize);
        Ok(Some(ErrorMessage::from_ssz_bytes(&decoded_buffer)?))
        Ok(Some(
            String::from_utf8_lossy(&<Vec<u8>>::from_ssz_bytes(&decoded_buffer)?).into(),
        ))
    }
    Err(e) => match e.kind() {
        // Haven't received enough bytes to decode yet
        // TODO: check if this is the only Error variant where we return `Ok(None)`
        ErrorKind::UnexpectedEof => {
            return Ok(None);
        }
        _ => return Err(e).map_err(RPCError::from),
        ErrorKind::UnexpectedEof => Ok(None),
        _ => Err(e).map_err(RPCError::from),
    },
    }
    }
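
`decode_error` now yields a plain `String` rather than the SSZ `ErrorMessage` container, and does so leniently: the bytes are decoded as an SSZ `Vec<u8>` and then converted with `from_utf8_lossy`, so a peer sending non-UTF-8 bytes cannot make error decoding itself fail. A std-only sketch of just the conversion step (the SSZ decode of the byte list is elided):

fn error_bytes_to_string(decoded: &[u8]) -> String {
    // `from_utf8_lossy` never errors: invalid sequences become U+FFFD,
    // so malformed peer input degrades to a readable placeholder.
    String::from_utf8_lossy(decoded).into_owned()
}

fn main() {
    assert_eq!(error_bytes_to_string(b"rate limited"), "rate limited");
    assert_eq!(error_bytes_to_string(&[0xff, 0xfe]), "\u{fffd}\u{fffd}");
}
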
951
beacon_node/eth2_libp2p/src/rpc/handler.rs
Normal file
@@ -0,0 +1,951 @@
#![allow(clippy::type_complexity)]
#![allow(clippy::cognitive_complexity)]

use super::methods::{RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination};
use super::protocol::{Protocol, RPCError, RPCProtocol, RPCRequest};
use super::{RPCReceived, RPCSend};
use crate::rpc::protocol::{InboundFramed, OutboundFramed};
use fnv::FnvHashMap;
use futures::prelude::*;
use libp2p::core::upgrade::{
    InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError,
};
use libp2p::swarm::protocols_handler::{
    KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
};
use libp2p::swarm::NegotiatedSubstream;
use slog::{crit, debug, warn};
use smallvec::SmallVec;
use std::{
    collections::hash_map::Entry,
    pin::Pin,
    task::{Context, Poll},
    time::Duration,
};
use tokio::time::{delay_queue, delay_until, Delay, DelayQueue, Instant as TInstant};
use types::EthSpec;

//TODO: Implement check_timeout() on the substream types

/// The time (in seconds) before a substream that is awaiting a response from the user times out.
pub const RESPONSE_TIMEOUT: u64 = 10;

/// The number of times to retry an outbound upgrade in the case of IO errors.
const IO_ERROR_RETRIES: u8 = 3;

/// Maximum time given to the handler to perform shutdown operations.
const SHUTDOWN_TIMEOUT_SECS: u8 = 15;

/// Identifier of inbound and outbound substreams from the handler's perspective.
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct SubstreamId(usize);

type InboundSubstream<TSpec> = InboundFramed<NegotiatedSubstream, TSpec>;

/// Output of the future handling the send of responses to a peer's request.
type InboundProcessingOutput<TSpec> = (
    InboundSubstream<TSpec>, /* substream */
    Vec<RPCError>,           /* Errors sending messages if any */
    bool,                    /* whether to remove the stream afterwards */
    u64,                     /* Chunks remaining to be sent after this processing finishes */
);

/// An error encountered by the handler.
pub enum HandlerErr {
    /// An error occurred for this peer's request. This can occur during protocol negotiation,
    /// message passing, or if the handler identifies that we are sending an error response to the peer.
    Inbound {
        /// Id of the peer's request for which an error occurred.
        id: SubstreamId,
        /// Information of the negotiated protocol.
        proto: Protocol,
        /// The error that occurred.
        error: RPCError,
    },
    /// An error occurred for this request. Such an error can occur during protocol negotiation,
    /// message passing, or if we successfully received a response from the peer, but this response
    /// indicates an error.
    Outbound {
        /// Application-given Id of the request for which an error occurred.
        id: RequestId,
        /// Information of the protocol.
        proto: Protocol,
        /// The error that occurred.
        error: RPCError,
    },
}
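
Both variants carry the failing protocol and the underlying `RPCError`; only the identifier differs (the handler-assigned `SubstreamId` for a peer's request, our application-level `RequestId` for our own). A hedged sketch of how a consumer might fan the two cases out, with simplified stand-ins for the real types:

// Simplified stand-ins; the real ids, Protocol and RPCError are richer.
type SubstreamId = usize;
type RequestId = usize;

#[derive(Debug)]
enum Proto {
    Status,
    Ping,
}

#[derive(Debug)]
enum RpcErr {
    StreamTimeout,
}

enum HandlerErr {
    Inbound { id: SubstreamId, proto: Proto, error: RpcErr },
    Outbound { id: RequestId, proto: Proto, error: RpcErr },
}

fn report(err: HandlerErr) {
    match err {
        // Inbound: we failed while serving a peer's request.
        HandlerErr::Inbound { id, proto, error } => {
            eprintln!("inbound substream {} ({:?}) failed: {:?}", id, proto, error)
        }
        // Outbound: one of our own requests failed or was answered with an error.
        HandlerErr::Outbound { id, proto, error } => {
            eprintln!("request {} ({:?}) failed: {:?}", id, proto, error)
        }
    }
}

fn main() {
    report(HandlerErr::Outbound { id: 3, proto: Proto::Status, error: RpcErr::StreamTimeout });
    report(HandlerErr::Inbound { id: 0, proto: Proto::Ping, error: RpcErr::StreamTimeout });
}
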

/// Implementation of `ProtocolsHandler` for the RPC protocol.
pub struct RPCHandler<TSpec>
where
    TSpec: EthSpec,
{
    /// The upgrade for inbound substreams.
    listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>>,

    /// Errors occurring on outbound and inbound connections queued for reporting back.
    pending_errors: Vec<HandlerErr>,

    /// Queue of events to produce in `poll()`.
    events_out: SmallVec<[RPCReceived<TSpec>; 4]>,

    /// Queue of outbound substreams to open.
    dial_queue: SmallVec<[(RequestId, RPCRequest<TSpec>); 4]>,

    /// Current number of concurrent outbound substreams being opened.
    dial_negotiated: u32,

    /// Current inbound substreams awaiting processing.
    inbound_substreams: FnvHashMap<SubstreamId, InboundInfo<TSpec>>,

    /// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout.
    inbound_substreams_delay: DelayQueue<SubstreamId>,

    /// Map of outbound substreams that need to be driven to completion.
    outbound_substreams: FnvHashMap<SubstreamId, OutboundInfo<TSpec>>,

    /// Outbound substream `DelayQueue` which keeps track of when an outbound substream will timeout.
    outbound_substreams_delay: DelayQueue<SubstreamId>,

    /// Sequential ID for waiting substreams. For inbound substreams, this is also the inbound request ID.
    current_inbound_substream_id: SubstreamId,

    /// Sequential ID for outbound substreams.
    current_outbound_substream_id: SubstreamId,

    /// Maximum number of concurrent outbound substreams being opened. Value is never modified.
    max_dial_negotiated: u32,

    /// Value to return from `connection_keep_alive`.
    keep_alive: KeepAlive,

    /// State of the handler.
    state: HandlerState,

    /// Try to negotiate the outbound upgrade a few times if there is an IO error before reporting the request as failed.
    /// This keeps track of the number of attempts.
    outbound_io_error_retries: u8,

    /// Logger for handling RPC streams
    log: slog::Logger,
}

enum HandlerState {
    /// The handler is active. All messages are sent and received.
    Active,
    /// The handler is shutting down.
    ///
    /// While in this state the handler rejects new requests but tries to finish existing ones.
    /// Once the timer expires, all messages are killed.
    ShuttingDown(Delay),
    /// The handler is deactivated. A goodbye has been sent and no more messages are sent or
    /// received.
    Deactivated,
}

/// Contains the information the handler keeps on established inbound substreams.
struct InboundInfo<TSpec: EthSpec> {
    /// State of the substream.
    state: InboundState<TSpec>,
    /// Responses queued for sending.
    pending_items: Vec<RPCCodedResponse<TSpec>>,
    /// Protocol of the original request we received from the peer.
    protocol: Protocol,
    /// Responses that the peer is still expecting from us.
    remaining_chunks: u64,
    /// Key to keep track of the substream's timeout via `self.inbound_substreams_delay`.
    delay_key: Option<delay_queue::Key>,
}

/// Contains the information the handler keeps on established outbound substreams.
struct OutboundInfo<TSpec: EthSpec> {
    /// State of the substream.
    state: OutboundSubstreamState<TSpec>,
    /// Key to keep track of the substream's timeout via `self.outbound_substreams_delay`.
    delay_key: delay_queue::Key,
    /// Info over the protocol this substream is handling.
    proto: Protocol,
    /// Number of chunks to be seen from the peer's response.
    // TODO: removing the option could allow closing the streams after the number of
    // expected responses is met for all protocols.
    remaining_chunks: Option<u64>,
    /// `RequestId` as given by the application that sent the request.
    req_id: RequestId,
}

/// State of an inbound substream connection.
enum InboundState<TSpec: EthSpec> {
    /// The underlying substream is not being used.
    Idle(InboundSubstream<TSpec>),
    /// The underlying substream is processing responses.
    Busy(Pin<Box<dyn Future<Output = InboundProcessingOutput<TSpec>> + Send>>),
    /// Temporary state during processing.
    Poisoned,
}

impl<TSpec: EthSpec> InboundState<TSpec> {
    /// Sends the given items over the underlying substream, if the state allows it, and returns the
    /// final state.
    fn send_items(
        self,
        pending_items: &mut Vec<RPCCodedResponse<TSpec>>,
        remaining_chunks: u64,
    ) -> Self {
        if let InboundState::Idle(substream) = self {
            // only send on Idle
            if !pending_items.is_empty() {
                // take the items that we need to send
                let to_send = std::mem::replace(pending_items, vec![]);
                let fut = process_inbound_substream(substream, remaining_chunks, to_send).boxed();
                InboundState::Busy(Box::pin(fut))
            } else {
                // nothing to do, keep waiting for responses
                InboundState::Idle(substream)
            }
        } else {
            self
        }
    }
}

/// State of an outbound substream. Either waiting for a response, or in the process of sending.
pub enum OutboundSubstreamState<TSpec: EthSpec> {
    /// A request has been sent, and we are awaiting a response. This future is driven in the
    /// handler because GOODBYE requests can be handled and responses dropped instantly.
    RequestPendingResponse {
        /// The framed negotiated substream.
        substream: Box<OutboundFramed<NegotiatedSubstream, TSpec>>,
        /// Keeps track of the actual request sent.
        request: RPCRequest<TSpec>,
    },
    /// Closing an outbound substream.
    Closing(Box<OutboundFramed<NegotiatedSubstream, TSpec>>),
    /// Temporary state during processing.
    Poisoned,
}

impl<TSpec> RPCHandler<TSpec>
where
    TSpec: EthSpec,
{
    pub fn new(listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>>, log: &slog::Logger) -> Self {
        RPCHandler {
            listen_protocol,
            pending_errors: Vec::new(),
            events_out: SmallVec::new(),
            dial_queue: SmallVec::new(),
            dial_negotiated: 0,
            inbound_substreams: FnvHashMap::default(),
            outbound_substreams: FnvHashMap::default(),
            inbound_substreams_delay: DelayQueue::new(),
            outbound_substreams_delay: DelayQueue::new(),
            current_inbound_substream_id: SubstreamId(0),
            current_outbound_substream_id: SubstreamId(0),
            state: HandlerState::Active,
            max_dial_negotiated: 8,
            keep_alive: KeepAlive::Yes,
            outbound_io_error_retries: 0,
            log: log.clone(),
        }
    }

    /// Returns a reference to the listen protocol configuration.
    ///
    /// > **Note**: If you modify the protocol, modifications will only apply to future inbound
    /// > substreams, not the ones already being negotiated.
    pub fn listen_protocol_ref(&self) -> &SubstreamProtocol<RPCProtocol<TSpec>> {
        &self.listen_protocol
    }

    /// Returns a mutable reference to the listen protocol configuration.
    ///
    /// > **Note**: If you modify the protocol, modifications will only apply to future inbound
    /// > substreams, not the ones already being negotiated.
    pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol<RPCProtocol<TSpec>> {
        &mut self.listen_protocol
    }

    /// Initiates the handler's shutdown process, sending an optional last message to the peer.
    pub fn shutdown(&mut self, final_msg: Option<(RequestId, RPCRequest<TSpec>)>) {
        if matches!(self.state, HandlerState::Active) {
            debug!(self.log, "Starting handler shutdown"; "unsent_queued_requests" => self.dial_queue.len());
            // we now drive to completion communications already dialed/established
            while let Some((id, req)) = self.dial_queue.pop() {
                self.pending_errors.push(HandlerErr::Outbound {
                    id,
                    proto: req.protocol(),
                    error: RPCError::HandlerRejected,
                })
            }

            // Queue our final message, if any
            if let Some((id, req)) = final_msg {
                self.dial_queue.push((id, req));
            }

            self.state = HandlerState::ShuttingDown(delay_until(
                TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64),
            ));
        }
        self.update_keep_alive();
    }

    /// Opens an outbound substream with a request.
    fn send_request(&mut self, id: RequestId, req: RPCRequest<TSpec>) {
        match self.state {
            HandlerState::Active => {
                self.dial_queue.push((id, req));
                self.update_keep_alive();
            }
            _ => {
                self.pending_errors.push(HandlerErr::Outbound {
                    id,
                    proto: req.protocol(),
                    error: RPCError::HandlerRejected,
                });
            }
        }
    }

    /// Sends a response to a peer's request.
    // NOTE: If the substream has closed due to inactivity, or the substream is in the
    // wrong state, a response will fail silently.
    fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse<TSpec>) {
        // check if the stream matching the response still exists
        let inbound_info = if let Some(info) = self.inbound_substreams.get_mut(&inbound_id) {
            info
        } else {
            warn!(self.log, "Stream has expired. Response not sent";
                "response" => response.to_string(), "id" => inbound_id);
            return;
        };

        // If the response we are sending is an error, report back for handling
        if let RPCCodedResponse::Error(ref code, ref reason) = response {
            let err = HandlerErr::Inbound {
                id: inbound_id,
                proto: inbound_info.protocol,
                error: RPCError::ErrorResponse(*code, reason.to_string()),
            };
            self.pending_errors.push(err);
        }

        if matches!(self.state, HandlerState::Deactivated) {
            // we no longer send responses after the handler is deactivated
            debug!(self.log, "Response not sent. Deactivated handler";
                "response" => response.to_string(), "id" => inbound_id);
            return;
        }
        inbound_info.pending_items.push(response);
    }

    /// Updates the `KeepAlive` returned by `connection_keep_alive`.
    ///
    /// The handler stays alive as long as there are inbound/outbound substreams established and no
    /// items dialing/to be dialed. Otherwise it is given a grace period of inactivity of
    /// `self.inactive_timeout`.
    fn update_keep_alive(&mut self) {
        // Check that we don't have outbound items pending for dialing, nor dialing, nor
        // established. Also check that there are no established inbound substreams.
        // Errors and events need to be reported back, so check those too.
        let should_shutdown = if let HandlerState::ShuttingDown(_) = self.state {
            self.dial_queue.is_empty()
                && self.outbound_substreams.is_empty()
                && self.inbound_substreams.is_empty()
                && self.pending_errors.is_empty()
                && self.events_out.is_empty()
                && self.dial_negotiated == 0
        } else {
            false
        };

        match self.keep_alive {
            KeepAlive::Yes if should_shutdown => self.keep_alive = KeepAlive::No,
            KeepAlive::Yes => {} // We continue being active
            KeepAlive::Until(_) if should_shutdown => self.keep_alive = KeepAlive::No, // Already deemed inactive
            KeepAlive::Until(_) => {
                // No longer idle
                self.keep_alive = KeepAlive::Yes;
            }
            KeepAlive::No => {} // currently not used
        }
    }
}

impl<TSpec> ProtocolsHandler for RPCHandler<TSpec>
where
    TSpec: EthSpec,
{
    type InEvent = RPCSend<TSpec>;
    type OutEvent = Result<RPCReceived<TSpec>, HandlerErr>;
    type Error = RPCError;
    type InboundProtocol = RPCProtocol<TSpec>;
    type OutboundProtocol = RPCRequest<TSpec>;
    type OutboundOpenInfo = (RequestId, RPCRequest<TSpec>); // Keep track of the id and the request

    fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol> {
        self.listen_protocol.clone()
    }

    fn inject_fully_negotiated_inbound(
        &mut self,
        substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
    ) {
        // only accept new peer requests when active
        if !matches!(self.state, HandlerState::Active) {
            return;
        }

        let (req, substream) = substream;
        let expected_responses = req.expected_responses();

        // store requests that expect responses
        if expected_responses > 0 {
            // Store the stream and tag the output.
            let delay_key = self.inbound_substreams_delay.insert(
                self.current_inbound_substream_id,
                Duration::from_secs(RESPONSE_TIMEOUT),
            );
            let awaiting_stream = InboundState::Idle(substream);
            self.inbound_substreams.insert(
                self.current_inbound_substream_id,
                InboundInfo {
                    state: awaiting_stream,
                    pending_items: vec![],
                    delay_key: Some(delay_key),
                    protocol: req.protocol(),
                    remaining_chunks: expected_responses,
                },
            );
        }

        self.events_out
            .push(RPCReceived::Request(self.current_inbound_substream_id, req));
        self.current_inbound_substream_id.0 += 1;

        self.update_keep_alive();
    }

    fn inject_fully_negotiated_outbound(
        &mut self,
        out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
        request_info: Self::OutboundOpenInfo,
    ) {
        self.dial_negotiated -= 1;
        let (id, request) = request_info;
        let proto = request.protocol();

        // accept outbound connections only if the handler is not deactivated
        if matches!(self.state, HandlerState::Deactivated) {
            self.pending_errors.push(HandlerErr::Outbound {
                id,
                proto,
                error: RPCError::HandlerRejected,
            });
            return;
        }

        // add the stream to substreams if we expect a response, otherwise drop the stream.
        let expected_responses = request.expected_responses();
        if expected_responses > 0 {
            // new outbound request. Store the stream and tag the output.
            let delay_key = self.outbound_substreams_delay.insert(
                self.current_outbound_substream_id,
                Duration::from_secs(RESPONSE_TIMEOUT),
            );
            let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
                substream: Box::new(out),
                request,
            };
            let expected_responses = if expected_responses > 1 {
                // Currently enforced only for multiple responses
                Some(expected_responses)
            } else {
                None
            };
            if self
                .outbound_substreams
                .insert(
                    self.current_outbound_substream_id,
                    OutboundInfo {
                        state: awaiting_stream,
                        delay_key,
                        proto,
                        remaining_chunks: expected_responses,
                        req_id: id,
                    },
                )
                .is_some()
            {
                crit!(self.log, "Duplicate outbound substream id"; "id" => format!("{:?}", self.current_outbound_substream_id));
            }
            self.current_outbound_substream_id.0 += 1;
        }

        self.update_keep_alive();
    }

    fn inject_event(&mut self, rpc_event: Self::InEvent) {
        match rpc_event {
            RPCSend::Request(id, req) => self.send_request(id, req),
            RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response),
        }
    }

    fn inject_dial_upgrade_error(
        &mut self,
        request_info: Self::OutboundOpenInfo,
        error: ProtocolsHandlerUpgrErr<
            <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
        >,
    ) {
        let (id, req) = request_info;
        if let ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(RPCError::IoError(_))) = error {
            self.outbound_io_error_retries += 1;
            if self.outbound_io_error_retries < IO_ERROR_RETRIES {
                self.send_request(id, req);
                return;
            }
        }

        // This dialing is now considered failed
        self.dial_negotiated -= 1;
        self.update_keep_alive();

        self.outbound_io_error_retries = 0;
        // map the error
        let error = match error {
            ProtocolsHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"),
            ProtocolsHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout,
            ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e,
            ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => {
                RPCError::UnsupportedProtocol
            }
            ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(
                NegotiationError::ProtocolError(e),
            )) => match e {
                ProtocolError::IoError(io_err) => RPCError::IoError(io_err.to_string()),
                ProtocolError::InvalidProtocol => {
                    RPCError::InternalError("Protocol was deemed invalid")
                }
                ProtocolError::InvalidMessage | ProtocolError::TooManyProtocols => {
                    // Peer is sending invalid data during the negotiation phase, not
                    // participating in the protocol
                    RPCError::InvalidData
                }
            },
        };
        self.pending_errors.push(HandlerErr::Outbound {
            id,
            proto: req.protocol(),
            error,
        });
    }

    fn connection_keep_alive(&self) -> KeepAlive {
        self.keep_alive
    }

    fn poll(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<
        ProtocolsHandlerEvent<
            Self::OutboundProtocol,
            Self::OutboundOpenInfo,
            Self::OutEvent,
            Self::Error,
        >,
    > {
        // report failures
        if !self.pending_errors.is_empty() {
            let err_info = self.pending_errors.remove(0);
            return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(err_info)));
        }

        // return any events that need to be reported
        if !self.events_out.is_empty() {
            return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(self.events_out.remove(0))));
        } else {
            self.events_out.shrink_to_fit();
        }

        // Check if we are shutting down, and if the timer ran out
        if let HandlerState::ShuttingDown(delay) = &self.state {
            if delay.is_elapsed() {
                self.state = HandlerState::Deactivated;
                debug!(self.log, "Handler deactivated");
            }
        }

        // purge expired inbound substreams and send an error
        loop {
            match self.inbound_substreams_delay.poll_next_unpin(cx) {
                Poll::Ready(Some(Ok(inbound_id))) => {
                    // handle a stream timeout for various states
                    if let Some(info) = self.inbound_substreams.get_mut(inbound_id.get_ref()) {
                        // the delay has been removed
                        info.delay_key = None;
                        self.pending_errors.push(HandlerErr::Inbound {
                            id: *inbound_id.get_ref(),
                            proto: info.protocol,
                            error: RPCError::StreamTimeout,
                        });

                        if info.pending_items.last().map(|l| l.close_after()) == Some(false) {
                            // if the last chunk does not close the stream, append an error
                            info.pending_items.push(RPCCodedResponse::Error(
                                RPCResponseErrorCode::ServerError,
                                "Request timed out".into(),
                            ));
                        }
                    }
                }
                Poll::Ready(Some(Err(e))) => {
                    warn!(self.log, "Inbound substream poll failed"; "error" => format!("{:?}", e));
                    // drops the peer if we cannot read the delay queue
                    return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
                        "Could not poll inbound stream timer",
                    )));
                }
                Poll::Pending | Poll::Ready(None) => break,
            }
        }

        // purge expired outbound substreams
        loop {
            match self.outbound_substreams_delay.poll_next_unpin(cx) {
                Poll::Ready(Some(Ok(outbound_id))) => {
                    if let Some(OutboundInfo { proto, req_id, .. }) =
                        self.outbound_substreams.remove(outbound_id.get_ref())
                    {
                        self.update_keep_alive();

                        let outbound_err = HandlerErr::Outbound {
                            id: req_id,
                            proto,
                            error: RPCError::StreamTimeout,
                        };
                        // notify the user
                        return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
                    } else {
                        crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref());
                    }
                }
                Poll::Ready(Some(Err(e))) => {
                    warn!(self.log, "Outbound substream poll failed"; "error" => format!("{:?}", e));
                    return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
                        "Could not poll outbound stream timer",
                    )));
                }
                Poll::Pending | Poll::Ready(None) => break,
            }
        }

        // when deactivated, close all streams
        let deactivated = matches!(self.state, HandlerState::Deactivated);

        // drive inbound streams that need to be processed
        let mut substreams_to_remove = Vec::new(); // Closed substreams that need to be removed
        for (id, info) in self.inbound_substreams.iter_mut() {
            match std::mem::replace(&mut info.state, InboundState::Poisoned) {
                state @ InboundState::Idle(..) if !deactivated => {
                    info.state = state.send_items(&mut info.pending_items, info.remaining_chunks);
                }
                InboundState::Idle(mut substream) => {
                    // handler is deactivated, close the stream and mark it for removal
                    match substream.close().poll_unpin(cx) {
                        // if we can't close right now, put the substream back and try again later
                        Poll::Pending => info.state = InboundState::Idle(substream),
                        Poll::Ready(res) => {
                            substreams_to_remove.push(*id);
                            if let Some(ref delay_key) = info.delay_key {
                                self.inbound_substreams_delay.remove(delay_key);
                            }
                            if let Err(error) = res {
                                self.pending_errors.push(HandlerErr::Inbound {
                                    id: *id,
                                    error,
                                    proto: info.protocol,
                                });
                            }
                            if info.pending_items.last().map(|l| l.close_after()) == Some(false) {
                                // if the request was still active, report back to cancel it
                                self.pending_errors.push(HandlerErr::Inbound {
                                    id: *id,
                                    proto: info.protocol,
                                    error: RPCError::HandlerRejected,
                                });
                            }
                        }
                    }
                }
                InboundState::Busy(mut fut) => {
                    // first check if sending finished
                    let state = match fut.poll_unpin(cx) {
                        Poll::Ready((substream, errors, remove, new_remaining_chunks)) => {
                            info.remaining_chunks = new_remaining_chunks;
                            // report any error
                            for error in errors {
                                self.pending_errors.push(HandlerErr::Inbound {
                                    id: *id,
                                    error,
                                    proto: info.protocol,
                                })
                            }
                            if remove {
                                substreams_to_remove.push(*id);
                                if let Some(ref delay_key) = info.delay_key {
                                    self.inbound_substreams_delay.remove(delay_key);
                                }
                            }
                            InboundState::Idle(substream)
                        }
                        Poll::Pending => InboundState::Busy(fut),
                    };
                    info.state = if !deactivated {
                        // if the last batch finished, send more.
                        state.send_items(&mut info.pending_items, info.remaining_chunks)
                    } else {
                        state
                    };
                }
                InboundState::Poisoned => unreachable!("Poisoned inbound substream"),
            }
        }

        // remove closed substreams
        for inbound_id in substreams_to_remove {
            self.inbound_substreams.remove(&inbound_id);
        }

        self.update_keep_alive();
        // drive outbound streams that need to be processed
        for outbound_id in self.outbound_substreams.keys().copied().collect::<Vec<_>>() {
            // get the state and mark it as poisoned
            let (mut entry, state) = match self.outbound_substreams.entry(outbound_id) {
                Entry::Occupied(mut entry) => {
                    let state = std::mem::replace(
                        &mut entry.get_mut().state,
                        OutboundSubstreamState::Poisoned,
                    );
                    (entry, state)
                }
                Entry::Vacant(_) => unreachable!(),
            };

            match state {
                OutboundSubstreamState::RequestPendingResponse {
                    substream,
                    request: _,
                } if deactivated => {
                    // the handler is deactivated. Close the stream
                    entry.get_mut().state = OutboundSubstreamState::Closing(substream);
                    self.pending_errors.push(HandlerErr::Outbound {
                        id: entry.get().req_id,
                        proto: entry.get().proto,
                        error: RPCError::HandlerRejected,
                    })
                }
                OutboundSubstreamState::RequestPendingResponse {
                    mut substream,
                    request,
                } => match substream.poll_next_unpin(cx) {
                    Poll::Ready(Some(Ok(response))) => {
                        if request.expected_responses() > 1 && !response.close_after() {
                            let substream_entry = entry.get_mut();
                            let delay_key = &substream_entry.delay_key;
                            // chunks left after this one
                            let remaining_chunks = substream_entry
                                .remaining_chunks
                                .map(|count| count.saturating_sub(1))
                                .unwrap_or_else(|| 0);
                            if remaining_chunks == 0 {
                                // this is the last expected message, close the stream as all expected chunks have been received
                                substream_entry.state = OutboundSubstreamState::Closing(substream);
                            } else {
                                // If the response chunk was expected, update the remaining number of chunks expected and reset the timeout
                                substream_entry.state =
                                    OutboundSubstreamState::RequestPendingResponse {
                                        substream,
                                        request,
                                    };
                                substream_entry.remaining_chunks = Some(remaining_chunks);
                                self.outbound_substreams_delay
                                    .reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT));
                            }
                        } else {
                            // either this is a single response request or this response closes the
                            // stream
                            entry.get_mut().state = OutboundSubstreamState::Closing(substream);
                        }

                        // Check what type of response we got and report it accordingly
                        let id = entry.get().req_id;
                        let proto = entry.get().proto;

                        let received = match response {
                            RPCCodedResponse::StreamTermination(t) => {
                                Ok(RPCReceived::EndOfStream(id, t))
                            }
                            RPCCodedResponse::Success(resp) => Ok(RPCReceived::Response(id, resp)),
                            RPCCodedResponse::Error(ref code, ref r) => Err(HandlerErr::Outbound {
                                id,
                                proto,
                                error: RPCError::ErrorResponse(*code, r.to_string()),
                            }),
                        };

                        return Poll::Ready(ProtocolsHandlerEvent::Custom(received));
                    }
                    Poll::Ready(None) => {
                        // stream closed
                        // if we expected multiple streams send a stream termination,
                        // else report the stream terminating only.
                        //trace!(self.log, "RPC Response - stream closed by remote");
                        // drop the stream
                        let delay_key = &entry.get().delay_key;
                        let request_id = entry.get().req_id;
                        self.outbound_substreams_delay.remove(delay_key);
                        entry.remove_entry();
                        self.update_keep_alive();
                        // notify the application error
                        if request.expected_responses() > 1 {
                            // return an end of stream result
                            return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(
                                RPCReceived::EndOfStream(request_id, request.stream_termination()),
                            )));
                        }

                        // else we return an error, stream should not have closed early.
                        let outbound_err = HandlerErr::Outbound {
                            id: request_id,
                            proto: request.protocol(),
                            error: RPCError::IncompleteStream,
                        };
                        return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
                    }
                    Poll::Pending => {
                        entry.get_mut().state =
                            OutboundSubstreamState::RequestPendingResponse { substream, request }
                    }
                    Poll::Ready(Some(Err(e))) => {
                        // drop the stream
                        let delay_key = &entry.get().delay_key;
                        self.outbound_substreams_delay.remove(delay_key);
                        let outbound_err = HandlerErr::Outbound {
                            id: entry.get().req_id,
                            proto: entry.get().proto,
                            error: e,
                        };
                        entry.remove_entry();
                        self.update_keep_alive();
                        return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
                    }
                },
                OutboundSubstreamState::Closing(mut substream) => {
                    match Sink::poll_close(Pin::new(&mut substream), cx) {
                        Poll::Ready(_) => {
                            // drop the stream and its corresponding timeout
                            let delay_key = &entry.get().delay_key;
                            let protocol = entry.get().proto;
                            let request_id = entry.get().req_id;
                            self.outbound_substreams_delay.remove(delay_key);
                            entry.remove_entry();
                            self.update_keep_alive();

                            // report the stream termination to the user
                            //
                            // Streams can be terminated here if a responder tries to
                            // continue sending responses beyond what we would expect. Here
                            // we simply terminate the stream and report a stream
                            // termination to the application
                            let termination = match protocol {
                                Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
                                Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
                                _ => None, // all other protocols do not have multiple responses and we do not inform the user, we simply drop the stream.
                            };

                            if let Some(termination) = termination {
                                return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(
                                    RPCReceived::EndOfStream(request_id, termination),
                                )));
                            }
                        }
                        Poll::Pending => {
                            entry.get_mut().state = OutboundSubstreamState::Closing(substream);
                        }
                    }
                }
                OutboundSubstreamState::Poisoned => {
                    crit!(self.log, "Poisoned outbound substream");
                    unreachable!("Coding Error: Outbound substream is poisoned")
                }
            }
        }

        // establish outbound substreams
        if !self.dial_queue.is_empty() && self.dial_negotiated < self.max_dial_negotiated {
            self.dial_negotiated += 1;
            let (id, req) = self.dial_queue.remove(0);
            self.dial_queue.shrink_to_fit();
            self.update_keep_alive();
            return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                protocol: SubstreamProtocol::new(req.clone()),
                info: (id, req),
            });
        }
        Poll::Pending
    }
}

impl slog::Value for SubstreamId {
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        slog::Value::serialize(&self.0, record, key, serializer)
    }
}

/// Sends the queued items to the peer.
async fn process_inbound_substream<TSpec: EthSpec>(
    mut substream: InboundSubstream<TSpec>,
    mut remaining_chunks: u64,
    pending_items: Vec<RPCCodedResponse<TSpec>>,
) -> InboundProcessingOutput<TSpec> {
    let mut errors = Vec::new();
    let mut substream_closed = false;

    for item in pending_items {
        if !substream_closed {
            if matches!(item, RPCCodedResponse::StreamTermination(_)) {
                substream.close().await.unwrap_or_else(|e| errors.push(e));
                substream_closed = true;
            } else {
                remaining_chunks = remaining_chunks.saturating_sub(1);
                // chunks that are not stream terminations get sent, and the stream is closed if
                // the response is an error
                let is_error = matches!(item, RPCCodedResponse::Error(..));

                substream
                    .send(item)
                    .await
                    .unwrap_or_else(|e| errors.push(e));

                if remaining_chunks == 0 || is_error {
                    substream.close().await.unwrap_or_else(|e| errors.push(e));
                    substream_closed = true;
                }
            }
        } else {
            // we have more items after a closed substream, report those as errors
            errors.push(RPCError::InternalError(
                "Sending responses to closed inbound substream",
            ));
        }
    }
    (substream, errors, substream_closed, remaining_chunks)
}
@@ -3,13 +3,66 @@
use crate::types::EnrBitfield;
use serde::Serialize;
use ssz_derive::{Decode, Encode};
use ssz_types::{
    typenum::{U1024, U256},
    VariableList,
};
use std::ops::Deref;
use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};

/// Maximum number of blocks in a single request.
pub type MaxRequestBlocks = U1024;
pub const MAX_REQUEST_BLOCKS: u64 = 1024;

/// Maximum length of error message.
type MaxErrorLen = U256;

/// Wrapper over SSZ List to represent error message in rpc responses.
#[derive(Debug, Clone)]
pub struct ErrorType(VariableList<u8, MaxErrorLen>);

impl From<String> for ErrorType {
    fn from(s: String) -> Self {
        Self(VariableList::from(s.as_bytes().to_vec()))
    }
}

impl From<&str> for ErrorType {
    fn from(s: &str) -> Self {
        Self(VariableList::from(s.as_bytes().to_vec()))
    }
}

impl Deref for ErrorType {
    type Target = VariableList<u8, MaxErrorLen>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl ToString for ErrorType {
    fn to_string(&self) -> String {
        match std::str::from_utf8(self.0.deref()) {
            Ok(s) => s.to_string(),
            Err(_) => format!("{:?}", self.0.deref()), // Display raw bytes if not a UTF-8 string
        }
    }
}
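
`ErrorType` keeps error strings wire-safe in both directions: encoding goes through an SSZ byte list bounded by `MaxErrorLen` (256 bytes), and `to_string` falls back to a debug rendering when the bytes are not UTF-8. A std-only stand-in of the same behaviour; the explicit `truncate` is an assumption of this sketch, since how `VariableList::from` treats over-long input is an ssz_types detail:

const MAX_ERROR_LEN: usize = 256; // mirrors `MaxErrorLen = U256`

/// Std-only stand-in for `ErrorType(VariableList<u8, MaxErrorLen>)`.
struct ErrorType(Vec<u8>);

impl From<&str> for ErrorType {
    fn from(s: &str) -> Self {
        let mut bytes = s.as_bytes().to_vec();
        bytes.truncate(MAX_ERROR_LEN); // assumed: the list bound caps the message
        Self(bytes)
    }
}

impl ToString for ErrorType {
    fn to_string(&self) -> String {
        match std::str::from_utf8(&self.0) {
            Ok(s) => s.to_string(),
            Err(_) => format!("{:?}", self.0), // raw bytes if not a UTF-8 string
        }
    }
}

fn main() {
    assert_eq!(ErrorType::from("rate limited").to_string(), "rate limited");
    assert_eq!(ErrorType::from("x".repeat(300).as_str()).to_string().len(), 256);
}
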

/* Request/Response data structures for RPC methods */

/* Requests */

pub type RequestId = usize;
/// Identifier of a request.
///
// NOTE: The handler stores the `RequestId` to inform back of responses and errors, but its
// execution is independent of the contents of this type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RequestId {
    Router,
    Sync(usize),
    Behaviour,
}

/// The STATUS request/response handshake message.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
@@ -63,6 +116,18 @@ pub enum GoodbyeReason {
    /// Error/fault in the RPC.
    Fault = 3,

    /// Teku uses this code for not being able to verify a network.
    UnableToVerifyNetwork = 128,

    /// The node has too many connected peers.
    TooManyPeers = 129,

    /// Scored poorly.
    BadScore = 250,

    /// The peer is banned.
    Banned = 251,

    /// Unknown reason.
    Unknown = 0,
}
@@ -73,6 +138,10 @@ impl From<u64> for GoodbyeReason {
            1 => GoodbyeReason::ClientShutdown,
            2 => GoodbyeReason::IrrelevantNetwork,
            3 => GoodbyeReason::Fault,
            128 => GoodbyeReason::UnableToVerifyNetwork,
            129 => GoodbyeReason::TooManyPeers,
            250 => GoodbyeReason::BadScore,
            251 => GoodbyeReason::Banned,
            _ => GoodbyeReason::Unknown,
        }
    }
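
Together with the new variants above, the important property of this mapping is that it is total: any code not listed collapses to `Unknown`, so decoding a `Goodbye` can never fail on an unrecognised reason. A compact runnable stand-in (with most variants elided):

#[derive(Debug, PartialEq)]
enum GoodbyeReason {
    ClientShutdown,
    TooManyPeers,
    Banned,
    Unknown,
}

impl From<u64> for GoodbyeReason {
    fn from(n: u64) -> Self {
        match n {
            1 => GoodbyeReason::ClientShutdown,
            129 => GoodbyeReason::TooManyPeers,
            251 => GoodbyeReason::Banned,
            // total: unknown codes never abort decoding
            _ => GoodbyeReason::Unknown,
        }
    }
}

fn main() {
    assert_eq!(GoodbyeReason::from(129), GoodbyeReason::TooManyPeers);
    assert_eq!(GoodbyeReason::from(9999), GoodbyeReason::Unknown);
}
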
@@ -113,7 +182,7 @@ impl ssz::Decode for GoodbyeReason {
    }

    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
        u64::from_ssz_bytes(bytes).and_then(|n| Ok(n.into()))
        u64::from_ssz_bytes(bytes).map(|n| n.into())
    }
}

@@ -138,7 +207,7 @@ pub struct BlocksByRangeRequest {
#[derive(Clone, Debug, PartialEq)]
pub struct BlocksByRootRequest {
    /// The list of beacon block roots being requested.
    pub block_roots: Vec<Hash256>,
    pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
}

/* RPC Handling and Grouping */
@@ -180,22 +249,16 @@ pub enum RPCCodedResponse<T: EthSpec> {
    /// The response is successful.
    Success(RPCResponse<T>),

    /// The response was invalid.
    InvalidRequest(ErrorMessage),

    /// The response indicates a server error.
    ServerError(ErrorMessage),

    /// There was an unknown response.
    Unknown(ErrorMessage),
    Error(RPCResponseErrorCode, ErrorType),

    /// Received a stream termination indicating which response is being terminated.
    StreamTermination(ResponseTermination),
}

/// The code assigned to an erroneous `RPCResponse`.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Copy)]
pub enum RPCResponseErrorCode {
    RateLimited,
    InvalidRequest,
    ServerError,
    Unknown,
@@ -206,9 +269,7 @@ impl<T: EthSpec> RPCCodedResponse<T> {
    pub fn as_u8(&self) -> Option<u8> {
        match self {
            RPCCodedResponse::Success(_) => Some(0),
            RPCCodedResponse::InvalidRequest(_) => Some(1),
            RPCCodedResponse::ServerError(_) => Some(2),
            RPCCodedResponse::Unknown(_) => Some(255),
            RPCCodedResponse::Error(code, _) => Some(code.as_u8()),
            RPCCodedResponse::StreamTermination(_) => None,
        }
    }
@@ -222,12 +283,13 @@ impl<T: EthSpec> RPCCodedResponse<T> {
    }

    /// Builds an RPCCodedResponse from a response code and an ErrorMessage
    pub fn from_error(response_code: u8, err: ErrorMessage) -> Self {
        match response_code {
            1 => RPCCodedResponse::InvalidRequest(err),
            2 => RPCCodedResponse::ServerError(err),
            _ => RPCCodedResponse::Unknown(err),
        }
    pub fn from_error(response_code: u8, err: String) -> Self {
        let code = match response_code {
            1 => RPCResponseErrorCode::InvalidRequest,
            2 => RPCResponseErrorCode::ServerError,
            _ => RPCResponseErrorCode::Unknown,
        };
        RPCCodedResponse::Error(code, err.into())
    }

    /// Specifies which response allows for multiple chunks for the stream handler.
@@ -240,52 +302,39 @@ impl<T: EthSpec> RPCCodedResponse<T> {
            RPCResponse::Pong(_) => false,
            RPCResponse::MetaData(_) => false,
        },
        RPCCodedResponse::InvalidRequest(_) => true,
        RPCCodedResponse::ServerError(_) => true,
        RPCCodedResponse::Unknown(_) => true,
        RPCCodedResponse::Error(_, _) => true,
        // Stream terminations are part of responses that have chunks
        RPCCodedResponse::StreamTermination(_) => true,
        }
    }

    /// Returns true if this response is an error. Used to terminate the stream after an error is
    /// sent.
    pub fn is_error(&self) -> bool {
    /// Returns true if this response always terminates the stream.
    pub fn close_after(&self) -> bool {
        match self {
            RPCCodedResponse::Success(_) => false,
            _ => true,
        }
    }
}

    pub fn error_code(&self) -> Option<RPCResponseErrorCode> {
impl RPCResponseErrorCode {
    fn as_u8(&self) -> u8 {
        match self {
            RPCCodedResponse::Success(_) => None,
            RPCCodedResponse::StreamTermination(_) => None,
            RPCCodedResponse::InvalidRequest(_) => Some(RPCResponseErrorCode::InvalidRequest),
            RPCCodedResponse::ServerError(_) => Some(RPCResponseErrorCode::ServerError),
            RPCCodedResponse::Unknown(_) => Some(RPCResponseErrorCode::Unknown),
            RPCResponseErrorCode::InvalidRequest => 1,
            RPCResponseErrorCode::ServerError => 2,
            RPCResponseErrorCode::Unknown => 255,
            RPCResponseErrorCode::RateLimited => 128,
        }
    }
}
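
Note the asymmetry between the two directions: `as_u8` emits 128 for `RateLimited`, but `from_error` (above) only recognises codes 1 and 2, so a received 128 currently lands on `Unknown`. A std-only round-trip sketch that makes this visible:

#[derive(Debug, Clone, Copy, PartialEq)]
enum ErrorCode {
    RateLimited,
    InvalidRequest,
    ServerError,
    Unknown,
}

fn to_u8(code: ErrorCode) -> u8 {
    match code {
        ErrorCode::InvalidRequest => 1,
        ErrorCode::ServerError => 2,
        ErrorCode::RateLimited => 128,
        ErrorCode::Unknown => 255,
    }
}

fn from_u8(n: u8) -> ErrorCode {
    match n {
        1 => ErrorCode::InvalidRequest,
        2 => ErrorCode::ServerError,
        _ => ErrorCode::Unknown, // mirrors `from_error`'s catch-all arm
    }
}

fn main() {
    for code in vec![ErrorCode::InvalidRequest, ErrorCode::ServerError] {
        assert_eq!(from_u8(to_u8(code)), code);
    }
    // RateLimited does not round-trip: the incoming mapping has no 128 arm.
    assert_eq!(from_u8(to_u8(ErrorCode::RateLimited)), ErrorCode::Unknown);
}
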

#[derive(Encode, Decode, Debug, Clone)]
pub struct ErrorMessage {
    /// The UTF-8 encoded Error message string.
    pub error_message: Vec<u8>,
}

impl std::string::ToString for ErrorMessage {
    fn to_string(&self) -> String {
        String::from_utf8(self.error_message.clone()).unwrap_or_else(|_| "".into())
    }
}

impl std::fmt::Display for RPCResponseErrorCode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let repr = match self {
            RPCResponseErrorCode::InvalidRequest => "The request was invalid",
            RPCResponseErrorCode::ServerError => "Server error occurred",
            RPCResponseErrorCode::Unknown => "Unknown error occurred",
            RPCResponseErrorCode::RateLimited => "Rate limited",
        };
        f.write_str(repr)
    }
@@ -317,9 +366,7 @@ impl<T: EthSpec> std::fmt::Display for RPCCodedResponse<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            RPCCodedResponse::Success(res) => write!(f, "{}", res),
            RPCCodedResponse::InvalidRequest(err) => write!(f, "Invalid Request: {:?}", err),
            RPCCodedResponse::ServerError(err) => write!(f, "Server Error: {:?}", err),
            RPCCodedResponse::Unknown(err) => write!(f, "Unknown Error: {:?}", err),
            RPCCodedResponse::Error(code, err) => write!(f, "{}: {}", code, err.to_string()),
            RPCCodedResponse::StreamTermination(_) => write!(f, "Stream Termination"),
        }
    }
@@ -331,6 +378,10 @@ impl std::fmt::Display for GoodbyeReason {
            GoodbyeReason::ClientShutdown => write!(f, "Client Shutdown"),
            GoodbyeReason::IrrelevantNetwork => write!(f, "Irrelevant Network"),
            GoodbyeReason::Fault => write!(f, "Fault"),
            GoodbyeReason::UnableToVerifyNetwork => write!(f, "Unable to verify network"),
            GoodbyeReason::TooManyPeers => write!(f, "Too many peers"),
            GoodbyeReason::BadScore => write!(f, "Bad Score"),
            GoodbyeReason::Banned => write!(f, "Banned"),
            GoodbyeReason::Unknown => write!(f, "Unknown Reason"),
        }
    }
@@ -345,3 +396,18 @@ impl std::fmt::Display for BlocksByRangeRequest {
        )
    }
}

impl slog::Value for RequestId {
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        match self {
            RequestId::Behaviour => slog::Value::serialize("Behaviour", record, key, serializer),
            RequestId::Router => slog::Value::serialize("Router", record, key, serializer),
            RequestId::Sync(ref id) => slog::Value::serialize(id, record, key, serializer),
        }
    }
}
279	beacon_node/eth2_libp2p/src/rpc/mod.rs	Normal file
@@ -0,0 +1,279 @@
//! The Ethereum 2.0 Wire Protocol
//!
//! This protocol is a purpose-built Ethereum 2.0 libp2p protocol. Its role is to facilitate
//! direct peer-to-peer communication primarily for sending/receiving chain information for
//! syncing.

use futures::future::FutureExt;
use handler::RPCHandler;
use libp2p::core::{connection::ConnectionId, ConnectedPoint};
use libp2p::swarm::{
    protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler,
    PollParameters, SubstreamProtocol,
};
use libp2p::{Multiaddr, PeerId};
use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr};
use slog::{crit, debug, o};
use std::marker::PhantomData;
use std::task::{Context, Poll};
use std::time::Duration;
use types::EthSpec;

pub(crate) use handler::HandlerErr;
pub(crate) use methods::{MetaData, Ping, RPCCodedResponse, RPCResponse};
pub(crate) use protocol::{RPCProtocol, RPCRequest};

pub use handler::SubstreamId;
pub use methods::{
    BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks,
    RPCResponseErrorCode, RequestId, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS,
};
pub use protocol::{Protocol, RPCError};

pub(crate) mod codec;
mod handler;
pub mod methods;
mod protocol;
mod rate_limiter;

/// RPC events sent from Lighthouse.
#[derive(Debug, Clone)]
pub enum RPCSend<T: EthSpec> {
    /// A request sent from Lighthouse.
    ///
    /// The `RequestId` is given by the application making the request. These
    /// go over *outbound* connections.
    Request(RequestId, RPCRequest<T>),
    /// A response sent from Lighthouse.
    ///
    /// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the
    /// peer. The second parameter is a single chunk of a response. These go over *inbound*
    /// connections.
    Response(SubstreamId, RPCCodedResponse<T>),
}

/// RPC events received from outside Lighthouse.
#[derive(Debug, Clone)]
pub enum RPCReceived<T: EthSpec> {
    /// A request received from the outside.
    ///
    /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the
    /// *inbound* substream over which it is managed.
    Request(SubstreamId, RPCRequest<T>),
    /// A response received from the outside.
    ///
    /// The `RequestId` corresponds to the application given ID of the original request sent to the
    /// peer. The second parameter is a single chunk of a response. These go over *outbound*
    /// connections.
    Response(RequestId, RPCResponse<T>),
    /// Marks a request as completed.
    EndOfStream(RequestId, ResponseTermination),
}
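
The two enums are deliberately asymmetric about identifiers: a request we send is answered with `RPCReceived::Response`/`EndOfStream` carrying the same application `RequestId`, while a peer's request arrives tagged with the handler's `SubstreamId`, which must be echoed in `RPCSend::Response`. A stand-in sketch of that pairing:

// Stand-ins for the generic Lighthouse types.
type RequestId = usize;
type SubstreamId = usize;

enum RpcSend {
    Request(RequestId, &'static str),
    Response(SubstreamId, &'static str),
}

enum RpcReceived {
    Request(SubstreamId, &'static str),
    Response(RequestId, &'static str),
}

fn main() {
    // Outbound: we pick the id, and the response comes back tagged with it.
    let _sent = RpcSend::Request(7, "Status");
    let _answer = RpcReceived::Response(7, "Status");

    // Inbound: the handler picks the substream id; we must quote it back.
    if let RpcReceived::Request(substream_id, _req) = RpcReceived::Request(0, "Ping") {
        let _reply = RpcSend::Response(substream_id, "Pong");
    }
}
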

impl<T: EthSpec> std::fmt::Display for RPCSend<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            RPCSend::Request(id, req) => write!(f, "RPC Request(id: {:?}, {})", id, req),
            RPCSend::Response(id, res) => write!(f, "RPC Response(id: {:?}, {})", id, res),
        }
    }
}

/// Messages sent to the user from the RPC protocol.
pub struct RPCMessage<TSpec: EthSpec> {
    /// The peer that sent the message.
    pub peer_id: PeerId,
    /// Handler managing this message.
    pub conn_id: ConnectionId,
    /// The message that was sent.
    pub event: <RPCHandler<TSpec> as ProtocolsHandler>::OutEvent,
}

/// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
/// logic.
pub struct RPC<TSpec: EthSpec> {
    /// Rate limiter
    limiter: RateLimiter,
    /// Queue of events to be processed.
    events: Vec<NetworkBehaviourAction<RPCSend<TSpec>, RPCMessage<TSpec>>>,
    /// Slog logger for RPC behaviour.
    log: slog::Logger,
}

impl<TSpec: EthSpec> RPC<TSpec> {
    pub fn new(log: slog::Logger) -> Self {
        let log = log.new(o!("service" => "libp2p_rpc"));
        let limiter = RPCRateLimiterBuilder::new()
            .n_every(Protocol::MetaData, 2, Duration::from_secs(5))
            .one_every(Protocol::Ping, Duration::from_secs(5))
            .n_every(Protocol::Status, 5, Duration::from_secs(15))
            .one_every(Protocol::Goodbye, Duration::from_secs(10))
            .n_every(
                Protocol::BlocksByRange,
                methods::MAX_REQUEST_BLOCKS,
                Duration::from_secs(10),
            )
            .n_every(
                Protocol::BlocksByRoot,
                methods::MAX_REQUEST_BLOCKS,
                Duration::from_secs(10),
            )
            .build()
            .unwrap();
        RPC {
            limiter,
            events: Vec::new(),
            log,
        }
    }
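
Each `.n_every(protocol, n, period)` call above declares a per-protocol quota: at most `n` requests per `period`, with `one_every` as the `n = 1` case. A std-only sketch of the underlying arithmetic for a single protocol; the real `RPCRateLimiter` is keyed by peer and protocol, and every name in this sketch is assumed for illustration rather than taken from `rate_limiter.rs`:

use std::time::{Duration, Instant};

/// Minimal fixed-window quota: at most `max` requests per `period`.
struct Quota {
    max: u64,
    period: Duration,
    window_start: Instant,
    used: u64,
}

impl Quota {
    fn new(max: u64, period: Duration) -> Self {
        Self { max, period, window_start: Instant::now(), used: 0 }
    }

    /// Ok if the request fits the quota, otherwise the time until the window resets.
    fn allows(&mut self) -> Result<(), Duration> {
        let now = Instant::now();
        if now.duration_since(self.window_start) >= self.period {
            // Window elapsed: reset the counter.
            self.window_start = now;
            self.used = 0;
        }
        if self.used < self.max {
            self.used += 1;
            Ok(())
        } else {
            // Too soon: report how long the peer should wait.
            Err(self.period - now.duration_since(self.window_start))
        }
    }
}

fn main() {
    // Mirrors `.n_every(Protocol::Status, 5, Duration::from_secs(15))`.
    let mut status_quota = Quota::new(5, Duration::from_secs(15));
    for _ in 0..5 {
        assert!(status_quota.allows().is_ok());
    }
    assert!(status_quota.allows().is_err()); // sixth request within the window
}
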

    /// Sends an RPC response.
    ///
    /// The peer must be connected for this to succeed.
    pub fn send_response(
        &mut self,
        peer_id: PeerId,
        id: (ConnectionId, SubstreamId),
        event: RPCCodedResponse<TSpec>,
    ) {
        self.events.push(NetworkBehaviourAction::NotifyHandler {
            peer_id,
            handler: NotifyHandler::One(id.0),
            event: RPCSend::Response(id.1, event),
        });
    }

    /// Submits an RPC request.
    ///
    /// The peer must be connected for this to succeed.
    pub fn send_request(
        &mut self,
        peer_id: PeerId,
        request_id: RequestId,
        event: RPCRequest<TSpec>,
    ) {
        self.events.push(NetworkBehaviourAction::NotifyHandler {
            peer_id,
            handler: NotifyHandler::Any,
            event: RPCSend::Request(request_id, event),
        });
    }
}

impl<TSpec> NetworkBehaviour for RPC<TSpec>
where
    TSpec: EthSpec,
{
    type ProtocolsHandler = RPCHandler<TSpec>;
    type OutEvent = RPCMessage<TSpec>;

    fn new_handler(&mut self) -> Self::ProtocolsHandler {
        RPCHandler::new(
            SubstreamProtocol::new(RPCProtocol {
                phantom: PhantomData,
            }),
            &self.log,
        )
    }

    // handled by discovery
    fn addresses_of_peer(&mut self, _peer_id: &PeerId) -> Vec<Multiaddr> {
        Vec::new()
    }

    // Use connection established/closed instead of these currently
    fn inject_connected(&mut self, peer_id: &PeerId) {
        // find the peer's meta-data
        debug!(self.log, "Requesting new peer's metadata"; "peer_id" => format!("{}",peer_id));
        let rpc_event = RPCSend::Request(RequestId::Behaviour, RPCRequest::MetaData(PhantomData));
        self.events.push(NetworkBehaviourAction::NotifyHandler {
            peer_id: peer_id.clone(),
            handler: NotifyHandler::Any,
            event: rpc_event,
        });
    }

    fn inject_disconnected(&mut self, _peer_id: &PeerId) {}

    fn inject_connection_established(
        &mut self,
        _peer_id: &PeerId,
        _: &ConnectionId,
        _connected_point: &ConnectedPoint,
    ) {
    }

    fn inject_connection_closed(
        &mut self,
        _peer_id: &PeerId,
        _: &ConnectionId,
        _connected_point: &ConnectedPoint,
    ) {
    }

    fn inject_event(
        &mut self,
        peer_id: PeerId,
        conn_id: ConnectionId,
        event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
    ) {
        if let Ok(RPCReceived::Request(ref id, ref req)) = event {
            // check if the request is conformant to the quota
            match self.limiter.allows(&peer_id, req) {
                Ok(()) => {
                    // send the event to the user
                    self.events
                        .push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
                            peer_id,
                            conn_id,
                            event,
                        }))
                }
                Err(RateLimitedErr::TooLarge) => {
                    // we set the batch sizes, so this is a coding/config err
                    crit!(self.log, "Batch too large to ever be processed";
                        "protocol" => format!("{}", req.protocol()));
                }
                Err(RateLimitedErr::TooSoon(wait_time)) => {
                    debug!(self.log, "Request exceeds the rate limit";
                        "request" => req.to_string(), "peer_id" => peer_id.to_string(), "wait_time_ms" => wait_time.as_millis());
                    // send an error code to the peer.
                    // the handler upon receiving the error code will send it back to the behaviour
                    self.send_response(
                        peer_id,
                        (conn_id, *id),
                        RPCCodedResponse::Error(
                            RPCResponseErrorCode::RateLimited,
                            format!("Rate limited: wait {:?}", wait_time).into(),
                        ),
                    );
                }
            }
        } else {
            self.events
                .push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
|
||||
peer_id,
|
||||
conn_id,
|
||||
event,
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
fn poll(
|
||||
&mut self,
|
||||
cx: &mut Context,
|
||||
_: &mut impl PollParameters,
|
||||
) -> Poll<
|
||||
NetworkBehaviourAction<
|
||||
<Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
|
||||
Self::OutEvent,
|
||||
>,
|
||||
> {
|
||||
// let the rate limiter prune
|
||||
let _ = self.limiter.poll_unpin(cx);
|
||||
if !self.events.is_empty() {
|
||||
return Poll::Ready(self.events.remove(0));
|
||||
}
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
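The enums above fix an ID round-trip contract: an application-chosen `RequestId` travels out with a request and is echoed back on every response chunk and on the final `EndOfStream` marker, while inbound requests are tracked by the handler's `SubstreamId` instead. A minimal standalone sketch of that contract (illustrative stand-in types, not the crate's own):

// Illustrative sketch of the RequestId round-trip (simplified stand-in types).
type RequestId = usize;

enum Received {
    Response(RequestId, &'static str), // one chunk of a response
    EndOfStream(RequestId),            // the stream completed
}

fn main() {
    let sent_id: RequestId = 7; // chosen by the application in send_request
    let events = vec![Received::Response(7, "chunk 0"), Received::EndOfStream(7)];
    for ev in events {
        // every event for this request carries the same application-given ID
        match ev {
            Received::Response(id, _) | Received::EndOfStream(id) => assert_eq!(id, sent_id),
        }
    }
}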
@@ -1,29 +1,57 @@
#![allow(clippy::type_complexity)]

use super::methods::*;
use crate::rpc::{
    codec::{
        base::{BaseInboundCodec, BaseOutboundCodec},
        ssz::{SSZInboundCodec, SSZOutboundCodec},
        ssz_snappy::{SSZSnappyInboundCodec, SSZSnappyOutboundCodec},
        InboundCodec, OutboundCodec,
    },
    methods::ResponseTermination,
    MaxRequestBlocks, MAX_REQUEST_BLOCKS,
};
use futures::future::Ready;
use futures::future::BoxFuture;
use futures::prelude::*;
use futures::prelude::{AsyncRead, AsyncWrite};
use libp2p::core::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo};
use ssz::Encode;
use ssz_types::VariableList;
use std::io;
use std::marker::PhantomData;
use std::pin::Pin;
use std::time::Duration;
use tokio_io_timeout::TimeoutStream;
use tokio_util::{
    codec::Framed,
    compat::{Compat, FuturesAsyncReadCompatExt},
};
use types::EthSpec;
use types::{BeaconBlock, EthSpec, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock};

lazy_static! {
    // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as the min/max values are
    // the same across different `EthSpec` implementations.
    pub static ref SIGNED_BEACON_BLOCK_MIN: usize = SignedBeaconBlock::<MainnetEthSpec> {
        message: BeaconBlock::empty(&MainnetEthSpec::default_spec()),
        signature: Signature::empty(),
    }
    .as_ssz_bytes()
    .len();
    pub static ref SIGNED_BEACON_BLOCK_MAX: usize = SignedBeaconBlock::<MainnetEthSpec> {
        message: BeaconBlock::full(&MainnetEthSpec::default_spec()),
        signature: Signature::empty(),
    }
    .as_ssz_bytes()
    .len();
    pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize =
        VariableList::<Hash256, MaxRequestBlocks>::from(Vec::<Hash256>::new())
            .as_ssz_bytes()
            .len();
    pub static ref BLOCKS_BY_ROOT_REQUEST_MAX: usize =
        VariableList::<Hash256, MaxRequestBlocks>::from(vec![
            Hash256::zero();
            MAX_REQUEST_BLOCKS
                as usize
        ])
        .as_ssz_bytes()
        .len();
}

/// The maximum bytes that can be sent across the RPC.
const MAX_RPC_SIZE: usize = 1_048_576; // 1M
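As a back-of-the-envelope check on these bounds (a standalone sketch; `MAX_REQUEST_BLOCKS = 1024` is assumed here, and a `Hash256` SSZ-encodes to a fixed 32 bytes), even a maximal `BlocksByRoot` request stays far below `MAX_RPC_SIZE`:

// Sketch: maximum SSZ size of a BlocksByRoot request body.
const HASH256_LEN: usize = 32; // each block root is a fixed 32-byte value
const MAX_REQUEST_BLOCKS: usize = 1024; // assumed spec value
const MAX_RPC_SIZE: usize = 1_048_576; // 1M, as defined above

fn main() {
    let max_request = HASH256_LEN * MAX_REQUEST_BLOCKS; // 32 KiB
    assert_eq!(max_request, 32_768);
    assert!(max_request < MAX_RPC_SIZE);
}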
@@ -62,7 +90,6 @@ pub enum Version {
/// RPC encodings supported.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Encoding {
    SSZ,
    SSZSnappy,
}

@@ -83,7 +110,6 @@ impl std::fmt::Display for Protocol {
impl std::fmt::Display for Encoding {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let repr = match self {
            Encoding::SSZ => "ssz",
            Encoding::SSZSnappy => "ssz_snappy",
        };
        f.write_str(repr)
@@ -112,17 +138,11 @@ impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {
    fn protocol_info(&self) -> Self::InfoIter {
        vec![
            ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy),
            ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZ),
            ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
            ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZ),
            ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
            ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZ),
            ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
            ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZ),
            ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
            ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZ),
            ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
            ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZ),
        ]
    }
}
@@ -174,13 +194,6 @@ impl ProtocolName for ProtocolId {
pub type InboundOutput<TSocket, TSpec> = (RPCRequest<TSpec>, InboundFramed<TSocket, TSpec>);
pub type InboundFramed<TSocket, TSpec> =
    Framed<TimeoutStream<Compat<TSocket>>, InboundCodec<TSpec>>;
type FnAndThen<TSocket, TSpec> = fn(
    (
        Option<Result<RPCRequest<TSpec>, RPCError>>,
        InboundFramed<TSocket, TSpec>,
    ),
) -> Ready<Result<InboundOutput<TSocket, TSpec>, RPCError>>;
type FnMapErr = fn(tokio::time::Elapsed) -> RPCError;

impl<TSocket, TSpec> InboundUpgrade<TSocket> for RPCProtocol<TSpec>
where
@@ -189,45 +202,43 @@ where
{
    type Output = InboundOutput<TSocket, TSpec>;
    type Error = RPCError;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>;
    type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>;

    fn upgrade_inbound(self, socket: TSocket, protocol: ProtocolId) -> Self::Future {
        let protocol_name = protocol.message_name;
        // convert the socket to tokio compatible socket
        let socket = socket.compat();
        let codec = match protocol.encoding {
            Encoding::SSZSnappy => {
                let ssz_snappy_codec =
                    BaseInboundCodec::new(SSZSnappyInboundCodec::new(protocol, MAX_RPC_SIZE));
                InboundCodec::SSZSnappy(ssz_snappy_codec)
            }
            Encoding::SSZ => {
                let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE));
                InboundCodec::SSZ(ssz_codec)
            }
        };
        let mut timed_socket = TimeoutStream::new(socket);
        timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT)));
        async move {
            let protocol_name = protocol.message_name;
            // convert the socket to tokio compatible socket
            let socket = socket.compat();
            let codec = match protocol.encoding {
                Encoding::SSZSnappy => {
                    let ssz_snappy_codec =
                        BaseInboundCodec::new(SSZSnappyInboundCodec::new(protocol, MAX_RPC_SIZE));
                    InboundCodec::SSZSnappy(ssz_snappy_codec)
                }
            };
            let mut timed_socket = TimeoutStream::new(socket);
            timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT)));

            let socket = Framed::new(timed_socket, codec);
            let socket = Framed::new(timed_socket, codec);

            // MetaData requests should be empty, return the stream
        Box::pin(match protocol_name {
            Protocol::MetaData => {
                future::Either::Left(future::ok((RPCRequest::MetaData(PhantomData), socket)))
            // MetaData requests should be empty, return the stream
            match protocol_name {
                Protocol::MetaData => Ok((RPCRequest::MetaData(PhantomData), socket)),
                _ => {
                    match tokio::time::timeout(
                        Duration::from_secs(REQUEST_TIMEOUT),
                        socket.into_future(),
                    )
                    .await
                    {
                        Err(e) => Err(RPCError::from(e)),
                        Ok((Some(Ok(request)), stream)) => Ok((request, stream)),
                        Ok((Some(Err(_)), _)) | Ok((None, _)) => Err(RPCError::IncompleteStream),
                    }
                }
            }

            _ => future::Either::Right(
                tokio::time::timeout(Duration::from_secs(REQUEST_TIMEOUT), socket.into_future())
                    .map_err(RPCError::from as FnMapErr)
                    .and_then({
                        |(req, stream)| match req {
                            Some(Ok(request)) => future::ok((request, stream)),
                            Some(Err(_)) | None => future::err(RPCError::IncompleteStream),
                        }
                    } as FnAndThen<TSocket, TSpec>),
            ),
        })
        }
        .boxed()
    }
}

@@ -261,61 +272,54 @@ impl<TSpec: EthSpec> RPCRequest<TSpec> {
    pub fn supported_protocols(&self) -> Vec<ProtocolId> {
        match self {
            // add more protocols when versions/encodings are supported
            RPCRequest::Status(_) => vec![
                ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy),
                ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZ),
            ],
            RPCRequest::Goodbye(_) => vec![
                ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
                ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZ),
            ],
            RPCRequest::BlocksByRange(_) => vec![
                ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
                ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZ),
            ],
            RPCRequest::BlocksByRoot(_) => vec![
                ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
                ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZ),
            ],
            RPCRequest::Ping(_) => vec![
                ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
                ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZ),
            ],
            RPCRequest::MetaData(_) => vec![
                ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
                ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZ),
            ],
            RPCRequest::Status(_) => vec![ProtocolId::new(
                Protocol::Status,
                Version::V1,
                Encoding::SSZSnappy,
            )],
            RPCRequest::Goodbye(_) => vec![ProtocolId::new(
                Protocol::Goodbye,
                Version::V1,
                Encoding::SSZSnappy,
            )],
            RPCRequest::BlocksByRange(_) => vec![ProtocolId::new(
                Protocol::BlocksByRange,
                Version::V1,
                Encoding::SSZSnappy,
            )],
            RPCRequest::BlocksByRoot(_) => vec![ProtocolId::new(
                Protocol::BlocksByRoot,
                Version::V1,
                Encoding::SSZSnappy,
            )],
            RPCRequest::Ping(_) => vec![ProtocolId::new(
                Protocol::Ping,
                Version::V1,
                Encoding::SSZSnappy,
            )],
            RPCRequest::MetaData(_) => vec![ProtocolId::new(
                Protocol::MetaData,
                Version::V1,
                Encoding::SSZSnappy,
            )],
        }
    }

    /* These functions are used in the handler for stream management */

    /// This specifies whether a stream should remain open and await a response, given a request.
    /// A GOODBYE request has no response.
    pub fn expect_response(&self) -> bool {
    /// Number of responses expected for this request.
    pub fn expected_responses(&self) -> u64 {
        match self {
            RPCRequest::Status(_) => true,
            RPCRequest::Goodbye(_) => false,
            RPCRequest::BlocksByRange(_) => true,
            RPCRequest::BlocksByRoot(_) => true,
            RPCRequest::Ping(_) => true,
            RPCRequest::MetaData(_) => true,
        }
    }

    /// Returns which methods expect multiple responses from the stream. If this is false and
    /// the stream terminates, an error is given.
    pub fn multiple_responses(&self) -> bool {
        match self {
            RPCRequest::Status(_) => false,
            RPCRequest::Goodbye(_) => false,
            RPCRequest::BlocksByRange(_) => true,
            RPCRequest::BlocksByRoot(_) => true,
            RPCRequest::Ping(_) => false,
            RPCRequest::MetaData(_) => false,
            RPCRequest::Status(_) => 1,
            RPCRequest::Goodbye(_) => 0,
            RPCRequest::BlocksByRange(req) => req.count,
            RPCRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
            RPCRequest::Ping(_) => 1,
            RPCRequest::MetaData(_) => 1,
        }
    }
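Replacing the boolean `expect_response`/`multiple_responses` pair with a numeric `expected_responses` lets one value double as the rate-limiter token cost: a `BlocksByRange` request for `count` blocks costs `count` tokens, while a zero-response `Goodbye` still costs one token via `.max(1)` (see `rate_limiter.rs` below). A standalone sketch of that mapping:

// Sketch (standalone): deriving a rate-limiter token cost from the number of
// expected responses, mirroring `expected_responses().max(1)`.
fn token_cost(expected_responses: u64) -> u64 {
    expected_responses.max(1)
}

fn main() {
    assert_eq!(token_cost(0), 1);   // Goodbye: no responses, still costs one token
    assert_eq!(token_cost(1), 1);   // Status / Ping / MetaData
    assert_eq!(token_cost(64), 64); // BlocksByRange { count: 64 }
}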
    /// Gives the corresponding `Protocol` to this request.
    pub fn protocol(&self) -> Protocol {
        match self {
            RPCRequest::Status(_) => Protocol::Status,
@@ -356,7 +360,7 @@ where
{
    type Output = OutboundFramed<TSocket, TSpec>;
    type Error = RPCError;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>;
    type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>;

    fn upgrade_outbound(self, socket: TSocket, protocol: Self::Info) -> Self::Future {
        // convert to a tokio compatible socket
@@ -367,17 +371,16 @@ where
                    BaseOutboundCodec::new(SSZSnappyOutboundCodec::new(protocol, MAX_RPC_SIZE));
                OutboundCodec::SSZSnappy(ssz_snappy_codec)
            }
            Encoding::SSZ => {
                let ssz_codec =
                    BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, MAX_RPC_SIZE));
                OutboundCodec::SSZ(ssz_codec)
            }
        };

        let mut socket = Framed::new(socket, codec);

        let future = async { socket.send(self).await.map(|_| socket) };
        Box::pin(future)
        async {
            socket.send(self).await?;
            socket.close().await?;
            Ok(socket)
        }
        .boxed()
    }
}

@@ -390,7 +393,7 @@ pub enum RPCError {
    /// IO Error.
    IoError(String),
    /// The peer returned a valid response but the response indicated an error.
    ErrorResponse(RPCResponseErrorCode),
    ErrorResponse(RPCResponseErrorCode, String),
    /// Timed out waiting for a response.
    StreamTimeout,
    /// Peer does not support the protocol.
@@ -401,8 +404,12 @@ pub enum RPCError {
    InvalidData,
    /// An error occurred due to internal reasons. Ex: timer failure.
    InternalError(&'static str),
    /// Negotiation with this peer timed out
    /// Negotiation with this peer timed out.
    NegotiationTimeout,
    /// Handler rejected this request.
    HandlerRejected,
    /// The request exceeds the rate limit.
    RateLimited,
}

impl From<ssz::DecodeError> for RPCError {
@@ -430,12 +437,18 @@ impl std::fmt::Display for RPCError {
            RPCError::SSZDecodeError(ref err) => write!(f, "Error while decoding ssz: {:?}", err),
            RPCError::InvalidData => write!(f, "Peer sent unexpected data"),
            RPCError::IoError(ref err) => write!(f, "IO Error: {}", err),
            RPCError::ErrorResponse(ref code) => write!(f, "RPC response was an error: {}", code),
            RPCError::ErrorResponse(ref code, ref reason) => write!(
                f,
                "RPC response was an error: {} with reason: {}",
                code, reason
            ),
            RPCError::StreamTimeout => write!(f, "Stream Timeout"),
            RPCError::UnsupportedProtocol => write!(f, "Peer does not support the protocol"),
            RPCError::IncompleteStream => write!(f, "Stream ended unexpectedly"),
            RPCError::InternalError(ref err) => write!(f, "Internal error: {}", err),
            RPCError::NegotiationTimeout => write!(f, "Negotiation timeout"),
            RPCError::HandlerRejected => write!(f, "Handler rejected the request"),
            RPCError::RateLimited => write!(f, "Request exceeds the rate limit"),
        }
    }
}
@@ -451,8 +464,10 @@ impl std::error::Error for RPCError {
            RPCError::IncompleteStream => None,
            RPCError::InvalidData => None,
            RPCError::InternalError(_) => None,
            RPCError::ErrorResponse(_) => None,
            RPCError::ErrorResponse(_, _) => None,
            RPCError::NegotiationTimeout => None,
            RPCError::HandlerRejected => None,
            RPCError::RateLimited => None,
        }
    }
}
377 beacon_node/eth2_libp2p/src/rpc/rate_limiter.rs Normal file
@@ -0,0 +1,377 @@
use crate::rpc::{Protocol, RPCRequest};
use fnv::FnvHashMap;
use futures::StreamExt;
use libp2p::PeerId;
use std::convert::TryInto;
use std::future::Future;
use std::hash::Hash;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use tokio::time::Interval;
use types::EthSpec;

/// Nanoseconds since a given time.
// Maintained as u64 to reduce footprint
// NOTE: this also implies that the rate limiter will manage checking if a batch is allowed for at
// most <init time> + u64::MAX nanosecs, ~500 years. So it is realistic to assume this is fine.
type Nanosecs = u64;

/// User-friendly rate limiting parameters of the GCRA.
///
/// A quota of `max_tokens` tokens every `replenish_all_every` units of time means that:
/// 1. One token is replenished every `replenish_all_every`/`max_tokens` units of time.
/// 2. Instantaneous bursts (batches) of up to `max_tokens` tokens are allowed.
///
/// The above implies that if `max_tokens` is greater than 1, the perceived rate may be higher (but
/// bounded) than the defined rate when instantaneous bursts occur. For instance, for a rate of
/// 4T/2s a first burst of 4T is allowed with subsequent requests of 1T every 0.5s forever,
/// producing a perceived rate over the window of the first 2s of 8T. However, subsequent sliding
/// windows of 2s keep the limit.
///
/// In this scenario using the same rate as above, the sender is always maxing out their tokens,
/// except at seconds 1.5, 3, 3.5 and 4
///
/// ```ignore
///           x
///     used  x
///   tokens  x           x
///     at a  x  x  x     x  x
///    given  +--+--+--o--+--+--o--o--o--> seconds
///     time  |  |  |  |  |  |  |  |  |
///           0     1     2     3     4
///
///           4  1  1  1  2  1  1  2  3 <= available tokens when the batch is received
/// ```
///
/// For a sender to request a batch of `n`T, they would need to wait at least
/// n*`replenish_all_every`/`max_tokens` units of time since their last request.
///
/// To produce hard limits, set `max_tokens` to 1.
pub struct Quota {
    /// How often are `max_tokens` fully replenished.
    replenish_all_every: Duration,
    /// Token limit. This determines how large an instantaneous batch of
    /// tokens can be.
    max_tokens: u64,
}

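A worked example of the quota arithmetic described above (standalone sketch): with `max_tokens = 4` replenished fully every 2 s, one token returns every 2 s / 4 = 500 ms, which is the tick spacing of the timeline in the doc comment.

use std::time::Duration;

// Sketch: time to replenish a single token under a given quota.
fn replenish_one_every(replenish_all_every: Duration, max_tokens: u32) -> Duration {
    replenish_all_every / max_tokens
}

fn main() {
    assert_eq!(
        replenish_one_every(Duration::from_secs(2), 4),
        Duration::from_millis(500)
    );
}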
/// Manages rate limiting of requests per peer, with differentiated rates per protocol.
pub struct RPCRateLimiter {
    /// Interval to prune peers for which their timer ran out.
    prune_interval: Interval,
    /// Creation time of the rate limiter.
    init_time: Instant,
    /// Goodbye rate limiter.
    goodbye_rl: Limiter<PeerId>,
    /// Ping rate limiter.
    ping_rl: Limiter<PeerId>,
    /// MetaData rate limiter.
    metadata_rl: Limiter<PeerId>,
    /// Status rate limiter.
    status_rl: Limiter<PeerId>,
    /// BlocksByRange rate limiter.
    bbrange_rl: Limiter<PeerId>,
    /// BlocksByRoot rate limiter.
    bbroots_rl: Limiter<PeerId>,
}

/// Error type for non-conformant requests.
pub enum RateLimitedErr {
    /// Required tokens for this request exceed the maximum.
    TooLarge,
    /// Request does not fit in the quota. Gives the earliest time the request could be accepted.
    TooSoon(Duration),
}

/// User-friendly builder of a `RPCRateLimiter`
#[derive(Default)]
pub struct RPCRateLimiterBuilder {
    /// Quota for the Goodbye protocol.
    goodbye_quota: Option<Quota>,
    /// Quota for the Ping protocol.
    ping_quota: Option<Quota>,
    /// Quota for the MetaData protocol.
    metadata_quota: Option<Quota>,
    /// Quota for the Status protocol.
    status_quota: Option<Quota>,
    /// Quota for the BlocksByRange protocol.
    bbrange_quota: Option<Quota>,
    /// Quota for the BlocksByRoot protocol.
    bbroots_quota: Option<Quota>,
}

impl RPCRateLimiterBuilder {
    /// Get an empty `RPCRateLimiterBuilder`.
    pub fn new() -> Self {
        Default::default()
    }

    /// Set a quota for a protocol.
    fn set_quota(mut self, protocol: Protocol, quota: Quota) -> Self {
        let q = Some(quota);
        match protocol {
            Protocol::Ping => self.ping_quota = q,
            Protocol::Status => self.status_quota = q,
            Protocol::MetaData => self.metadata_quota = q,
            Protocol::Goodbye => self.goodbye_quota = q,
            Protocol::BlocksByRange => self.bbrange_quota = q,
            Protocol::BlocksByRoot => self.bbroots_quota = q,
        }
        self
    }

    /// Allow one token every `time_period` to be used for this `protocol`.
    /// This produces a hard limit.
    pub fn one_every(self, protocol: Protocol, time_period: Duration) -> Self {
        self.set_quota(
            protocol,
            Quota {
                replenish_all_every: time_period,
                max_tokens: 1,
            },
        )
    }

    /// Allow `n` tokens to be used every `time_period` for this `protocol`.
    pub fn n_every(self, protocol: Protocol, n: u64, time_period: Duration) -> Self {
        self.set_quota(
            protocol,
            Quota {
                max_tokens: n,
                replenish_all_every: time_period,
            },
        )
    }

    pub fn build(self) -> Result<RPCRateLimiter, &'static str> {
        // get our quotas
        let ping_quota = self.ping_quota.ok_or("Ping quota not specified")?;
        let metadata_quota = self.metadata_quota.ok_or("MetaData quota not specified")?;
        let status_quota = self.status_quota.ok_or("Status quota not specified")?;
        let goodbye_quota = self.goodbye_quota.ok_or("Goodbye quota not specified")?;
        let bbroots_quota = self
            .bbroots_quota
            .ok_or("BlocksByRoot quota not specified")?;
        let bbrange_quota = self
            .bbrange_quota
            .ok_or("BlocksByRange quota not specified")?;

        // create the rate limiters
        let ping_rl = Limiter::from_quota(ping_quota)?;
        let metadata_rl = Limiter::from_quota(metadata_quota)?;
        let status_rl = Limiter::from_quota(status_quota)?;
        let goodbye_rl = Limiter::from_quota(goodbye_quota)?;
        let bbroots_rl = Limiter::from_quota(bbroots_quota)?;
        let bbrange_rl = Limiter::from_quota(bbrange_quota)?;

        // check for peers to prune every 30 seconds, starting in 30 seconds
        let prune_every = tokio::time::Duration::from_secs(30);
        let prune_start = tokio::time::Instant::now() + prune_every;
        let prune_interval = tokio::time::interval_at(prune_start, prune_every);
        Ok(RPCRateLimiter {
            prune_interval,
            ping_rl,
            metadata_rl,
            status_rl,
            goodbye_rl,
            bbroots_rl,
            bbrange_rl,
            init_time: Instant::now(),
        })
    }
}

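Builder usage in a nutshell (a sketch against the crate-internal API introduced above, written as a would-be unit test): `build` demands a quota for every protocol and reports the first missing one.

#[test]
fn builder_requires_all_quotas() {
    use std::time::Duration;

    let limiter = RPCRateLimiterBuilder::new()
        .one_every(Protocol::Ping, Duration::from_secs(5)) // hard limit: one per 5s
        .n_every(Protocol::Status, 5, Duration::from_secs(15)) // bursts of up to 5 per 15s
        .build();

    // MetaData is the first missing quota checked by `build`.
    assert_eq!(limiter.err(), Some("MetaData quota not specified"));
}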
impl RPCRateLimiter {
    pub fn allows<T: EthSpec>(
        &mut self,
        peer_id: &PeerId,
        request: &RPCRequest<T>,
    ) -> Result<(), RateLimitedErr> {
        let time_since_start = self.init_time.elapsed();
        let tokens = request.expected_responses().max(1);
        let check =
            |limiter: &mut Limiter<PeerId>| limiter.allows(time_since_start, peer_id, tokens);
        let limiter = match request.protocol() {
            Protocol::Ping => &mut self.ping_rl,
            Protocol::Status => &mut self.status_rl,
            Protocol::MetaData => &mut self.metadata_rl,
            Protocol::Goodbye => &mut self.goodbye_rl,
            Protocol::BlocksByRange => &mut self.bbrange_rl,
            Protocol::BlocksByRoot => &mut self.bbroots_rl,
        };
        check(limiter)
    }

    pub fn prune(&mut self) {
        let time_since_start = self.init_time.elapsed();
        self.ping_rl.prune(time_since_start);
        self.status_rl.prune(time_since_start);
        self.metadata_rl.prune(time_since_start);
        self.goodbye_rl.prune(time_since_start);
        self.bbrange_rl.prune(time_since_start);
        self.bbroots_rl.prune(time_since_start);
    }
}

impl Future for RPCRateLimiter {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        while let Poll::Ready(Some(_)) = self.prune_interval.poll_next_unpin(cx) {
            self.prune();
        }

        Poll::Pending
    }
}

/// Per key rate limiter using the token bucket / leaky bucket as a meter rate limiting algorithm,
/// with the GCRA implementation.
pub struct Limiter<Key: Hash + Eq + Clone> {
    /// After how long is the bucket considered full via replenishing 1T every `t`.
    tau: Nanosecs,
    /// How often is 1T replenished.
    t: Nanosecs,
    /// Time when the bucket will be full for each peer. TAT (theoretical arrival time) from GCRA.
    tat_per_key: FnvHashMap<Key, Nanosecs>,
}

impl<Key: Hash + Eq + Clone> Limiter<Key> {
    pub fn from_quota(quota: Quota) -> Result<Self, &'static str> {
        if quota.max_tokens == 0 {
            return Err("Max number of tokens should be positive");
        }
        let tau = quota.replenish_all_every.as_nanos();
        if tau == 0 {
            return Err("Replenish time must be positive");
        }
        let t = (tau / quota.max_tokens as u128)
            .try_into()
            .map_err(|_| "total replenish time is too long")?;
        let tau = tau
            .try_into()
            .map_err(|_| "total replenish time is too long")?;
        Ok(Limiter {
            tau,
            t,
            tat_per_key: FnvHashMap::default(),
        })
    }

    pub fn allows(
        &mut self,
        time_since_start: Duration,
        key: &Key,
        tokens: u64,
    ) -> Result<(), RateLimitedErr> {
        let time_since_start = time_since_start.as_nanos() as u64;
        let tau = self.tau;
        let t = self.t;
        // how long does it take to replenish these tokens
        let additional_time = t * tokens;
        if additional_time > tau {
            // the time required to process this amount of tokens is longer than the time that
            // makes the bucket full. So, this batch can _never_ be processed
            return Err(RateLimitedErr::TooLarge);
        }
        // If the key is new, we consider their bucket full (which means, their request will be
        // allowed)
        let tat = self
            .tat_per_key
            .entry(key.clone())
            .or_insert(time_since_start);
        // check how soon could the request be made
        let earliest_time = (*tat + additional_time).saturating_sub(tau);
        // earliest_time is in the future
        if time_since_start < earliest_time {
            Err(RateLimitedErr::TooSoon(Duration::from_nanos(
                /* time they need to wait, i.e. how soon were they */
                earliest_time - time_since_start,
            )))
        } else {
            // calculate the new TAT
            *tat = time_since_start.max(*tat) + additional_time;
            Ok(())
        }
    }

    /// Removes keys whose bucket is full by `time_limit`.
    pub fn prune(&mut self, time_limit: Duration) {
        let lim = &mut (time_limit.as_nanos() as u64);
        // remove those for which tat < lim
        self.tat_per_key.retain(|_k, tat| tat >= lim)
    }
}

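To make the GCRA update in `allows` concrete, here is a standalone sketch of the same nanosecond arithmetic (the `TooLarge` check is omitted). Under the 4T/2s quota used in the tests below, `t = 0.5 s` and `tau = 2 s`: a fresh key can spend its full 4-token burst immediately, after which a 1-token request arriving 0.1 s later is told to wait 0.4 s.

// Standalone sketch of the GCRA decision in `allows` (times in nanoseconds).
fn gcra_allows(tat: &mut u64, now: u64, t: u64, tau: u64, tokens: u64) -> Result<(), u64> {
    // time needed to replenish the requested tokens
    let additional = t * tokens;
    // earliest instant at which this batch fits in the bucket
    let earliest = (*tat + additional).saturating_sub(tau);
    if now < earliest {
        Err(earliest - now) // the caller must wait this long
    } else {
        *tat = now.max(*tat) + additional;
        Ok(())
    }
}

fn main() {
    let (t, tau) = (500_000_000u64, 2_000_000_000u64); // 0.5s per token, 2s bucket
    let mut tat = 0; // a new key starts with a full bucket
    assert!(gcra_allows(&mut tat, 0, t, tau, 4).is_ok()); // 4-token burst at t = 0
    assert_eq!(gcra_allows(&mut tat, 100_000_000, t, tau, 1), Err(400_000_000));
}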
#[cfg(test)]
mod tests {
    use crate::rpc::rate_limiter::{Limiter, Quota};
    use std::time::Duration;

    #[test]
    fn it_works_a() {
        let mut limiter = Limiter::from_quota(Quota {
            replenish_all_every: Duration::from_secs(2),
            max_tokens: 4,
        })
        .unwrap();
        let key = 10;
        //          x
        //    used  x
        //  tokens  x           x
        //          x  x  x     x
        //          +--+--+--+--+----> seconds
        //          |  |  |  |  |
        //          0     1     2

        assert!(limiter
            .allows(Duration::from_secs_f32(0.0), &key, 4)
            .is_ok());
        limiter.prune(Duration::from_secs_f32(0.1));
        assert!(limiter
            .allows(Duration::from_secs_f32(0.1), &key, 1)
            .is_err());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.5), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(1.0), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(1.4), &key, 1)
            .is_err());
        assert!(limiter
            .allows(Duration::from_secs_f32(2.0), &key, 2)
            .is_ok());
    }

    #[test]
    fn it_works_b() {
        let mut limiter = Limiter::from_quota(Quota {
            replenish_all_every: Duration::from_secs(2),
            max_tokens: 4,
        })
        .unwrap();
        let key = 10;
        // if we limit to 4T per 2s, check that 4 requests worth 1 token can be sent before the
        // first half second, when one token will be available again. Check also that before
        // regaining a token, another request is rejected

        assert!(limiter
            .allows(Duration::from_secs_f32(0.0), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.1), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.2), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.3), &key, 1)
            .is_ok());
        assert!(limiter
            .allows(Duration::from_secs_f32(0.4), &key, 1)
            .is_err());
    }
}
@@ -1,9 +1,10 @@
use crate::behaviour::{Behaviour, BehaviourEvent};
use crate::behaviour::{Behaviour, BehaviourEvent, PeerRequestId, Request, Response};
use crate::discovery::enr;
use crate::multiaddr::Protocol;
use crate::rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId};
use crate::types::{error, GossipKind};
use crate::EnrExt;
use crate::{NetworkConfig, NetworkGlobals};
use crate::{NetworkConfig, NetworkGlobals, PeerAction};
use futures::prelude::*;
use libp2p::core::{
@@ -11,27 +12,22 @@ use libp2p::core::{
    muxing::StreamMuxerBox,
    transport::boxed::Boxed,
    upgrade::{InboundUpgradeExt, OutboundUpgradeExt},
    ConnectedPoint,
};
use libp2p::{
    core, noise, secio,
    swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent},
    swarm::{SwarmBuilder, SwarmEvent},
    PeerId, Swarm, Transport,
};
use slog::{crit, debug, error, info, o, trace, warn};
use slog::{crit, debug, info, o, trace, warn};
use std::fs::File;
use std::io::prelude::*;
use std::io::{Error, ErrorKind};
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use tokio::time::DelayQueue;
use types::{EnrForkId, EthSpec};

pub const NETWORK_KEY_FILENAME: &str = "key";
/// The time in milliseconds to wait before banning a peer. This allows for any Goodbye messages to be
/// flushed and protocols to be negotiated.
const BAN_PEER_WAIT_TIMEOUT: u64 = 200;
/// The maximum simultaneous libp2p connections per peer.
const MAX_CONNECTIONS_PER_PEER: usize = 1;

@@ -44,46 +40,23 @@ pub enum Libp2pEvent<TSpec: EthSpec> {
    Behaviour(BehaviourEvent<TSpec>),
    /// A new listening address has been established.
    NewListenAddr(Multiaddr),
    /// A peer has established at least one connection.
    PeerConnected {
        /// The peer that connected.
        peer_id: PeerId,
        /// Whether the peer was a dialer or listener.
        endpoint: ConnectedPoint,
    },
    /// A peer no longer has any connections, i.e is disconnected.
    PeerDisconnected {
        /// The peer that disconnected.
        peer_id: PeerId,
        /// Whether the peer was a dialer or a listener.
        endpoint: ConnectedPoint,
    },
}

/// The configuration and state of the libp2p components for the beacon node.
pub struct Service<TSpec: EthSpec> {
    /// The libp2p Swarm handler.
    //TODO: Make this private
    pub swarm: Swarm<Behaviour<TSpec>>,

    /// This node's PeerId.
    pub local_peer_id: PeerId,

    /// Used for managing the state of peers.
    network_globals: Arc<NetworkGlobals<TSpec>>,

    /// A current list of peers to ban after a given timeout.
    peers_to_ban: DelayQueue<PeerId>,

    /// A list of timeouts after which peers become unbanned.
    peer_ban_timeout: DelayQueue<PeerId>,

    /// The libp2p logger handle.
    pub log: slog::Logger,
}

impl<TSpec: EthSpec> Service<TSpec> {
    pub fn new(
        executor: environment::TaskExecutor,
        config: &NetworkConfig,
        enr_fork_id: EnrForkId,
        log: &slog::Logger,
@@ -92,11 +65,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
        trace!(log, "Libp2p Service starting");

        // initialise the node's ID
        let local_keypair = if let Some(hex_bytes) = &config.secret_key_hex {
            keypair_from_hex(hex_bytes)?
        } else {
            load_private_key(config, &log)
        };
        let local_keypair = load_private_key(config, &log);

        // Create an ENR or load from disk if appropriate
        let enr =
@@ -112,25 +81,33 @@ impl<TSpec: EthSpec> Service<TSpec> {
        ));

        info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", enr.peer_id()));
        debug!(log, "Attempting to open listening ports"; "address" => format!("{}", config.listen_address), "tcp_port" => config.libp2p_port, "udp_port" => config.discovery_port);
        let discovery_string = if config.disable_discovery {
            "None".into()
        } else {
            config.discovery_port.to_string()
        };
        debug!(log, "Attempting to open listening ports"; "address" => format!("{}", config.listen_address), "tcp_port" => config.libp2p_port, "udp_port" => discovery_string);

        let mut swarm = {
            // Set up the transport - tcp/ws with noise/secio and mplex/yamux
            // Set up the transport - tcp/ws with noise and yamux/mplex
            let transport = build_transport(local_keypair.clone())
                .map_err(|e| format!("Failed to build transport: {:?}", e))?;
            // Lighthouse network behaviour
            let behaviour = Behaviour::new(&local_keypair, config, network_globals.clone(), &log)?;

            // use the executor for libp2p
            struct Executor(tokio::runtime::Handle);
            struct Executor(environment::TaskExecutor);
            impl libp2p::core::Executor for Executor {
                fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
                    self.0.spawn(f);
                    self.0.spawn(f, "libp2p");
                }
            }
            SwarmBuilder::new(transport, behaviour, local_peer_id.clone())
                .notify_handler_buffer_size(std::num::NonZeroUsize::new(32).expect("Not zero"))
                .connection_event_buffer_size(64)
                .incoming_connection_limit(10)
                .peer_connection_limit(MAX_CONNECTIONS_PER_PEER)
                .executor(Box::new(Executor(tokio::runtime::Handle::current())))
                .executor(Box::new(Executor(executor)))
                .build()
        };

@@ -159,7 +136,9 @@ impl<TSpec: EthSpec> Service<TSpec> {
        };

        // helper closure for dialing peers
        let mut dial_addr = |multiaddr: &Multiaddr| {
        let mut dial_addr = |mut multiaddr: Multiaddr| {
            // strip the p2p protocol if it exists
            strip_peer_id(&mut multiaddr);
            match Swarm::dial_addr(&mut swarm, multiaddr.clone()) {
                Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => format!("{}", multiaddr)),
                Err(err) => debug!(
@@ -171,7 +150,7 @@ impl<TSpec: EthSpec> Service<TSpec> {

        // attempt to connect to user-input libp2p nodes
        for multiaddr in &config.libp2p_nodes {
            dial_addr(multiaddr);
            dial_addr(multiaddr.clone());
        }

        // attempt to connect to any specified boot-nodes
@@ -191,7 +170,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
                    .read()
                    .is_connected_or_dialing(&bootnode_enr.peer_id())
                {
                    dial_addr(multiaddr);
                    dial_addr(multiaddr.clone());
                }
            }
        }
@@ -209,170 +188,134 @@ impl<TSpec: EthSpec> Service<TSpec> {
        let service = Service {
            local_peer_id,
            swarm,
            network_globals: network_globals.clone(),
            peers_to_ban: DelayQueue::new(),
            peer_ban_timeout: DelayQueue::new(),
            log,
        };

        Ok((network_globals, service))
    }

    /// Adds a peer to be banned for a period of time, specified by a timeout.
    pub fn disconnect_and_ban_peer(&mut self, peer_id: PeerId, timeout: Duration) {
        error!(self.log, "Disconnecting and banning peer"; "peer_id" => format!("{:?}", peer_id), "timeout" => format!("{:?}", timeout));
        self.peers_to_ban.insert(
            peer_id.clone(),
            Duration::from_millis(BAN_PEER_WAIT_TIMEOUT),
        );
        self.peer_ban_timeout.insert(peer_id, timeout);
    /// Sends a request to a peer, with a given Id.
    pub fn send_request(&mut self, peer_id: PeerId, request_id: RequestId, request: Request) {
        self.swarm.send_request(peer_id, request_id, request);
    }

    /// Informs the peer that their request failed.
    pub fn respond_with_error(
        &mut self,
        peer_id: PeerId,
        id: PeerRequestId,
        error: RPCResponseErrorCode,
        reason: String,
    ) {
        self.swarm._send_error_reponse(peer_id, id, error, reason);
    }

    /// Report a peer's action.
    pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction) {
        self.swarm.report_peer(peer_id, action);
    }

    // Disconnect and ban a peer, providing a reason.
    pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason) {
        self.swarm.goodbye_peer(peer_id, reason);
    }

    /// Sends a response to a peer's request.
    pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response<TSpec>) {
        self.swarm.send_successful_response(peer_id, id, response);
    }

    pub async fn next_event(&mut self) -> Libp2pEvent<TSpec> {
        loop {
            tokio::select! {
                event = self.swarm.next_event() => {
                    match event {
                        SwarmEvent::Behaviour(behaviour) => {
                            return Libp2pEvent::Behaviour(behaviour)
                        }
                        SwarmEvent::ConnectionEstablished {
                            peer_id,
                            endpoint,
                            num_established,
                        } => {
                            debug!(self.log, "Connection established"; "peer_id" => peer_id.to_string(), "connections" => num_established.get());
                            // if this is the first connection inform the network layer a new connection
                            // has been established and update the db
                            if num_established.get() == 1 {
                                // update the peerdb
                                match endpoint {
                                    ConnectedPoint::Listener { .. } => {
                                        self.swarm.peer_manager().connect_ingoing(&peer_id);
                                    }
                                    ConnectedPoint::Dialer { .. } => self
                                        .network_globals
                                        .peers
                                        .write()
                                        .connect_outgoing(&peer_id),
                                }
                                return Libp2pEvent::PeerConnected { peer_id, endpoint };
                            }
                        }
                        SwarmEvent::ConnectionClosed {
                            peer_id,
                            cause,
                            endpoint,
                            num_established,
                        } => {
                            debug!(self.log, "Connection closed"; "peer_id" => peer_id.to_string(), "cause" => cause.to_string(), "connections" => num_established);
                            if num_established == 0 {
                                // update the peer_db
                                self.swarm.peer_manager().notify_disconnect(&peer_id);
                                // the peer has disconnected
                                return Libp2pEvent::PeerDisconnected {
                                    peer_id,
                                    endpoint,
                                };
                            }
                        }
                        SwarmEvent::NewListenAddr(multiaddr) => {
                            return Libp2pEvent::NewListenAddr(multiaddr)
                        }

                        SwarmEvent::IncomingConnection {
                            local_addr,
                            send_back_addr,
                        } => {
                            debug!(self.log, "Incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string())
                        }
                        SwarmEvent::IncomingConnectionError {
                            local_addr,
                            send_back_addr,
                            error,
                        } => {
                            debug!(self.log, "Failed incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string(), "error" => error.to_string())
                        }
                        SwarmEvent::BannedPeer {
                            peer_id,
                            endpoint: _,
                        } => {
                            debug!(self.log, "Attempted to dial a banned peer"; "peer_id" => peer_id.to_string())
                        }
                        SwarmEvent::UnreachableAddr {
                            peer_id,
                            address,
                            error,
                            attempts_remaining,
                        } => {
                            debug!(self.log, "Failed to dial address"; "peer_id" => peer_id.to_string(), "address" => address.to_string(), "error" => error.to_string(), "attempts_remaining" => attempts_remaining);
                            self.swarm.peer_manager().notify_disconnect(&peer_id);
                        }
                        SwarmEvent::UnknownPeerUnreachableAddr { address, error } => {
                            debug!(self.log, "Peer not known at dialed address"; "address" => address.to_string(), "error" => error.to_string());
                        }
                        SwarmEvent::ExpiredListenAddr(multiaddr) => {
                            debug!(self.log, "Listen address expired"; "multiaddr" => multiaddr.to_string())
                        }
                        SwarmEvent::ListenerClosed { addresses, reason } => {
                            debug!(self.log, "Listener closed"; "addresses" => format!("{:?}", addresses), "reason" => format!("{:?}", reason))
                        }
                        SwarmEvent::ListenerError { error } => {
                            debug!(self.log, "Listener error"; "error" => format!("{:?}", error.to_string()))
                        }
                        SwarmEvent::Dialing(peer_id) => {
                            debug!(self.log, "Dialing peer"; "peer" => peer_id.to_string());
                            self.swarm.peer_manager().dialing_peer(&peer_id);
                        }
        match self.swarm.next_event().await {
            SwarmEvent::Behaviour(behaviour) => return Libp2pEvent::Behaviour(behaviour),
            SwarmEvent::ConnectionEstablished { .. } => {
                // A connection could be established with a banned peer. This is
                // handled inside the behaviour.
            }
            SwarmEvent::ConnectionClosed {
                peer_id,
                cause,
                endpoint: _,
                num_established,
            } => {
                debug!(self.log, "Connection closed"; "peer_id" => peer_id.to_string(), "cause" => cause.to_string(), "connections" => num_established);
            }
            SwarmEvent::NewListenAddr(multiaddr) => {
                return Libp2pEvent::NewListenAddr(multiaddr)
            }
            SwarmEvent::IncomingConnection {
                local_addr,
                send_back_addr,
            } => {
                debug!(self.log, "Incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string())
            }
            SwarmEvent::IncomingConnectionError {
                local_addr,
                send_back_addr,
                error,
            } => {
                debug!(self.log, "Failed incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string(), "error" => error.to_string())
            }
            SwarmEvent::BannedPeer { .. } => {
                // We do not ban peers at the swarm layer, so this should never occur.
            }
            SwarmEvent::UnreachableAddr {
                peer_id,
                address,
                error,
                attempts_remaining,
            } => {
                debug!(self.log, "Failed to dial address"; "peer_id" => peer_id.to_string(), "address" => address.to_string(), "error" => error.to_string(), "attempts_remaining" => attempts_remaining);
            }
            SwarmEvent::UnknownPeerUnreachableAddr { address, error } => {
                debug!(self.log, "Peer not known at dialed address"; "address" => address.to_string(), "error" => error.to_string());
            }
            SwarmEvent::ExpiredListenAddr(multiaddr) => {
                debug!(self.log, "Listen address expired"; "multiaddr" => multiaddr.to_string())
            }
            SwarmEvent::ListenerClosed { addresses, reason } => {
                debug!(self.log, "Listener closed"; "addresses" => format!("{:?}", addresses), "reason" => format!("{:?}", reason))
            }
            SwarmEvent::ListenerError { error } => {
                debug!(self.log, "Listener error"; "error" => format!("{:?}", error.to_string()))
            }
            SwarmEvent::Dialing(peer_id) => {
                debug!(self.log, "Dialing peer"; "peer_id" => peer_id.to_string());
            }
        }
                Some(Ok(peer_to_ban)) = self.peers_to_ban.next() => {
                    let peer_id = peer_to_ban.into_inner();
                    Swarm::ban_peer_id(&mut self.swarm, peer_id.clone());
                    // TODO: Correctly notify protocols of the disconnect
                    // TODO: Also remove peer from the DHT: https://github.com/sigp/lighthouse/issues/629
                    self.swarm.inject_disconnected(&peer_id);
                    // inform the behaviour that the peer has been banned
                    self.swarm.peer_banned(peer_id);
                }
                Some(Ok(peer_to_unban)) = self.peer_ban_timeout.next() => {
                    debug!(self.log, "Peer has been unbanned"; "peer" => format!("{:?}", peer_to_unban));
                    let unban_peer = peer_to_unban.into_inner();
                    self.swarm.peer_unbanned(&unban_peer);
                    Swarm::unban_peer_id(&mut self.swarm, unban_peer);
                }
            }
        }
    }
}

/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise/secio as the encryption layer, and
/// mplex or yamux as the multiplexing layer.
/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and
/// yamux or mplex as the multiplexing layer.

fn build_transport(
    local_private_key: Keypair,
) -> Result<Boxed<(PeerId, StreamMuxerBox), Error>, Error> {
    let transport = libp2p_tcp::TokioTcpConfig::new().nodelay(true);
    let transport = libp2p::tcp::TokioTcpConfig::new().nodelay(true);
    let transport = libp2p::dns::DnsConfig::new(transport)?;
    #[cfg(feature = "libp2p-websocket")]
    let transport = {
        let trans_clone = transport.clone();
        transport.or_transport(websocket::WsConfig::new(trans_clone))
        transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
    };
    // Authentication
    let transport = transport
        .and_then(move |stream, endpoint| {
            let upgrade = core::upgrade::SelectUpgrade::new(
                secio::SecioConfig::new(local_private_key.clone()),
                generate_noise_config(&local_private_key),
                secio::SecioConfig::new(local_private_key),
            );
            core::upgrade::apply(stream, upgrade, endpoint, core::upgrade::Version::V1).and_then(
                |out| async move {
                    match out {
                        // Noise was negotiated
                        // Secio was negotiated
                        core::either::EitherOutput::First((remote_id, out)) => {
                            Ok((core::either::EitherOutput::First(out), remote_id))
                        }
                        // Secio was negotiated
                        // Noise was negotiated
                        core::either::EitherOutput::Second((remote_id, out)) => {
                            Ok((core::either::EitherOutput::Second(out), remote_id))
                        }
@@ -387,8 +330,8 @@ fn build_transport(
        .and_then(move |(stream, peer_id), endpoint| {
            let peer_id2 = peer_id.clone();
            let upgrade = core::upgrade::SelectUpgrade::new(
                libp2p::yamux::Config::default(),
                libp2p::mplex::MplexConfig::new(),
                libp2p::yamux::Config::default(),
            )
            .map_inbound(move |muxer| (peer_id, muxer))
            .map_outbound(move |muxer| (peer_id2, muxer));
@@ -402,6 +345,8 @@ fn build_transport(
    Ok(transport)
}

// Useful helper functions for debugging. Currently not used in the client.
#[allow(dead_code)]
fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
    let hex_bytes = if hex_bytes.starts_with("0x") {
        hex_bytes[2..].to_string()
@@ -414,6 +359,7 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
        .and_then(keypair_from_bytes)
}

#[allow(dead_code)]
fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes)
        .map(|secret| {
@@ -428,7 +374,6 @@ fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
///
/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5.
fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
    // TODO: Currently using secp256k1 keypairs - currently required for discv5
    // check for key from disk
    let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME);
    if let Ok(mut network_key_file) = File::open(network_key_f.clone()) {
@@ -474,9 +419,20 @@ fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
/// Generate authenticated XX Noise config from identity keys
fn generate_noise_config(
    identity_keypair: &Keypair,
) -> noise::NoiseAuthenticated<noise::XX, noise::X25519, ()> {
    let static_dh_keys = noise::Keypair::<noise::X25519>::new()
) -> noise::NoiseAuthenticated<noise::XX, noise::X25519Spec, ()> {
    let static_dh_keys = noise::Keypair::<noise::X25519Spec>::new()
        .into_authentic(identity_keypair)
        .expect("signing can fail only once during starting a node");
    noise::NoiseConfig::xx(static_dh_keys).into_authenticated()
}

/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p
/// only supports dialing to an address without providing the peer id.
fn strip_peer_id(addr: &mut Multiaddr) {
    let last = addr.pop();
    match last {
        Some(Protocol::P2p(_)) => {}
        Some(other) => addr.push(other),
        _ => {}
    }
}