mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-04 01:01:41 +00:00
Compare commits
557 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7aceff4d13 | ||
|
|
4fca306397 | ||
|
|
d85d5a435e | ||
|
|
bd39cc8e26 | ||
|
|
14ff38539c | ||
|
|
5d17eb899f | ||
|
|
1db8daae0c | ||
|
|
7b97c4ad30 | ||
|
|
371e1c1d5d | ||
|
|
a17f74896a | ||
|
|
49ab414594 | ||
|
|
2074beccdc | ||
|
|
e5fc6bab48 | ||
|
|
c9596fcf0e | ||
|
|
c6abc56113 | ||
|
|
7f1b936905 | ||
|
|
810de2f8b7 | ||
|
|
0525876882 | ||
|
|
d79366c503 | ||
|
|
b19cf02d2d | ||
|
|
dfe507715d | ||
|
|
0821e6b39f | ||
|
|
9cf8f45192 | ||
|
|
00cdc4bb35 | ||
|
|
19be7abfd2 | ||
|
|
9833eca024 | ||
|
|
2a9a815f29 | ||
|
|
a6376b4585 | ||
|
|
74fa87aa98 | ||
|
|
211109bbc0 | ||
|
|
638daa87fe | ||
|
|
2627463366 | ||
|
|
9c9176c1d1 | ||
|
|
87181204d0 | ||
|
|
fb9d828e5e | ||
|
|
8301a984eb | ||
|
|
7d71d98dc1 | ||
|
|
c34e8efb12 | ||
|
|
adea7992f8 | ||
|
|
c18d37c202 | ||
|
|
b6340ec495 | ||
|
|
967700c1ff | ||
|
|
d9f4819fe0 | ||
|
|
30bb7aecfb | ||
|
|
4763f03dcc | ||
|
|
175471a64b | ||
|
|
dfd02d6179 | ||
|
|
3569506acd | ||
|
|
c895dc8971 | ||
|
|
2bc9115a94 | ||
|
|
3cfd70d7fd | ||
|
|
3f0a113c7f | ||
|
|
ebb25b5569 | ||
|
|
bbed42f30c | ||
|
|
fdc6e2aa8e | ||
|
|
8e7dd7b2b1 | ||
|
|
33b2a3d0e0 | ||
|
|
93b7c3b7ff | ||
|
|
2d0b214b57 | ||
|
|
d4f763bbae | ||
|
|
e1e5002d3c | ||
|
|
46dd530476 | ||
|
|
8311074d68 | ||
|
|
3bb30754d9 | ||
|
|
cc44a64d15 | ||
|
|
46dbf027af | ||
|
|
9a97a0b14f | ||
|
|
719a69aee0 | ||
|
|
a58aa6ee55 | ||
|
|
73cbfbdfd0 | ||
|
|
f85485884f | ||
|
|
61d5b592cb | ||
|
|
3c689a6837 | ||
|
|
afdc4fea1d | ||
|
|
850a2d5985 | ||
|
|
113b40f321 | ||
|
|
99acfb50f2 | ||
|
|
c75c06cf16 | ||
|
|
6aeb896480 | ||
|
|
f4a7311008 | ||
|
|
619ad106cf | ||
|
|
b0a3731fff | ||
|
|
e3d45eda1e | ||
|
|
05a8399769 | ||
|
|
e6f45524f9 | ||
|
|
8a1a4051cf | ||
|
|
61367efa64 | ||
|
|
70089f5231 | ||
|
|
b063df5bf9 | ||
|
|
b83fcd5e5c | ||
|
|
1a67d15701 | ||
|
|
ec84183e05 | ||
|
|
95b55d7170 | ||
|
|
134676fd6f | ||
|
|
cbfae87aa6 | ||
|
|
04e4389efe | ||
|
|
08a31c5a1a | ||
|
|
a1f9769040 | ||
|
|
1d5d3e3ea7 | ||
|
|
b354a83faa | ||
|
|
0b287f6ece | ||
|
|
ee036cba7e | ||
|
|
f4fe2ac533 | ||
|
|
7d87e11e0f | ||
|
|
cfae5fbbc4 | ||
|
|
983f768034 | ||
|
|
138c0cf7f0 | ||
|
|
82a0973935 | ||
|
|
09a615b2c0 | ||
|
|
924ba66218 | ||
|
|
6206d8e79b | ||
|
|
5629126f45 | ||
|
|
20ee893969 | ||
|
|
0feb3cf19a | ||
|
|
f26adc0a36 | ||
|
|
d4dd25883f | ||
|
|
c5722093d3 | ||
|
|
1bbecbcf26 | ||
|
|
31707ccf45 | ||
|
|
1419501f2e | ||
|
|
6c17b4696f | ||
|
|
37679b8898 | ||
|
|
f634f073a8 | ||
|
|
142e033c34 | ||
|
|
3b5da8f35f | ||
|
|
3ea01ac26b | ||
|
|
d0f1a3e59f | ||
|
|
4d77784bb8 | ||
|
|
7d8acc20a0 | ||
|
|
2ede9caaa6 | ||
|
|
a37e75f44b | ||
|
|
febb300a2d | ||
|
|
36d3d37cb4 | ||
|
|
395d99ce03 | ||
|
|
f53dedb27d | ||
|
|
eaa9f9744f | ||
|
|
ba0f3daf9d | ||
|
|
09b40b7a5e | ||
|
|
9ae9df806c | ||
|
|
edf250cea9 | ||
|
|
5680355b31 | ||
|
|
a413b43fed | ||
|
|
5f013548c0 | ||
|
|
0b5be9b2c0 | ||
|
|
e5d9d6179f | ||
|
|
b73c497be2 | ||
|
|
21bcc8848d | ||
|
|
23a8f31f83 | ||
|
|
ba10c80633 | ||
|
|
3c4daec9af | ||
|
|
3a888d6ef3 | ||
|
|
41f7547645 | ||
|
|
ea0e936ac4 | ||
|
|
e26da35cbf | ||
|
|
393782f632 | ||
|
|
f61a7113ac | ||
|
|
2870172e0d | ||
|
|
0620f54f2f | ||
|
|
1c90c816b7 | ||
|
|
e940dcea47 | ||
|
|
b885d79ac3 | ||
|
|
fc5e6cbbb0 | ||
|
|
4a01f44206 | ||
|
|
4b213032b2 | ||
|
|
c80860c17e | ||
|
|
e164371083 | ||
|
|
00c89c51c8 | ||
|
|
3c7f2d651a | ||
|
|
e6a8635b38 | ||
|
|
9ae218bfac | ||
|
|
57e0b6a615 | ||
|
|
e8d5d37bc1 | ||
|
|
275148a152 | ||
|
|
559b7c8faa | ||
|
|
025b262e01 | ||
|
|
ac2ce2ba6b | ||
|
|
f500b24242 | ||
|
|
51fbaefe41 | ||
|
|
20a48df80a | ||
|
|
5bc8fea2e0 | ||
|
|
5977c00edb | ||
|
|
f631155304 | ||
|
|
bae4835308 | ||
|
|
e429c3eefe | ||
|
|
2856f5122d | ||
|
|
25cd91ce26 | ||
|
|
c7f47af9fb | ||
|
|
9dab928572 | ||
|
|
536728b975 | ||
|
|
ac89bb190a | ||
|
|
314c077870 | ||
|
|
a7a79ce4b7 | ||
|
|
916a133043 | ||
|
|
d4dd9fae07 | ||
|
|
2ccb9f48da | ||
|
|
821f91ec75 | ||
|
|
fc0b8adcd7 | ||
|
|
721323f045 | ||
|
|
163fda2c26 | ||
|
|
1e671a61d6 | ||
|
|
d90bd648d8 | ||
|
|
9f6ee212ff | ||
|
|
9fc290a344 | ||
|
|
95320f8ab0 | ||
|
|
66f183be02 | ||
|
|
6e7d5c6a7c | ||
|
|
3953204727 | ||
|
|
38b9bf98ac | ||
|
|
7688b5f1dd | ||
|
|
e0e41fc8e5 | ||
|
|
e3d9832fee | ||
|
|
69e15af0b2 | ||
|
|
02174e21d8 | ||
|
|
825aca0ef3 | ||
|
|
4ddfc032e2 | ||
|
|
b3c01bf09d | ||
|
|
81a89fb773 | ||
|
|
259502829e | ||
|
|
da6ab85e99 | ||
|
|
ea76faeeee | ||
|
|
920bfdaade | ||
|
|
6d507ef863 | ||
|
|
07a091ad95 | ||
|
|
decea48c78 | ||
|
|
710409c2ba | ||
|
|
f3d05c15d1 | ||
|
|
f3380c00b8 | ||
|
|
e379ad0f4e | ||
|
|
305724770d | ||
|
|
9450a0f30d | ||
|
|
bcb6afa0aa | ||
|
|
3199b1a6f2 | ||
|
|
06a72614cb | ||
|
|
065251b701 | ||
|
|
81c9fe3817 | ||
|
|
e6f97bf466 | ||
|
|
764cb2d32a | ||
|
|
9db0c28051 | ||
|
|
6b8c96662f | ||
|
|
7818447fd2 | ||
|
|
d15ec9b544 | ||
|
|
1a4de898bc | ||
|
|
7bbeca4fa1 | ||
|
|
6622bf9f03 | ||
|
|
320e72e2de | ||
|
|
bb8b88edcf | ||
|
|
2dfe77a8f9 | ||
|
|
39bf05e3e5 | ||
|
|
d2983c13df | ||
|
|
7f036a6e95 | ||
|
|
7ce9a252a4 | ||
|
|
ed4b3ef471 | ||
|
|
7baac70056 | ||
|
|
208f1da81b | ||
|
|
d9d00cc05d | ||
|
|
e20a2deebd | ||
|
|
036096ef61 | ||
|
|
0e37a16927 | ||
|
|
52d60cce1d | ||
|
|
042e80570c | ||
|
|
fe03ff0f21 | ||
|
|
197adeff0b | ||
|
|
ce10db15da | ||
|
|
a214032e1f | ||
|
|
723c7cbd27 | ||
|
|
cb26ddebb1 | ||
|
|
91cb14ac41 | ||
|
|
08e6b4961d | ||
|
|
d609a3f639 | ||
|
|
91a28e7438 | ||
|
|
812809913d | ||
|
|
5879f84d17 | ||
|
|
7d897a0519 | ||
|
|
6383c95f8b | ||
|
|
ea4a52984c | ||
|
|
58a9f979e0 | ||
|
|
61496d8dad | ||
|
|
5122b2c13a | ||
|
|
8bc82c573d | ||
|
|
d41a9f7aa6 | ||
|
|
f72094ca8d | ||
|
|
ad4e5adabc | ||
|
|
9718c5db07 | ||
|
|
103300c880 | ||
|
|
3c52b5c58d | ||
|
|
e889c2eb22 | ||
|
|
f8cac1b822 | ||
|
|
919c81fe7d | ||
|
|
a88afb7409 | ||
|
|
ea56dcb179 | ||
|
|
d79e07902e | ||
|
|
681e013d31 | ||
|
|
0b49a8507e | ||
|
|
ddd63c0de1 | ||
|
|
309cd95b2c | ||
|
|
c93f9c351b | ||
|
|
314fae41fe | ||
|
|
ac2ff01d1e | ||
|
|
dd51a72f1f | ||
|
|
4331834003 | ||
|
|
c571afb8d8 | ||
|
|
a4b07a833c | ||
|
|
2d8e2dd7f5 | ||
|
|
90b3953dda | ||
|
|
4995427884 | ||
|
|
ed2f0b797c | ||
|
|
b6408805a2 | ||
|
|
21901b1615 | ||
|
|
59ead67f76 | ||
|
|
1cb274008d | ||
|
|
01f42a4d17 | ||
|
|
637ba8120b | ||
|
|
62ff6d9191 | ||
|
|
0ea29d217f | ||
|
|
6fc2532cfd | ||
|
|
f9550ff5f2 | ||
|
|
d1864a8f01 | ||
|
|
cab6c58923 | ||
|
|
5530bb195f | ||
|
|
f30271ee9e | ||
|
|
294d007f64 | ||
|
|
2692c779a7 | ||
|
|
338cb2fba7 | ||
|
|
ad5bd6412a | ||
|
|
1552f9997e | ||
|
|
4afcf721b9 | ||
|
|
611a0c7d19 | ||
|
|
fcccf63d29 | ||
|
|
353e496bcb | ||
|
|
6c713d1e5f | ||
|
|
ebbc4e3630 | ||
|
|
90453181f8 | ||
|
|
36f213c092 | ||
|
|
b4a1a2e483 | ||
|
|
b6c027b9ec | ||
|
|
e0723dfc3b | ||
|
|
f4ac0422e2 | ||
|
|
8bf0ef8d30 | ||
|
|
dea01be00e | ||
|
|
78a08ec1e6 | ||
|
|
18ca94dc29 | ||
|
|
7f2121205a | ||
|
|
37dc3d463d | ||
|
|
41208d79b1 | ||
|
|
c102d9d1f9 | ||
|
|
1abb54dabd | ||
|
|
15a3af8966 | ||
|
|
d710da4a4a | ||
|
|
500f6b53d1 | ||
|
|
fa8154e3da | ||
|
|
a50ade3ffc | ||
|
|
30e8e8a337 | ||
|
|
79cc9473c1 | ||
|
|
6784a8b42a | ||
|
|
91648cc230 | ||
|
|
0b82e9f8a9 | ||
|
|
aacec7a4a7 | ||
|
|
2469bde6b1 | ||
|
|
2b6b2354e4 | ||
|
|
018a666731 | ||
|
|
ca538e887e | ||
|
|
dfecca72ef | ||
|
|
9e416a9bcd | ||
|
|
20b6baf11f | ||
|
|
b88b3ffe40 | ||
|
|
9e42a851e4 | ||
|
|
fa7147f7c5 | ||
|
|
11209ae966 | ||
|
|
9c3f76a33b | ||
|
|
7acb136974 | ||
|
|
2d3acadfb5 | ||
|
|
49c77fe74b | ||
|
|
b374ead24b | ||
|
|
32074f0d09 | ||
|
|
50ef0d7fbf | ||
|
|
2871253905 | ||
|
|
489ad90536 | ||
|
|
7b86c9a08f | ||
|
|
f9e8dad1fb | ||
|
|
62d7a71fd9 | ||
|
|
1a3d1b3077 | ||
|
|
0b2b379f14 | ||
|
|
6edb4f655c | ||
|
|
a8ee3389c2 | ||
|
|
6a21c9ba6f | ||
|
|
040628bf3e | ||
|
|
d3dfd72f4d | ||
|
|
be7aaa3dbe | ||
|
|
e5874f4565 | ||
|
|
db7847c34a | ||
|
|
bf361e5ca3 | ||
|
|
065ea15c9f | ||
|
|
fa9daa488d | ||
|
|
7bf1ea2356 | ||
|
|
19b8c5a9e0 | ||
|
|
1779aa6a8a | ||
|
|
b23f19272d | ||
|
|
869b0621d6 | ||
|
|
4cba745df6 | ||
|
|
c188227cc2 | ||
|
|
54782d896c | ||
|
|
6f3503b0e9 | ||
|
|
2fb6b7c793 | ||
|
|
d7e2938296 | ||
|
|
cc63c2b769 | ||
|
|
93bcee147d | ||
|
|
26bdc2927b | ||
|
|
88cecd6fb8 | ||
|
|
5eb4c7d682 | ||
|
|
d766b7fa48 | ||
|
|
7305e9e5d9 | ||
|
|
aa6f838c3c | ||
|
|
cf2cb26caa | ||
|
|
0d45250f80 | ||
|
|
f26bafe436 | ||
|
|
bb065e3d00 | ||
|
|
3606f0447d | ||
|
|
e04fc8ddb4 | ||
|
|
88d37e96fa | ||
|
|
0759806c89 | ||
|
|
5b984ad394 | ||
|
|
11a238900a | ||
|
|
5c397c49d8 | ||
|
|
5a6e90428b | ||
|
|
6ca4f4709b | ||
|
|
fbcf0f8e2e | ||
|
|
784997b09b | ||
|
|
58111cddb2 | ||
|
|
af1c5c326c | ||
|
|
2b6da4b8de | ||
|
|
f6a6de2c5d | ||
|
|
f8bc045a01 | ||
|
|
47aef629d1 | ||
|
|
a5fbaef469 | ||
|
|
24a384d0d6 | ||
|
|
e4ca896694 | ||
|
|
8c716b2e92 | ||
|
|
70e39cc6a1 | ||
|
|
f160f7a21b | ||
|
|
3c8fe00510 | ||
|
|
41c3294c16 | ||
|
|
95c8e476bc | ||
|
|
c198bddf9e | ||
|
|
756b110854 | ||
|
|
b1d23ec294 | ||
|
|
5d8a085880 | ||
|
|
6b2e9ff246 | ||
|
|
8c5bcfe53a | ||
|
|
a87e8c55fc | ||
|
|
6656cb00e4 | ||
|
|
c141f1cc03 | ||
|
|
08ca9504aa | ||
|
|
64ec3bd671 | ||
|
|
7f6ae4c2f5 | ||
|
|
12999fb06c | ||
|
|
1f16d8fe4d | ||
|
|
12d9b42188 | ||
|
|
ca0314ee55 | ||
|
|
871163aecc | ||
|
|
58fb144276 | ||
|
|
6368be148d | ||
|
|
fbb630793e | ||
|
|
f6f924a7a6 | ||
|
|
e85e337540 | ||
|
|
4d60694443 | ||
|
|
0c96c515a0 | ||
|
|
1cf8769c07 | ||
|
|
123c63119d | ||
|
|
2a9c718a20 | ||
|
|
26be30bc4c | ||
|
|
74c34d1602 | ||
|
|
7a880dd23c | ||
|
|
371e5adcf8 | ||
|
|
03e77390a3 | ||
|
|
e0b9fa599f | ||
|
|
c3182e3c1c | ||
|
|
f267bf2afe | ||
|
|
eef56e77ef | ||
|
|
8923f84af9 | ||
|
|
b771bbb60c | ||
|
|
cd401147ea | ||
|
|
70a80fb458 | ||
|
|
d98c00389a | ||
|
|
4a963423ca | ||
|
|
81b028b805 | ||
|
|
23a35c3767 | ||
|
|
89f05e4a4f | ||
|
|
8c96739cab | ||
|
|
fdb6e28f94 | ||
|
|
f8cff3bd2e | ||
|
|
7396cd2cab | ||
|
|
1abb964652 | ||
|
|
4632e9ce52 | ||
|
|
82b55ea418 | ||
|
|
3ba221e388 | ||
|
|
cb13129cd6 | ||
|
|
661ef65de8 | ||
|
|
a8da36b913 | ||
|
|
0e3e2bbbe7 | ||
|
|
13e74e5352 | ||
|
|
c184a98170 | ||
|
|
6285dc3aa7 | ||
|
|
03443c3e57 | ||
|
|
271fdd4fb9 | ||
|
|
55a6dc9ae3 | ||
|
|
3afa7b0dab | ||
|
|
370c658c7c | ||
|
|
5a8f2dd961 | ||
|
|
b3712d8e9b | ||
|
|
698af6d7ec | ||
|
|
efd73230cf | ||
|
|
1aaeb6b505 | ||
|
|
95fc840e2c | ||
|
|
d9e9c17d3b | ||
|
|
da95a73605 | ||
|
|
30f51df4cf | ||
|
|
f36a5a15d6 | ||
|
|
26dde26c48 | ||
|
|
8e1e6838d2 | ||
|
|
95cc5dd22f | ||
|
|
f04c55075e | ||
|
|
b0c8b2b700 | ||
|
|
2dddbed7ce | ||
|
|
212a5a8ffa | ||
|
|
4d0ef41a2c | ||
|
|
5427664cf4 | ||
|
|
55680ab1d3 | ||
|
|
7320f8497f | ||
|
|
647034b637 | ||
|
|
39df89521f | ||
|
|
50ea669e1e | ||
|
|
01ac60d638 | ||
|
|
3acb3cc640 | ||
|
|
41d6d5fafd | ||
|
|
45271abc16 | ||
|
|
74b327b50d | ||
|
|
251aea645c | ||
|
|
d756bc9ecd | ||
|
|
e9149f4580 | ||
|
|
03ed66e933 | ||
|
|
49eef178cf | ||
|
|
e7de1b3339 | ||
|
|
34f003adb8 | ||
|
|
61be1491a1 | ||
|
|
5e4cf2f3da | ||
|
|
0694d1d0ec | ||
|
|
b1d4284524 | ||
|
|
5e7803f00b | ||
|
|
b9d00ee8b8 | ||
|
|
ba1cbbba55 | ||
|
|
7f484db524 | ||
|
|
ed9643b846 | ||
|
|
4eba26572b | ||
|
|
8a62f3f456 | ||
|
|
8e7b737912 | ||
|
|
182b66f396 | ||
|
|
e4c56e6c5c |
@@ -1,2 +1,4 @@
|
||||
tests/ef_tests/eth2.0-spec-tests
|
||||
target/
|
||||
*.data
|
||||
*.tar.gz
|
||||
|
||||
5
.github/ISSUE_TEMPLATE.md
vendored
5
.github/ISSUE_TEMPLATE.md
vendored
@@ -2,6 +2,11 @@
|
||||
|
||||
Please provide a brief description of the issue.
|
||||
|
||||
## Version
|
||||
|
||||
Please provide your Lighthouse and Rust version. Are you building from
|
||||
`master`, which commit?
|
||||
|
||||
## Present Behaviour
|
||||
|
||||
Describe the present behaviour of the application, with regards to this
|
||||
|
||||
30
.github/workflows/book.yml
vendored
Normal file
30
.github/workflows/book.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: mdbook
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
build-and-upload-to-s3:
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
|
||||
- name: Setup mdBook
|
||||
uses: peaceiris/actions-mdbook@v1
|
||||
with:
|
||||
mdbook-version: '0.3.5'
|
||||
|
||||
- run: mdbook build
|
||||
working-directory: book
|
||||
|
||||
- uses: jakejarvis/s3-sync-action@be0c4ab89158cac4278689ebedd8407dd5f35a83
|
||||
with:
|
||||
args: --follow-symlinks --delete
|
||||
env:
|
||||
AWS_S3_BUCKET: ${{ secrets.AWS_S3_BOOK_BUCKET }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
AWS_REGION: 'ap-southeast-2'
|
||||
SOURCE_DIR: 'book/book'
|
||||
115
.github/workflows/test-suite.yml
vendored
115
.github/workflows/test-suite.yml
vendored
@@ -1,9 +1,19 @@
|
||||
name: test-suite
|
||||
|
||||
on: [push]
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- staging
|
||||
- trying
|
||||
- 'pr/*'
|
||||
pull_request:
|
||||
env:
|
||||
# Deny warnings in CI
|
||||
RUSTFLAGS: "-D warnings"
|
||||
jobs:
|
||||
cargo-fmt:
|
||||
name: cargo-fmt
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
@@ -12,6 +22,7 @@ jobs:
|
||||
- name: Check formatting with cargo fmt
|
||||
run: make cargo-fmt
|
||||
release-tests-ubuntu:
|
||||
name: release-tests-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
@@ -22,7 +33,22 @@ jobs:
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Run tests in release
|
||||
run: make test-release
|
||||
release-tests-and-install-macos:
|
||||
name: release-tests-and-install-macos
|
||||
runs-on: macos-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Install ganache-cli
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Run tests in release
|
||||
run: make test-release
|
||||
- name: Install Lighthouse
|
||||
run: make
|
||||
debug-tests-ubuntu:
|
||||
name: debug-tests-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
@@ -33,19 +59,100 @@ jobs:
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Run tests in debug
|
||||
run: make test-debug
|
||||
ef-tests-ubuntu:
|
||||
state-transition-vectors-ubuntu:
|
||||
name: state-transition-vectors-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Run eth2.0-spec-tests with and without fake_crypto
|
||||
- name: Run state_transition_vectors in release.
|
||||
run: make run-state-transition-tests
|
||||
ef-tests-ubuntu:
|
||||
name: ef-tests-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Run eth2.0-spec-tests with blst, milagro and fake_crypto
|
||||
run: make test-ef
|
||||
dockerfile-ubuntu:
|
||||
name: dockerfile-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Build the root Dockerfile
|
||||
run: docker build .
|
||||
eth1-simulator-ubuntu:
|
||||
name: eth1-simulator-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Install ganache-cli
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Run the beacon chain sim that starts from an eth1 contract
|
||||
run: cargo run --release --bin simulator eth1-sim
|
||||
no-eth1-simulator-ubuntu:
|
||||
name: no-eth1-simulator-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Install ganache-cli
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Run the beacon chain sim without an eth1 connection
|
||||
run: cargo run --release --bin simulator no-eth1-sim
|
||||
check-benchmarks:
|
||||
name: check-benchmarks
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Typecheck benchmark code without running it
|
||||
run: make check-benches
|
||||
clippy:
|
||||
name: clippy
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Lint code for quality and style with Clippy
|
||||
run: make lint
|
||||
- name: Certify Cargo.lock freshness
|
||||
run: git diff --exit-code Cargo.lock
|
||||
arbitrary-check:
|
||||
name: arbitrary-check
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Validate state_processing feature arbitrary-fuzz
|
||||
run: make arbitrary-fuzz
|
||||
cargo-audit:
|
||||
name: cargo-audit
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database
|
||||
run: make audit
|
||||
cargo-udeps:
|
||||
name: cargo-udeps
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Install a nightly compiler with rustfmt, as a kind of quality control
|
||||
run: rustup toolchain install --component=rustfmt nightly
|
||||
- name: Install cargo-udeps
|
||||
run: cargo install cargo-udeps --locked
|
||||
- name: Run cargo udeps to identify unused crates in the dependency graph
|
||||
run: make udeps
|
||||
env:
|
||||
# Allow warnings on Nightly
|
||||
RUSTFLAGS: ""
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -6,3 +6,4 @@ target/
|
||||
flamegraph.svg
|
||||
perf.data*
|
||||
*.tar.gz
|
||||
bin/
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Contributors Guide
|
||||
|
||||
Lighthouse is an open-source Ethereum 2.0 client. We we're community driven and
|
||||
Lighthouse is an open-source Ethereum 2.0 client. We're community driven and
|
||||
welcome all contribution. We aim to provide a constructive, respectful and fun
|
||||
environment for collaboration.
|
||||
|
||||
@@ -31,7 +31,7 @@ same. We operate like a typical open-source project operating on GitHub: the
|
||||
repository [Issues](https://github.com/sigp/lighthouse/issues) is where we
|
||||
track what needs to be done and [Pull
|
||||
Requests](https://github.com/sigp/lighthouse/pulls) is where code gets
|
||||
reviewed. We use [gitter](https://gitter.im/sigp/lighthouse) to chat
|
||||
reviewed. We use [discord](https://discord.gg/cyAszAh) to chat
|
||||
informally.
|
||||
|
||||
### General Work-Flow
|
||||
@@ -48,7 +48,7 @@ questions.
|
||||
your changes in to the main repository.
|
||||
4. Wait for the repository maintainers to **review your changes** to ensure the
|
||||
issue is addressed satisfactorily. Optionally, mention your PR on
|
||||
[gitter](https://gitter.im/sigp/lighthouse).
|
||||
[discord](https://discord.gg/cyAszAh).
|
||||
5. If the issue is addressed the repository maintainers will **merge your
|
||||
pull-request** and you'll be an official contributor!
|
||||
|
||||
@@ -76,18 +76,19 @@ https://github.com/sigp/lighthouse.git` (pro-tip: [use SSH](https://help.github.
|
||||
name of your branch isn't critical but it should be short and instructive.
|
||||
E.g., if you're fixing a bug with serialization, you could name your branch
|
||||
`fix_serialization_bug`.
|
||||
4. Commit your changes and push them to your fork with `$ git push origin
|
||||
4. Make sure you sign your commits. See [relevant doc](https://help.github.com/en/github/authenticating-to-github/about-commit-signature-verification).
|
||||
5. Commit your changes and push them to your fork with `$ git push origin
|
||||
your_feature_name`.
|
||||
5. Go to your fork on github.com and use the web interface to create a pull
|
||||
6. Go to your fork on github.com and use the web interface to create a pull
|
||||
request into the sigp/lighthouse repo.
|
||||
|
||||
From there, the repository maintainers will review the PR and either accept it
|
||||
or provide some constructive feedback.
|
||||
|
||||
There's great
|
||||
There's a great
|
||||
[guide](https://akrabat.com/the-beginners-guide-to-contributing-to-a-github-project/)
|
||||
by Rob Allen that provides much more detail on each of these steps, if you're
|
||||
having trouble. As always, jump on [gitter](https://gitter.im/sigp/lighthouse)
|
||||
having trouble. As always, jump on [discord](https://discord.gg/cyAszAh)
|
||||
if you get stuck.
|
||||
|
||||
|
||||
|
||||
7349
Cargo.lock
generated
7349
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
123
Cargo.toml
123
Cargo.toml
@@ -1,58 +1,81 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"eth2/lmd_ghost",
|
||||
"eth2/operation_pool",
|
||||
"eth2/state_processing",
|
||||
"eth2/types",
|
||||
"eth2/utils/bls",
|
||||
"eth2/utils/compare_fields",
|
||||
"eth2/utils/compare_fields_derive",
|
||||
"eth2/utils/deposit_contract",
|
||||
"eth2/utils/eth2_config",
|
||||
"eth2/utils/eth2_interop_keypairs",
|
||||
"eth2/utils/eth2_testnet_config",
|
||||
"eth2/utils/logging",
|
||||
"eth2/utils/eth2_hashing",
|
||||
"eth2/utils/lighthouse_metrics",
|
||||
"eth2/utils/lighthouse_bootstrap",
|
||||
"eth2/utils/merkle_proof",
|
||||
"eth2/utils/int_to_bytes",
|
||||
"eth2/utils/serde_hex",
|
||||
"eth2/utils/slot_clock",
|
||||
"eth2/utils/ssz",
|
||||
"eth2/utils/ssz_derive",
|
||||
"eth2/utils/ssz_types",
|
||||
"eth2/utils/swap_or_not_shuffle",
|
||||
"eth2/utils/cached_tree_hash",
|
||||
"eth2/utils/tree_hash",
|
||||
"eth2/utils/tree_hash_derive",
|
||||
"eth2/utils/test_random_derive",
|
||||
"beacon_node",
|
||||
"beacon_node/store",
|
||||
"beacon_node/client",
|
||||
"beacon_node/rest_api",
|
||||
"beacon_node/network",
|
||||
"beacon_node/eth2-libp2p",
|
||||
"beacon_node/version",
|
||||
"beacon_node/eth1",
|
||||
"beacon_node/beacon_chain",
|
||||
"beacon_node/websocket_server",
|
||||
"tests/beacon_chain_sim",
|
||||
"tests/ef_tests",
|
||||
"tests/eth1_test_rig",
|
||||
"tests/node_test_rig",
|
||||
"lcli",
|
||||
"validator_client",
|
||||
"account_manager",
|
||||
|
||||
"beacon_node",
|
||||
"beacon_node/beacon_chain",
|
||||
"beacon_node/client",
|
||||
"beacon_node/eth1",
|
||||
"beacon_node/eth2_libp2p",
|
||||
"beacon_node/network",
|
||||
"beacon_node/rest_api",
|
||||
"beacon_node/store",
|
||||
"beacon_node/timer",
|
||||
"beacon_node/websocket_server",
|
||||
|
||||
"boot_node",
|
||||
|
||||
"common/account_utils",
|
||||
"common/clap_utils",
|
||||
"common/compare_fields",
|
||||
"common/compare_fields_derive",
|
||||
"common/deposit_contract",
|
||||
"common/eth2_config",
|
||||
"common/eth2_interop_keypairs",
|
||||
"common/eth2_testnet_config",
|
||||
"common/eth2_wallet_manager",
|
||||
"common/hashset_delay",
|
||||
"common/lighthouse_metrics",
|
||||
"common/lighthouse_version",
|
||||
"common/logging",
|
||||
"common/lru_cache",
|
||||
"common/remote_beacon_node",
|
||||
"common/rest_types",
|
||||
"common/slot_clock",
|
||||
"common/test_random_derive",
|
||||
"common/validator_dir",
|
||||
|
||||
"consensus/cached_tree_hash",
|
||||
"consensus/int_to_bytes",
|
||||
"consensus/fork_choice",
|
||||
"consensus/proto_array",
|
||||
"consensus/safe_arith",
|
||||
"consensus/ssz",
|
||||
"consensus/ssz_derive",
|
||||
"consensus/ssz_types",
|
||||
"consensus/serde_hex",
|
||||
"consensus/serde_utils",
|
||||
"consensus/state_processing",
|
||||
"consensus/swap_or_not_shuffle",
|
||||
"consensus/tree_hash",
|
||||
"consensus/tree_hash_derive",
|
||||
|
||||
"crypto/bls",
|
||||
"crypto/eth2_hashing",
|
||||
"crypto/eth2_key_derivation",
|
||||
"crypto/eth2_keystore",
|
||||
"crypto/eth2_wallet",
|
||||
|
||||
"lcli",
|
||||
|
||||
"lighthouse",
|
||||
"lighthouse/environment"
|
||||
"lighthouse/environment",
|
||||
|
||||
"testing/simulator",
|
||||
"testing/ef_tests",
|
||||
"testing/eth1_test_rig",
|
||||
"testing/node_test_rig",
|
||||
"testing/state_transition_vectors",
|
||||
|
||||
"validator_client",
|
||||
"validator_client/slashing_protection",
|
||||
]
|
||||
|
||||
[patch]
|
||||
[patch.crates-io]
|
||||
tree_hash = { path = "eth2/utils/tree_hash" }
|
||||
tree_hash_derive = { path = "eth2/utils/tree_hash_derive" }
|
||||
eth2_ssz = { path = "eth2/utils/ssz" }
|
||||
eth2_ssz_derive = { path = "eth2/utils/ssz_derive" }
|
||||
eth2_ssz_types = { path = "eth2/utils/ssz_types" }
|
||||
eth2_hashing = { path = "eth2/utils/eth2_hashing" }
|
||||
tree_hash = { path = "consensus/tree_hash" }
|
||||
tree_hash_derive = { path = "consensus/tree_hash_derive" }
|
||||
eth2_ssz = { path = "consensus/ssz" }
|
||||
eth2_ssz_derive = { path = "consensus/ssz_derive" }
|
||||
eth2_ssz_types = { path = "consensus/ssz_types" }
|
||||
eth2_hashing = { path = "crypto/eth2_hashing" }
|
||||
|
||||
4
Cross.toml
Normal file
4
Cross.toml
Normal file
@@ -0,0 +1,4 @@
|
||||
[build.env]
|
||||
passthrough = [
|
||||
"RUSTFLAGS",
|
||||
]
|
||||
16
Dockerfile
16
Dockerfile
@@ -1,4 +1,16 @@
|
||||
FROM rust:latest
|
||||
|
||||
FROM rust:1.45.1 AS builder
|
||||
RUN apt-get update && apt-get install -y cmake
|
||||
COPY . lighthouse
|
||||
ARG PORTABLE
|
||||
ENV PORTABLE $PORTABLE
|
||||
RUN cd lighthouse && make
|
||||
RUN cd lighthouse && make install-lcli
|
||||
|
||||
FROM debian:buster-slim
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libssl-dev \
|
||||
ca-certificates \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse
|
||||
COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli
|
||||
|
||||
101
Makefile
101
Makefile
@@ -1,12 +1,79 @@
|
||||
.PHONY: tests
|
||||
|
||||
EF_TESTS = "tests/ef_tests"
|
||||
EF_TESTS = "testing/ef_tests"
|
||||
STATE_TRANSITION_VECTORS = "testing/state_transition_vectors"
|
||||
GIT_TAG := $(shell git describe --tags --candidates 1)
|
||||
BIN_DIR = "bin"
|
||||
|
||||
# Builds the entire workspace in release (optimized).
|
||||
X86_64_TAG = "x86_64-unknown-linux-gnu"
|
||||
BUILD_PATH_X86_64 = "target/$(X86_64_TAG)/release"
|
||||
AARCH64_TAG = "aarch64-unknown-linux-gnu"
|
||||
BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release"
|
||||
|
||||
# Builds the Lighthouse binary in release (optimized).
|
||||
#
|
||||
# Binaries will most likely be found in `./target/release`
|
||||
install:
|
||||
cargo install --path lighthouse --force
|
||||
ifeq ($(PORTABLE), true)
|
||||
cargo install --path lighthouse --force --locked --features portable
|
||||
else
|
||||
cargo install --path lighthouse --force --locked
|
||||
endif
|
||||
|
||||
# Builds the lcli binary in release (optimized).
|
||||
install-lcli:
|
||||
ifeq ($(PORTABLE), true)
|
||||
cargo install --path lcli --force --locked --features portable
|
||||
else
|
||||
cargo install --path lcli --force --locked
|
||||
endif
|
||||
|
||||
# The following commands use `cross` to build a cross-compile.
|
||||
#
|
||||
# These commands require that:
|
||||
#
|
||||
# - `cross` is installed (`cargo install cross`).
|
||||
# - Docker is running.
|
||||
# - The current user is in the `docker` group.
|
||||
#
|
||||
# The resulting binaries will be created in the `target/` directory.
|
||||
#
|
||||
# The *-portable options compile the blst library *without* the use of some
|
||||
# optimized CPU functions that may not be available on some systems. This
|
||||
# results in a more portable binary with ~20% slower BLS verification.
|
||||
build-x86_64:
|
||||
cross build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu
|
||||
build-x86_64-portable:
|
||||
cross build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features portable
|
||||
build-aarch64:
|
||||
cross build --release --manifest-path lighthouse/Cargo.toml --target aarch64-unknown-linux-gnu
|
||||
build-aarch64-portable:
|
||||
cross build --release --manifest-path lighthouse/Cargo.toml --target aarch64-unknown-linux-gnu --features portable
|
||||
|
||||
# Create a `.tar.gz` containing a binary for a specific target.
|
||||
define tarball_release_binary
|
||||
cp $(1)/lighthouse $(BIN_DIR)/lighthouse
|
||||
cd $(BIN_DIR) && \
|
||||
tar -czf lighthouse-$(GIT_TAG)-$(2)$(3).tar.gz lighthouse && \
|
||||
rm lighthouse
|
||||
endef
|
||||
|
||||
# Create a series of `.tar.gz` files in the BIN_DIR directory, each containing
|
||||
# a `lighthouse` binary for a different target.
|
||||
#
|
||||
# The current git tag will be used as the version in the output file names. You
|
||||
# will likely need to use `git tag` and create a semver tag (e.g., `v0.2.3`).
|
||||
build-release-tarballs:
|
||||
[ -d $(BIN_DIR) ] || mkdir -p $(BIN_DIR)
|
||||
$(MAKE) build-x86_64
|
||||
$(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"")
|
||||
$(MAKE) build-x86_64-portable
|
||||
$(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"-portable")
|
||||
$(MAKE) build-aarch64
|
||||
$(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"")
|
||||
$(MAKE) build-aarch64-portable
|
||||
$(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"-portable")
|
||||
|
||||
|
||||
# Runs the full workspace tests in **release**, without downloading any additional
|
||||
# test vectors.
|
||||
@@ -22,10 +89,19 @@ test-debug:
|
||||
cargo-fmt:
|
||||
cargo fmt --all -- --check
|
||||
|
||||
# Typechecks benchmark code
|
||||
check-benches:
|
||||
cargo check --all --benches
|
||||
|
||||
# Runs only the ef-test vectors.
|
||||
run-ef-tests:
|
||||
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests"
|
||||
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,fake_crypto"
|
||||
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,milagro"
|
||||
|
||||
# Runs only the tests/state_transition_vectors tests.
|
||||
run-state-transition-tests:
|
||||
make -C $(STATE_TRANSITION_VECTORS) test
|
||||
|
||||
# Downloads and runs the EF test vectors.
|
||||
test-ef: make-ef-tests run-ef-tests
|
||||
@@ -37,6 +113,11 @@ test: test-release
|
||||
# Runs the entire test suite, downloading test vectors if required.
|
||||
test-full: cargo-fmt test-release test-debug test-ef
|
||||
|
||||
# Lints the code for bad style and potentially unsafe arithmetic using Clippy.
|
||||
# Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
|
||||
lint:
|
||||
cargo clippy --all -- -D warnings
|
||||
|
||||
# Runs the makefile in the `ef_tests` repo.
|
||||
#
|
||||
# May download and extract an archive of test vectors from the ethereum
|
||||
@@ -45,7 +126,21 @@ test-full: cargo-fmt test-release test-debug test-ef
|
||||
make-ef-tests:
|
||||
make -C $(EF_TESTS)
|
||||
|
||||
# Verifies that state_processing feature arbitrary-fuzz will compile
|
||||
arbitrary-fuzz:
|
||||
cargo check --manifest-path=consensus/state_processing/Cargo.toml --features arbitrary-fuzz
|
||||
|
||||
# Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
|
||||
audit:
|
||||
cargo install --force cargo-audit
|
||||
cargo audit
|
||||
|
||||
# Runs `cargo udeps` to check for unused dependencies
|
||||
udeps:
|
||||
cargo +nightly udeps --tests --all-targets --release
|
||||
|
||||
# Performs a `cargo` clean and cleans the `ef_tests` directory.
|
||||
clean:
|
||||
cargo clean
|
||||
make -C $(EF_TESTS) clean
|
||||
make -C $(STATE_TRANSITION_VECTORS) clean
|
||||
|
||||
25
README.md
25
README.md
@@ -2,7 +2,7 @@
|
||||
|
||||
An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prime.
|
||||
|
||||
[![Build Status]][Build Link] [![Book Status]][Book Link] [![RustDoc Status]][RustDoc Link] [![Chat Badge]][Chat Link] [![Swagger Badge]][Swagger Link]
|
||||
[![Build Status]][Build Link] [![Book Status]][Book Link] [![Chat Badge]][Chat Link]
|
||||
|
||||
[Build Status]: https://github.com/sigp/lighthouse/workflows/test-suite/badge.svg?branch=master
|
||||
[Build Link]: https://github.com/sigp/lighthouse/actions
|
||||
@@ -10,10 +10,6 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim
|
||||
[Chat Link]: https://discord.gg/cyAszAh
|
||||
[Book Status]:https://img.shields.io/badge/user--docs-master-informational
|
||||
[Book Link]: http://lighthouse-book.sigmaprime.io/
|
||||
[RustDoc Status]:https://img.shields.io/badge/code--docs-master-orange
|
||||
[RustDoc Link]: http://lighthouse-docs.sigmaprime.io/
|
||||
[Swagger Badge]: https://img.shields.io/badge/Open%20API-0.2.0-success
|
||||
[Swagger Link]: https://app.swaggerhub.com/apis-docs/spble/lighthouse_rest_api/0.2.0
|
||||
|
||||
[Documentation](http://lighthouse-book.sigmaprime.io/)
|
||||
|
||||
@@ -24,8 +20,7 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim
|
||||
Lighthouse is:
|
||||
|
||||
- Fully open-source, licensed under Apache 2.0.
|
||||
- Security-focused. Fuzzing has begun and security reviews are planned
|
||||
for late-2019.
|
||||
- Security-focused. Fuzzing has begun and security reviews are underway.
|
||||
- Built in [Rust](https://www.rust-lang.org/), a modern language providing unique safety guarantees and
|
||||
excellent performance (comparable to C++).
|
||||
- Funded by various organisations, including Sigma Prime, the
|
||||
@@ -39,7 +34,7 @@ Like all Ethereum 2.0 clients, Lighthouse is a work-in-progress.
|
||||
|
||||
Current development overview:
|
||||
|
||||
- Specification `v0.8.3` implemented, optimized and passing test vectors.
|
||||
- Specification `v0.12.1` implemented, optimized and passing test vectors.
|
||||
- Rust-native libp2p with Gossipsub and Discv5.
|
||||
- RESTful JSON API via HTTP server.
|
||||
- Events via WebSocket.
|
||||
@@ -49,11 +44,12 @@ Current development overview:
|
||||
|
||||
- ~~**April 2019**: Inital single-client testnets.~~
|
||||
- ~~**September 2019**: Inter-operability with other Ethereum 2.0 clients.~~
|
||||
- **Q4 2019**: `lighthouse-0.0.1` release: All major phase 0
|
||||
features implemented.
|
||||
- **Q4 2019**: Public, multi-client testnet with user-facing functionality.
|
||||
- **Q4 2019**: Third-party security review.
|
||||
- **Q1 2020**: Production Beacon Chain testnet (tentative).
|
||||
- ~~**Q1 2020**: `lighthouse-0.1.0` release: All major phase 0 features implemented.~~
|
||||
- ~~**Q2 2020**: Public, multi-client testnet with user-facing functionality.~~
|
||||
- ~~**Q2 2020**: Third-party security review.~~
|
||||
- **Q3 2020**: Additional third-party security reviews.
|
||||
- **Q3 2020**: Long-lived, multi-client Beacon Chain testnet
|
||||
- **Q4 2020**: Production Beacon Chain (tentative).
|
||||
|
||||
|
||||
## Documentation
|
||||
@@ -61,9 +57,6 @@ Current development overview:
|
||||
The [Lighthouse Book](http://lighthouse-book.sigmaprime.io/) contains information
|
||||
for testnet users and developers.
|
||||
|
||||
Code documentation is generated via `cargo doc` and hosted at
|
||||
[lighthouse-docs.sigmaprime.io](http://lighthouse-docs.sigmaprime.io/).
|
||||
|
||||
If you'd like some background on Sigma Prime, please see the [Lighthouse Update
|
||||
\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or
|
||||
[sigmaprime.io](https://sigmaprime.io).
|
||||
|
||||
@@ -1,28 +1,33 @@
|
||||
[package]
|
||||
name = "account_manager"
|
||||
version = "0.0.1"
|
||||
version = "0.2.11"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dev-dependencies]
|
||||
tempdir = "0.3"
|
||||
|
||||
[dependencies]
|
||||
bls = { path = "../eth2/utils/bls" }
|
||||
bls = { path = "../crypto/bls" }
|
||||
clap = "2.33.0"
|
||||
slog = "2.5.2"
|
||||
slog-term = "2.4.2"
|
||||
slog-async = "2.3.0"
|
||||
types = { path = "../eth2/types" }
|
||||
slog-term = "2.5.0"
|
||||
slog-async = "2.5.0"
|
||||
types = { path = "../consensus/types" }
|
||||
state_processing = { path = "../consensus/state_processing" }
|
||||
dirs = "2.0.2"
|
||||
environment = { path = "../lighthouse/environment" }
|
||||
deposit_contract = { path = "../eth2/utils/deposit_contract" }
|
||||
deposit_contract = { path = "../common/deposit_contract" }
|
||||
libc = "0.2.65"
|
||||
eth2_ssz = { path = "../eth2/utils/ssz" }
|
||||
eth2_ssz_derive = { path = "../eth2/utils/ssz_derive" }
|
||||
hex = "0.3"
|
||||
validator_client = { path = "../validator_client" }
|
||||
rayon = "1.2.0"
|
||||
eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" }
|
||||
web3 = "0.8.0"
|
||||
futures = "0.1.25"
|
||||
eth2_ssz = "0.1.2"
|
||||
eth2_ssz_derive = "0.1.0"
|
||||
hex = "0.4.2"
|
||||
rayon = "1.3.0"
|
||||
eth2_testnet_config = { path = "../common/eth2_testnet_config" }
|
||||
web3 = "0.11.0"
|
||||
futures = { version = "0.3.5", features = ["compat"] }
|
||||
clap_utils = { path = "../common/clap_utils" }
|
||||
eth2_wallet = { path = "../crypto/eth2_wallet" }
|
||||
eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
|
||||
rand = "0.7.2"
|
||||
validator_dir = { path = "../common/validator_dir" }
|
||||
tokio = { version = "0.2.21", features = ["full"] }
|
||||
eth2_keystore = { path = "../crypto/eth2_keystore" }
|
||||
account_utils = { path = "../common/account_utils" }
|
||||
|
||||
@@ -1,97 +0,0 @@
|
||||
use clap::{App, Arg, SubCommand};
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new("account_manager")
|
||||
.visible_aliases(&["a", "am", "account", "account_manager"])
|
||||
.about("Utilities for generating and managing Ethereum 2.0 accounts.")
|
||||
.subcommand(
|
||||
SubCommand::with_name("validator")
|
||||
.about("Generate or manage Etheruem 2.0 validators.")
|
||||
.subcommand(
|
||||
SubCommand::with_name("new")
|
||||
.about("Create a new Ethereum 2.0 validator.")
|
||||
.arg(
|
||||
Arg::with_name("deposit-value")
|
||||
.short("v")
|
||||
.long("deposit-value")
|
||||
.value_name("GWEI")
|
||||
.takes_value(true)
|
||||
.default_value("3200000000")
|
||||
.help("The deposit amount in Gwei (not Wei). Default is 3.2 ETH."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("send-deposits")
|
||||
.long("send-deposits")
|
||||
.help("If present, submit validator deposits to an eth1 endpoint /
|
||||
defined by the --eth1-endpoint. Requires either the /
|
||||
--deposit-contract or --testnet-dir flag.")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("eth1-endpoint")
|
||||
.short("e")
|
||||
.long("eth1-endpoint")
|
||||
.value_name("HTTP_SERVER")
|
||||
.takes_value(true)
|
||||
.default_value("http://localhost:8545")
|
||||
.help("The URL to the Eth1 JSON-RPC HTTP API (e.g., Geth/Parity-Ethereum)."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("account-index")
|
||||
.short("i")
|
||||
.long("account-index")
|
||||
.value_name("INDEX")
|
||||
.takes_value(true)
|
||||
.default_value("0")
|
||||
.help("The eth1 accounts[] index which will send the transaction"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("password")
|
||||
.short("p")
|
||||
.long("password")
|
||||
.value_name("FILE")
|
||||
.takes_value(true)
|
||||
.help("The password file to unlock the eth1 account (see --index)"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("testnet-dir")
|
||||
.long("testnet-dir")
|
||||
.value_name("DIRECTORY")
|
||||
.takes_value(true)
|
||||
.help("The directory from which to read the deposit contract /
|
||||
address. Defaults to the current Lighthouse testnet."),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("insecure")
|
||||
.about("Produce insecure, ephemeral validators. DO NOT USE TO STORE VALUE.")
|
||||
.arg(
|
||||
Arg::with_name("first")
|
||||
.index(1)
|
||||
.value_name("INDEX")
|
||||
.help("Index of the first validator")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("last")
|
||||
.index(2)
|
||||
.value_name("INDEX")
|
||||
.help("Index of the last validator")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("random")
|
||||
.about("Produces public keys using entropy from the Rust 'rand' library.")
|
||||
.arg(
|
||||
Arg::with_name("validator_count")
|
||||
.index(1)
|
||||
.value_name("INTEGER")
|
||||
.help("The number of new validators to generate.")
|
||||
.takes_value(true)
|
||||
.default_value("1"),
|
||||
),
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
70
account_manager/src/common.rs
Normal file
70
account_manager/src/common.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
use account_utils::PlainText;
|
||||
use account_utils::{read_mnemonic_from_user, strip_off_newlines};
|
||||
use clap::ArgMatches;
|
||||
use eth2_wallet::bip39::{Language, Mnemonic};
|
||||
use std::fs;
|
||||
use std::fs::create_dir_all;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::str::from_utf8;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:";
|
||||
|
||||
pub fn ensure_dir_exists<P: AsRef<Path>>(path: P) -> Result<(), String> {
|
||||
let path = path.as_ref();
|
||||
|
||||
if !path.exists() {
|
||||
create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn base_wallet_dir(matches: &ArgMatches, arg: &'static str) -> Result<PathBuf, String> {
|
||||
clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
arg,
|
||||
PathBuf::new().join(".lighthouse").join("wallets"),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn read_mnemonic_from_cli(
|
||||
mnemonic_path: Option<PathBuf>,
|
||||
stdin_password: bool,
|
||||
) -> Result<Mnemonic, String> {
|
||||
let mnemonic = match mnemonic_path {
|
||||
Some(path) => fs::read(&path)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", path, e))
|
||||
.and_then(|bytes| {
|
||||
let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into();
|
||||
let phrase = from_utf8(&bytes_no_newlines.as_ref())
|
||||
.map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?;
|
||||
Mnemonic::from_phrase(phrase, Language::English).map_err(|e| {
|
||||
format!(
|
||||
"Unable to derive mnemonic from string {:?}: {:?}",
|
||||
phrase, e
|
||||
)
|
||||
})
|
||||
})?,
|
||||
None => loop {
|
||||
eprintln!("");
|
||||
eprintln!("{}", MNEMONIC_PROMPT);
|
||||
|
||||
let mnemonic = read_mnemonic_from_user(stdin_password)?;
|
||||
|
||||
match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) {
|
||||
Ok(mnemonic_m) => {
|
||||
eprintln!("Valid mnemonic provided.");
|
||||
eprintln!("");
|
||||
sleep(Duration::from_secs(1));
|
||||
break mnemonic_m;
|
||||
}
|
||||
Err(_) => {
|
||||
eprintln!("Invalid mnemonic");
|
||||
}
|
||||
}
|
||||
},
|
||||
};
|
||||
Ok(mnemonic)
|
||||
}
|
||||
@@ -1,441 +1,37 @@
|
||||
mod cli;
|
||||
mod common;
|
||||
pub mod validator;
|
||||
pub mod wallet;
|
||||
|
||||
use clap::App;
|
||||
use clap::ArgMatches;
|
||||
use deposit_contract::DEPOSIT_GAS;
|
||||
use environment::{Environment, RuntimeContext};
|
||||
use eth2_testnet_config::Eth2TestnetConfig;
|
||||
use futures::{future, Future, IntoFuture, Stream};
|
||||
use rayon::prelude::*;
|
||||
use slog::{crit, error, info, Logger};
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use std::path::PathBuf;
|
||||
use types::{ChainSpec, EthSpec};
|
||||
use validator_client::validator_directory::{ValidatorDirectory, ValidatorDirectoryBuilder};
|
||||
use web3::{
|
||||
transports::Http,
|
||||
types::{Address, TransactionRequest, U256},
|
||||
Web3,
|
||||
};
|
||||
use environment::Environment;
|
||||
use types::EthSpec;
|
||||
|
||||
pub use cli::cli_app;
|
||||
pub const CMD: &str = "account_manager";
|
||||
pub const SECRETS_DIR_FLAG: &str = "secrets-dir";
|
||||
pub const VALIDATOR_DIR_FLAG: &str = "validator-dir";
|
||||
pub const BASE_DIR_FLAG: &str = "base-dir";
|
||||
|
||||
/// Run the account manager, logging an error if the operation did not succeed.
|
||||
pub fn run<T: EthSpec>(matches: &ArgMatches, mut env: Environment<T>) {
|
||||
let log = env.core_context().log.clone();
|
||||
match run_account_manager(matches, env) {
|
||||
Ok(()) => (),
|
||||
Err(e) => crit!(log, "Account manager failed"; "error" => e),
|
||||
}
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.visible_aliases(&["a", "am", "account", CMD])
|
||||
.about("Utilities for generating and managing Ethereum 2.0 accounts.")
|
||||
.subcommand(wallet::cli_app())
|
||||
.subcommand(validator::cli_app())
|
||||
}
|
||||
|
||||
/// Run the account manager, returning an error if the operation did not succeed.
|
||||
fn run_account_manager<T: EthSpec>(
|
||||
matches: &ArgMatches,
|
||||
mut env: Environment<T>,
|
||||
) -> Result<(), String> {
|
||||
let context = env.core_context();
|
||||
let log = context.log.clone();
|
||||
|
||||
// If the `datadir` was not provided, default to the home directory. If the home directory is
|
||||
// not known, use the current directory.
|
||||
let datadir = matches
|
||||
.value_of("datadir")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|| {
|
||||
dirs::home_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
.join(".lighthouse")
|
||||
.join("validators")
|
||||
});
|
||||
|
||||
fs::create_dir_all(&datadir).map_err(|e| format!("Failed to create datadir: {}", e))?;
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Located data directory";
|
||||
"path" => format!("{:?}", datadir)
|
||||
);
|
||||
|
||||
pub fn run<T: EthSpec>(matches: &ArgMatches<'_>, env: Environment<T>) -> Result<(), String> {
|
||||
match matches.subcommand() {
|
||||
("validator", Some(matches)) => match matches.subcommand() {
|
||||
("new", Some(matches)) => run_new_validator_subcommand(matches, datadir, env)?,
|
||||
_ => {
|
||||
return Err("Invalid 'validator new' command. See --help.".to_string());
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return Err("Invalid 'validator' command. See --help.".to_string());
|
||||
(wallet::CMD, Some(matches)) => wallet::cli_run(matches)?,
|
||||
(validator::CMD, Some(matches)) => validator::cli_run(matches, env)?,
|
||||
(unknown, _) => {
|
||||
return Err(format!(
|
||||
"{} is not a valid {} command. See --help.",
|
||||
unknown, CMD
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Describes the crypto key generation methods for a validator.
|
||||
enum KeygenMethod {
|
||||
/// Produce an insecure "deterministic" keypair. Used only for interop and testing.
|
||||
Insecure(usize),
|
||||
/// Generate a new key from the `rand` thread random RNG.
|
||||
ThreadRandom,
|
||||
}
|
||||
|
||||
/// Process the subcommand for creating new validators.
|
||||
fn run_new_validator_subcommand<T: EthSpec>(
|
||||
matches: &ArgMatches,
|
||||
datadir: PathBuf,
|
||||
mut env: Environment<T>,
|
||||
) -> Result<(), String> {
|
||||
let context = env.core_context();
|
||||
let log = context.log.clone();
|
||||
|
||||
let methods: Vec<KeygenMethod> = match matches.subcommand() {
|
||||
("insecure", Some(matches)) => {
|
||||
let first = matches
|
||||
.value_of("first")
|
||||
.ok_or_else(|| "No first index".to_string())?
|
||||
.parse::<usize>()
|
||||
.map_err(|e| format!("Unable to parse first index: {}", e))?;
|
||||
let last = matches
|
||||
.value_of("last")
|
||||
.ok_or_else(|| "No last index".to_string())?
|
||||
.parse::<usize>()
|
||||
.map_err(|e| format!("Unable to parse first index: {}", e))?;
|
||||
|
||||
(first..last).map(KeygenMethod::Insecure).collect()
|
||||
}
|
||||
("random", Some(matches)) => {
|
||||
let count = matches
|
||||
.value_of("validator_count")
|
||||
.ok_or_else(|| "No validator count".to_string())?
|
||||
.parse::<usize>()
|
||||
.map_err(|e| format!("Unable to parse validator count: {}", e))?;
|
||||
|
||||
(0..count).map(|_| KeygenMethod::ThreadRandom).collect()
|
||||
}
|
||||
_ => {
|
||||
return Err("Invalid 'validator' command. See --help.".to_string());
|
||||
}
|
||||
};
|
||||
|
||||
let deposit_value = matches
|
||||
.value_of("deposit-value")
|
||||
.ok_or_else(|| "No deposit-value".to_string())?
|
||||
.parse::<u64>()
|
||||
.map_err(|e| format!("Unable to parse deposit-value: {}", e))?;
|
||||
|
||||
let validators = make_validators(
|
||||
datadir.clone(),
|
||||
&methods,
|
||||
deposit_value,
|
||||
&context.eth2_config.spec,
|
||||
)?;
|
||||
|
||||
if matches.is_present("send-deposits") {
|
||||
let eth1_endpoint = matches
|
||||
.value_of("eth1-endpoint")
|
||||
.ok_or_else(|| "No eth1-endpoint".to_string())?;
|
||||
let account_index = matches
|
||||
.value_of("account-index")
|
||||
.ok_or_else(|| "No account-index".to_string())?
|
||||
.parse::<usize>()
|
||||
.map_err(|e| format!("Unable to parse account-index: {}", e))?;
|
||||
|
||||
// If supplied, load the eth1 account password from file.
|
||||
let password = if let Some(password_path) = matches.value_of("password") {
|
||||
Some(
|
||||
File::open(password_path)
|
||||
.map_err(|e| format!("Unable to open password file: {:?}", e))
|
||||
.and_then(|mut file| {
|
||||
let mut password = String::new();
|
||||
file.read_to_string(&mut password)
|
||||
.map_err(|e| format!("Unable to read password file to string: {:?}", e))
|
||||
.map(|_| password)
|
||||
})
|
||||
.map(|password| {
|
||||
// Trim the line feed from the end of the password file, if present.
|
||||
if password.ends_with("\n") {
|
||||
password[0..password.len() - 1].to_string()
|
||||
} else {
|
||||
password
|
||||
}
|
||||
})?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Submitting validator deposits";
|
||||
"eth1_node_http_endpoint" => eth1_endpoint
|
||||
);
|
||||
|
||||
// Load the testnet configuration from disk, or use the default testnet.
|
||||
let eth2_testnet_config: Eth2TestnetConfig<T> = if let Some(testnet_dir_str) =
|
||||
matches.value_of("testnet-dir")
|
||||
{
|
||||
let testnet_dir = testnet_dir_str
|
||||
.parse::<PathBuf>()
|
||||
.map_err(|e| format!("Unable to parse testnet-dir: {}", e))?;
|
||||
|
||||
if !testnet_dir.exists() {
|
||||
return Err(format!(
|
||||
"Testnet directory at {:?} does not exist",
|
||||
testnet_dir
|
||||
));
|
||||
}
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Loading deposit contract address";
|
||||
"testnet_dir" => format!("{:?}", &testnet_dir)
|
||||
);
|
||||
|
||||
Eth2TestnetConfig::load(testnet_dir.clone())
|
||||
.map_err(|e| format!("Failed to load testnet dir at {:?}: {}", testnet_dir, e))?
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Using Lighthouse testnet deposit contract";
|
||||
);
|
||||
|
||||
Eth2TestnetConfig::hard_coded()
|
||||
.map_err(|e| format!("Failed to load hard_coded testnet dir: {}", e))?
|
||||
};
|
||||
|
||||
// Convert from `types::Address` to `web3::types::Address`.
|
||||
let deposit_contract = Address::from_slice(
|
||||
eth2_testnet_config
|
||||
.deposit_contract_address()?
|
||||
.as_fixed_bytes(),
|
||||
);
|
||||
|
||||
if let Err(()) = env.runtime().block_on(deposit_validators(
|
||||
context.clone(),
|
||||
eth1_endpoint.to_string(),
|
||||
deposit_contract,
|
||||
validators.clone(),
|
||||
account_index,
|
||||
deposit_value,
|
||||
password,
|
||||
)) {
|
||||
error!(
|
||||
log,
|
||||
"Created validators but could not submit deposits";
|
||||
)
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Validator deposits complete";
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Generated validator directories";
|
||||
"base_path" => format!("{:?}", datadir),
|
||||
"count" => validators.len(),
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Produces a validator directory for each of the key generation methods provided in `methods`.
|
||||
fn make_validators(
|
||||
datadir: PathBuf,
|
||||
methods: &[KeygenMethod],
|
||||
deposit_value: u64,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<Vec<ValidatorDirectory>, String> {
|
||||
methods
|
||||
.par_iter()
|
||||
.map(|method| {
|
||||
let mut builder = ValidatorDirectoryBuilder::default()
|
||||
.spec(spec.clone())
|
||||
.custom_deposit_amount(deposit_value);
|
||||
|
||||
builder = match method {
|
||||
KeygenMethod::Insecure(index) => builder.insecure_keypairs(*index),
|
||||
KeygenMethod::ThreadRandom => builder.thread_random_keypairs(),
|
||||
};
|
||||
|
||||
builder
|
||||
.create_directory(datadir.clone())?
|
||||
.write_keypair_files()?
|
||||
.write_eth1_data_file()?
|
||||
.build()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// For each `ValidatorDirectory`, submit a deposit transaction to the `eth1_endpoint`.
|
||||
///
|
||||
/// Returns success as soon as the eth1 endpoint accepts the transaction (i.e., does not wait for
|
||||
/// transaction success/revert).
|
||||
fn deposit_validators<E: EthSpec>(
|
||||
context: RuntimeContext<E>,
|
||||
eth1_endpoint: String,
|
||||
deposit_contract: Address,
|
||||
validators: Vec<ValidatorDirectory>,
|
||||
account_index: usize,
|
||||
deposit_value: u64,
|
||||
password: Option<String>,
|
||||
) -> impl Future<Item = (), Error = ()> {
|
||||
let log_1 = context.log.clone();
|
||||
let log_2 = context.log.clone();
|
||||
|
||||
Http::new(ð1_endpoint)
|
||||
.map_err(move |e| {
|
||||
error!(
|
||||
log_1,
|
||||
"Failed to start web3 HTTP transport";
|
||||
"error" => format!("{:?}", e)
|
||||
)
|
||||
})
|
||||
.into_future()
|
||||
/*
|
||||
* Loop through the validator directories and submit the deposits.
|
||||
*/
|
||||
.and_then(move |(event_loop, transport)| {
|
||||
let web3 = Web3::new(transport);
|
||||
|
||||
futures::stream::iter_ok(validators)
|
||||
.for_each(move |validator| {
|
||||
let web3 = web3.clone();
|
||||
let log = log_2.clone();
|
||||
let password = password.clone();
|
||||
|
||||
deposit_validator(
|
||||
web3,
|
||||
deposit_contract,
|
||||
&validator,
|
||||
deposit_value,
|
||||
account_index,
|
||||
password,
|
||||
log,
|
||||
)
|
||||
})
|
||||
.map(|_| event_loop)
|
||||
})
|
||||
// Web3 gives errors if the event loop is dropped whilst performing requests.
|
||||
.map(|event_loop| drop(event_loop))
|
||||
}
|
||||
|
||||
/// For the given `ValidatorDirectory`, submit a deposit transaction to the `web3` node.
|
||||
///
|
||||
/// Returns success as soon as the eth1 endpoint accepts the transaction (i.e., does not wait for
|
||||
/// transaction success/revert).
|
||||
fn deposit_validator(
|
||||
web3: Web3<Http>,
|
||||
deposit_contract: Address,
|
||||
validator: &ValidatorDirectory,
|
||||
deposit_amount: u64,
|
||||
account_index: usize,
|
||||
password_opt: Option<String>,
|
||||
log: Logger,
|
||||
) -> impl Future<Item = (), Error = ()> {
|
||||
validator
|
||||
.voting_keypair
|
||||
.clone()
|
||||
.ok_or_else(|| error!(log, "Validator does not have voting keypair"))
|
||||
.and_then(|voting_keypair| {
|
||||
validator
|
||||
.deposit_data
|
||||
.clone()
|
||||
.ok_or_else(|| error!(log, "Validator does not have deposit data"))
|
||||
.map(|deposit_data| (voting_keypair, deposit_data))
|
||||
})
|
||||
.into_future()
|
||||
.and_then(move |(voting_keypair, deposit_data)| {
|
||||
let pubkey_1 = voting_keypair.pk.clone();
|
||||
let pubkey_2 = voting_keypair.pk.clone();
|
||||
|
||||
let web3_1 = web3.clone();
|
||||
let web3_2 = web3.clone();
|
||||
|
||||
let log_1 = log.clone();
|
||||
let log_2 = log.clone();
|
||||
|
||||
web3.eth()
|
||||
.accounts()
|
||||
.map_err(|e| format!("Failed to get accounts: {:?}", e))
|
||||
.and_then(move |accounts| {
|
||||
accounts
|
||||
.get(account_index)
|
||||
.cloned()
|
||||
.ok_or_else(|| "Insufficient accounts for deposit".to_string())
|
||||
})
|
||||
/*
|
||||
* If a password was supplied, unlock the account.
|
||||
*/
|
||||
.and_then(move |from_address| {
|
||||
let future: Box<dyn Future<Item = Address, Error = String> + Send> =
|
||||
if let Some(password) = password_opt {
|
||||
// Unlock for only a single transaction.
|
||||
let duration = None;
|
||||
|
||||
let future = web3_1
|
||||
.personal()
|
||||
.unlock_account(from_address, &password, duration)
|
||||
.then(move |result| match result {
|
||||
Ok(true) => Ok(from_address),
|
||||
Ok(false) => {
|
||||
Err("Eth1 node refused to unlock account. Check password."
|
||||
.to_string())
|
||||
}
|
||||
Err(e) => Err(format!("Eth1 unlock request failed: {:?}", e)),
|
||||
});
|
||||
|
||||
Box::new(future)
|
||||
} else {
|
||||
Box::new(future::ok(from_address))
|
||||
};
|
||||
|
||||
future
|
||||
})
|
||||
/*
|
||||
* Submit the deposit transaction.
|
||||
*/
|
||||
.and_then(move |from| {
|
||||
let tx_request = TransactionRequest {
|
||||
from,
|
||||
to: Some(deposit_contract),
|
||||
gas: Some(U256::from(DEPOSIT_GAS)),
|
||||
gas_price: None,
|
||||
value: Some(U256::from(from_gwei(deposit_amount))),
|
||||
data: Some(deposit_data.into()),
|
||||
nonce: None,
|
||||
condition: None,
|
||||
};
|
||||
|
||||
web3_2
|
||||
.eth()
|
||||
.send_transaction(tx_request)
|
||||
.map_err(|e| format!("Failed to call deposit fn: {:?}", e))
|
||||
})
|
||||
.map(move |tx| {
|
||||
info!(
|
||||
log_1,
|
||||
"Validator deposit successful";
|
||||
"eth1_tx_hash" => format!("{:?}", tx),
|
||||
"validator_voting_pubkey" => format!("{:?}", pubkey_1)
|
||||
)
|
||||
})
|
||||
.map_err(move |e| {
|
||||
error!(
|
||||
log_2,
|
||||
"Validator deposit_failed";
|
||||
"error" => e,
|
||||
"validator_voting_pubkey" => format!("{:?}", pubkey_2)
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/// Converts gwei to wei.
|
||||
fn from_gwei(gwei: u64) -> U256 {
|
||||
U256::from(gwei) * U256::exp10(9)
|
||||
}
|
||||
|
||||
206
account_manager/src/validator/create.rs
Normal file
206
account_manager/src/validator/create.rs
Normal file
@@ -0,0 +1,206 @@
|
||||
use crate::{common::ensure_dir_exists, SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG};
|
||||
use account_utils::{random_password, strip_off_newlines, validator_definitions};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use environment::Environment;
|
||||
use eth2_wallet::PlainText;
|
||||
use eth2_wallet_manager::WalletManager;
|
||||
use std::ffi::OsStr;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use types::EthSpec;
|
||||
use validator_dir::Builder as ValidatorDirBuilder;
|
||||
|
||||
pub const CMD: &str = "create";
|
||||
pub const BASE_DIR_FLAG: &str = "base-dir";
|
||||
pub const WALLET_NAME_FLAG: &str = "wallet-name";
|
||||
pub const WALLET_PASSWORD_FLAG: &str = "wallet-password";
|
||||
pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei";
|
||||
pub const STORE_WITHDRAW_FLAG: &str = "store-withdrawal-keystore";
|
||||
pub const COUNT_FLAG: &str = "count";
|
||||
pub const AT_MOST_FLAG: &str = "at-most";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about(
|
||||
"Creates new validators from an existing EIP-2386 wallet using the EIP-2333 HD key \
|
||||
derivation scheme.",
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(WALLET_NAME_FLAG)
|
||||
.long(WALLET_NAME_FLAG)
|
||||
.value_name("WALLET_NAME")
|
||||
.help("Use the wallet identified by this name")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(WALLET_PASSWORD_FLAG)
|
||||
.long(WALLET_PASSWORD_FLAG)
|
||||
.value_name("WALLET_PASSWORD_PATH")
|
||||
.help("A path to a file containing the password which will unlock the wallet.")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path where the validator directories will be created. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SECRETS_DIR_FLAG)
|
||||
.long(SECRETS_DIR_FLAG)
|
||||
.value_name("SECRETS_DIR")
|
||||
.help(
|
||||
"The path where the validator keystore passwords will be stored. \
|
||||
Defaults to ~/.lighthouse/secrets",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(DEPOSIT_GWEI_FLAG)
|
||||
.long(DEPOSIT_GWEI_FLAG)
|
||||
.value_name("DEPOSIT_GWEI")
|
||||
.help(
|
||||
"The GWEI value of the deposit amount. Defaults to the minimum amount \
|
||||
required for an active validator (MAX_EFFECTIVE_BALANCE)",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STORE_WITHDRAW_FLAG)
|
||||
.long(STORE_WITHDRAW_FLAG)
|
||||
.help(
|
||||
"If present, the withdrawal keystore will be stored alongside the voting \
|
||||
keypair. It is generally recommended to *not* store the withdrawal key and \
|
||||
instead generate them from the wallet seed when required.",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(COUNT_FLAG)
|
||||
.long(COUNT_FLAG)
|
||||
.value_name("VALIDATOR_COUNT")
|
||||
.help("The number of validators to create, regardless of how many already exist")
|
||||
.conflicts_with("at-most")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(AT_MOST_FLAG)
|
||||
.long(AT_MOST_FLAG)
|
||||
.value_name("AT_MOST_VALIDATORS")
|
||||
.help(
|
||||
"Observe the number of validators in --validator-dir, only creating enough to \
|
||||
reach the given count. Never deletes an existing validator.",
|
||||
)
|
||||
.conflicts_with("count")
|
||||
.takes_value(true),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run<T: EthSpec>(
|
||||
matches: &ArgMatches,
|
||||
mut env: Environment<T>,
|
||||
wallet_base_dir: PathBuf,
|
||||
) -> Result<(), String> {
|
||||
let spec = env.core_context().eth2_config.spec;
|
||||
|
||||
let name: String = clap_utils::parse_required(matches, WALLET_NAME_FLAG)?;
|
||||
let wallet_password_path: PathBuf = clap_utils::parse_required(matches, WALLET_PASSWORD_FLAG)?;
|
||||
let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
VALIDATOR_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("validators"),
|
||||
)?;
|
||||
let secrets_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
SECRETS_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("secrets"),
|
||||
)?;
|
||||
let deposit_gwei = clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)?
|
||||
.unwrap_or_else(|| spec.max_effective_balance);
|
||||
let count: Option<usize> = clap_utils::parse_optional(matches, COUNT_FLAG)?;
|
||||
let at_most: Option<usize> = clap_utils::parse_optional(matches, AT_MOST_FLAG)?;
|
||||
|
||||
ensure_dir_exists(&validator_dir)?;
|
||||
ensure_dir_exists(&secrets_dir)?;
|
||||
|
||||
let starting_validator_count = existing_validator_count(&validator_dir)?;
|
||||
|
||||
let n = match (count, at_most) {
|
||||
(Some(_), Some(_)) => Err(format!(
|
||||
"Cannot supply --{} and --{}",
|
||||
COUNT_FLAG, AT_MOST_FLAG
|
||||
)),
|
||||
(None, None) => Err(format!(
|
||||
"Must supply either --{} or --{}",
|
||||
COUNT_FLAG, AT_MOST_FLAG
|
||||
)),
|
||||
(Some(count), None) => Ok(count),
|
||||
(None, Some(at_most)) => Ok(at_most.saturating_sub(starting_validator_count)),
|
||||
}?;
|
||||
|
||||
if n == 0 {
|
||||
eprintln!(
|
||||
"No validators to create. {}={:?}, {}={:?}",
|
||||
COUNT_FLAG, count, AT_MOST_FLAG, at_most
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let wallet_password = fs::read(&wallet_password_path)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", wallet_password_path, e))
|
||||
.map(|bytes| PlainText::from(strip_off_newlines(bytes)))?;
|
||||
|
||||
let mgr = WalletManager::open(&wallet_base_dir)
|
||||
.map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;
|
||||
|
||||
let mut wallet = mgr
|
||||
.wallet_by_name(&name)
|
||||
.map_err(|e| format!("Unable to open wallet: {:?}", e))?;
|
||||
|
||||
for i in 0..n {
|
||||
let voting_password = random_password();
|
||||
let withdrawal_password = random_password();
|
||||
|
||||
let keystores = wallet
|
||||
.next_validator(
|
||||
wallet_password.as_bytes(),
|
||||
voting_password.as_bytes(),
|
||||
withdrawal_password.as_bytes(),
|
||||
)
|
||||
.map_err(|e| format!("Unable to create validator keys: {:?}", e))?;
|
||||
|
||||
let voting_pubkey = keystores.voting.pubkey().to_string();
|
||||
|
||||
ValidatorDirBuilder::new(validator_dir.clone(), secrets_dir.clone())
|
||||
.voting_keystore(keystores.voting, voting_password.as_bytes())
|
||||
.withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
|
||||
.create_eth1_tx_data(deposit_gwei, &spec)
|
||||
.store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG))
|
||||
.build()
|
||||
.map_err(|e| format!("Unable to build validator directory: {:?}", e))?;
|
||||
|
||||
println!("{}/{}\t0x{}", i + 1, n, voting_pubkey);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the number of validators that exist in the given `validator_dir`.
|
||||
///
|
||||
/// This function just assumes all files and directories, excluding the validator definitions YAML,
|
||||
/// are validator directories, making it likely to return a higher number than accurate
|
||||
/// but never a lower one.
|
||||
fn existing_validator_count<P: AsRef<Path>>(validator_dir: P) -> Result<usize, String> {
|
||||
fs::read_dir(validator_dir.as_ref())
|
||||
.map(|iter| {
|
||||
iter.filter_map(|e| e.ok())
|
||||
.filter(|e| e.file_name() != OsStr::new(validator_definitions::CONFIG_FILENAME))
|
||||
.count()
|
||||
})
|
||||
.map_err(|e| format!("Unable to read {:?}: {}", validator_dir.as_ref(), e))
|
||||
}
|
||||
405
account_manager/src/validator/deposit.rs
Normal file
405
account_manager/src/validator/deposit.rs
Normal file
@@ -0,0 +1,405 @@
|
||||
use crate::VALIDATOR_DIR_FLAG;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use deposit_contract::DEPOSIT_GAS;
|
||||
use environment::Environment;
|
||||
use futures::{
|
||||
compat::Future01CompatExt,
|
||||
stream::{FuturesUnordered, StreamExt},
|
||||
};
|
||||
use slog::{info, Logger};
|
||||
use state_processing::per_block_processing::verify_deposit_signature;
|
||||
use std::path::PathBuf;
|
||||
use tokio::time::{delay_until, Duration, Instant};
|
||||
use types::EthSpec;
|
||||
use validator_dir::{Eth1DepositData, Manager as ValidatorManager, ValidatorDir};
|
||||
use web3::{
|
||||
transports::Http,
|
||||
transports::Ipc,
|
||||
types::{Address, SyncInfo, SyncState, TransactionRequest, U256},
|
||||
Transport, Web3,
|
||||
};
|
||||
|
||||
pub const CMD: &str = "deposit";
|
||||
pub const VALIDATOR_FLAG: &str = "validator";
|
||||
pub const ETH1_IPC_FLAG: &str = "eth1-ipc";
|
||||
pub const ETH1_HTTP_FLAG: &str = "eth1-http";
|
||||
pub const FROM_ADDRESS_FLAG: &str = "from-address";
|
||||
pub const CONFIRMATION_COUNT_FLAG: &str = "confirmation-count";
|
||||
pub const CONFIRMATION_BATCH_SIZE_FLAG: &str = "confirmation-batch-size";
|
||||
|
||||
const GWEI: u64 = 1_000_000_000;
|
||||
|
||||
const SYNCING_STATE_RETRY_DELAY: Duration = Duration::from_secs(2);
|
||||
|
||||
const CONFIRMATIONS_POLL_TIME: Duration = Duration::from_secs(2);
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new("deposit")
|
||||
.about(
|
||||
"Submits a deposit to an Eth1 validator registration contract via an IPC endpoint \
|
||||
of an Eth1 client (e.g., Geth, OpenEthereum, etc.). The validators must already \
|
||||
have been created and exist on the file-system. The process will exit immediately \
|
||||
with an error if any error occurs. After each deposit is submitted to the Eth1 \
|
||||
node, a file will be saved in the validator directory with the transaction hash. \
|
||||
If confirmations are set to non-zero then the application will wait for confirmations \
|
||||
before saving the transaction hash and moving onto the next batch of deposits. \
|
||||
The deposit contract address will be determined by the --testnet-dir flag on the \
|
||||
primary Lighthouse binary.",
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path to the validator client data directory. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_FLAG)
|
||||
.long(VALIDATOR_FLAG)
|
||||
.value_name("VALIDATOR_NAME")
|
||||
.help(
|
||||
"The name of the directory in --data-dir for which to deposit. \
|
||||
Set to 'all' to deposit all validators in the --data-dir.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(ETH1_IPC_FLAG)
|
||||
.long(ETH1_IPC_FLAG)
|
||||
.value_name("ETH1_IPC_PATH")
|
||||
.help("Path to an Eth1 JSON-RPC IPC endpoint")
|
||||
.takes_value(true)
|
||||
.required(false),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(ETH1_HTTP_FLAG)
|
||||
.long(ETH1_HTTP_FLAG)
|
||||
.value_name("ETH1_HTTP_URL")
|
||||
.help("URL to an Eth1 JSON-RPC endpoint")
|
||||
.takes_value(true)
|
||||
.required(false),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(FROM_ADDRESS_FLAG)
|
||||
.long(FROM_ADDRESS_FLAG)
|
||||
.value_name("FROM_ETH1_ADDRESS")
|
||||
.help(
|
||||
"The address that will submit the eth1 deposit. \
|
||||
Must be unlocked on the node at --eth1-ipc.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(CONFIRMATION_COUNT_FLAG)
|
||||
.long(CONFIRMATION_COUNT_FLAG)
|
||||
.value_name("CONFIRMATION_COUNT")
|
||||
.help(
|
||||
"The number of Eth1 block confirmations required \
|
||||
before a transaction is considered complete. Set to \
|
||||
0 for no confirmations.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.default_value("1"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(CONFIRMATION_BATCH_SIZE_FLAG)
|
||||
.long(CONFIRMATION_BATCH_SIZE_FLAG)
|
||||
.value_name("BATCH_SIZE")
|
||||
.help(
|
||||
"Perform BATCH_SIZE deposits and wait for confirmations \
|
||||
in parallel. Useful for achieving faster bulk deposits.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.default_value("10"),
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn send_deposit_transactions<T1, T2: 'static>(
|
||||
mut env: Environment<T1>,
|
||||
log: Logger,
|
||||
mut eth1_deposit_datas: Vec<(ValidatorDir, Eth1DepositData)>,
|
||||
from_address: Address,
|
||||
deposit_contract: Address,
|
||||
transport: T2,
|
||||
confirmation_count: usize,
|
||||
confirmation_batch_size: usize,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
T1: EthSpec,
|
||||
T2: Transport + std::marker::Send,
|
||||
<T2 as web3::Transport>::Out: std::marker::Send,
|
||||
{
|
||||
let web3 = Web3::new(transport);
|
||||
let spec = env.eth2_config.spec.clone();
|
||||
|
||||
let deposits_fut = async {
|
||||
poll_until_synced(web3.clone(), log.clone()).await?;
|
||||
|
||||
for chunk in eth1_deposit_datas.chunks_mut(confirmation_batch_size) {
|
||||
let futures = FuturesUnordered::default();
|
||||
|
||||
for (ref mut validator_dir, eth1_deposit_data) in chunk.iter_mut() {
|
||||
verify_deposit_signature(ð1_deposit_data.deposit_data, &spec).map_err(|e| {
|
||||
format!(
|
||||
"Deposit for {:?} fails verification, \
|
||||
are you using the correct testnet configuration?\nError: {:?}",
|
||||
eth1_deposit_data.deposit_data.pubkey, e
|
||||
)
|
||||
})?;
|
||||
|
||||
let web3 = web3.clone();
|
||||
let log = log.clone();
|
||||
futures.push(async move {
|
||||
let tx_hash = web3
|
||||
.send_transaction_with_confirmation(
|
||||
TransactionRequest {
|
||||
from: from_address,
|
||||
to: Some(deposit_contract),
|
||||
gas: Some(DEPOSIT_GAS.into()),
|
||||
gas_price: None,
|
||||
value: Some(from_gwei(eth1_deposit_data.deposit_data.amount)),
|
||||
data: Some(eth1_deposit_data.rlp.clone().into()),
|
||||
nonce: None,
|
||||
condition: None,
|
||||
},
|
||||
CONFIRMATIONS_POLL_TIME,
|
||||
confirmation_count,
|
||||
)
|
||||
.compat()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to send transaction: {:?}", e))?;
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Submitted deposit";
|
||||
"tx_hash" => format!("{:?}", tx_hash),
|
||||
);
|
||||
|
||||
validator_dir
|
||||
.save_eth1_deposit_tx_hash(&format!("{:?}", tx_hash))
|
||||
.map_err(|e| {
|
||||
format!("Failed to save tx hash {:?} to disk: {:?}", tx_hash, e)
|
||||
})?;
|
||||
|
||||
Ok::<(), String>(())
|
||||
});
|
||||
}
|
||||
|
||||
futures
|
||||
.collect::<Vec<_>>()
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<_, _>>()?;
|
||||
}
|
||||
|
||||
Ok::<(), String>(())
|
||||
};
|
||||
|
||||
env.runtime().block_on(deposits_fut)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn cli_run<T: EthSpec>(
|
||||
matches: &ArgMatches<'_>,
|
||||
mut env: Environment<T>,
|
||||
) -> Result<(), String> {
|
||||
let log = env.core_context().log().clone();
|
||||
|
||||
let data_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
VALIDATOR_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("validators"),
|
||||
)?;
|
||||
let validator: String = clap_utils::parse_required(matches, VALIDATOR_FLAG)?;
|
||||
let eth1_ipc_path: Option<PathBuf> = clap_utils::parse_optional(matches, ETH1_IPC_FLAG)?;
|
||||
let eth1_http_url: Option<String> = clap_utils::parse_optional(matches, ETH1_HTTP_FLAG)?;
|
||||
let from_address: Address = clap_utils::parse_required(matches, FROM_ADDRESS_FLAG)?;
|
||||
let confirmation_count: usize = clap_utils::parse_required(matches, CONFIRMATION_COUNT_FLAG)?;
|
||||
let confirmation_batch_size: usize =
|
||||
clap_utils::parse_required(matches, CONFIRMATION_BATCH_SIZE_FLAG)?;
|
||||
|
||||
let manager = ValidatorManager::open(&data_dir)
|
||||
.map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?;
|
||||
|
||||
let validators = match validator.as_ref() {
|
||||
"all" => manager
|
||||
.open_all_validators()
|
||||
.map_err(|e| format!("Unable to read all validators: {:?}", e)),
|
||||
name => {
|
||||
let path = manager
|
||||
.directory_names()
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Unable to read --{} directory names: {:?}",
|
||||
VALIDATOR_DIR_FLAG, e
|
||||
)
|
||||
})?
|
||||
.get(name)
|
||||
.ok_or_else(|| format!("Unknown validator: {}", name))?
|
||||
.clone();
|
||||
|
||||
manager
|
||||
.open_validator(&path)
|
||||
.map_err(|e| format!("Unable to open {}: {:?}", name, e))
|
||||
.map(|v| vec![v])
|
||||
}
|
||||
}?;
|
||||
|
||||
let eth1_deposit_datas = validators
|
||||
.into_iter()
|
||||
.filter(|v| !v.eth1_deposit_tx_hash_exists())
|
||||
.map(|v| match v.eth1_deposit_data() {
|
||||
Ok(Some(data)) => Ok((v, data)),
|
||||
Ok(None) => Err(format!(
|
||||
"Validator is missing deposit data file: {:?}",
|
||||
v.dir()
|
||||
)),
|
||||
Err(e) => Err(format!(
|
||||
"Unable to read deposit data for {:?}: {:?}",
|
||||
v.dir(),
|
||||
e
|
||||
)),
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
let total_gwei: u64 = eth1_deposit_datas
|
||||
.iter()
|
||||
.map(|(_, d)| d.deposit_data.amount)
|
||||
.sum();
|
||||
|
||||
if eth1_deposit_datas.is_empty() {
|
||||
info!(log, "No validators to deposit");
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Starting deposits";
|
||||
"deposit_count" => eth1_deposit_datas.len(),
|
||||
"total_eth" => total_gwei / GWEI,
|
||||
);
|
||||
|
||||
let deposit_contract = env
|
||||
.testnet
|
||||
.as_ref()
|
||||
.ok_or_else(|| "Unable to run account manager without a testnet dir".to_string())?
|
||||
.deposit_contract_address()
|
||||
.map_err(|e| format!("Unable to parse deposit contract address: {}", e))?;
|
||||
|
||||
if deposit_contract == Address::zero() {
|
||||
return Err("Refusing to deposit to the zero address. Check testnet configuration.".into());
|
||||
}
|
||||
|
||||
match (eth1_ipc_path, eth1_http_url) {
|
||||
(Some(_), Some(_)) => Err(format!(
|
||||
"error: Cannot supply both --{} and --{}",
|
||||
ETH1_IPC_FLAG, ETH1_HTTP_FLAG
|
||||
)),
|
||||
(None, None) => Err(format!(
|
||||
"error: Must supply one of --{} or --{}",
|
||||
ETH1_IPC_FLAG, ETH1_HTTP_FLAG
|
||||
)),
|
||||
(Some(ipc_path), None) => {
|
||||
let (_event_loop_handle, ipc_transport) = Ipc::new(ipc_path)
|
||||
.map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?;
|
||||
send_deposit_transactions(
|
||||
env,
|
||||
log,
|
||||
eth1_deposit_datas,
|
||||
from_address,
|
||||
deposit_contract,
|
||||
ipc_transport,
|
||||
confirmation_count,
|
||||
confirmation_batch_size,
|
||||
)
|
||||
}
|
||||
(None, Some(http_url)) => {
|
||||
let (_event_loop_handle, http_transport) = Http::new(http_url.as_str())
|
||||
.map_err(|e| format!("Unable to connect to eth1 http RPC: {:?}", e))?;
|
||||
send_deposit_transactions(
|
||||
env,
|
||||
log,
|
||||
eth1_deposit_datas,
|
||||
from_address,
|
||||
deposit_contract,
|
||||
http_transport,
|
||||
confirmation_count,
|
||||
confirmation_batch_size,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts gwei to wei.
|
||||
fn from_gwei(gwei: u64) -> U256 {
|
||||
U256::from(gwei) * U256::exp10(9)
|
||||
}
|
||||
|
||||
/// Run a poll on the `eth_syncing` endpoint, blocking until the node is synced.
|
||||
async fn poll_until_synced<T>(web3: Web3<T>, log: Logger) -> Result<(), String>
|
||||
where
|
||||
T: Transport + Send + 'static,
|
||||
<T as Transport>::Out: Send,
|
||||
{
|
||||
loop {
|
||||
let sync_state = web3
|
||||
.clone()
|
||||
.eth()
|
||||
.syncing()
|
||||
.compat()
|
||||
.await
|
||||
.map_err(|e| format!("Unable to read syncing state from eth1 node: {:?}", e))?;
|
||||
|
||||
match sync_state {
|
||||
SyncState::Syncing(SyncInfo {
|
||||
current_block,
|
||||
highest_block,
|
||||
..
|
||||
}) => {
|
||||
info!(
|
||||
log,
|
||||
"Waiting for eth1 node to sync";
|
||||
"est_highest_block" => format!("{}", highest_block),
|
||||
"current_block" => format!("{}", current_block),
|
||||
);
|
||||
|
||||
delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await;
|
||||
}
|
||||
SyncState::NotSyncing => {
|
||||
let block_number = web3
|
||||
.clone()
|
||||
.eth()
|
||||
.block_number()
|
||||
.compat()
|
||||
.await
|
||||
.map_err(|e| format!("Unable to read block number from eth1 node: {:?}", e))?;
|
||||
|
||||
if block_number > 0.into() {
|
||||
info!(
|
||||
log,
|
||||
"Eth1 node is synced";
|
||||
"head_block" => format!("{}", block_number),
|
||||
);
|
||||
break;
|
||||
} else {
|
||||
delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await;
|
||||
info!(
|
||||
log,
|
||||
"Waiting for eth1 node to sync";
|
||||
"current_block" => 0,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
231
account_manager/src/validator/import.rs
Normal file
231
account_manager/src/validator/import.rs
Normal file
@@ -0,0 +1,231 @@
|
||||
use crate::{common::ensure_dir_exists, VALIDATOR_DIR_FLAG};
|
||||
use account_utils::{
|
||||
eth2_keystore::Keystore,
|
||||
read_password_from_user,
|
||||
validator_definitions::{
|
||||
recursively_find_voting_keystores, ValidatorDefinition, ValidatorDefinitions,
|
||||
CONFIG_FILENAME,
|
||||
},
|
||||
ZeroizeString,
|
||||
};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
pub const CMD: &str = "import";
|
||||
pub const KEYSTORE_FLAG: &str = "keystore";
|
||||
pub const DIR_FLAG: &str = "directory";
|
||||
pub const STDIN_PASSWORD_FLAG: &str = "stdin-passwords";
|
||||
pub const REUSE_PASSWORD_FLAG: &str = "reuse-password";
|
||||
|
||||
pub const PASSWORD_PROMPT: &str = "Enter the keystore password, or press enter to omit it:";
|
||||
pub const KEYSTORE_REUSE_WARNING: &str = "DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH \
|
||||
ANOTHER CLIENT, OR YOU WILL GET SLASHED.";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about(
|
||||
"Imports one or more EIP-2335 passwords into a Lighthouse VC directory, \
|
||||
requesting passwords interactively. The directory flag provides a convenient \
|
||||
method for importing a directory of keys generated by the eth2-deposit-cli \
|
||||
Python utility.",
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(KEYSTORE_FLAG)
|
||||
.long(KEYSTORE_FLAG)
|
||||
.value_name("KEYSTORE_PATH")
|
||||
.help("Path to a single keystore to be imported.")
|
||||
.conflicts_with(DIR_FLAG)
|
||||
.required_unless(DIR_FLAG)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(DIR_FLAG)
|
||||
.long(DIR_FLAG)
|
||||
.value_name("KEYSTORES_DIRECTORY")
|
||||
.help(
|
||||
"Path to a directory which contains zero or more keystores \
|
||||
for import. This directory and all sub-directories will be \
|
||||
searched and any file name which contains 'keystore' and \
|
||||
has the '.json' extension will be attempted to be imported.",
|
||||
)
|
||||
.conflicts_with(KEYSTORE_FLAG)
|
||||
.required_unless(KEYSTORE_FLAG)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path where the validator directories will be created. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STDIN_PASSWORD_FLAG)
|
||||
.long(STDIN_PASSWORD_FLAG)
|
||||
.help("If present, read passwords from stdin instead of tty."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(REUSE_PASSWORD_FLAG)
|
||||
.long(REUSE_PASSWORD_FLAG)
|
||||
.help("If present, the same password will be used for all imported keystores."),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
let keystore: Option<PathBuf> = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?;
|
||||
let keystores_dir: Option<PathBuf> = clap_utils::parse_optional(matches, DIR_FLAG)?;
|
||||
let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
VALIDATOR_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("validators"),
|
||||
)?;
|
||||
let stdin_password = matches.is_present(STDIN_PASSWORD_FLAG);
|
||||
let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG);
|
||||
|
||||
ensure_dir_exists(&validator_dir)?;
|
||||
|
||||
let mut defs = ValidatorDefinitions::open_or_create(&validator_dir)
|
||||
.map_err(|e| format!("Unable to open {}: {:?}", CONFIG_FILENAME, e))?;
|
||||
|
||||
// Collect the paths for the keystores that should be imported.
|
||||
let keystore_paths = match (keystore, keystores_dir) {
|
||||
(Some(keystore), None) => vec![keystore],
|
||||
(None, Some(keystores_dir)) => {
|
||||
let mut keystores = vec![];
|
||||
|
||||
recursively_find_voting_keystores(&keystores_dir, &mut keystores)
|
||||
.map_err(|e| format!("Unable to search {:?}: {:?}", keystores_dir, e))?;
|
||||
|
||||
if keystores.is_empty() {
|
||||
eprintln!("No keystores found in {:?}", keystores_dir);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
keystores
|
||||
}
|
||||
_ => {
|
||||
return Err(format!(
|
||||
"Must supply either --{} or --{}",
|
||||
KEYSTORE_FLAG, DIR_FLAG
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
eprintln!("WARNING: {}", KEYSTORE_REUSE_WARNING);
|
||||
|
||||
// For each keystore:
|
||||
//
|
||||
// - Obtain the keystore password, if the user desires.
|
||||
// - Copy the keystore into the `validator_dir`.
|
||||
// - Add the keystore to the validator definitions file.
|
||||
//
|
||||
// Skip keystores that already exist, but exit early if any operation fails.
|
||||
// Reuses the same password for all keystores if the `REUSE_PASSWORD_FLAG` flag is set.
|
||||
let mut num_imported_keystores = 0;
|
||||
let mut previous_password: Option<ZeroizeString> = None;
|
||||
for src_keystore in &keystore_paths {
|
||||
let keystore = Keystore::from_json_file(src_keystore)
|
||||
.map_err(|e| format!("Unable to read keystore JSON {:?}: {:?}", src_keystore, e))?;
|
||||
|
||||
eprintln!("");
|
||||
eprintln!("Keystore found at {:?}:", src_keystore);
|
||||
eprintln!("");
|
||||
eprintln!(" - Public key: 0x{}", keystore.pubkey());
|
||||
eprintln!(" - UUID: {}", keystore.uuid());
|
||||
eprintln!("");
|
||||
eprintln!(
|
||||
"If you enter the password it will be stored as plain-text in {} so that it is not \
|
||||
required each time the validator client starts.",
|
||||
CONFIG_FILENAME
|
||||
);
|
||||
|
||||
let password_opt = loop {
|
||||
if let Some(password) = previous_password.clone() {
|
||||
eprintln!("Reuse previous password.");
|
||||
break Some(password);
|
||||
}
|
||||
eprintln!("");
|
||||
eprintln!("{}", PASSWORD_PROMPT);
|
||||
|
||||
let password = read_password_from_user(stdin_password)?;
|
||||
|
||||
if password.as_ref().is_empty() {
|
||||
eprintln!("Continuing without password.");
|
||||
sleep(Duration::from_secs(1)); // Provides nicer UX.
|
||||
break None;
|
||||
}
|
||||
|
||||
match keystore.decrypt_keypair(password.as_ref()) {
|
||||
Ok(_) => {
|
||||
eprintln!("Password is correct.");
|
||||
eprintln!("");
|
||||
sleep(Duration::from_secs(1)); // Provides nicer UX.
|
||||
if reuse_password {
|
||||
previous_password = Some(password.clone());
|
||||
}
|
||||
break Some(password);
|
||||
}
|
||||
Err(eth2_keystore::Error::InvalidPassword) => {
|
||||
eprintln!("Invalid password");
|
||||
}
|
||||
Err(e) => return Err(format!("Error whilst decrypting keypair: {:?}", e)),
|
||||
}
|
||||
};
|
||||
|
||||
// The keystore is placed in a directory that matches the name of the public key. This
|
||||
// provides some loose protection against adding the same keystore twice.
|
||||
let dest_dir = validator_dir.join(format!("0x{}", keystore.pubkey()));
|
||||
if dest_dir.exists() {
|
||||
eprintln!(
|
||||
"Skipping import of keystore for existing public key: {:?}",
|
||||
src_keystore
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
fs::create_dir_all(&dest_dir)
|
||||
.map_err(|e| format!("Unable to create import directory: {:?}", e))?;
|
||||
|
||||
// Retain the keystore file name, but place it in the new directory.
|
||||
let dest_keystore = src_keystore
|
||||
.file_name()
|
||||
.and_then(|file_name| file_name.to_str())
|
||||
.map(|file_name_str| dest_dir.join(file_name_str))
|
||||
.ok_or_else(|| format!("Badly formatted file name: {:?}", src_keystore))?;
|
||||
|
||||
// Copy the keystore to the new location.
|
||||
fs::copy(&src_keystore, &dest_keystore)
|
||||
.map_err(|e| format!("Unable to copy keystore: {:?}", e))?;
|
||||
|
||||
eprintln!("Successfully imported keystore.");
|
||||
num_imported_keystores += 1;
|
||||
|
||||
let validator_def =
|
||||
ValidatorDefinition::new_keystore_with_password(&dest_keystore, password_opt)
|
||||
.map_err(|e| format!("Unable to create new validator definition: {:?}", e))?;
|
||||
|
||||
defs.push(validator_def);
|
||||
|
||||
defs.save(&validator_dir)
|
||||
.map_err(|e| format!("Unable to save {}: {:?}", CONFIG_FILENAME, e))?;
|
||||
|
||||
eprintln!("Successfully updated {}.", CONFIG_FILENAME);
|
||||
}
|
||||
|
||||
eprintln!("");
|
||||
eprintln!(
|
||||
"Successfully imported {} validators ({} skipped).",
|
||||
num_imported_keystores,
|
||||
keystore_paths.len() - num_imported_keystores
|
||||
);
|
||||
eprintln!("");
|
||||
eprintln!("WARNING: {}", KEYSTORE_REUSE_WARNING);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
41
account_manager/src/validator/list.rs
Normal file
41
account_manager/src/validator/list.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
use crate::VALIDATOR_DIR_FLAG;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use std::path::PathBuf;
|
||||
use validator_dir::Manager as ValidatorManager;
|
||||
|
||||
pub const CMD: &str = "list";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path to search for validator directories. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.about("Lists the names of all validators.")
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches<'_>) -> Result<(), String> {
|
||||
let data_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
VALIDATOR_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("validators"),
|
||||
)?;
|
||||
|
||||
let mgr = ValidatorManager::open(&data_dir)
|
||||
.map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?;
|
||||
|
||||
for (name, _path) in mgr
|
||||
.directory_names()
|
||||
.map_err(|e| format!("Unable to list wallets: {:?}", e))?
|
||||
{
|
||||
println!("{}", name)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
45
account_manager/src/validator/mod.rs
Normal file
45
account_manager/src/validator/mod.rs
Normal file
@@ -0,0 +1,45 @@
|
||||
pub mod create;
|
||||
pub mod deposit;
|
||||
pub mod import;
|
||||
pub mod list;
|
||||
pub mod recover;
|
||||
|
||||
use crate::common::base_wallet_dir;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use environment::Environment;
|
||||
use types::EthSpec;
|
||||
|
||||
pub const CMD: &str = "validator";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about("Provides commands for managing Eth2 validators.")
|
||||
.arg(
|
||||
Arg::with_name("base-dir")
|
||||
.long("base-dir")
|
||||
.value_name("BASE_DIRECTORY")
|
||||
.help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets")
|
||||
.takes_value(true),
|
||||
)
|
||||
.subcommand(create::cli_app())
|
||||
.subcommand(deposit::cli_app())
|
||||
.subcommand(import::cli_app())
|
||||
.subcommand(list::cli_app())
|
||||
.subcommand(recover::cli_app())
|
||||
}
|
||||
|
||||
pub fn cli_run<T: EthSpec>(matches: &ArgMatches, env: Environment<T>) -> Result<(), String> {
|
||||
let base_wallet_dir = base_wallet_dir(matches, "base-dir")?;
|
||||
|
||||
match matches.subcommand() {
|
||||
(create::CMD, Some(matches)) => create::cli_run::<T>(matches, env, base_wallet_dir),
|
||||
(deposit::CMD, Some(matches)) => deposit::cli_run::<T>(matches, env),
|
||||
(import::CMD, Some(matches)) => import::cli_run(matches),
|
||||
(list::CMD, Some(matches)) => list::cli_run(matches),
|
||||
(recover::CMD, Some(matches)) => recover::cli_run(matches),
|
||||
(unknown, _) => Err(format!(
|
||||
"{} does not have a {} command. See --help",
|
||||
CMD, unknown
|
||||
)),
|
||||
}
|
||||
}
|
||||
156
account_manager/src/validator/recover.rs
Normal file
156
account_manager/src/validator/recover.rs
Normal file
@@ -0,0 +1,156 @@
|
||||
use super::create::STORE_WITHDRAW_FLAG;
|
||||
use super::import::STDIN_PASSWORD_FLAG;
|
||||
use crate::common::{ensure_dir_exists, read_mnemonic_from_cli};
|
||||
use crate::validator::create::COUNT_FLAG;
|
||||
use crate::{SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG};
|
||||
use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder};
|
||||
use account_utils::random_password;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use eth2_wallet::bip39::Seed;
|
||||
use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType, ValidatorKeystores};
|
||||
use std::path::PathBuf;
|
||||
use validator_dir::Builder as ValidatorDirBuilder;
|
||||
pub const CMD: &str = "recover";
|
||||
pub const FIRST_INDEX_FLAG: &str = "first-index";
|
||||
pub const MNEMONIC_FLAG: &str = "mnemonic-path";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about(
|
||||
"Recovers validator private keys given a BIP-39 mnemonic phrase. \
|
||||
If you did not specify a `--first-index` or count `--count`, by default this will \
|
||||
only recover the keys associated with the validator at index 0 for an HD wallet \
|
||||
in accordance with the EIP-2333 spec.")
|
||||
.arg(
|
||||
Arg::with_name(FIRST_INDEX_FLAG)
|
||||
.long(FIRST_INDEX_FLAG)
|
||||
.value_name("FIRST_INDEX")
|
||||
.help("The first of consecutive key indexes you wish to recover.")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("0"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(COUNT_FLAG)
|
||||
.long(COUNT_FLAG)
|
||||
.value_name("COUNT")
|
||||
.help("The number of validator keys you wish to recover. Counted consecutively from the provided `--first_index`.")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("1"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(MNEMONIC_FLAG)
|
||||
.long(MNEMONIC_FLAG)
|
||||
.value_name("MNEMONIC_PATH")
|
||||
.help(
|
||||
"If present, the mnemonic will be read in from this file.",
|
||||
)
|
||||
.takes_value(true)
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path where the validator directories will be created. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SECRETS_DIR_FLAG)
|
||||
.long(SECRETS_DIR_FLAG)
|
||||
.value_name("SECRETS_DIR")
|
||||
.help(
|
||||
"The path where the validator keystore passwords will be stored. \
|
||||
Defaults to ~/.lighthouse/secrets",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STORE_WITHDRAW_FLAG)
|
||||
.long(STORE_WITHDRAW_FLAG)
|
||||
.help(
|
||||
"If present, the withdrawal keystore will be stored alongside the voting \
|
||||
keypair. It is generally recommended to *not* store the withdrawal key and \
|
||||
instead generate them from the wallet seed when required.",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STDIN_PASSWORD_FLAG)
|
||||
.long(STDIN_PASSWORD_FLAG)
|
||||
.help("If present, read passwords from stdin instead of tty."),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
VALIDATOR_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("validators"),
|
||||
)?;
|
||||
let secrets_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
SECRETS_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("secrets"),
|
||||
)?;
|
||||
let first_index: u32 = clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?;
|
||||
let count: u32 = clap_utils::parse_required(matches, COUNT_FLAG)?;
|
||||
let mnemonic_path: Option<PathBuf> = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?;
|
||||
let stdin_password = matches.is_present(STDIN_PASSWORD_FLAG);
|
||||
|
||||
ensure_dir_exists(&validator_dir)?;
|
||||
ensure_dir_exists(&secrets_dir)?;
|
||||
|
||||
eprintln!("");
|
||||
eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING.");
|
||||
eprintln!("");
|
||||
|
||||
let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_password)?;
|
||||
|
||||
let seed = Seed::new(&mnemonic, "");
|
||||
|
||||
for index in first_index..first_index + count {
|
||||
let voting_password = random_password();
|
||||
let withdrawal_password = random_password();
|
||||
|
||||
let derive = |key_type: KeyType, password: &[u8]| -> Result<Keystore, String> {
|
||||
let (secret, path) =
|
||||
recover_validator_secret_from_mnemonic(seed.as_bytes(), index, key_type)
|
||||
.map_err(|e| format!("Unable to recover validator keys: {:?}", e))?;
|
||||
|
||||
let keypair = keypair_from_secret(secret.as_bytes())
|
||||
.map_err(|e| format!("Unable build keystore: {:?}", e))?;
|
||||
|
||||
KeystoreBuilder::new(&keypair, password, format!("{}", path))
|
||||
.map_err(|e| format!("Unable build keystore: {:?}", e))?
|
||||
.build()
|
||||
.map_err(|e| format!("Unable build keystore: {:?}", e))
|
||||
};
|
||||
|
||||
let keystores = ValidatorKeystores {
|
||||
voting: derive(KeyType::Voting, voting_password.as_bytes())?,
|
||||
withdrawal: derive(KeyType::Withdrawal, withdrawal_password.as_bytes())?,
|
||||
};
|
||||
|
||||
let voting_pubkey = keystores.voting.pubkey().to_string();
|
||||
|
||||
ValidatorDirBuilder::new(validator_dir.clone(), secrets_dir.clone())
|
||||
.voting_keystore(keystores.voting, voting_password.as_bytes())
|
||||
.withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
|
||||
.store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG))
|
||||
.build()
|
||||
.map_err(|e| format!("Unable to build validator directory: {:?}", e))?;
|
||||
|
||||
println!(
|
||||
"{}/{}\tIndex: {}\t0x{}",
|
||||
index - first_index,
|
||||
count - first_index,
|
||||
index,
|
||||
voting_pubkey
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
174
account_manager/src/wallet/create.rs
Normal file
174
account_manager/src/wallet/create.rs
Normal file
@@ -0,0 +1,174 @@
|
||||
use crate::BASE_DIR_FLAG;
|
||||
use account_utils::{random_password, strip_off_newlines};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use eth2_wallet::{
|
||||
bip39::{Language, Mnemonic, MnemonicType},
|
||||
PlainText,
|
||||
};
|
||||
use eth2_wallet_manager::{LockedWallet, WalletManager, WalletType};
|
||||
use std::ffi::OsStr;
|
||||
use std::fs::{self, File};
|
||||
use std::io::prelude::*;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
pub const CMD: &str = "create";
|
||||
pub const HD_TYPE: &str = "hd";
|
||||
pub const NAME_FLAG: &str = "name";
|
||||
pub const PASSWORD_FLAG: &str = "password-file";
|
||||
pub const TYPE_FLAG: &str = "type";
|
||||
pub const MNEMONIC_FLAG: &str = "mnemonic-output-path";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about("Creates a new HD (hierarchical-deterministic) EIP-2386 wallet.")
|
||||
.arg(
|
||||
Arg::with_name(NAME_FLAG)
|
||||
.long(NAME_FLAG)
|
||||
.value_name("WALLET_NAME")
|
||||
.help(
|
||||
"The wallet will be created with this name. It is not allowed to \
|
||||
create two wallets with the same name for the same --base-dir.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(PASSWORD_FLAG)
|
||||
.long(PASSWORD_FLAG)
|
||||
.value_name("WALLET_PASSWORD_PATH")
|
||||
.help(
|
||||
"A path to a file containing the password which will unlock the wallet. \
|
||||
If the file does not exist, a random password will be generated and \
|
||||
saved at that path. To avoid confusion, if the file does not already \
|
||||
exist it must include a '.pass' suffix.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(TYPE_FLAG)
|
||||
.long(TYPE_FLAG)
|
||||
.value_name("WALLET_TYPE")
|
||||
.help(
|
||||
"The type of wallet to create. Only HD (hierarchical-deterministic) \
|
||||
wallets are supported presently..",
|
||||
)
|
||||
.takes_value(true)
|
||||
.possible_values(&[HD_TYPE])
|
||||
.default_value(HD_TYPE),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(MNEMONIC_FLAG)
|
||||
.long(MNEMONIC_FLAG)
|
||||
.value_name("MNEMONIC_PATH")
|
||||
.help(
|
||||
"If present, the mnemonic will be saved to this file. DO NOT SHARE THE MNEMONIC.",
|
||||
)
|
||||
.takes_value(true)
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> {
|
||||
let mnemonic_output_path: Option<PathBuf> = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?;
|
||||
|
||||
// Create a new random mnemonic.
|
||||
//
|
||||
// The `tiny-bip39` crate uses `thread_rng()` for this entropy.
|
||||
let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English);
|
||||
|
||||
let wallet = create_wallet_from_mnemonic(matches, &base_dir.as_path(), &mnemonic)?;
|
||||
|
||||
if let Some(path) = mnemonic_output_path {
|
||||
create_with_600_perms(&path, mnemonic.phrase().as_bytes())
|
||||
.map_err(|e| format!("Unable to write mnemonic to {:?}: {:?}", path, e))?;
|
||||
}
|
||||
|
||||
println!("Your wallet's 12-word BIP-39 mnemonic is:");
|
||||
println!();
|
||||
println!("\t{}", mnemonic.phrase());
|
||||
println!();
|
||||
println!("This mnemonic can be used to fully restore your wallet, should ");
|
||||
println!("you lose the JSON file or your password. ");
|
||||
println!();
|
||||
println!("It is very important that you DO NOT SHARE this mnemonic as it will ");
|
||||
println!("reveal the private keys of all validators and keys generated with ");
|
||||
println!("this wallet. That would be catastrophic.");
|
||||
println!();
|
||||
println!("It is also important to store a backup of this mnemonic so you can ");
|
||||
println!("recover your private keys in the case of data loss. Writing it on ");
|
||||
println!("a piece of paper and storing it in a safe place would be prudent.");
|
||||
println!();
|
||||
println!("Your wallet's UUID is:");
|
||||
println!();
|
||||
println!("\t{}", wallet.wallet().uuid());
|
||||
println!();
|
||||
println!("You do not need to backup your UUID or keep it secret.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn create_wallet_from_mnemonic(
|
||||
matches: &ArgMatches,
|
||||
base_dir: &Path,
|
||||
mnemonic: &Mnemonic,
|
||||
) -> Result<LockedWallet, String> {
|
||||
let name: String = clap_utils::parse_required(matches, NAME_FLAG)?;
|
||||
let wallet_password_path: PathBuf = clap_utils::parse_required(matches, PASSWORD_FLAG)?;
|
||||
let type_field: String = clap_utils::parse_required(matches, TYPE_FLAG)?;
|
||||
|
||||
let wallet_type = match type_field.as_ref() {
|
||||
HD_TYPE => WalletType::Hd,
|
||||
unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)),
|
||||
};
|
||||
|
||||
let mgr = WalletManager::open(&base_dir)
|
||||
.map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;
|
||||
|
||||
// Create a random password if the file does not exist.
|
||||
if !wallet_password_path.exists() {
|
||||
// To prevent users from accidentally supplying their password to the PASSWORD_FLAG and
|
||||
// create a file with that name, we require that the password has a .pass suffix.
|
||||
if wallet_password_path.extension() != Some(&OsStr::new("pass")) {
|
||||
return Err(format!(
|
||||
"Only creates a password file if that file ends in .pass: {:?}",
|
||||
wallet_password_path
|
||||
));
|
||||
}
|
||||
|
||||
create_with_600_perms(&wallet_password_path, random_password().as_bytes())
|
||||
.map_err(|e| format!("Unable to write to {:?}: {:?}", wallet_password_path, e))?;
|
||||
}
|
||||
|
||||
let wallet_password = fs::read(&wallet_password_path)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", wallet_password_path, e))
|
||||
.map(|bytes| PlainText::from(strip_off_newlines(bytes)))?;
|
||||
|
||||
let wallet = mgr
|
||||
.create_wallet(name, wallet_type, &mnemonic, wallet_password.as_bytes())
|
||||
.map_err(|e| format!("Unable to create wallet: {:?}", e))?;
|
||||
Ok(wallet)
|
||||
}
|
||||
|
||||
/// Creates a file with `600 (-rw-------)` permissions.
|
||||
pub fn create_with_600_perms<P: AsRef<Path>>(path: P, bytes: &[u8]) -> Result<(), String> {
|
||||
let path = path.as_ref();
|
||||
|
||||
let mut file =
|
||||
File::create(&path).map_err(|e| format!("Unable to create {:?}: {}", path, e))?;
|
||||
|
||||
let mut perm = file
|
||||
.metadata()
|
||||
.map_err(|e| format!("Unable to get {:?} metadata: {}", path, e))?
|
||||
.permissions();
|
||||
|
||||
perm.set_mode(0o600);
|
||||
|
||||
file.set_permissions(perm)
|
||||
.map_err(|e| format!("Unable to set {:?} permissions: {}", path, e))?;
|
||||
|
||||
file.write_all(bytes)
|
||||
.map_err(|e| format!("Unable to write to {:?}: {}", path, e))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
24
account_manager/src/wallet/list.rs
Normal file
24
account_manager/src/wallet/list.rs
Normal file
@@ -0,0 +1,24 @@
|
||||
use crate::BASE_DIR_FLAG;
|
||||
use clap::App;
|
||||
use eth2_wallet_manager::WalletManager;
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub const CMD: &str = "list";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD).about("Lists the names of all wallets.")
|
||||
}
|
||||
|
||||
pub fn cli_run(base_dir: PathBuf) -> Result<(), String> {
|
||||
let mgr = WalletManager::open(&base_dir)
|
||||
.map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;
|
||||
|
||||
for (name, _uuid) in mgr
|
||||
.wallets()
|
||||
.map_err(|e| format!("Unable to list wallets: {:?}", e))?
|
||||
{
|
||||
println!("{}", name)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
41
account_manager/src/wallet/mod.rs
Normal file
41
account_manager/src/wallet/mod.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
pub mod create;
|
||||
pub mod list;
|
||||
pub mod recover;
|
||||
|
||||
use crate::{
|
||||
common::{base_wallet_dir, ensure_dir_exists},
|
||||
BASE_DIR_FLAG,
|
||||
};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
|
||||
pub const CMD: &str = "wallet";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about("Manage wallets, from which validator keys can be derived.")
|
||||
.arg(
|
||||
Arg::with_name(BASE_DIR_FLAG)
|
||||
.long(BASE_DIR_FLAG)
|
||||
.value_name("BASE_DIRECTORY")
|
||||
.help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets")
|
||||
.takes_value(true),
|
||||
)
|
||||
.subcommand(create::cli_app())
|
||||
.subcommand(list::cli_app())
|
||||
.subcommand(recover::cli_app())
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
let base_dir = base_wallet_dir(matches, BASE_DIR_FLAG)?;
|
||||
ensure_dir_exists(&base_dir)?;
|
||||
|
||||
match matches.subcommand() {
|
||||
(create::CMD, Some(matches)) => create::cli_run(matches, base_dir),
|
||||
(list::CMD, Some(_)) => list::cli_run(base_dir),
|
||||
(recover::CMD, Some(matches)) => recover::cli_run(matches, base_dir),
|
||||
(unknown, _) => Err(format!(
|
||||
"{} does not have a {} command. See --help",
|
||||
CMD, unknown
|
||||
)),
|
||||
}
|
||||
}
|
||||
87
account_manager/src/wallet/recover.rs
Normal file
87
account_manager/src/wallet/recover.rs
Normal file
@@ -0,0 +1,87 @@
|
||||
use crate::common::read_mnemonic_from_cli;
|
||||
use crate::wallet::create::create_wallet_from_mnemonic;
|
||||
use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub const CMD: &str = "recover";
|
||||
pub const MNEMONIC_FLAG: &str = "mnemonic-path";
|
||||
pub const STDIN_PASSWORD_FLAG: &str = "stdin-passwords";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about("Recovers an EIP-2386 wallet from a given a BIP-39 mnemonic phrase.")
|
||||
.arg(
|
||||
Arg::with_name(NAME_FLAG)
|
||||
.long(NAME_FLAG)
|
||||
.value_name("WALLET_NAME")
|
||||
.help(
|
||||
"The wallet will be created with this name. It is not allowed to \
|
||||
create two wallets with the same name for the same --base-dir.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(PASSWORD_FLAG)
|
||||
.long(PASSWORD_FLAG)
|
||||
.value_name("PASSWORD_FILE_PATH")
|
||||
.help(
|
||||
"This will be the new password for your recovered wallet. \
|
||||
A path to a file containing the password which will unlock the wallet. \
|
||||
If the file does not exist, a random password will be generated and \
|
||||
saved at that path. To avoid confusion, if the file does not already \
|
||||
exist it must include a '.pass' suffix.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(MNEMONIC_FLAG)
|
||||
.long(MNEMONIC_FLAG)
|
||||
.value_name("MNEMONIC_PATH")
|
||||
.help("If present, the mnemonic will be read in from this file.")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(TYPE_FLAG)
|
||||
.long(TYPE_FLAG)
|
||||
.value_name("WALLET_TYPE")
|
||||
.help(
|
||||
"The type of wallet to create. Only HD (hierarchical-deterministic) \
|
||||
wallets are supported presently..",
|
||||
)
|
||||
.takes_value(true)
|
||||
.possible_values(&[HD_TYPE])
|
||||
.default_value(HD_TYPE),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STDIN_PASSWORD_FLAG)
|
||||
.long(STDIN_PASSWORD_FLAG)
|
||||
.help("If present, read passwords from stdin instead of tty."),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> {
|
||||
let mnemonic_path: Option<PathBuf> = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?;
|
||||
let stdin_password = matches.is_present(STDIN_PASSWORD_FLAG);
|
||||
|
||||
eprintln!("");
|
||||
eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING.");
|
||||
eprintln!("");
|
||||
|
||||
let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_password)?;
|
||||
|
||||
let wallet = create_wallet_from_mnemonic(matches, &wallet_base_dir.as_path(), &mnemonic)
|
||||
.map_err(|e| format!("Unable to create wallet: {:?}", e))?;
|
||||
|
||||
println!("Your wallet has been successfully recovered.");
|
||||
println!();
|
||||
println!("Your wallet's UUID is:");
|
||||
println!();
|
||||
println!("\t{}", wallet.wallet().uuid());
|
||||
println!();
|
||||
println!("You do not need to backup your UUID or keep it secret.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "beacon_node"
|
||||
version = "0.1.0"
|
||||
version = "0.2.11"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
|
||||
edition = "2018"
|
||||
|
||||
@@ -9,31 +9,34 @@ name = "beacon_node"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[dev-dependencies]
|
||||
node_test_rig = { path = "../tests/node_test_rig" }
|
||||
node_test_rig = { path = "../testing/node_test_rig" }
|
||||
|
||||
[features]
|
||||
write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing.
|
||||
|
||||
[dependencies]
|
||||
eth2_config = { path = "../eth2/utils/eth2_config" }
|
||||
lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" }
|
||||
eth2_config = { path = "../common/eth2_config" }
|
||||
beacon_chain = { path = "beacon_chain" }
|
||||
types = { path = "../eth2/types" }
|
||||
types = { path = "../consensus/types" }
|
||||
store = { path = "./store" }
|
||||
client = { path = "client" }
|
||||
version = { path = "version" }
|
||||
clap = "2.33.0"
|
||||
rand = "0.7.2"
|
||||
rand = "0.7.3"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
|
||||
slog-term = "2.4.2"
|
||||
slog-async = "2.3.0"
|
||||
ctrlc = { version = "3.1.3", features = ["termination"] }
|
||||
tokio = "0.1.22"
|
||||
tokio-timer = "0.2.12"
|
||||
exit-future = "0.1.4"
|
||||
env_logger = "0.7.1"
|
||||
slog-term = "2.5.0"
|
||||
slog-async = "2.5.0"
|
||||
ctrlc = { version = "3.1.4", features = ["termination"] }
|
||||
tokio = { version = "0.2.21", features = ["time"] }
|
||||
exit-future = "0.2.0"
|
||||
dirs = "2.0.2"
|
||||
logging = { path = "../eth2/utils/logging" }
|
||||
futures = "0.1.29"
|
||||
logging = { path = "../common/logging" }
|
||||
futures = "0.3.5"
|
||||
environment = { path = "../lighthouse/environment" }
|
||||
genesis = { path = "genesis" }
|
||||
eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" }
|
||||
eth2-libp2p = { path = "./eth2-libp2p" }
|
||||
eth2_ssz = { path = "../eth2/utils/ssz" }
|
||||
eth2_testnet_config = { path = "../common/eth2_testnet_config" }
|
||||
eth2_libp2p = { path = "./eth2_libp2p" }
|
||||
eth2_ssz = "0.1.2"
|
||||
serde = "1.0.110"
|
||||
clap_utils = { path = "../common/clap_utils" }
|
||||
hyper = "0.13.5"
|
||||
lighthouse_version = { path = "../common/lighthouse_version" }
|
||||
|
||||
@@ -1,48 +1,61 @@
|
||||
[package]
|
||||
name = "beacon_chain"
|
||||
version = "0.1.0"
|
||||
version = "0.2.0"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"]
|
||||
edition = "2018"
|
||||
|
||||
[features]
|
||||
|
||||
default = ["participation_metrics"]
|
||||
write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing.
|
||||
|
||||
[dependencies]
|
||||
eth2_config = { path = "../../eth2/utils/eth2_config" }
|
||||
merkle_proof = { path = "../../eth2/utils/merkle_proof" }
|
||||
store = { path = "../store" }
|
||||
parking_lot = "0.9.0"
|
||||
lazy_static = "1.4.0"
|
||||
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
|
||||
lighthouse_bootstrap = { path = "../../eth2/utils/lighthouse_bootstrap" }
|
||||
log = "0.4.8"
|
||||
operation_pool = { path = "../../eth2/operation_pool" }
|
||||
rayon = "1.2.0"
|
||||
serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
serde_yaml = "0.8.11"
|
||||
serde_json = "1.0.41"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
sloggers = "0.3.4"
|
||||
slot_clock = { path = "../../eth2/utils/slot_clock" }
|
||||
eth2_hashing = "0.1.0"
|
||||
eth2_ssz = "0.1.2"
|
||||
eth2_ssz_types = { path = "../../eth2/utils/ssz_types" }
|
||||
eth2_ssz_derive = "0.1.0"
|
||||
state_processing = { path = "../../eth2/state_processing" }
|
||||
tree_hash = "0.1.0"
|
||||
types = { path = "../../eth2/types" }
|
||||
lmd_ghost = { path = "../../eth2/lmd_ghost" }
|
||||
eth1 = { path = "../eth1" }
|
||||
websocket_server = { path = "../websocket_server" }
|
||||
futures = "0.1.25"
|
||||
exit-future = "0.1.3"
|
||||
genesis = { path = "../genesis" }
|
||||
integer-sqrt = "0.1"
|
||||
rand = "0.7.2"
|
||||
participation_metrics = [] # Exposes validator participation metrics to Prometheus.
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.1.0"
|
||||
int_to_bytes = { path = "../../consensus/int_to_bytes" }
|
||||
maplit = "1.0.2"
|
||||
|
||||
[dependencies]
|
||||
eth2_config = { path = "../../common/eth2_config" }
|
||||
merkle_proof = { path = "../../consensus/merkle_proof" }
|
||||
store = { path = "../store" }
|
||||
parking_lot = "0.11.0"
|
||||
lazy_static = "1.4.0"
|
||||
smallvec = "1.4.1"
|
||||
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
|
||||
log = "0.4.8"
|
||||
operation_pool = { path = "../operation_pool" }
|
||||
rayon = "1.3.0"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.110"
|
||||
serde_yaml = "0.8.11"
|
||||
serde_json = "1.0.52"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
slog-term = "2.6.0"
|
||||
sloggers = "1.0.0"
|
||||
slot_clock = { path = "../../common/slot_clock" }
|
||||
eth2_hashing = "0.1.0"
|
||||
eth2_ssz = "0.1.2"
|
||||
eth2_ssz_types = { path = "../../consensus/ssz_types" }
|
||||
eth2_ssz_derive = "0.1.0"
|
||||
state_processing = { path = "../../consensus/state_processing" }
|
||||
tree_hash = "0.1.0"
|
||||
types = { path = "../../consensus/types" }
|
||||
tokio = "0.2.21"
|
||||
eth1 = { path = "../eth1" }
|
||||
websocket_server = { path = "../websocket_server" }
|
||||
futures = "0.3.5"
|
||||
genesis = { path = "../genesis" }
|
||||
integer-sqrt = "0.1.3"
|
||||
rand = "0.7.3"
|
||||
rand_core = "0.5.1"
|
||||
proto_array = { path = "../../consensus/proto_array" }
|
||||
lru = "0.5.1"
|
||||
tempfile = "3.1.0"
|
||||
bitvec = "0.17.4"
|
||||
bls = { path = "../../crypto/bls" }
|
||||
safe_arith = { path = "../../consensus/safe_arith" }
|
||||
fork_choice = { path = "../../consensus/fork_choice" }
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
bus = "2.2.3"
|
||||
derivative = "2.1.1"
|
||||
itertools = "0.9.0"
|
||||
regex = "1.3.9"
|
||||
|
||||
853
beacon_node/beacon_chain/src/attestation_verification.rs
Normal file
853
beacon_node/beacon_chain/src/attestation_verification.rs
Normal file
@@ -0,0 +1,853 @@
|
||||
//! Provides verification for the following attestations:
|
||||
//!
|
||||
//! - "Unaggregated" `Attestation` received from either gossip or the HTTP API.
|
||||
//! - "Aggregated" `SignedAggregateAndProof` received from gossip or the HTTP API.
|
||||
//!
|
||||
//! For clarity, we define:
|
||||
//!
|
||||
//! - Unaggregated: an `Attestation` object that has exactly one aggregation bit set.
|
||||
//! - Aggregated: a `SignedAggregateAndProof` which has zero or more signatures.
|
||||
//! - Note: "zero or more" may soon change to "one or more".
|
||||
//!
|
||||
//! Similar to the `crate::block_verification` module, we try to avoid doing duplicate verification
|
||||
//! work as an attestation passes through different stages of verification. We represent these
|
||||
//! different stages of verification with wrapper types. These wrapper-types flow in a particular
|
||||
//! pattern:
|
||||
//!
|
||||
//! ```ignore
|
||||
//! types::Attestation types::SignedAggregateAndProof
|
||||
//! | |
|
||||
//! ▼ ▼
|
||||
//! VerifiedUnaggregatedAttestation VerifiedAggregatedAttestation
|
||||
//! | |
|
||||
//! -------------------------------------
|
||||
//! |
|
||||
//! ▼
|
||||
//! impl SignatureVerifiedAttestation
|
||||
//! ```
|
||||
|
||||
use crate::{
|
||||
beacon_chain::{
|
||||
ATTESTATION_CACHE_LOCK_TIMEOUT, HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
|
||||
VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
|
||||
},
|
||||
metrics,
|
||||
observed_attestations::ObserveOutcome,
|
||||
observed_attesters::Error as ObservedAttestersError,
|
||||
BeaconChain, BeaconChainError, BeaconChainTypes,
|
||||
};
|
||||
use bls::verify_signature_sets;
|
||||
use slog::debug;
|
||||
use slot_clock::SlotClock;
|
||||
use state_processing::{
|
||||
common::get_indexed_attestation,
|
||||
per_block_processing::errors::AttestationValidationError,
|
||||
per_slot_processing,
|
||||
signature_sets::{
|
||||
indexed_attestation_signature_set_from_pubkeys,
|
||||
signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set,
|
||||
},
|
||||
};
|
||||
use std::borrow::Cow;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{
|
||||
Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation,
|
||||
RelativeEpoch, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
|
||||
};
|
||||
|
||||
/// Returned when an attestation was not successfully verified. It might not have been verified for
|
||||
/// two reasons:
|
||||
///
|
||||
/// - The attestation is malformed or inappropriate for the context (indicated by all variants
|
||||
/// other than `BeaconChainError`).
|
||||
/// - The application encountered an internal error whilst attempting to determine validity
|
||||
/// (the `BeaconChainError` variant)
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
/// The attestation is from a slot that is later than the current slot (with respect to the
|
||||
/// gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
FutureSlot {
|
||||
attestation_slot: Slot,
|
||||
latest_permissible_slot: Slot,
|
||||
},
|
||||
/// The attestation is from a slot that is prior to the earliest permissible slot (with
|
||||
/// respect to the gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
PastSlot {
|
||||
attestation_slot: Slot,
|
||||
earliest_permissible_slot: Slot,
|
||||
},
|
||||
/// The attestations aggregation bits were empty when they shouldn't be.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
EmptyAggregationBitfield,
|
||||
/// The `selection_proof` on the aggregate attestation does not elect it as an aggregator.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
InvalidSelectionProof { aggregator_index: u64 },
|
||||
/// The `selection_proof` on the aggregate attestation selects it as a validator, however the
|
||||
/// aggregator index is not in the committee for that attestation.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
AggregatorNotInCommittee { aggregator_index: u64 },
|
||||
/// The aggregator index refers to a validator index that we have not seen.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
AggregatorPubkeyUnknown(u64),
|
||||
/// The attestation has been seen before; either in a block, on the gossip network or from a
|
||||
/// local validator.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this attestation is valid, however we have already observed it and do not
|
||||
/// need to observe it again.
|
||||
AttestationAlreadyKnown(Hash256),
|
||||
/// There has already been an aggregation observed for this validator, we refuse to process a
|
||||
/// second.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this attestation is valid, however we have already observed an aggregate
|
||||
/// attestation from this validator for this epoch and should not observe another.
|
||||
AggregatorAlreadyKnown(u64),
|
||||
/// The aggregator index is higher than the maximum possible validator count.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
ValidatorIndexTooHigh(usize),
|
||||
/// The `attestation.data.beacon_block_root` block is unknown.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The attestation points to a block we have not yet imported. It's unclear if the attestation
|
||||
/// is valid or not.
|
||||
UnknownHeadBlock { beacon_block_root: Hash256 },
|
||||
/// The `attestation.data.slot` is not from the same epoch as `data.target.epoch`.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
BadTargetEpoch,
|
||||
/// The target root of the attestation points to a block that we have not verified.
|
||||
///
|
||||
/// This is invalid behaviour whilst we first check for `UnknownHeadBlock`.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
UnknownTargetRoot(Hash256),
|
||||
/// A signature on the attestation is invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
InvalidSignature,
|
||||
/// There is no committee for the slot and committee index of this attestation and the
|
||||
/// attestation should not have been produced.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
NoCommitteeForSlotAndIndex { slot: Slot, index: CommitteeIndex },
|
||||
/// The unaggregated attestation doesn't have only one aggregation bit set.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
NotExactlyOneAggregationBitSet(usize),
|
||||
/// We have already observed an attestation for the `validator_index` and refuse to process
|
||||
/// another.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this attestation is valid, however we have already observed a
|
||||
/// single-participant attestation from this validator for this epoch and should not observe
|
||||
/// another.
|
||||
PriorAttestationKnown { validator_index: u64, epoch: Epoch },
|
||||
/// The attestation is for an epoch in the future (with respect to the gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
FutureEpoch {
|
||||
attestation_epoch: Epoch,
|
||||
current_epoch: Epoch,
|
||||
},
|
||||
/// The attestation is for an epoch in the past (with respect to the gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
PastEpoch {
|
||||
attestation_epoch: Epoch,
|
||||
current_epoch: Epoch,
|
||||
},
|
||||
/// The attestation is attesting to a state that is later than itself. (Viz., attesting to the
|
||||
/// future).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
AttestsToFutureBlock { block: Slot, attestation: Slot },
|
||||
/// The attestation was received on an invalid attestation subnet.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
InvalidSubnetId {
|
||||
received: SubnetId,
|
||||
expected: SubnetId,
|
||||
},
|
||||
/// The attestation failed the `state_processing` verification stage.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
Invalid(AttestationValidationError),
|
||||
/// The attestation head block is too far behind the attestation slot, causing many skip slots.
|
||||
/// This is deemed a DoS risk.
|
||||
TooManySkippedSlots {
|
||||
head_block_slot: Slot,
|
||||
attestation_slot: Slot,
|
||||
},
|
||||
/// There was an error whilst processing the attestation. It is not known if it is valid or invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// We were unable to process this attestation due to an internal error. It's unclear if the
|
||||
/// attestation is valid.
|
||||
BeaconChainError(BeaconChainError),
|
||||
}
|
||||
|
||||
impl From<BeaconChainError> for Error {
|
||||
fn from(e: BeaconChainError) -> Self {
|
||||
Error::BeaconChainError(e)
|
||||
}
|
||||
}
|
||||
|
||||
/// Wraps a `SignedAggregateAndProof` that has been verified for propagation on the gossip network.
|
||||
pub struct VerifiedAggregatedAttestation<T: BeaconChainTypes> {
|
||||
signed_aggregate: SignedAggregateAndProof<T::EthSpec>,
|
||||
indexed_attestation: IndexedAttestation<T::EthSpec>,
|
||||
}
|
||||
|
||||
/// Wraps an `Attestation` that has been verified for propagation on the gossip network.
|
||||
pub struct VerifiedUnaggregatedAttestation<T: BeaconChainTypes> {
|
||||
attestation: Attestation<T::EthSpec>,
|
||||
indexed_attestation: IndexedAttestation<T::EthSpec>,
|
||||
}
|
||||
|
||||
/// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive
|
||||
/// macro.
|
||||
impl<T: BeaconChainTypes> Clone for VerifiedUnaggregatedAttestation<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
attestation: self.attestation.clone(),
|
||||
indexed_attestation: self.indexed_attestation.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A helper trait implemented on wrapper types that can be progressed to a state where they can be
|
||||
/// verified for application to fork choice.
|
||||
pub trait SignatureVerifiedAttestation<T: BeaconChainTypes> {
|
||||
fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec>;
|
||||
}
|
||||
|
||||
impl<'a, T: BeaconChainTypes> SignatureVerifiedAttestation<T> for VerifiedAggregatedAttestation<T> {
|
||||
fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> {
|
||||
&self.indexed_attestation
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> SignatureVerifiedAttestation<T> for VerifiedUnaggregatedAttestation<T> {
|
||||
fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> {
|
||||
&self.indexed_attestation
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
    /// Returns `Ok(Self)` if the `signed_aggregate` is valid to be (re)published on the gossip
    /// network.
    ///
    /// Checks are ordered so that cheap structural checks run before the expensive signature
    /// verification. The aggregate/aggregator are only marked as "observed" *after* full
    /// verification, so an invalid message cannot block a later valid one.
    pub fn verify(
        signed_aggregate: SignedAggregateAndProof<T::EthSpec>,
        chain: &BeaconChain<T>,
    ) -> Result<Self, Error> {
        let attestation = &signed_aggregate.message.aggregate;

        // Ensure attestation is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a
        // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
        //
        // We do not queue future attestations for later processing.
        verify_propagation_slot_range(chain, attestation)?;

        // Ensure the valid aggregated attestation has not already been seen locally.
        let attestation_root = attestation.tree_hash_root();
        if chain
            .observed_attestations
            .is_known(attestation, attestation_root)
            .map_err(|e| Error::BeaconChainError(e.into()))?
        {
            return Err(Error::AttestationAlreadyKnown(attestation_root));
        }

        let aggregator_index = signed_aggregate.message.aggregator_index;

        // Ensure there has been no other observed aggregate for the given `aggregator_index`.
        //
        // Note: do not observe yet, only observe once the attestation has been verified.
        match chain
            .observed_aggregators
            .validator_has_been_observed(attestation, aggregator_index as usize)
        {
            Ok(true) => Err(Error::AggregatorAlreadyKnown(aggregator_index)),
            Ok(false) => Ok(()),
            Err(ObservedAttestersError::ValidatorIndexTooHigh(i)) => {
                Err(Error::ValidatorIndexTooHigh(i))
            }
            Err(e) => Err(BeaconChainError::from(e).into()),
        }?;

        // Ensure the block being voted for (attestation.data.beacon_block_root) passes validation.
        // Don't enforce the skip slot restriction for aggregates.
        //
        // This indirectly checks to see if the `attestation.data.beacon_block_root` is in our fork
        // choice. Any known, non-finalized, processed block should be in fork choice, so this
        // check immediately filters out attestations that attest to a block that has not been
        // processed.
        //
        // Attestations must be for a known block. If the block is unknown, we simply drop the
        // attestation and do not delay consideration for later.
        verify_head_block_is_known(chain, &attestation, None)?;

        // Ensure that the attestation has participants.
        if attestation.aggregation_bits.is_zero() {
            return Err(Error::EmptyAggregationBitfield);
        }

        let indexed_attestation =
            map_attestation_committee(chain, attestation, |(committee, _)| {
                // Note: this clones the signature which is known to be a relatively slow operation.
                //
                // Future optimizations should remove this clone.
                let selection_proof =
                    SelectionProof::from(signed_aggregate.message.selection_proof.clone());

                // Ensure the selection proof actually elects this validator as an aggregator for
                // a committee of this size.
                if !selection_proof
                    .is_aggregator(committee.committee.len(), &chain.spec)
                    .map_err(|e| Error::BeaconChainError(e.into()))?
                {
                    return Err(Error::InvalidSelectionProof { aggregator_index });
                }

                // Ensure the aggregator is a member of the committee for which it is aggregating.
                if !committee.committee.contains(&(aggregator_index as usize)) {
                    return Err(Error::AggregatorNotInCommittee { aggregator_index });
                }

                get_indexed_attestation(committee.committee, &attestation)
                    .map_err(|e| BeaconChainError::from(e).into())
            })?;

        // Ensure that all signatures are valid.
        if !verify_signed_aggregate_signatures(chain, &signed_aggregate, &indexed_attestation)? {
            return Err(Error::InvalidSignature);
        }

        // Observe the valid attestation so we do not re-process it.
        //
        // It's important to double check that the attestation is not already known, otherwise two
        // attestations processed at the same time could be published.
        if let ObserveOutcome::AlreadyKnown = chain
            .observed_attestations
            .observe_attestation(attestation, Some(attestation_root))
            .map_err(|e| Error::BeaconChainError(e.into()))?
        {
            return Err(Error::AttestationAlreadyKnown(attestation_root));
        }

        // Observe the aggregator so we don't process another aggregate from them.
        //
        // It's important to double check that the attestation is not already known, otherwise two
        // attestations processed at the same time could be published.
        if chain
            .observed_aggregators
            .observe_validator(&attestation, aggregator_index as usize)
            .map_err(BeaconChainError::from)?
        {
            return Err(Error::PriorAttestationKnown {
                validator_index: aggregator_index,
                epoch: attestation.data.target.epoch,
            });
        }

        Ok(VerifiedAggregatedAttestation {
            signed_aggregate,
            indexed_attestation,
        })
    }

    /// A helper function to add this aggregate to `beacon_chain.op_pool`.
    pub fn add_to_pool(self, chain: &BeaconChain<T>) -> Result<Self, Error> {
        chain.add_to_block_inclusion_pool(self)
    }

    /// Returns the underlying `attestation` for the `signed_aggregate`.
    pub fn attestation(&self) -> &Attestation<T::EthSpec> {
        &self.signed_aggregate.message.aggregate
    }
}
|
||||
|
||||
impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
|
||||
/// Returns `Ok(Self)` if the `attestation` is valid to be (re)published on the gossip
|
||||
/// network.
|
||||
///
|
||||
/// `subnet_id` is the subnet from which we received this attestation. This function will
|
||||
/// verify that it was received on the correct subnet.
|
||||
pub fn verify(
|
||||
attestation: Attestation<T::EthSpec>,
|
||||
subnet_id: SubnetId,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Self, Error> {
|
||||
// Ensure attestation is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a
|
||||
// MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
|
||||
//
|
||||
// We do not queue future attestations for later processing.
|
||||
verify_propagation_slot_range(chain, &attestation)?;
|
||||
|
||||
// Check to ensure that the attestation is "unaggregated". I.e., it has exactly one
|
||||
// aggregation bit set.
|
||||
let num_aggreagtion_bits = attestation.aggregation_bits.num_set_bits();
|
||||
if num_aggreagtion_bits != 1 {
|
||||
return Err(Error::NotExactlyOneAggregationBitSet(num_aggreagtion_bits));
|
||||
}
|
||||
|
||||
// Attestations must be for a known block. If the block is unknown, we simply drop the
|
||||
// attestation and do not delay consideration for later.
|
||||
//
|
||||
// Enforce a maximum skip distance for unaggregated attestations.
|
||||
verify_head_block_is_known(chain, &attestation, chain.config.import_max_skip_slots)?;
|
||||
|
||||
let (indexed_attestation, committees_per_slot) =
|
||||
obtain_indexed_attestation_and_committees_per_slot(chain, &attestation)?;
|
||||
|
||||
let expected_subnet_id = SubnetId::compute_subnet_for_attestation_data::<T::EthSpec>(
|
||||
&indexed_attestation.data,
|
||||
committees_per_slot,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::from)?;
|
||||
|
||||
// Ensure the attestation is from the correct subnet.
|
||||
if subnet_id != expected_subnet_id {
|
||||
return Err(Error::InvalidSubnetId {
|
||||
received: subnet_id,
|
||||
expected: expected_subnet_id,
|
||||
});
|
||||
}
|
||||
|
||||
let validator_index = *indexed_attestation
|
||||
.attesting_indices
|
||||
.first()
|
||||
.ok_or_else(|| Error::NotExactlyOneAggregationBitSet(0))?;
|
||||
|
||||
/*
|
||||
* The attestation is the first valid attestation received for the participating validator
|
||||
* for the slot, attestation.data.slot.
|
||||
*/
|
||||
if chain
|
||||
.observed_attesters
|
||||
.validator_has_been_observed(&attestation, validator_index as usize)
|
||||
.map_err(BeaconChainError::from)?
|
||||
{
|
||||
return Err(Error::PriorAttestationKnown {
|
||||
validator_index,
|
||||
epoch: attestation.data.target.epoch,
|
||||
});
|
||||
}
|
||||
|
||||
// The aggregate signature of the attestation is valid.
|
||||
verify_attestation_signature(chain, &indexed_attestation)?;
|
||||
|
||||
// Now that the attestation has been fully verified, store that we have received a valid
|
||||
// attestation from this validator.
|
||||
//
|
||||
// It's important to double check that the attestation still hasn't been observed, since
|
||||
// there can be a race-condition if we receive two attestations at the same time and
|
||||
// process them in different threads.
|
||||
if chain
|
||||
.observed_attesters
|
||||
.observe_validator(&attestation, validator_index as usize)
|
||||
.map_err(BeaconChainError::from)?
|
||||
{
|
||||
return Err(Error::PriorAttestationKnown {
|
||||
validator_index,
|
||||
epoch: attestation.data.target.epoch,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
attestation,
|
||||
indexed_attestation,
|
||||
})
|
||||
}
|
||||
|
||||
/// A helper function to add this attestation to `beacon_chain.naive_aggregation_pool`.
|
||||
pub fn add_to_pool(self, chain: &BeaconChain<T>) -> Result<Self, Error> {
|
||||
chain.add_to_naive_aggregation_pool(self)
|
||||
}
|
||||
|
||||
/// Returns the wrapped `attestation`.
|
||||
pub fn attestation(&self) -> &Attestation<T::EthSpec> {
|
||||
&self.attestation
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying attestation.
|
||||
///
|
||||
/// Only use during testing since modifying the `IndexedAttestation` can cause the attestation
|
||||
/// to no-longer be valid.
|
||||
pub fn __indexed_attestation_mut(&mut self) -> &mut IndexedAttestation<T::EthSpec> {
|
||||
&mut self.indexed_attestation
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `Ok(())` if the `attestation.data.beacon_block_root` is known to this chain.
|
||||
///
|
||||
/// The block root may not be known for two reasons:
|
||||
///
|
||||
/// 1. The block has never been verified by our application.
|
||||
/// 2. The block is prior to the latest finalized block.
|
||||
///
|
||||
/// Case (1) is the exact thing we're trying to detect. However case (2) is a little different, but
|
||||
/// it's still fine to reject here because there's no need for us to handle attestations that are
|
||||
/// already finalized.
|
||||
fn verify_head_block_is_known<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
attestation: &Attestation<T::EthSpec>,
|
||||
max_skip_slots: Option<u64>,
|
||||
) -> Result<(), Error> {
|
||||
if let Some(block) = chain
|
||||
.fork_choice
|
||||
.read()
|
||||
.get_block(&attestation.data.beacon_block_root)
|
||||
{
|
||||
// Reject any block that exceeds our limit on skipped slots.
|
||||
if let Some(max_skip_slots) = max_skip_slots {
|
||||
if attestation.data.slot > block.slot + max_skip_slots {
|
||||
return Err(Error::TooManySkippedSlots {
|
||||
head_block_slot: block.slot,
|
||||
attestation_slot: attestation.data.slot,
|
||||
});
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::UnknownHeadBlock {
|
||||
beacon_block_root: attestation.data.beacon_block_root,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify that the `attestation` is within the acceptable gossip propagation range, with reference
|
||||
/// to the current slot of the `chain`.
|
||||
///
|
||||
/// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
|
||||
pub fn verify_propagation_slot_range<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
attestation: &Attestation<T::EthSpec>,
|
||||
) -> Result<(), Error> {
|
||||
let attestation_slot = attestation.data.slot;
|
||||
|
||||
let latest_permissible_slot = chain
|
||||
.slot_clock
|
||||
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
|
||||
.ok_or_else(|| BeaconChainError::UnableToReadSlot)?;
|
||||
if attestation_slot > latest_permissible_slot {
|
||||
return Err(Error::FutureSlot {
|
||||
attestation_slot,
|
||||
latest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
// Taking advantage of saturating subtraction on `Slot`.
|
||||
let earliest_permissible_slot = chain
|
||||
.slot_clock
|
||||
.now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
|
||||
.ok_or_else(|| BeaconChainError::UnableToReadSlot)?
|
||||
- T::EthSpec::slots_per_epoch();
|
||||
if attestation_slot < earliest_permissible_slot {
|
||||
return Err(Error::PastSlot {
|
||||
attestation_slot,
|
||||
earliest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verifies that the signature of the `indexed_attestation` is valid.
///
/// Returns `Err(Error::InvalidSignature)` on an invalid signature and other `Err` variants for
/// failures that prevented verification (lock timeouts, signature-set construction errors).
pub fn verify_attestation_signature<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    indexed_attestation: &IndexedAttestation<T::EthSpec>,
) -> Result<(), Error> {
    let signature_setup_timer =
        metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SIGNATURE_SETUP_TIMES);

    // Bounded lock acquisition: fail with a timeout error rather than blocking indefinitely.
    let pubkey_cache = chain
        .validator_pubkey_cache
        .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
        .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;

    // The head state's fork is read (and the head lock immediately released) for
    // signing-domain computation below.
    let fork = chain
        .canonical_head
        .try_read_for(HEAD_LOCK_TIMEOUT)
        .ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout)
        .map(|head| head.beacon_state.fork)?;

    let signature_set = indexed_attestation_signature_set_from_pubkeys(
        |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
        &indexed_attestation.signature,
        &indexed_attestation,
        &fork,
        chain.genesis_validators_root,
        &chain.spec,
    )
    .map_err(BeaconChainError::SignatureSetError)?;

    metrics::stop_timer(signature_setup_timer);

    // Timer is stopped implicitly when dropped at the end of this function.
    let _signature_verification_timer =
        metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SIGNATURE_TIMES);

    if signature_set.verify() {
        Ok(())
    } else {
        Err(Error::InvalidSignature)
    }
}
|
||||
|
||||
/// Verifies all the signatures in a `SignedAggregateAndProof` using BLS batch verification. This
/// includes three signatures:
///
/// - `signed_aggregate.signature`
/// - `signed_aggregate.message.selection_proof`
/// - `signed_aggregate.message.aggregate.signature`
///
/// # Returns
///
/// - `Ok(true)`: if all signatures are valid.
/// - `Ok(false)`: if one or more signatures are invalid.
/// - `Err(e)`: if there was an error preventing signature verification.
pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    signed_aggregate: &SignedAggregateAndProof<T::EthSpec>,
    indexed_attestation: &IndexedAttestation<T::EthSpec>,
) -> Result<bool, Error> {
    // Bounded lock acquisition: fail with a timeout error rather than blocking indefinitely.
    let pubkey_cache = chain
        .validator_pubkey_cache
        .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
        .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;

    // The aggregator must have a known public key, otherwise the signature sets below could not
    // be constructed.
    let aggregator_index = signed_aggregate.message.aggregator_index;
    if aggregator_index >= pubkey_cache.len() as u64 {
        return Err(Error::AggregatorPubkeyUnknown(aggregator_index));
    }

    // The head state's fork is read (and the head lock immediately released) for
    // signing-domain computation below.
    let fork = chain
        .canonical_head
        .try_read_for(HEAD_LOCK_TIMEOUT)
        .ok_or_else(|| BeaconChainError::CanonicalHeadLockTimeout)
        .map(|head| head.beacon_state.fork)?;

    let signature_sets = vec![
        signed_aggregate_selection_proof_signature_set(
            |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
            &signed_aggregate,
            &fork,
            chain.genesis_validators_root,
            &chain.spec,
        )
        .map_err(BeaconChainError::SignatureSetError)?,
        signed_aggregate_signature_set(
            |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
            &signed_aggregate,
            &fork,
            chain.genesis_validators_root,
            &chain.spec,
        )
        .map_err(BeaconChainError::SignatureSetError)?,
        indexed_attestation_signature_set_from_pubkeys(
            |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
            &indexed_attestation.signature,
            &indexed_attestation,
            &fork,
            chain.genesis_validators_root,
            &chain.spec,
        )
        .map_err(BeaconChainError::SignatureSetError)?,
    ];

    // Batch-verify the three sets together; any single invalid signature yields `false`.
    Ok(verify_signature_sets(signature_sets.iter()))
}
|
||||
|
||||
/// The number of committees in a single slot; an alias to assist readability of signatures
/// that pass this value alongside a committee.
type CommitteesPerSlot = u64;
|
||||
|
||||
/// Returns the `indexed_attestation` and committee count per slot for the `attestation` using the
/// public keys cached in the `chain`.
///
/// Thin wrapper around `map_attestation_committee`; see that function for how the committee is
/// resolved (shuffling cache or a state read from the DB).
pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    attestation: &Attestation<T::EthSpec>,
) -> Result<(IndexedAttestation<T::EthSpec>, CommitteesPerSlot), Error> {
    map_attestation_committee(chain, attestation, |(committee, committees_per_slot)| {
        get_indexed_attestation(committee.committee, &attestation)
            .map(|attestation| (attestation, committees_per_slot))
            .map_err(|e| BeaconChainError::from(e).into())
    })
}
|
||||
|
||||
/// Runs the `map_fn` with the committee and committee count per slot for the given `attestation`.
///
/// This function exists in this odd "map" pattern because efficiently obtaining the committee for
/// an attestation can be complex. It might involve reading straight from the
/// `beacon_chain.shuffling_cache` or it might involve reading it from a state from the DB. Due to
/// the complexities of `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here.
///
/// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state
/// from disk and then update the `shuffling_cache`.
pub fn map_attestation_committee<'a, T, F, R>(
    chain: &'a BeaconChain<T>,
    attestation: &Attestation<T::EthSpec>,
    map_fn: F,
) -> Result<R, Error>
where
    T: BeaconChainTypes,
    F: Fn((BeaconCommittee, CommitteesPerSlot)) -> Result<R, Error>,
{
    let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch());
    let target = &attestation.data.target;

    // Attestation target must be for a known block.
    //
    // We use fork choice to find the target root, which means that we reject any attestation
    // that has a `target.root` earlier than our latest finalized root. There's no point in
    // processing an attestation that does not include our latest finalized block in its chain.
    //
    // We do not delay consideration for later, we simply drop the attestation.
    let target_block = chain
        .fork_choice
        .read()
        .get_block(&target.root)
        .ok_or_else(|| Error::UnknownTargetRoot(target.root))?;

    // Obtain the shuffling cache, timing how long we wait.
    let cache_wait_timer =
        metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES);

    let mut shuffling_cache = chain
        .shuffling_cache
        .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
        .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)?;

    metrics::stop_timer(cache_wait_timer);

    if let Some(committee_cache) = shuffling_cache.get(attestation_epoch, target.root) {
        // Cache hit: resolve the committee directly from the cached shuffling.
        let committees_per_slot = committee_cache.committees_per_slot();
        committee_cache
            .get_beacon_committee(attestation.data.slot, attestation.data.index)
            .map(|committee| map_fn((committee, committees_per_slot)))
            .unwrap_or_else(|| {
                Err(Error::NoCommitteeForSlotAndIndex {
                    slot: attestation.data.slot,
                    index: attestation.data.index,
                })
            })
    } else {
        // Drop the shuffling cache to avoid holding the lock for any longer than
        // required.
        drop(shuffling_cache);

        debug!(
            chain.log,
            "Attestation processing cache miss";
            "attn_epoch" => attestation_epoch.as_u64(),
            "target_block_epoch" => target_block.slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(),
        );

        let state_read_timer =
            metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES);

        let mut state = chain
            .store
            .get_inconsistent_state_for_attestation_verification_only(
                &target_block.state_root,
                Some(target_block.slot),
            )
            .map_err(BeaconChainError::from)?
            .ok_or_else(|| BeaconChainError::MissingBeaconState(target_block.state_root))?;

        metrics::stop_timer(state_read_timer);
        let state_skip_timer =
            metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES);

        // Advance the state until it is within one epoch of the attestation, so the relevant
        // shuffling can be computed from it.
        while state.current_epoch() + 1 < attestation_epoch {
            // Here we tell `per_slot_processing` to skip hashing the state and just
            // use the zero hash instead.
            //
            // The state roots are not useful for the shuffling, so there's no need to
            // compute them.
            per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec)
                .map_err(BeaconChainError::from)?;
        }

        metrics::stop_timer(state_skip_timer);
        let committee_building_timer =
            metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES);

        let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), attestation_epoch)
            .map_err(BeaconChainError::IncorrectStateForAttestation)?;

        state
            .build_committee_cache(relative_epoch, &chain.spec)
            .map_err(BeaconChainError::from)?;

        let committee_cache = state
            .committee_cache(relative_epoch)
            .map_err(BeaconChainError::from)?;

        // Re-acquire the cache lock and store the freshly-built shuffling for future callers.
        chain
            .shuffling_cache
            .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
            .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)?
            .insert(attestation_epoch, target.root, committee_cache);

        metrics::stop_timer(committee_building_timer);

        let committees_per_slot = committee_cache.committees_per_slot();
        committee_cache
            .get_beacon_committee(attestation.data.slot, attestation.data.index)
            .map(|committee| map_fn((committee, committees_per_slot)))
            .unwrap_or_else(|| {
                Err(Error::NoCommitteeForSlotAndIndex {
                    slot: attestation.data.slot,
                    index: attestation.data.index,
                })
            })
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
352
beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
Normal file
352
beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
Normal file
@@ -0,0 +1,352 @@
|
||||
//! Defines the `BeaconForkChoiceStore` which provides the persistent storage for the `ForkChoice`
|
||||
//! struct.
|
||||
//!
|
||||
//! Additionally, the private `BalancesCache` struct is defined; a cache designed to avoid database
|
||||
//! reads when fork choice requires the validator balances of the justified state.
|
||||
|
||||
use crate::{metrics, BeaconSnapshot};
|
||||
use fork_choice::ForkChoiceStore;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use store::{Error as StoreError, HotColdDB, ItemStore};
|
||||
use types::{
|
||||
BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, SignedBeaconBlock,
|
||||
Slot,
|
||||
};
|
||||
|
||||
/// Errors that may arise within the `BeaconForkChoiceStore`.
#[derive(Debug)]
pub enum Error {
    /// The slot could not be read from the clock.
    UnableToReadSlot,
    /// The time could not be read.
    UnableToReadTime,
    /// The genesis snapshot was at an invalid slot (the offending slot is included).
    InvalidGenesisSnapshot(Slot),
    /// An ancestor at the given slot was not known.
    AncestorUnknown { ancestor_slot: Slot },
    /// Best-justified balances were requested before being initialized.
    UninitializedBestJustifiedBalances,
    /// The store failed whilst reading a block.
    FailedToReadBlock(StoreError),
    /// No block was found for the given root.
    MissingBlock(Hash256),
    /// The store failed whilst reading a state.
    FailedToReadState(StoreError),
    /// No state was found for the given root.
    MissingState(Hash256),
    /// Persisted bytes could not be SSZ-decoded.
    InvalidPersistedBytes(ssz::DecodeError),
    /// An error raised by a `BeaconState` operation.
    BeaconStateError(BeaconStateError),
}
|
||||
|
||||
impl From<BeaconStateError> for Error {
    /// Allows `?` to convert `BeaconState` errors into this module's `Error`.
    fn from(e: BeaconStateError) -> Self {
        Error::BeaconStateError(e)
    }
}
|
||||
|
||||
/// The number of validator balance sets that are cached within `BalancesCache`.
///
/// When the cache is full, the oldest entry is evicted first (see `BalancesCache::process_state`).
const MAX_BALANCE_CACHE_SIZE: usize = 4;
|
||||
|
||||
/// Returns the effective balances for every validator in the given `state`.
|
||||
///
|
||||
/// Any validator who is not active in the epoch of the given `state` is assigned a balance of
|
||||
/// zero.
|
||||
pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
|
||||
state
|
||||
.validators
|
||||
.iter()
|
||||
.map(|validator| {
|
||||
if validator.is_active_at(state.current_epoch()) {
|
||||
validator.effective_balance
|
||||
} else {
|
||||
0
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// An item that is stored in the `BalancesCache`.
///
/// `Encode`/`Decode` are derived so the item can be persisted as part of the cache
/// (see `BeaconForkChoiceStore::to_persisted`).
#[derive(PartialEq, Clone, Debug, Encode, Decode)]
struct CacheItem {
    /// The block root at which `self.balances` are valid.
    block_root: Hash256,
    /// The effective balances from a `BeaconState` validator registry.
    balances: Vec<u64>,
}
|
||||
|
||||
/// Provides a cache to avoid reading `BeaconState` from disk when updating the current justified
/// checkpoint.
///
/// It is effectively a mapping of `epoch_boundary_block_root -> state.balances`.
#[derive(PartialEq, Clone, Default, Debug, Encode, Decode)]
struct BalancesCache {
    // Ordered oldest-first; bounded at `MAX_BALANCE_CACHE_SIZE` entries (see `process_state`).
    items: Vec<CacheItem>,
}
|
||||
|
||||
impl BalancesCache {
    /// Inspect the given `state` and determine the root of the block at the first slot of
    /// `state.current_epoch`. If there is not already some entry for the given block root, then
    /// add the effective balances from the `state` to the cache.
    pub fn process_state<E: EthSpec>(
        &mut self,
        block_root: Hash256,
        state: &BeaconState<E>,
    ) -> Result<(), Error> {
        // We are only interested in balances from states that are at the start of an epoch,
        // because this is where the `current_justified_checkpoint.root` will point.
        if !Self::is_first_block_in_epoch(block_root, state)? {
            return Ok(());
        }

        let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch());
        let epoch_boundary_root = if epoch_boundary_slot == state.slot {
            block_root
        } else {
            // This call remains sensible as long as `state.block_roots` is larger than a single
            // epoch.
            *state.get_block_root(epoch_boundary_slot)?
        };

        if self.position(epoch_boundary_root).is_none() {
            let item = CacheItem {
                block_root: epoch_boundary_root,
                balances: get_effective_balances(state),
            };

            // Evict the oldest entry to keep the cache bounded.
            if self.items.len() == MAX_BALANCE_CACHE_SIZE {
                self.items.remove(0);
            }

            self.items.push(item);
        }

        Ok(())
    }

    /// Returns `true` if the given `block_root` is the first/only block to have been processed in
    /// the epoch of the given `state`.
    ///
    /// We can determine if it is the first block by looking back through `state.block_roots` to
    /// see if there is a block in the current epoch with a different root.
    fn is_first_block_in_epoch<E: EthSpec>(
        block_root: Hash256,
        state: &BeaconState<E>,
    ) -> Result<bool, Error> {
        let mut prior_block_found = false;

        for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) {
            if slot < state.slot {
                if *state.get_block_root(slot)? != block_root {
                    prior_block_found = true;
                    break;
                }
            } else {
                // Slots at or beyond the state's own slot cannot hold an earlier block.
                break;
            }
        }

        Ok(!prior_block_found)
    }

    // Returns the index of the cache entry for `block_root`, if one exists.
    fn position(&self, block_root: Hash256) -> Option<usize> {
        self.items
            .iter()
            .position(|item| item.block_root == block_root)
    }

    /// Get the balances for the given `block_root`, if any.
    ///
    /// If some balances are found, they are removed from the cache.
    pub fn get(&mut self, block_root: Hash256) -> Option<Vec<u64>> {
        let i = self.position(block_root)?;
        Some(self.items.remove(i).balances)
    }
}
|
||||
|
||||
/// Implements `fork_choice::ForkChoiceStore` in order to provide a persistent backing to the
/// `fork_choice::ForkChoice` struct.
#[derive(Debug)]
pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
    // Handle to the on-disk database; excluded from `PartialEq` and from persistence.
    store: Arc<HotColdDB<E, Hot, Cold>>,
    // Cache of effective balances, used to avoid a state read when the justified checkpoint moves.
    balances_cache: BalancesCache,
    // The current fork-choice time, measured in slots.
    time: Slot,
    finalized_checkpoint: Checkpoint,
    justified_checkpoint: Checkpoint,
    // Effective balances for the state at `justified_checkpoint.root`.
    justified_balances: Vec<u64>,
    best_justified_checkpoint: Checkpoint,
    _phantom: PhantomData<E>,
}
|
||||
|
||||
impl<E, Hot, Cold> PartialEq for BeaconForkChoiceStore<E, Hot, Cold>
where
    E: EthSpec,
    Hot: ItemStore<E>,
    Cold: ItemStore<E>,
{
    /// This implementation compares every field except the `store` handle (and the zero-sized
    /// `_phantom` marker).
    fn eq(&self, other: &Self) -> bool {
        self.balances_cache == other.balances_cache
            && self.time == other.time
            && self.finalized_checkpoint == other.finalized_checkpoint
            && self.justified_checkpoint == other.justified_checkpoint
            && self.justified_balances == other.justified_balances
            && self.best_justified_checkpoint == other.best_justified_checkpoint
    }
}
|
||||
|
||||
impl<E, Hot, Cold> BeaconForkChoiceStore<E, Hot, Cold>
where
    E: EthSpec,
    Hot: ItemStore<E>,
    Cold: ItemStore<E>,
{
    /// Initialize `Self` from some `anchor` checkpoint which may or may not be the genesis state.
    ///
    /// ## Specification
    ///
    /// Equivalent to:
    ///
    /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_forkchoice_store
    ///
    /// ## Notes:
    ///
    /// It is assumed that `anchor` is already persisted in `store`.
    pub fn get_forkchoice_store(
        store: Arc<HotColdDB<E, Hot, Cold>>,
        anchor: &BeaconSnapshot<E>,
    ) -> Self {
        let anchor_state = &anchor.beacon_state;
        let mut anchor_block_header = anchor_state.latest_block_header.clone();
        // A zeroed header state root is filled in from the snapshot so that the computed
        // block root is correct.
        if anchor_block_header.state_root == Hash256::zero() {
            anchor_block_header.state_root = anchor.beacon_state_root;
        }
        let anchor_root = anchor_block_header.canonical_root();
        let anchor_epoch = anchor_state.current_epoch();
        // At the anchor, the justified, best-justified and finalized checkpoints all start as
        // the anchor itself.
        let justified_checkpoint = Checkpoint {
            epoch: anchor_epoch,
            root: anchor_root,
        };
        let finalized_checkpoint = justified_checkpoint;

        Self {
            store,
            balances_cache: <_>::default(),
            time: anchor_state.slot,
            justified_checkpoint,
            justified_balances: anchor_state.balances.clone().into(),
            finalized_checkpoint,
            best_justified_checkpoint: justified_checkpoint,
            _phantom: PhantomData,
        }
    }

    /// Save the current state of `Self` to a `PersistedForkChoiceStore` which can be stored to the
    /// on-disk database.
    ///
    /// Note that the `store` handle itself is not persisted; it is re-supplied on restore
    /// (see `from_persisted`).
    pub fn to_persisted(&self) -> PersistedForkChoiceStore {
        PersistedForkChoiceStore {
            balances_cache: self.balances_cache.clone(),
            time: self.time,
            finalized_checkpoint: self.finalized_checkpoint,
            justified_checkpoint: self.justified_checkpoint,
            justified_balances: self.justified_balances.clone(),
            best_justified_checkpoint: self.best_justified_checkpoint,
        }
    }

    /// Restore `Self` from a previously-generated `PersistedForkChoiceStore`.
    pub fn from_persisted(
        persisted: PersistedForkChoiceStore,
        store: Arc<HotColdDB<E, Hot, Cold>>,
    ) -> Result<Self, Error> {
        Ok(Self {
            store,
            balances_cache: persisted.balances_cache,
            time: persisted.time,
            finalized_checkpoint: persisted.finalized_checkpoint,
            justified_checkpoint: persisted.justified_checkpoint,
            justified_balances: persisted.justified_balances,
            best_justified_checkpoint: persisted.best_justified_checkpoint,
            _phantom: PhantomData,
        })
    }
}
|
||||
|
||||
impl<E, Hot, Cold> ForkChoiceStore<E> for BeaconForkChoiceStore<E, Hot, Cold>
|
||||
where
|
||||
E: EthSpec,
|
||||
Hot: ItemStore<E>,
|
||||
Cold: ItemStore<E>,
|
||||
{
|
||||
type Error = Error;
|
||||
|
||||
fn get_current_slot(&self) -> Slot {
|
||||
self.time
|
||||
}
|
||||
|
||||
fn set_current_slot(&mut self, slot: Slot) {
|
||||
self.time = slot
|
||||
}
|
||||
|
||||
fn on_verified_block(
|
||||
&mut self,
|
||||
_block: &BeaconBlock<E>,
|
||||
block_root: Hash256,
|
||||
state: &BeaconState<E>,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.balances_cache.process_state(block_root, state)
|
||||
}
|
||||
|
||||
fn justified_checkpoint(&self) -> &Checkpoint {
|
||||
&self.justified_checkpoint
|
||||
}
|
||||
|
||||
fn justified_balances(&self) -> &[u64] {
|
||||
&self.justified_balances
|
||||
}
|
||||
|
||||
fn best_justified_checkpoint(&self) -> &Checkpoint {
|
||||
&self.best_justified_checkpoint
|
||||
}
|
||||
|
||||
fn finalized_checkpoint(&self) -> &Checkpoint {
|
||||
&self.finalized_checkpoint
|
||||
}
|
||||
|
||||
fn set_finalized_checkpoint(&mut self, checkpoint: Checkpoint) {
|
||||
self.finalized_checkpoint = checkpoint
|
||||
}
|
||||
|
||||
fn set_justified_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<(), Error> {
|
||||
self.justified_checkpoint = checkpoint;
|
||||
|
||||
if let Some(balances) = self.balances_cache.get(self.justified_checkpoint.root) {
|
||||
metrics::inc_counter(&metrics::BALANCES_CACHE_HITS);
|
||||
self.justified_balances = balances;
|
||||
} else {
|
||||
metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);
|
||||
let justified_block = self
|
||||
.store
|
||||
.get_item::<SignedBeaconBlock<E>>(&self.justified_checkpoint.root)
|
||||
.map_err(Error::FailedToReadBlock)?
|
||||
.ok_or_else(|| Error::MissingBlock(self.justified_checkpoint.root))?
|
||||
.message;
|
||||
|
||||
self.justified_balances = self
|
||||
.store
|
||||
.get_state(&justified_block.state_root, Some(justified_block.slot))
|
||||
.map_err(Error::FailedToReadState)?
|
||||
.ok_or_else(|| Error::MissingState(justified_block.state_root))?
|
||||
.balances
|
||||
.into();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint) {
|
||||
self.best_justified_checkpoint = checkpoint
|
||||
}
|
||||
}
|
||||
|
||||
/// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
|
||||
#[derive(Encode, Decode)]
|
||||
pub struct PersistedForkChoiceStore {
|
||||
balances_cache: BalancesCache,
|
||||
time: Slot,
|
||||
finalized_checkpoint: Checkpoint,
|
||||
justified_checkpoint: Checkpoint,
|
||||
justified_balances: Vec<u64>,
|
||||
best_justified_checkpoint: Checkpoint,
|
||||
}
|
||||
@@ -1,21 +1,21 @@
|
||||
use serde_derive::Serialize;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use types::{BeaconBlock, BeaconState, EthSpec, Hash256};
|
||||
use types::{BeaconState, EthSpec, Hash256, SignedBeaconBlock};
|
||||
|
||||
/// Represents some block and it's associated state. Generally, this will be used for tracking the
|
||||
/// Represents some block and its associated state. Generally, this will be used for tracking the
|
||||
/// head, justified head and finalized head.
|
||||
#[derive(Clone, Serialize, PartialEq, Debug, Encode, Decode)]
|
||||
pub struct CheckPoint<E: EthSpec> {
|
||||
pub beacon_block: BeaconBlock<E>,
|
||||
pub struct BeaconSnapshot<E: EthSpec> {
|
||||
pub beacon_block: SignedBeaconBlock<E>,
|
||||
pub beacon_block_root: Hash256,
|
||||
pub beacon_state: BeaconState<E>,
|
||||
pub beacon_state_root: Hash256,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> CheckPoint<E> {
|
||||
impl<E: EthSpec> BeaconSnapshot<E> {
|
||||
/// Create a new checkpoint.
|
||||
pub fn new(
|
||||
beacon_block: BeaconBlock<E>,
|
||||
beacon_block: SignedBeaconBlock<E>,
|
||||
beacon_block_root: Hash256,
|
||||
beacon_state: BeaconState<E>,
|
||||
beacon_state_root: Hash256,
|
||||
@@ -31,7 +31,7 @@ impl<E: EthSpec> CheckPoint<E> {
|
||||
/// Update all fields of the checkpoint.
|
||||
pub fn update(
|
||||
&mut self,
|
||||
beacon_block: BeaconBlock<E>,
|
||||
beacon_block: SignedBeaconBlock<E>,
|
||||
beacon_block_root: Hash256,
|
||||
beacon_state: BeaconState<E>,
|
||||
beacon_state_root: Hash256,
|
||||
@@ -41,4 +41,13 @@ impl<E: EthSpec> CheckPoint<E> {
|
||||
self.beacon_state = beacon_state;
|
||||
self.beacon_state_root = beacon_state_root;
|
||||
}
|
||||
|
||||
pub fn clone_with_only_committee_caches(&self) -> Self {
|
||||
Self {
|
||||
beacon_block: self.beacon_block.clone(),
|
||||
beacon_block_root: self.beacon_block_root,
|
||||
beacon_state: self.beacon_state.clone_with_only_committee_caches(),
|
||||
beacon_state_root: self.beacon_state_root,
|
||||
}
|
||||
}
|
||||
}
|
||||
1181
beacon_node/beacon_chain/src/block_verification.rs
Normal file
1181
beacon_node/beacon_chain/src/block_verification.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,70 +1,85 @@
|
||||
use crate::checkpoint_cache::CheckPointCache;
|
||||
use crate::eth1_chain::CachingEth1Backend;
|
||||
use crate::beacon_chain::{
|
||||
BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY,
|
||||
};
|
||||
use crate::eth1_chain::{CachingEth1Backend, SszEth1};
|
||||
use crate::events::NullEventHandler;
|
||||
use crate::head_tracker::HeadTracker;
|
||||
use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY};
|
||||
use crate::migrate::Migrate;
|
||||
use crate::persisted_beacon_chain::PersistedBeaconChain;
|
||||
use crate::persisted_fork_choice::PersistedForkChoice;
|
||||
use crate::shuffling_cache::ShufflingCache;
|
||||
use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE};
|
||||
use crate::timeout_rw_lock::TimeoutRwLock;
|
||||
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
|
||||
use crate::ChainConfig;
|
||||
use crate::{
|
||||
BeaconChain, BeaconChainTypes, CheckPoint, Eth1Chain, Eth1ChainBackend, EventHandler,
|
||||
ForkChoice,
|
||||
BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, Eth1Chain,
|
||||
Eth1ChainBackend, EventHandler,
|
||||
};
|
||||
use eth1::Config as Eth1Config;
|
||||
use lmd_ghost::{LmdGhost, ThreadSafeReducedTree};
|
||||
use operation_pool::OperationPool;
|
||||
use fork_choice::ForkChoice;
|
||||
use operation_pool::{OperationPool, PersistedOperationPool};
|
||||
use parking_lot::RwLock;
|
||||
use slog::{info, Logger};
|
||||
use slot_clock::{SlotClock, TestingSlotClock};
|
||||
use std::marker::PhantomData;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::Store;
|
||||
use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot};
|
||||
use store::{HotColdDB, ItemStore};
|
||||
use types::{
|
||||
BeaconBlock, BeaconState, ChainSpec, EthSpec, Graffiti, Hash256, Signature, SignedBeaconBlock,
|
||||
Slot,
|
||||
};
|
||||
|
||||
pub const PUBKEY_CACHE_FILENAME: &str = "pubkey_cache.ssz";
|
||||
|
||||
/// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing
|
||||
/// functionality and only exists to satisfy the type system.
|
||||
pub struct Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>(
|
||||
PhantomData<(
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
)>,
|
||||
);
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
BeaconChainTypes
|
||||
for Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
{
|
||||
type Store = TStore;
|
||||
type HotStore = THotStore;
|
||||
type ColdStore = TColdStore;
|
||||
type StoreMigrator = TStoreMigrator;
|
||||
type SlotClock = TSlotClock;
|
||||
type LmdGhost = TLmdGhost;
|
||||
type Eth1Chain = TEth1Backend;
|
||||
type EthSpec = TEthSpec;
|
||||
type EventHandler = TEventHandler;
|
||||
@@ -79,40 +94,47 @@ where
|
||||
///
|
||||
/// See the tests for an example of a complete working example.
|
||||
pub struct BeaconChainBuilder<T: BeaconChainTypes> {
|
||||
store: Option<Arc<T::Store>>,
|
||||
#[allow(clippy::type_complexity)]
|
||||
store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
|
||||
store_migrator: Option<T::StoreMigrator>,
|
||||
/// The finalized checkpoint to anchor the chain. May be genesis or a higher
|
||||
/// checkpoint.
|
||||
pub finalized_checkpoint: Option<CheckPoint<T::EthSpec>>,
|
||||
pub genesis_time: Option<u64>,
|
||||
genesis_block_root: Option<Hash256>,
|
||||
#[allow(clippy::type_complexity)]
|
||||
fork_choice: Option<
|
||||
ForkChoice<BeaconForkChoiceStore<T::EthSpec, T::HotStore, T::ColdStore>, T::EthSpec>,
|
||||
>,
|
||||
op_pool: Option<OperationPool<T::EthSpec>>,
|
||||
fork_choice: Option<ForkChoice<T>>,
|
||||
eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
|
||||
event_handler: Option<T::EventHandler>,
|
||||
slot_clock: Option<T::SlotClock>,
|
||||
persisted_beacon_chain: Option<PersistedBeaconChain<T>>,
|
||||
head_tracker: Option<HeadTracker>,
|
||||
data_dir: Option<PathBuf>,
|
||||
pubkey_cache_path: Option<PathBuf>,
|
||||
validator_pubkey_cache: Option<ValidatorPubkeyCache>,
|
||||
spec: ChainSpec,
|
||||
chain_config: ChainConfig,
|
||||
disabled_forks: Vec<String>,
|
||||
log: Option<Logger>,
|
||||
graffiti: Graffiti,
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
BeaconChainBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
@@ -125,17 +147,22 @@ where
|
||||
Self {
|
||||
store: None,
|
||||
store_migrator: None,
|
||||
finalized_checkpoint: None,
|
||||
genesis_time: None,
|
||||
genesis_block_root: None,
|
||||
op_pool: None,
|
||||
fork_choice: None,
|
||||
op_pool: None,
|
||||
eth1_chain: None,
|
||||
event_handler: None,
|
||||
slot_clock: None,
|
||||
persisted_beacon_chain: None,
|
||||
head_tracker: None,
|
||||
pubkey_cache_path: None,
|
||||
data_dir: None,
|
||||
disabled_forks: Vec::new(),
|
||||
validator_pubkey_cache: None,
|
||||
spec: TEthSpec::default_spec(),
|
||||
chain_config: ChainConfig::default(),
|
||||
log: None,
|
||||
graffiti: Graffiti::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -148,10 +175,19 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the maximum number of blocks that will be skipped when processing
|
||||
/// some consensus messages.
|
||||
///
|
||||
/// Set to `None` for no limit.
|
||||
pub fn import_max_skip_slots(mut self, n: Option<u64>) -> Self {
|
||||
self.chain_config.import_max_skip_slots = n;
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the store (database).
|
||||
///
|
||||
/// Should generally be called early in the build chain.
|
||||
pub fn store(mut self, store: Arc<TStore>) -> Self {
|
||||
pub fn store(mut self, store: Arc<HotColdDB<TEthSpec, THotStore, TColdStore>>) -> Self {
|
||||
self.store = Some(store);
|
||||
self
|
||||
}
|
||||
@@ -170,6 +206,46 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the location to the pubkey cache file.
|
||||
///
|
||||
/// Should generally be called early in the build chain.
|
||||
pub fn data_dir(mut self, path: PathBuf) -> Self {
|
||||
self.pubkey_cache_path = Some(path.join(PUBKEY_CACHE_FILENAME));
|
||||
self.data_dir = Some(path);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets a list of hard-coded forks that will not be activated.
|
||||
pub fn disabled_forks(mut self, disabled_forks: Vec<String>) -> Self {
|
||||
self.disabled_forks = disabled_forks;
|
||||
self
|
||||
}
|
||||
|
||||
/// Attempt to load an existing eth1 cache from the builder's `Store`.
|
||||
pub fn get_persisted_eth1_backend(&self) -> Result<Option<SszEth1>, String> {
|
||||
let store = self
|
||||
.store
|
||||
.clone()
|
||||
.ok_or_else(|| "get_persisted_eth1_backend requires a store.".to_string())?;
|
||||
|
||||
store
|
||||
.get_item::<SszEth1>(&Hash256::from_slice(Ð1_CACHE_DB_KEY))
|
||||
.map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e))
|
||||
}
|
||||
|
||||
/// Returns true if `self.store` contains a persisted beacon chain.
|
||||
pub fn store_contains_beacon_chain(&self) -> Result<bool, String> {
|
||||
let store = self
|
||||
.store
|
||||
.clone()
|
||||
.ok_or_else(|| "store_contains_beacon_chain requires a store.".to_string())?;
|
||||
|
||||
Ok(store
|
||||
.get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
|
||||
.map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
|
||||
.is_some())
|
||||
}
|
||||
|
||||
/// Attempt to load an existing chain from the builder's `Store`.
|
||||
///
|
||||
/// May initialize several components; including the op_pool and finalized checkpoints.
|
||||
@@ -179,6 +255,11 @@ where
|
||||
.as_ref()
|
||||
.ok_or_else(|| "resume_from_db requires a log".to_string())?;
|
||||
|
||||
let pubkey_cache_path = self
|
||||
.pubkey_cache_path
|
||||
.as_ref()
|
||||
.ok_or_else(|| "resume_from_db requires a data_dir".to_string())?;
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Starting beacon chain";
|
||||
@@ -188,43 +269,60 @@ where
|
||||
let store = self
|
||||
.store
|
||||
.clone()
|
||||
.ok_or_else(|| "load_from_store requires a store.".to_string())?;
|
||||
.ok_or_else(|| "resume_from_db requires a store.".to_string())?;
|
||||
|
||||
let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes());
|
||||
let p: PersistedBeaconChain<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
>,
|
||||
> = match store.get(&key) {
|
||||
Err(e) => {
|
||||
return Err(format!(
|
||||
"DB error when reading persisted beacon chain: {:?}",
|
||||
e
|
||||
))
|
||||
}
|
||||
Ok(None) => return Err("No persisted beacon chain found in store".into()),
|
||||
Ok(Some(p)) => p,
|
||||
};
|
||||
let chain = store
|
||||
.get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
|
||||
.map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
|
||||
.ok_or_else(|| {
|
||||
"No persisted beacon chain found in store. Try purging the beacon chain database."
|
||||
.to_string()
|
||||
})?;
|
||||
|
||||
let persisted_fork_choice = store
|
||||
.get_item::<PersistedForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
|
||||
.map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?
|
||||
.ok_or_else(|| "No persisted fork choice present in database.".to_string())?;
|
||||
|
||||
let fc_store = BeaconForkChoiceStore::from_persisted(
|
||||
persisted_fork_choice.fork_choice_store,
|
||||
store.clone(),
|
||||
)
|
||||
.map_err(|e| format!("Unable to load ForkChoiceStore: {:?}", e))?;
|
||||
|
||||
let fork_choice =
|
||||
ForkChoice::from_persisted(persisted_fork_choice.fork_choice, fc_store)
|
||||
.map_err(|e| format!("Unable to parse persisted fork choice from disk: {:?}", e))?;
|
||||
|
||||
let genesis_block = store
|
||||
.get_item::<SignedBeaconBlock<TEthSpec>>(&chain.genesis_block_root)
|
||||
.map_err(|e| format!("DB error when reading genesis block: {:?}", e))?
|
||||
.ok_or_else(|| "Genesis block not found in store".to_string())?;
|
||||
let genesis_state = store
|
||||
.get_state(&genesis_block.state_root(), Some(genesis_block.slot()))
|
||||
.map_err(|e| format!("DB error when reading genesis state: {:?}", e))?
|
||||
.ok_or_else(|| "Genesis block not found in store".to_string())?;
|
||||
|
||||
self.genesis_time = Some(genesis_state.genesis_time);
|
||||
|
||||
self.op_pool = Some(
|
||||
p.op_pool
|
||||
.clone()
|
||||
.into_operation_pool(&p.canonical_head.beacon_state, &self.spec),
|
||||
store
|
||||
.get_item::<PersistedOperationPool<TEthSpec>>(&Hash256::from_slice(&OP_POOL_DB_KEY))
|
||||
.map_err(|e| format!("DB error whilst reading persisted op pool: {:?}", e))?
|
||||
.map(PersistedOperationPool::into_operation_pool)
|
||||
.unwrap_or_else(OperationPool::new),
|
||||
);
|
||||
|
||||
self.finalized_checkpoint = Some(p.finalized_checkpoint.clone());
|
||||
self.genesis_block_root = Some(p.genesis_block_root);
|
||||
let pubkey_cache = ValidatorPubkeyCache::load_from_file(pubkey_cache_path)
|
||||
.map_err(|e| format!("Unable to open persisted pubkey cache: {:?}", e))?;
|
||||
|
||||
self.genesis_block_root = Some(chain.genesis_block_root);
|
||||
self.head_tracker = Some(
|
||||
HeadTracker::from_ssz_container(&p.ssz_head_tracker)
|
||||
HeadTracker::from_ssz_container(&chain.ssz_head_tracker)
|
||||
.map_err(|e| format!("Failed to decode head tracker for database: {:?}", e))?,
|
||||
);
|
||||
self.persisted_beacon_chain = Some(p);
|
||||
self.validator_pubkey_cache = Some(pubkey_cache);
|
||||
self.fork_choice = Some(fork_choice);
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
@@ -239,14 +337,13 @@ where
|
||||
.clone()
|
||||
.ok_or_else(|| "genesis_state requires a store")?;
|
||||
|
||||
let mut beacon_block = genesis_block(&beacon_state, &self.spec);
|
||||
let beacon_block = genesis_block(&mut beacon_state, &self.spec)?;
|
||||
|
||||
beacon_state
|
||||
.build_all_caches(&self.spec)
|
||||
.map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?;
|
||||
|
||||
let beacon_state_root = beacon_state.canonical_root();
|
||||
beacon_block.state_root = beacon_state_root;
|
||||
let beacon_state_root = beacon_block.message.state_root;
|
||||
let beacon_block_root = beacon_block.canonical_root();
|
||||
|
||||
self.genesis_block_root = Some(beacon_block_root);
|
||||
@@ -255,23 +352,33 @@ where
|
||||
.put_state(&beacon_state_root, &beacon_state)
|
||||
.map_err(|e| format!("Failed to store genesis state: {:?}", e))?;
|
||||
store
|
||||
.put(&beacon_block_root, &beacon_block)
|
||||
.put_item(&beacon_block_root, &beacon_block)
|
||||
.map_err(|e| format!("Failed to store genesis block: {:?}", e))?;
|
||||
|
||||
// Store the genesis block under the `ZERO_HASH` key.
|
||||
store.put(&Hash256::zero(), &beacon_block).map_err(|e| {
|
||||
format!(
|
||||
"Failed to store genesis block under 0x00..00 alias: {:?}",
|
||||
e
|
||||
)
|
||||
})?;
|
||||
store
|
||||
.put_item(&Hash256::zero(), &beacon_block)
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Failed to store genesis block under 0x00..00 alias: {:?}",
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
self.finalized_checkpoint = Some(CheckPoint {
|
||||
let genesis = BeaconSnapshot {
|
||||
beacon_block_root,
|
||||
beacon_block,
|
||||
beacon_state_root,
|
||||
beacon_state,
|
||||
});
|
||||
};
|
||||
|
||||
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis);
|
||||
|
||||
let fork_choice = ForkChoice::from_genesis(fc_store, &genesis.beacon_block.message)
|
||||
.map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?;
|
||||
|
||||
self.fork_choice = Some(fork_choice);
|
||||
self.genesis_time = Some(genesis.beacon_state.genesis_time);
|
||||
|
||||
Ok(self.empty_op_pool())
|
||||
}
|
||||
@@ -304,6 +411,18 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the `graffiti` field.
|
||||
pub fn graffiti(mut self, graffiti: Graffiti) -> Self {
|
||||
self.graffiti = graffiti;
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the `ChainConfig` that determines `BeaconChain` runtime behaviour.
|
||||
pub fn chain_config(mut self, config: ChainConfig) -> Self {
|
||||
self.chain_config = config;
|
||||
self
|
||||
}
|
||||
|
||||
/// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied.
|
||||
///
|
||||
/// An error will be returned at runtime if all required parameters have not been configured.
|
||||
@@ -316,13 +435,13 @@ where
|
||||
) -> Result<
|
||||
BeaconChain<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>,
|
||||
String,
|
||||
@@ -330,152 +449,172 @@ where
|
||||
let log = self
|
||||
.log
|
||||
.ok_or_else(|| "Cannot build without a logger".to_string())?;
|
||||
let slot_clock = self
|
||||
.slot_clock
|
||||
.ok_or_else(|| "Cannot build without a slot_clock.".to_string())?;
|
||||
let store = self
|
||||
.store
|
||||
.clone()
|
||||
.ok_or_else(|| "Cannot build without a store.".to_string())?;
|
||||
let mut fork_choice = self
|
||||
.fork_choice
|
||||
.ok_or_else(|| "Cannot build without fork choice.".to_string())?;
|
||||
let genesis_block_root = self
|
||||
.genesis_block_root
|
||||
.ok_or_else(|| "Cannot build without a genesis block root".to_string())?;
|
||||
|
||||
// If this beacon chain is being loaded from disk, use the stored head. Otherwise, just use
|
||||
// the finalized checkpoint (which is probably genesis).
|
||||
let mut canonical_head = if let Some(persisted_beacon_chain) = self.persisted_beacon_chain {
|
||||
persisted_beacon_chain.canonical_head
|
||||
let current_slot = if slot_clock
|
||||
.is_prior_to_genesis()
|
||||
.ok_or_else(|| "Unable to read slot clock".to_string())?
|
||||
{
|
||||
self.spec.genesis_slot
|
||||
} else {
|
||||
self.finalized_checkpoint
|
||||
.ok_or_else(|| "Cannot build without a state".to_string())?
|
||||
slot_clock
|
||||
.now()
|
||||
.ok_or_else(|| "Unable to read slot".to_string())?
|
||||
};
|
||||
|
||||
let head_block_root = fork_choice
|
||||
.get_head(current_slot)
|
||||
.map_err(|e| format!("Unable to get fork choice head: {:?}", e))?;
|
||||
|
||||
let head_block = store
|
||||
.get_item::<SignedBeaconBlock<TEthSpec>>(&head_block_root)
|
||||
.map_err(|e| format!("DB error when reading head block: {:?}", e))?
|
||||
.ok_or_else(|| "Head block not found in store".to_string())?;
|
||||
let head_state_root = head_block.state_root();
|
||||
let head_state = store
|
||||
.get_state(&head_state_root, Some(head_block.slot()))
|
||||
.map_err(|e| format!("DB error when reading head state: {:?}", e))?
|
||||
.ok_or_else(|| "Head state not found in store".to_string())?;
|
||||
|
||||
let mut canonical_head = BeaconSnapshot {
|
||||
beacon_block_root: head_block_root,
|
||||
beacon_block: head_block,
|
||||
beacon_state_root: head_state_root,
|
||||
beacon_state: head_state,
|
||||
};
|
||||
|
||||
if canonical_head.beacon_block.state_root() != canonical_head.beacon_state_root {
|
||||
return Err("beacon_block.state_root != beacon_state".to_string());
|
||||
}
|
||||
|
||||
canonical_head
|
||||
.beacon_state
|
||||
.build_all_caches(&self.spec)
|
||||
.map_err(|e| format!("Failed to build state caches: {:?}", e))?;
|
||||
|
||||
if canonical_head.beacon_block.state_root != canonical_head.beacon_state_root {
|
||||
return Err("beacon_block.state_root != beacon_state".to_string());
|
||||
// Perform a check to ensure that the finalization points of the head and fork choice are
|
||||
// consistent.
|
||||
//
|
||||
// This is a sanity check to detect database corruption.
|
||||
let fc_finalized = fork_choice.finalized_checkpoint();
|
||||
let head_finalized = canonical_head.beacon_state.finalized_checkpoint;
|
||||
if fc_finalized != head_finalized {
|
||||
if head_finalized.root == Hash256::zero()
|
||||
&& head_finalized.epoch == fc_finalized.epoch
|
||||
&& fc_finalized.root == genesis_block_root
|
||||
{
|
||||
// This is a legal edge-case encountered during genesis.
|
||||
} else {
|
||||
return Err(format!(
|
||||
"Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \
|
||||
{:?}",
|
||||
fc_finalized, head_finalized
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let pubkey_cache_path = self
|
||||
.pubkey_cache_path
|
||||
.ok_or_else(|| "Cannot build without a pubkey cache path".to_string())?;
|
||||
|
||||
let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| {
|
||||
ValidatorPubkeyCache::new(&canonical_head.beacon_state, pubkey_cache_path)
|
||||
.map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e))
|
||||
})?;
|
||||
|
||||
let beacon_chain = BeaconChain {
|
||||
spec: self.spec,
|
||||
store: self
|
||||
.store
|
||||
.ok_or_else(|| "Cannot build without store".to_string())?,
|
||||
config: self.chain_config,
|
||||
store,
|
||||
store_migrator: self
|
||||
.store_migrator
|
||||
.ok_or_else(|| "Cannot build without store migrator".to_string())?,
|
||||
slot_clock: self
|
||||
.slot_clock
|
||||
.ok_or_else(|| "Cannot build without slot clock".to_string())?,
|
||||
slot_clock,
|
||||
op_pool: self
|
||||
.op_pool
|
||||
.ok_or_else(|| "Cannot build without op pool".to_string())?,
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
naive_aggregation_pool: <_>::default(),
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
observed_attestations: <_>::default(),
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
observed_attesters: <_>::default(),
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
observed_aggregators: <_>::default(),
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
observed_block_producers: <_>::default(),
|
||||
// TODO: allow for persisting and loading the pool from disk.
|
||||
observed_voluntary_exits: <_>::default(),
|
||||
observed_proposer_slashings: <_>::default(),
|
||||
observed_attester_slashings: <_>::default(),
|
||||
eth1_chain: self.eth1_chain,
|
||||
canonical_head: RwLock::new(canonical_head),
|
||||
genesis_block_root: self
|
||||
.genesis_block_root
|
||||
.ok_or_else(|| "Cannot build without a genesis block root".to_string())?,
|
||||
fork_choice: self
|
||||
.fork_choice
|
||||
.ok_or_else(|| "Cannot build without a fork choice".to_string())?,
|
||||
genesis_validators_root: canonical_head.beacon_state.genesis_validators_root,
|
||||
canonical_head: TimeoutRwLock::new(canonical_head.clone()),
|
||||
genesis_block_root,
|
||||
fork_choice: RwLock::new(fork_choice),
|
||||
event_handler: self
|
||||
.event_handler
|
||||
.ok_or_else(|| "Cannot build without an event handler".to_string())?,
|
||||
head_tracker: self.head_tracker.unwrap_or_default(),
|
||||
checkpoint_cache: CheckPointCache::default(),
|
||||
head_tracker: Arc::new(self.head_tracker.unwrap_or_default()),
|
||||
snapshot_cache: TimeoutRwLock::new(SnapshotCache::new(
|
||||
DEFAULT_SNAPSHOT_CACHE_SIZE,
|
||||
canonical_head,
|
||||
)),
|
||||
shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()),
|
||||
validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache),
|
||||
disabled_forks: self.disabled_forks,
|
||||
log: log.clone(),
|
||||
graffiti: self.graffiti,
|
||||
};
|
||||
|
||||
let head = beacon_chain
|
||||
.head()
|
||||
.map_err(|e| format!("Failed to get head: {:?}", e))?;
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Beacon chain initialized";
|
||||
"head_state" => format!("{}", beacon_chain.head().beacon_state_root),
|
||||
"head_block" => format!("{}", beacon_chain.head().beacon_block_root),
|
||||
"head_slot" => format!("{}", beacon_chain.head().beacon_block.slot),
|
||||
"head_state" => format!("{}", head.beacon_state_root),
|
||||
"head_block" => format!("{}", head.beacon_block_root),
|
||||
"head_slot" => format!("{}", head.beacon_block.slot()),
|
||||
);
|
||||
|
||||
Ok(beacon_chain)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TSlotClock, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
BeaconChainBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
ThreadSafeReducedTree<TStore, TEthSpec>,
|
||||
TEth1Backend,
|
||||
CachingEth1Backend<TEthSpec>,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
{
|
||||
/// Initializes a fork choice with the `ThreadSafeReducedTree` backend.
|
||||
///
|
||||
/// If this builder is being "resumed" from disk, then rebuild the last fork choice stored to
|
||||
/// the database. Otherwise, create a new, empty fork choice.
|
||||
pub fn reduced_tree_fork_choice(mut self) -> Result<Self, String> {
|
||||
let store = self
|
||||
.store
|
||||
.clone()
|
||||
.ok_or_else(|| "reduced_tree_fork_choice requires a store")?;
|
||||
|
||||
let fork_choice = if let Some(persisted_beacon_chain) = &self.persisted_beacon_chain {
|
||||
ForkChoice::from_ssz_container(
|
||||
persisted_beacon_chain.fork_choice.clone(),
|
||||
store.clone(),
|
||||
)
|
||||
.map_err(|e| format!("Unable to decode fork choice from db: {:?}", e))?
|
||||
} else {
|
||||
let finalized_checkpoint = &self
|
||||
.finalized_checkpoint
|
||||
.as_ref()
|
||||
.ok_or_else(|| "fork_choice_backend requires a finalized_checkpoint")?;
|
||||
let genesis_block_root = self
|
||||
.genesis_block_root
|
||||
.ok_or_else(|| "fork_choice_backend requires a genesis_block_root")?;
|
||||
|
||||
let backend = ThreadSafeReducedTree::new(
|
||||
store.clone(),
|
||||
&finalized_checkpoint.beacon_block,
|
||||
finalized_checkpoint.beacon_block_root,
|
||||
);
|
||||
|
||||
ForkChoice::new(store, backend, genesis_block_root, self.spec.genesis_slot)
|
||||
};
|
||||
|
||||
self.fork_choice = Some(fork_choice);
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEthSpec, TEventHandler>
|
||||
BeaconChainBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
CachingEth1Backend<TEthSpec, TStore>,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
{
|
||||
/// Sets the `BeaconChain` eth1 back-end to `CachingEth1Backend`.
|
||||
pub fn caching_eth1_backend(self, backend: CachingEth1Backend<TEthSpec, TStore>) -> Self {
|
||||
self.eth1_backend(Some(backend))
|
||||
}
|
||||
|
||||
/// Do not use any eth1 backend. The client will not be able to produce beacon blocks.
|
||||
pub fn no_eth1_backend(self) -> Self {
|
||||
self.eth1_backend(None)
|
||||
@@ -487,38 +626,32 @@ where
|
||||
.log
|
||||
.as_ref()
|
||||
.ok_or_else(|| "dummy_eth1_backend requires a log".to_string())?;
|
||||
let store = self
|
||||
.store
|
||||
.clone()
|
||||
.ok_or_else(|| "dummy_eth1_backend requires a store.".to_string())?;
|
||||
|
||||
let backend = CachingEth1Backend::new(Eth1Config::default(), log.clone(), store);
|
||||
let backend =
|
||||
CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone());
|
||||
|
||||
let mut eth1_chain = Eth1Chain::new(backend);
|
||||
eth1_chain.use_dummy_backend = true;
|
||||
|
||||
self.eth1_chain = Some(eth1_chain);
|
||||
self.eth1_chain = Some(Eth1Chain::new_dummy(backend));
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
BeaconChainBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TestingSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
|
||||
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
@@ -528,11 +661,8 @@ where
|
||||
/// Requires the state to be initialized.
|
||||
pub fn testing_slot_clock(self, slot_duration: Duration) -> Result<Self, String> {
|
||||
let genesis_time = self
|
||||
.finalized_checkpoint
|
||||
.as_ref()
|
||||
.ok_or_else(|| "testing_slot_clock requires an initialized state")?
|
||||
.beacon_state
|
||||
.genesis_time;
|
||||
.genesis_time
|
||||
.ok_or_else(|| "testing_slot_clock requires an initialized state")?;
|
||||
|
||||
let slot_clock = TestingSlotClock::new(
|
||||
Slot::new(0),
|
||||
@@ -544,23 +674,23 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec>
|
||||
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>
|
||||
BeaconChainBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
NullEventHandler<TEthSpec>,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
{
|
||||
@@ -571,23 +701,35 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
fn genesis_block<T: EthSpec>(genesis_state: &BeaconState<T>, spec: &ChainSpec) -> BeaconBlock<T> {
|
||||
let mut genesis_block = BeaconBlock::empty(&spec);
|
||||
|
||||
genesis_block.state_root = genesis_state.canonical_root();
|
||||
|
||||
genesis_block
|
||||
fn genesis_block<T: EthSpec>(
|
||||
genesis_state: &mut BeaconState<T>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<SignedBeaconBlock<T>, String> {
|
||||
let mut genesis_block = SignedBeaconBlock {
|
||||
message: BeaconBlock::empty(&spec),
|
||||
// Empty signature, which should NEVER be read. This isn't to-spec, but makes the genesis
|
||||
// block consistent with every other block.
|
||||
signature: Signature::empty(),
|
||||
};
|
||||
genesis_block.message.state_root = genesis_state
|
||||
.update_tree_hash_cache()
|
||||
.map_err(|e| format!("Error hashing genesis state: {:?}", e))?;
|
||||
Ok(genesis_block)
|
||||
}
|
||||
|
||||
#[cfg(not(debug_assertions))]
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::migrate::NullMigrator;
|
||||
use eth2_hashing::hash;
|
||||
use genesis::{generate_deterministic_keypairs, interop_genesis_state};
|
||||
use sloggers::{null::NullLoggerBuilder, Build};
|
||||
use ssz::Encode;
|
||||
use std::time::Duration;
|
||||
use store::{migrate::NullMigrator, MemoryStore};
|
||||
use store::config::StoreConfig;
|
||||
use store::{HotColdDB, MemoryStore};
|
||||
use tempfile::tempdir;
|
||||
use types::{EthSpec, MinimalEthSpec, Slot};
|
||||
|
||||
type TestEthSpec = MinimalEthSpec;
|
||||
@@ -599,12 +741,18 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn recent_genesis() {
|
||||
let validator_count = 8;
|
||||
let genesis_time = 13371337;
|
||||
let validator_count = 1;
|
||||
let genesis_time = 13_371_337;
|
||||
|
||||
let log = get_logger();
|
||||
let store = Arc::new(MemoryStore::open());
|
||||
let store: HotColdDB<
|
||||
MinimalEthSpec,
|
||||
MemoryStore<MinimalEthSpec>,
|
||||
MemoryStore<MinimalEthSpec>,
|
||||
> = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone())
|
||||
.unwrap();
|
||||
let spec = MinimalEthSpec::default_spec();
|
||||
let data_dir = tempdir().expect("should create temporary data_dir");
|
||||
|
||||
let genesis_state = interop_genesis_state(
|
||||
&generate_deterministic_keypairs(validator_count),
|
||||
@@ -615,8 +763,9 @@ mod test {
|
||||
|
||||
let chain = BeaconChainBuilder::new(MinimalEthSpec)
|
||||
.logger(log.clone())
|
||||
.store(store.clone())
|
||||
.store(Arc::new(store))
|
||||
.store_migrator(NullMigrator)
|
||||
.data_dir(data_dir.path().to_path_buf())
|
||||
.genesis_state(genesis_state)
|
||||
.expect("should build state using recent genesis")
|
||||
.dummy_eth1_backend()
|
||||
@@ -624,29 +773,28 @@ mod test {
|
||||
.null_event_handler()
|
||||
.testing_slot_clock(Duration::from_secs(1))
|
||||
.expect("should configure testing slot clock")
|
||||
.reduced_tree_fork_choice()
|
||||
.expect("should add fork choice to builder")
|
||||
.build()
|
||||
.expect("should build");
|
||||
|
||||
let head = chain.head();
|
||||
let head = chain.head().expect("should get head");
|
||||
|
||||
let state = head.beacon_state;
|
||||
let block = head.beacon_block;
|
||||
|
||||
assert_eq!(state.slot, Slot::new(0), "should start from genesis");
|
||||
assert_eq!(
|
||||
state.genesis_time, 13371337,
|
||||
state.genesis_time, 13_371_337,
|
||||
"should have the correct genesis time"
|
||||
);
|
||||
assert_eq!(
|
||||
block.state_root,
|
||||
block.state_root(),
|
||||
state.canonical_root(),
|
||||
"block should have correct state root"
|
||||
);
|
||||
assert_eq!(
|
||||
chain
|
||||
.store
|
||||
.get::<BeaconBlock<_>>(&Hash256::zero())
|
||||
.get_block(&Hash256::zero())
|
||||
.expect("should read db")
|
||||
.expect("should find genesis block"),
|
||||
block,
|
||||
|
||||
21
beacon_node/beacon_chain/src/chain_config.rs
Normal file
21
beacon_node/beacon_chain/src/chain_config.rs
Normal file
@@ -0,0 +1,21 @@
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
|
||||
/// There is a 693 block skip in the current canonical Medalla chain, we use 700 to be safe.
|
||||
pub const DEFAULT_IMPORT_BLOCK_MAX_SKIP_SLOTS: u64 = 700;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]
|
||||
pub struct ChainConfig {
|
||||
/// Maximum number of slots to skip when importing a consensus message (e.g., block,
|
||||
/// attestation, etc).
|
||||
///
|
||||
/// If `None`, there is no limit.
|
||||
pub import_max_skip_slots: Option<u64>,
|
||||
}
|
||||
|
||||
impl Default for ChainConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
import_max_skip_slots: Some(DEFAULT_IMPORT_BLOCK_MAX_SKIP_SLOTS),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,124 +0,0 @@
|
||||
use crate::checkpoint::CheckPoint;
|
||||
use crate::metrics;
|
||||
use parking_lot::RwLock;
|
||||
use types::{BeaconBlock, BeaconState, EthSpec, Hash256};
|
||||
|
||||
const CACHE_SIZE: usize = 4;
|
||||
|
||||
struct Inner<T: EthSpec> {
|
||||
oldest: usize,
|
||||
limit: usize,
|
||||
checkpoints: Vec<CheckPoint<T>>,
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Default for Inner<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
oldest: 0,
|
||||
limit: CACHE_SIZE,
|
||||
checkpoints: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CheckPointCache<T: EthSpec> {
|
||||
inner: RwLock<Inner<T>>,
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Default for CheckPointCache<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
inner: RwLock::new(Inner::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> CheckPointCache<T> {
|
||||
pub fn insert(&self, checkpoint: &CheckPoint<T>) {
|
||||
if self
|
||||
.inner
|
||||
.read()
|
||||
.checkpoints
|
||||
.iter()
|
||||
// This is `O(n)` but whilst `n == 4` it ain't no thing.
|
||||
.any(|local| local.beacon_state_root == checkpoint.beacon_state_root)
|
||||
{
|
||||
// Adding a known checkpoint to the cache should be a no-op.
|
||||
return;
|
||||
}
|
||||
|
||||
let mut inner = self.inner.write();
|
||||
|
||||
if inner.checkpoints.len() < inner.limit {
|
||||
inner.checkpoints.push(checkpoint.clone())
|
||||
} else {
|
||||
let i = inner.oldest; // to satisfy the borrow checker.
|
||||
inner.checkpoints[i] = checkpoint.clone();
|
||||
inner.oldest += 1;
|
||||
inner.oldest %= inner.limit;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_state(&self, state_root: &Hash256) -> Option<BeaconState<T>> {
|
||||
self.inner
|
||||
.read()
|
||||
.checkpoints
|
||||
.iter()
|
||||
// Also `O(n)`.
|
||||
.find(|checkpoint| checkpoint.beacon_state_root == *state_root)
|
||||
.map(|checkpoint| {
|
||||
metrics::inc_counter(&metrics::CHECKPOINT_CACHE_HITS);
|
||||
|
||||
checkpoint.beacon_state.clone()
|
||||
})
|
||||
.or_else(|| {
|
||||
metrics::inc_counter(&metrics::CHECKPOINT_CACHE_MISSES);
|
||||
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_state_only_with_committee_cache(
|
||||
&self,
|
||||
state_root: &Hash256,
|
||||
) -> Option<BeaconState<T>> {
|
||||
self.inner
|
||||
.read()
|
||||
.checkpoints
|
||||
.iter()
|
||||
// Also `O(n)`.
|
||||
.find(|checkpoint| checkpoint.beacon_state_root == *state_root)
|
||||
.map(|checkpoint| {
|
||||
metrics::inc_counter(&metrics::CHECKPOINT_CACHE_HITS);
|
||||
|
||||
let mut state = checkpoint.beacon_state.clone_without_caches();
|
||||
state.committee_caches = checkpoint.beacon_state.committee_caches.clone();
|
||||
|
||||
state
|
||||
})
|
||||
.or_else(|| {
|
||||
metrics::inc_counter(&metrics::CHECKPOINT_CACHE_MISSES);
|
||||
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_block(&self, block_root: &Hash256) -> Option<BeaconBlock<T>> {
|
||||
self.inner
|
||||
.read()
|
||||
.checkpoints
|
||||
.iter()
|
||||
// Also `O(n)`.
|
||||
.find(|checkpoint| checkpoint.beacon_block_root == *block_root)
|
||||
.map(|checkpoint| {
|
||||
metrics::inc_counter(&metrics::CHECKPOINT_CACHE_HITS);
|
||||
|
||||
checkpoint.beacon_block.clone()
|
||||
})
|
||||
.or_else(|| {
|
||||
metrics::inc_counter(&metrics::CHECKPOINT_CACHE_MISSES);
|
||||
|
||||
None
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,22 @@
|
||||
use crate::beacon_chain::ForkChoiceError;
|
||||
use crate::eth1_chain::Error as Eth1ChainError;
|
||||
use crate::fork_choice::Error as ForkChoiceError;
|
||||
use crate::migrate::PruningError;
|
||||
use crate::naive_aggregation_pool::Error as NaiveAggregationError;
|
||||
use crate::observed_attestations::Error as ObservedAttestationsError;
|
||||
use crate::observed_attesters::Error as ObservedAttestersError;
|
||||
use crate::observed_block_producers::Error as ObservedBlockProducersError;
|
||||
use operation_pool::OpPoolError;
|
||||
use safe_arith::ArithError;
|
||||
use ssz_types::Error as SszTypesError;
|
||||
use state_processing::per_block_processing::errors::AttestationValidationError;
|
||||
use state_processing::BlockProcessingError;
|
||||
use state_processing::SlotProcessingError;
|
||||
use state_processing::{
|
||||
block_signature_verifier::Error as BlockSignatureVerifierError,
|
||||
per_block_processing::errors::{
|
||||
AttestationValidationError, AttesterSlashingValidationError, ExitValidationError,
|
||||
ProposerSlashingValidationError,
|
||||
},
|
||||
signature_sets::Error as SignatureSetError,
|
||||
BlockProcessingError, SlotProcessingError,
|
||||
};
|
||||
use std::time::Duration;
|
||||
use types::*;
|
||||
|
||||
@@ -17,7 +30,7 @@ macro_rules! easy_from_to {
|
||||
};
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[derive(Debug)]
|
||||
pub enum BeaconChainError {
|
||||
InsufficientValidators,
|
||||
UnableToReadSlot,
|
||||
@@ -39,22 +52,55 @@ pub enum BeaconChainError {
|
||||
NoStateForAttestation {
|
||||
beacon_block_root: Hash256,
|
||||
},
|
||||
CannotAttestToFutureState,
|
||||
AttestationValidationError(AttestationValidationError),
|
||||
ExitValidationError(ExitValidationError),
|
||||
ProposerSlashingValidationError(ProposerSlashingValidationError),
|
||||
AttesterSlashingValidationError(AttesterSlashingValidationError),
|
||||
StateSkipTooLarge {
|
||||
start_slot: Slot,
|
||||
requested_slot: Slot,
|
||||
max_task_runtime: Duration,
|
||||
},
|
||||
MissingFinalizedStateRoot(Slot),
|
||||
/// Returned when an internal check fails, indicating corrupt data.
|
||||
InvariantViolated(String),
|
||||
SszTypesError(SszTypesError),
|
||||
CanonicalHeadLockTimeout,
|
||||
AttestationCacheLockTimeout,
|
||||
ValidatorPubkeyCacheLockTimeout,
|
||||
IncorrectStateForAttestation(RelativeEpochError),
|
||||
InvalidValidatorPubkeyBytes(bls::Error),
|
||||
ValidatorPubkeyCacheIncomplete(usize),
|
||||
SignatureSetError(SignatureSetError),
|
||||
BlockSignatureVerifierError(state_processing::block_signature_verifier::Error),
|
||||
DuplicateValidatorPublicKey,
|
||||
ValidatorPubkeyCacheFileError(String),
|
||||
OpPoolError(OpPoolError),
|
||||
NaiveAggregationError(NaiveAggregationError),
|
||||
ObservedAttestationsError(ObservedAttestationsError),
|
||||
ObservedAttestersError(ObservedAttestersError),
|
||||
ObservedBlockProducersError(ObservedBlockProducersError),
|
||||
PruningError(PruningError),
|
||||
ArithError(ArithError),
|
||||
}
|
||||
|
||||
easy_from_to!(SlotProcessingError, BeaconChainError);
|
||||
easy_from_to!(AttestationValidationError, BeaconChainError);
|
||||
easy_from_to!(ExitValidationError, BeaconChainError);
|
||||
easy_from_to!(ProposerSlashingValidationError, BeaconChainError);
|
||||
easy_from_to!(AttesterSlashingValidationError, BeaconChainError);
|
||||
easy_from_to!(SszTypesError, BeaconChainError);
|
||||
easy_from_to!(OpPoolError, BeaconChainError);
|
||||
easy_from_to!(NaiveAggregationError, BeaconChainError);
|
||||
easy_from_to!(ObservedAttestationsError, BeaconChainError);
|
||||
easy_from_to!(ObservedAttestersError, BeaconChainError);
|
||||
easy_from_to!(ObservedBlockProducersError, BeaconChainError);
|
||||
easy_from_to!(BlockSignatureVerifierError, BeaconChainError);
|
||||
easy_from_to!(PruningError, BeaconChainError);
|
||||
easy_from_to!(ArithError, BeaconChainError);
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[derive(Debug)]
|
||||
pub enum BlockProductionError {
|
||||
UnableToGetBlockRootFromState,
|
||||
UnableToReadSlot,
|
||||
@@ -63,6 +109,7 @@ pub enum BlockProductionError {
|
||||
BlockProcessingError(BlockProcessingError),
|
||||
Eth1ChainError(Eth1ChainError),
|
||||
BeaconStateError(BeaconStateError),
|
||||
OpPoolError(OpPoolError),
|
||||
/// The `BeaconChain` was explicitly configured _without_ a connection to eth1, therefore it
|
||||
/// cannot produce blocks.
|
||||
NoEth1ChainConnection,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,10 @@
|
||||
use bus::Bus;
|
||||
use parking_lot::Mutex;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use slog::{error, Logger};
|
||||
use std::marker::PhantomData;
|
||||
use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256};
|
||||
use std::sync::Arc;
|
||||
use types::{Attestation, Epoch, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockHash};
|
||||
pub use websocket_server::WebSocketSender;
|
||||
|
||||
pub trait EventHandler<T: EthSpec>: Sized + Send + Sync {
|
||||
@@ -18,6 +22,84 @@ impl<T: EthSpec> EventHandler<T> for WebSocketSender<T> {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ServerSentEvents<T: EthSpec> {
|
||||
// Bus<> is itself Sync + Send. We use Mutex<> here only because of the surrounding code does
|
||||
// not enforce mutability statically (i.e. relies on interior mutability).
|
||||
head_changed_queue: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
|
||||
log: Logger,
|
||||
_phantom: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T: EthSpec> ServerSentEvents<T> {
|
||||
pub fn new(log: Logger) -> (Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>) {
|
||||
let bus = Bus::new(T::slots_per_epoch() as usize);
|
||||
let mutex = Mutex::new(bus);
|
||||
let arc = Arc::new(mutex);
|
||||
let this = Self {
|
||||
head_changed_queue: arc.clone(),
|
||||
log,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
(this, arc)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> EventHandler<T> for ServerSentEvents<T> {
|
||||
fn register(&self, kind: EventKind<T>) -> Result<(), String> {
|
||||
match kind {
|
||||
EventKind::BeaconHeadChanged {
|
||||
current_head_beacon_block_root,
|
||||
..
|
||||
} => {
|
||||
let mut guard = self.head_changed_queue.lock();
|
||||
if guard
|
||||
.try_broadcast(current_head_beacon_block_root.into())
|
||||
.is_err()
|
||||
{
|
||||
error!(
|
||||
self.log,
|
||||
"Head change streaming queue full";
|
||||
"dropped_change" => format!("{}", current_head_beacon_block_root),
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
_ => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// An event handler that pushes events to both the websockets handler and the SSE handler.
|
||||
// Named after the unix `tee` command. Meant as a temporary solution before ditching WebSockets
|
||||
// completely once SSE functions well enough.
|
||||
pub struct TeeEventHandler<E: EthSpec> {
|
||||
websockets_handler: WebSocketSender<E>,
|
||||
sse_handler: ServerSentEvents<E>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> TeeEventHandler<E> {
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn new(
|
||||
log: Logger,
|
||||
websockets_handler: WebSocketSender<E>,
|
||||
) -> Result<(Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>), String> {
|
||||
let (sse_handler, bus) = ServerSentEvents::new(log);
|
||||
let result = Self {
|
||||
websockets_handler,
|
||||
sse_handler,
|
||||
};
|
||||
Ok((result, bus))
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> EventHandler<E> for TeeEventHandler<E> {
|
||||
fn register(&self, kind: EventKind<E>) -> Result<(), String> {
|
||||
self.websockets_handler.register(kind.clone())?;
|
||||
self.sse_handler.register(kind)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> EventHandler<T> for NullEventHandler<T> {
|
||||
fn register(&self, _kind: EventKind<T>) -> Result<(), String> {
|
||||
Ok(())
|
||||
@@ -30,7 +112,7 @@ impl<T: EthSpec> Default for NullEventHandler<T> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
#[serde(
|
||||
bound = "T: EthSpec",
|
||||
rename_all = "snake_case",
|
||||
@@ -49,11 +131,11 @@ pub enum EventKind<T: EthSpec> {
|
||||
},
|
||||
BeaconBlockImported {
|
||||
block_root: Hash256,
|
||||
block: Box<BeaconBlock<T>>,
|
||||
block: Box<SignedBeaconBlock<T>>,
|
||||
},
|
||||
BeaconBlockRejected {
|
||||
reason: String,
|
||||
block: Box<BeaconBlock<T>>,
|
||||
block: Box<SignedBeaconBlock<T>>,
|
||||
},
|
||||
BeaconAttestationImported {
|
||||
attestation: Box<Attestation<T>>,
|
||||
|
||||
@@ -1,365 +0,0 @@
|
||||
use crate::{errors::BeaconChainError, metrics, BeaconChain, BeaconChainTypes};
|
||||
use lmd_ghost::LmdGhost;
|
||||
use parking_lot::RwLock;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use state_processing::{common::get_attesting_indices, per_slot_processing};
|
||||
use std::sync::Arc;
|
||||
use store::{Error as StoreError, Store};
|
||||
use types::{
|
||||
Attestation, BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, Slot,
|
||||
};
|
||||
|
||||
type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
MissingBlock(Hash256),
|
||||
MissingState(Hash256),
|
||||
BackendError(String),
|
||||
BeaconStateError(BeaconStateError),
|
||||
StoreError(StoreError),
|
||||
BeaconChainError(Box<BeaconChainError>),
|
||||
}
|
||||
|
||||
pub struct ForkChoice<T: BeaconChainTypes> {
|
||||
store: Arc<T::Store>,
|
||||
backend: T::LmdGhost,
|
||||
/// Used for resolving the `0x00..00` alias back to genesis.
|
||||
///
|
||||
/// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root
|
||||
/// whenever the struct was instantiated.
|
||||
genesis_block_root: Hash256,
|
||||
/// The fork choice rule's current view of the justified checkpoint.
|
||||
justified_checkpoint: RwLock<Checkpoint>,
|
||||
/// The best justified checkpoint we've seen, which may be ahead of `justified_checkpoint`.
|
||||
best_justified_checkpoint: RwLock<Checkpoint>,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> PartialEq for ForkChoice<T> {
|
||||
/// This implementation ignores the `store`.
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.backend == other.backend
|
||||
&& self.genesis_block_root == other.genesis_block_root
|
||||
&& *self.justified_checkpoint.read() == *other.justified_checkpoint.read()
|
||||
&& *self.best_justified_checkpoint.read() == *other.best_justified_checkpoint.read()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> ForkChoice<T> {
|
||||
/// Instantiate a new fork chooser.
|
||||
///
|
||||
/// "Genesis" does not necessarily need to be the absolute genesis, it can be some finalized
|
||||
/// block.
|
||||
pub fn new(
|
||||
store: Arc<T::Store>,
|
||||
backend: T::LmdGhost,
|
||||
genesis_block_root: Hash256,
|
||||
genesis_slot: Slot,
|
||||
) -> Self {
|
||||
let justified_checkpoint = Checkpoint {
|
||||
epoch: genesis_slot.epoch(T::EthSpec::slots_per_epoch()),
|
||||
root: genesis_block_root,
|
||||
};
|
||||
Self {
|
||||
store: store.clone(),
|
||||
backend,
|
||||
genesis_block_root,
|
||||
justified_checkpoint: RwLock::new(justified_checkpoint.clone()),
|
||||
best_justified_checkpoint: RwLock::new(justified_checkpoint),
|
||||
}
|
||||
}
|
||||
|
||||
/// Determine whether the fork choice's view of the justified checkpoint should be updated.
|
||||
///
|
||||
/// To prevent the bouncing attack, an update is allowed only in these conditions:
|
||||
///
|
||||
/// * We're in the first SAFE_SLOTS_TO_UPDATE_JUSTIFIED slots of the epoch, or
|
||||
/// * The new justified checkpoint is a descendant of the current justified checkpoint
|
||||
fn should_update_justified_checkpoint(
|
||||
&self,
|
||||
chain: &BeaconChain<T>,
|
||||
new_justified_checkpoint: &Checkpoint,
|
||||
) -> Result<bool> {
|
||||
if Self::compute_slots_since_epoch_start(chain.slot()?)
|
||||
< chain.spec.safe_slots_to_update_justified
|
||||
{
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let justified_checkpoint = self.justified_checkpoint.read().clone();
|
||||
|
||||
let current_justified_block = chain
|
||||
.get_block(&justified_checkpoint.root)?
|
||||
.ok_or_else(|| Error::MissingBlock(justified_checkpoint.root))?;
|
||||
|
||||
let new_justified_block = chain
|
||||
.get_block(&new_justified_checkpoint.root)?
|
||||
.ok_or_else(|| Error::MissingBlock(new_justified_checkpoint.root))?;
|
||||
|
||||
let slots_per_epoch = T::EthSpec::slots_per_epoch();
|
||||
|
||||
Ok(
|
||||
new_justified_block.slot > justified_checkpoint.epoch.start_slot(slots_per_epoch)
|
||||
&& chain.get_ancestor_block_root(
|
||||
new_justified_checkpoint.root,
|
||||
current_justified_block.slot,
|
||||
)? == Some(justified_checkpoint.root),
|
||||
)
|
||||
}
|
||||
|
||||
/// Calculate how far `slot` lies from the start of its epoch.
|
||||
fn compute_slots_since_epoch_start(slot: Slot) -> u64 {
|
||||
let slots_per_epoch = T::EthSpec::slots_per_epoch();
|
||||
(slot - slot.epoch(slots_per_epoch).start_slot(slots_per_epoch)).as_u64()
|
||||
}
|
||||
|
||||
/// Run the fork choice rule to determine the head.
|
||||
pub fn find_head(&self, chain: &BeaconChain<T>) -> Result<Hash256> {
|
||||
let timer = metrics::start_timer(&metrics::FORK_CHOICE_FIND_HEAD_TIMES);
|
||||
|
||||
let (start_state, start_block_root, start_block_slot) = {
|
||||
// Check if we should update our view of the justified checkpoint.
|
||||
// Doing this check here should be quasi-equivalent to the update in the `on_tick`
|
||||
// function of the spec, so long as `find_head` is called at least once during the first
|
||||
// SAFE_SLOTS_TO_UPDATE_JUSTIFIED slots.
|
||||
let best_justified_checkpoint = self.best_justified_checkpoint.read();
|
||||
if self.should_update_justified_checkpoint(chain, &best_justified_checkpoint)? {
|
||||
*self.justified_checkpoint.write() = best_justified_checkpoint.clone();
|
||||
}
|
||||
|
||||
let current_justified_checkpoint = self.justified_checkpoint.read().clone();
|
||||
|
||||
let (block_root, block_justified_slot) = (
|
||||
current_justified_checkpoint.root,
|
||||
current_justified_checkpoint
|
||||
.epoch
|
||||
.start_slot(T::EthSpec::slots_per_epoch()),
|
||||
);
|
||||
|
||||
let block = chain
|
||||
.store
|
||||
.get::<BeaconBlock<T::EthSpec>>(&block_root)?
|
||||
.ok_or_else(|| Error::MissingBlock(block_root))?;
|
||||
|
||||
// Resolve the `0x00.. 00` alias back to genesis
|
||||
let block_root = if block_root == Hash256::zero() {
|
||||
self.genesis_block_root
|
||||
} else {
|
||||
block_root
|
||||
};
|
||||
|
||||
let mut state: BeaconState<T::EthSpec> = chain
|
||||
.store
|
||||
.get_state(&block.state_root, Some(block.slot))?
|
||||
.ok_or_else(|| Error::MissingState(block.state_root))?;
|
||||
|
||||
// Fast-forward the state to the start slot of the epoch where it was justified.
|
||||
for _ in block.slot.as_u64()..block_justified_slot.as_u64() {
|
||||
per_slot_processing(&mut state, &chain.spec)
|
||||
.map_err(BeaconChainError::SlotProcessingError)?
|
||||
}
|
||||
|
||||
(state, block_root, block_justified_slot)
|
||||
};
|
||||
|
||||
// A function that returns the weight for some validator index.
|
||||
let weight = |validator_index: usize| -> Option<u64> {
|
||||
start_state
|
||||
.validators
|
||||
.get(validator_index)
|
||||
.map(|v| v.effective_balance)
|
||||
};
|
||||
|
||||
let result = self
|
||||
.backend
|
||||
.find_head(start_block_slot, start_block_root, weight)
|
||||
.map_err(Into::into);
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Process all attestations in the given `block`.
|
||||
///
|
||||
/// Assumes the block (and therefore its attestations) are valid. It is a logic error to
|
||||
/// provide an invalid block.
|
||||
pub fn process_block(
|
||||
&self,
|
||||
chain: &BeaconChain<T>,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
block: &BeaconBlock<T::EthSpec>,
|
||||
block_root: Hash256,
|
||||
) -> Result<()> {
|
||||
let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
|
||||
// Note: we never count the block as a latest message, only attestations.
|
||||
//
|
||||
// I (Paul H) do not have an explicit reference to this, but I derive it from this
|
||||
// document:
|
||||
//
|
||||
// https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md
|
||||
for attestation in &block.body.attestations {
|
||||
// If the `data.beacon_block_root` block is not known to us, simply ignore the latest
|
||||
// vote.
|
||||
if let Some(block) = self
|
||||
.store
|
||||
.get::<BeaconBlock<T::EthSpec>>(&attestation.data.beacon_block_root)?
|
||||
{
|
||||
self.process_attestation(state, attestation, &block)?;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we should update our view of the justified checkpoint
|
||||
if state.current_justified_checkpoint.epoch > self.justified_checkpoint.read().epoch {
|
||||
*self.best_justified_checkpoint.write() = state.current_justified_checkpoint.clone();
|
||||
if self
|
||||
.should_update_justified_checkpoint(chain, &state.current_justified_checkpoint)?
|
||||
{
|
||||
*self.justified_checkpoint.write() = state.current_justified_checkpoint.clone();
|
||||
}
|
||||
}
|
||||
|
||||
// This does not apply a vote to the block, it just makes fork choice aware of the block so
|
||||
// it can still be identified as the head even if it doesn't have any votes.
|
||||
//
|
||||
// A case where a block without any votes can be the head is where it is the only child of
|
||||
// a block that has the majority of votes applied to it.
|
||||
self.backend.process_block(block, block_root)?;
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process an attestation which references `block` in `attestation.data.beacon_block_root`.
|
||||
///
|
||||
/// Assumes the attestation is valid.
|
||||
pub fn process_attestation(
|
||||
&self,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
attestation: &Attestation<T::EthSpec>,
|
||||
block: &BeaconBlock<T::EthSpec>,
|
||||
) -> Result<()> {
|
||||
let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
|
||||
|
||||
let block_hash = attestation.data.beacon_block_root;
|
||||
|
||||
// Ignore any attestations to the zero hash.
|
||||
//
|
||||
// This is an edge case that results from the spec aliasing the zero hash to the genesis
|
||||
// block. Attesters may attest to the zero hash if they have never seen a block.
|
||||
//
|
||||
// We have two options here:
|
||||
//
|
||||
// 1. Apply all zero-hash attestations to the zero hash.
|
||||
// 2. Ignore all attestations to the zero hash.
|
||||
//
|
||||
// (1) becomes weird once we hit finality and fork choice drops the genesis block. (2) is
|
||||
// fine because votes to the genesis block are not useful; all validators implicitly attest
|
||||
// to genesis just by being present in the chain.
|
||||
//
|
||||
// Additionally, don't add any block hash to fork choice unless we have imported the block.
|
||||
if block_hash != Hash256::zero() {
|
||||
let validator_indices =
|
||||
get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?;
|
||||
|
||||
for validator_index in validator_indices {
|
||||
self.backend
|
||||
.process_attestation(validator_index, block_hash, block.slot)?;
|
||||
}
|
||||
}
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the latest message for a given validator, if any.
|
||||
///
|
||||
/// Returns `(block_root, block_slot)`.
|
||||
pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> {
|
||||
self.backend.latest_message(validator_index)
|
||||
}
|
||||
|
||||
/// Runs an integrity verification function on the underlying fork choice algorithm.
|
||||
///
|
||||
/// Returns `Ok(())` if the underlying fork choice has maintained it's integrity,
|
||||
/// `Err(description)` otherwise.
|
||||
pub fn verify_integrity(&self) -> core::result::Result<(), String> {
|
||||
self.backend.verify_integrity()
|
||||
}
|
||||
|
||||
/// Inform the fork choice that the given block (and corresponding root) have been finalized so
|
||||
/// it may prune it's storage.
|
||||
///
|
||||
/// `finalized_block_root` must be the root of `finalized_block`.
|
||||
pub fn process_finalization(
|
||||
&self,
|
||||
finalized_block: &BeaconBlock<T::EthSpec>,
|
||||
finalized_block_root: Hash256,
|
||||
) -> Result<()> {
|
||||
self.backend
|
||||
.update_finalized_root(finalized_block, finalized_block_root)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Returns a `SszForkChoice` which contains the current state of `Self`.
|
||||
pub fn as_ssz_container(&self) -> SszForkChoice {
|
||||
SszForkChoice {
|
||||
genesis_block_root: self.genesis_block_root.clone(),
|
||||
justified_checkpoint: self.justified_checkpoint.read().clone(),
|
||||
best_justified_checkpoint: self.best_justified_checkpoint.read().clone(),
|
||||
backend_bytes: self.backend.as_bytes(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Instantiates `Self` from a prior `SszForkChoice`.
|
||||
///
|
||||
/// The created `Self` will have the same state as the `Self` that created the `SszForkChoice`.
|
||||
pub fn from_ssz_container(ssz_container: SszForkChoice, store: Arc<T::Store>) -> Result<Self> {
|
||||
let backend = LmdGhost::from_bytes(&ssz_container.backend_bytes, store.clone())?;
|
||||
|
||||
Ok(Self {
|
||||
store,
|
||||
backend,
|
||||
genesis_block_root: ssz_container.genesis_block_root,
|
||||
justified_checkpoint: RwLock::new(ssz_container.justified_checkpoint),
|
||||
best_justified_checkpoint: RwLock::new(ssz_container.best_justified_checkpoint),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes.
|
||||
///
|
||||
/// This is used when persisting the state of the `BeaconChain` to disk.
|
||||
#[derive(Encode, Decode, Clone)]
|
||||
pub struct SszForkChoice {
|
||||
genesis_block_root: Hash256,
|
||||
justified_checkpoint: Checkpoint,
|
||||
best_justified_checkpoint: Checkpoint,
|
||||
backend_bytes: Vec<u8>,
|
||||
}
|
||||
|
||||
impl From<BeaconStateError> for Error {
|
||||
fn from(e: BeaconStateError) -> Error {
|
||||
Error::BeaconStateError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BeaconChainError> for Error {
|
||||
fn from(e: BeaconChainError) -> Error {
|
||||
Error::BeaconChainError(Box::new(e))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<StoreError> for Error {
|
||||
fn from(e: StoreError) -> Error {
|
||||
Error::StoreError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for Error {
|
||||
fn from(e: String) -> Error {
|
||||
Error::BackendError(e)
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,7 @@ use parking_lot::RwLock;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use std::collections::HashMap;
|
||||
use std::iter::FromIterator;
|
||||
use types::{BeaconBlock, EthSpec, Hash256, Slot};
|
||||
use types::{Hash256, Slot};
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
@@ -23,11 +23,22 @@ impl HeadTracker {
|
||||
/// This function assumes that no block is imported without its parent having already been
|
||||
/// imported. It cannot detect an error if this is not the case, it is the responsibility of
|
||||
/// the upstream user.
|
||||
pub fn register_block<E: EthSpec>(&self, block_root: Hash256, block: &BeaconBlock<E>) {
|
||||
pub fn register_block(&self, block_root: Hash256, parent_root: Hash256, slot: Slot) {
|
||||
let mut map = self.0.write();
|
||||
map.remove(&parent_root);
|
||||
map.insert(block_root, slot);
|
||||
}
|
||||
|
||||
map.remove(&block.parent_root);
|
||||
map.insert(block_root, block.slot);
|
||||
/// Removes abandoned head.
|
||||
pub fn remove_head(&self, block_root: Hash256) {
|
||||
let mut map = self.0.write();
|
||||
debug_assert!(map.contains_key(&block_root));
|
||||
map.remove(&block_root);
|
||||
}
|
||||
|
||||
/// Returns true iff `block_root` is a recognized head.
|
||||
pub fn contains_head(&self, block_root: Hash256) -> bool {
|
||||
self.0.read().contains_key(&block_root)
|
||||
}
|
||||
|
||||
/// Returns the list of heads in the chain.
|
||||
@@ -59,10 +70,10 @@ impl HeadTracker {
|
||||
let slots_len = ssz_container.slots.len();
|
||||
|
||||
if roots_len != slots_len {
|
||||
return Err(Error::MismatchingLengths {
|
||||
Err(Error::MismatchingLengths {
|
||||
roots_len,
|
||||
slots_len,
|
||||
});
|
||||
})
|
||||
} else {
|
||||
let map = HashMap::from_iter(
|
||||
ssz_container
|
||||
@@ -96,7 +107,7 @@ pub struct SszHeadTracker {
|
||||
mod test {
|
||||
use super::*;
|
||||
use ssz::{Decode, Encode};
|
||||
use types::MainnetEthSpec;
|
||||
use types::{BeaconBlock, EthSpec, MainnetEthSpec};
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
|
||||
@@ -107,7 +118,7 @@ mod test {
|
||||
let head_tracker = HeadTracker::default();
|
||||
|
||||
for i in 0..16 {
|
||||
let mut block = BeaconBlock::empty(spec);
|
||||
let mut block: BeaconBlock<E> = BeaconBlock::empty(spec);
|
||||
let block_root = Hash256::from_low_u64_be(i);
|
||||
|
||||
block.slot = Slot::new(i);
|
||||
@@ -117,7 +128,7 @@ mod test {
|
||||
Hash256::from_low_u64_be(i - 1)
|
||||
};
|
||||
|
||||
head_tracker.register_block::<E>(block_root, &block);
|
||||
head_tracker.register_block(block_root, block.parent_root, block.slot);
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
@@ -126,11 +137,11 @@ mod test {
|
||||
"should only have one head"
|
||||
);
|
||||
|
||||
let mut block = BeaconBlock::empty(spec);
|
||||
let mut block: BeaconBlock<E> = BeaconBlock::empty(spec);
|
||||
let block_root = Hash256::from_low_u64_be(42);
|
||||
block.slot = Slot::new(15);
|
||||
block.parent_root = Hash256::from_low_u64_be(14);
|
||||
head_tracker.register_block::<E>(block_root, &block);
|
||||
head_tracker.register_block(block_root, block.parent_root, block.slot);
|
||||
|
||||
let heads = head_tracker.heads();
|
||||
|
||||
|
||||
@@ -2,28 +2,48 @@
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
#[macro_use]
|
||||
extern crate slog;
|
||||
extern crate slog_term;
|
||||
|
||||
pub mod attestation_verification;
|
||||
mod beacon_chain;
|
||||
mod beacon_fork_choice_store;
|
||||
mod beacon_snapshot;
|
||||
mod block_verification;
|
||||
pub mod builder;
|
||||
mod checkpoint;
|
||||
mod checkpoint_cache;
|
||||
pub mod chain_config;
|
||||
mod errors;
|
||||
pub mod eth1_chain;
|
||||
pub mod events;
|
||||
mod fork_choice;
|
||||
mod head_tracker;
|
||||
mod metrics;
|
||||
pub mod migrate;
|
||||
mod naive_aggregation_pool;
|
||||
mod observed_attestations;
|
||||
mod observed_attesters;
|
||||
mod observed_block_producers;
|
||||
pub mod observed_operations;
|
||||
mod persisted_beacon_chain;
|
||||
mod persisted_fork_choice;
|
||||
mod shuffling_cache;
|
||||
mod snapshot_cache;
|
||||
pub mod test_utils;
|
||||
mod timeout_rw_lock;
|
||||
mod validator_pubkey_cache;
|
||||
|
||||
pub use self::beacon_chain::{
|
||||
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome,
|
||||
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, ChainSegmentResult,
|
||||
ForkChoiceError, StateSkipConfig,
|
||||
};
|
||||
pub use self::checkpoint::CheckPoint;
|
||||
pub use self::beacon_snapshot::BeaconSnapshot;
|
||||
pub use self::chain_config::ChainConfig;
|
||||
pub use self::errors::{BeaconChainError, BlockProductionError};
|
||||
pub use attestation_verification::Error as AttestationError;
|
||||
pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
|
||||
pub use block_verification::{BlockError, GossipVerifiedBlock};
|
||||
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
|
||||
pub use events::EventHandler;
|
||||
pub use fork_choice::ForkChoice;
|
||||
pub use lmd_ghost;
|
||||
pub use metrics::scrape_for_metrics;
|
||||
pub use parking_lot;
|
||||
pub use slot_clock;
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use crate::{BeaconChain, BeaconChainTypes};
|
||||
pub use lighthouse_metrics::*;
|
||||
use types::{BeaconState, Epoch, Hash256, Slot};
|
||||
use slot_clock::SlotClock;
|
||||
use types::{BeaconState, Epoch, EthSpec, Hash256, Slot};
|
||||
|
||||
lazy_static! {
|
||||
/*
|
||||
@@ -32,6 +33,10 @@ lazy_static! {
|
||||
"beacon_block_processing_committee_building_seconds",
|
||||
"Time spent building/obtaining committees for block processing."
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_SIGNATURE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_signature_seconds",
|
||||
"Time spent doing signature verification for a block."
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_CORE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_core_seconds",
|
||||
"Time spent doing the core per_block_processing state processing."
|
||||
@@ -44,9 +49,9 @@ lazy_static! {
|
||||
"beacon_block_processing_db_write_seconds",
|
||||
"Time spent writing a newly processed block and state to DB"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_fork_choice_register_seconds",
|
||||
"Time spent registering the new block with fork choice (but not finding head)"
|
||||
pub static ref BLOCK_PROCESSING_ATTESTATION_OBSERVATION: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_attestation_observation_seconds",
|
||||
"Time spent hashing and remembering all the attestations in the block"
|
||||
);
|
||||
|
||||
/*
|
||||
@@ -71,25 +76,109 @@ lazy_static! {
|
||||
"Number of attestations in a block"
|
||||
);
|
||||
|
||||
/*
|
||||
* Unaggregated Attestation Verification
|
||||
*/
|
||||
pub static ref UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_unaggregated_attestation_processing_requests_total",
|
||||
"Count of all unaggregated attestations submitted for processing"
|
||||
);
|
||||
pub static ref UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_unaggregated_attestation_processing_successes_total",
|
||||
"Number of unaggregated attestations verified for gossip"
|
||||
);
|
||||
pub static ref UNAGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_unaggregated_attestation_gossip_verification_seconds",
|
||||
"Full runtime of aggregated attestation gossip verification"
|
||||
);
|
||||
|
||||
/*
|
||||
* Aggregated Attestation Verification
|
||||
*/
|
||||
pub static ref AGGREGATED_ATTESTATION_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_aggregated_attestation_processing_requests_total",
|
||||
"Count of all aggregated attestations submitted for processing"
|
||||
);
|
||||
pub static ref AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_aggregated_attestation_processing_successes_total",
|
||||
"Number of aggregated attestations verified for gossip"
|
||||
);
|
||||
pub static ref AGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_aggregated_attestation_gossip_verification_seconds",
|
||||
"Full runtime of aggregated attestation gossip verification"
|
||||
);
|
||||
|
||||
/*
|
||||
* General Attestation Processing
|
||||
*/
|
||||
pub static ref ATTESTATION_PROCESSING_APPLY_TO_AGG_POOL: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_apply_to_agg_pool",
|
||||
"Time spent applying an attestation to the naive aggregation pool"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_AGG_POOL_MAPS_WRITE_LOCK: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_agg_pool_maps_write_lock",
|
||||
"Time spent waiting for the maps write lock when adding to the agg poll"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_AGG_POOL_PRUNE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_agg_pool_prune",
|
||||
"Time spent for the agg pool to prune"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_AGG_POOL_INSERT: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_agg_pool_insert",
|
||||
"Time spent for the outer pool.insert() function of agg pool"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_agg_pool_core_insert",
|
||||
"Time spent for the core map.insert() function of agg pool"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_AGG_POOL_AGGREGATION: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_agg_pool_aggregation",
|
||||
"Time spent doing signature aggregation when adding to the agg poll"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_agg_pool_create_map",
|
||||
"Time spent for creating a map for a new slot"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_APPLY_TO_OP_POOL: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_apply_to_op_pool",
|
||||
"Time spent applying an attestation to the block inclusion pool"
|
||||
);
|
||||
|
||||
/*
|
||||
* Attestation Processing
|
||||
*/
|
||||
pub static ref ATTESTATION_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_attestation_processing_requests_total",
|
||||
"Count of all attestations submitted for processing"
|
||||
pub static ref ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_shuffling_cache_wait_seconds",
|
||||
"Time spent on waiting for the shuffling cache lock during attestation processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_attestation_processing_successes_total",
|
||||
"total_attestation_processing_successes"
|
||||
pub static ref ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_committee_building_seconds",
|
||||
"Time spent on building committees during attestation processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_seconds",
|
||||
"Full runtime of attestation processing"
|
||||
pub static ref ATTESTATION_PROCESSING_STATE_READ_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_state_read_seconds",
|
||||
"Time spent on reading the state during attestation processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_CORE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_core_seconds",
|
||||
"Time spent on the core spec processing of attestation processing"
|
||||
pub static ref ATTESTATION_PROCESSING_STATE_SKIP_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_state_skip_seconds",
|
||||
"Time spent on reading the state during attestation processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_SIGNATURE_SETUP_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_signature_setup_seconds",
|
||||
"Time spent on setting up for the signature verification of attestation processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_SIGNATURE_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_signature_seconds",
|
||||
"Time spent on the signature verification of attestation processing"
|
||||
);
|
||||
|
||||
/*
|
||||
* Shuffling cache
|
||||
*/
|
||||
pub static ref SHUFFLING_CACHE_HITS: Result<IntCounter> =
|
||||
try_create_int_counter("beacon_shuffling_cache_hits_total", "Count of times shuffling cache fulfils request");
|
||||
pub static ref SHUFFLING_CACHE_MISSES: Result<IntCounter> =
|
||||
try_create_int_counter("beacon_shuffling_cache_misses_total", "Count of times shuffling cache fulfils request");
|
||||
|
||||
/*
|
||||
* Attestation Production
|
||||
@@ -106,7 +195,10 @@ lazy_static! {
|
||||
"beacon_attestation_production_seconds",
|
||||
"Full runtime of attestation production"
|
||||
);
|
||||
}
|
||||
|
||||
// Second lazy-static block is used to account for macro recursion limit.
|
||||
lazy_static! {
|
||||
/*
|
||||
* Fork Choice
|
||||
*/
|
||||
@@ -138,20 +230,28 @@ lazy_static! {
|
||||
"beacon_fork_choice_process_attestation_seconds",
|
||||
"Time taken to add an attestation to fork choice"
|
||||
);
|
||||
pub static ref BALANCES_CACHE_HITS: Result<IntCounter> =
|
||||
try_create_int_counter("beacon_balances_cache_hits_total", "Count of times balances cache fulfils request");
|
||||
pub static ref BALANCES_CACHE_MISSES: Result<IntCounter> =
|
||||
try_create_int_counter("beacon_balances_cache_misses_total", "Count of times balances cache fulfils request");
|
||||
|
||||
/*
|
||||
* Persisting BeaconChain to disk
|
||||
* Persisting BeaconChain components to disk
|
||||
*/
|
||||
pub static ref PERSIST_CHAIN: Result<Histogram> =
|
||||
try_create_histogram("beacon_persist_chain", "Time taken to update the canonical head");
|
||||
pub static ref PERSIST_HEAD: Result<Histogram> =
|
||||
try_create_histogram("beacon_persist_head", "Time taken to persist the canonical head");
|
||||
pub static ref PERSIST_OP_POOL: Result<Histogram> =
|
||||
try_create_histogram("beacon_persist_op_pool", "Time taken to persist the operations pool");
|
||||
pub static ref PERSIST_ETH1_CACHE: Result<Histogram> =
|
||||
try_create_histogram("beacon_persist_eth1_cache", "Time taken to persist the eth1 caches");
|
||||
pub static ref PERSIST_FORK_CHOICE: Result<Histogram> =
|
||||
try_create_histogram("beacon_persist_fork_choice", "Time taken to persist the fork choice struct");
|
||||
|
||||
/*
|
||||
* Checkpoint cache
|
||||
* Eth1
|
||||
*/
|
||||
pub static ref CHECKPOINT_CACHE_HITS: Result<IntCounter> =
|
||||
try_create_int_counter("beacon_checkpoint_cache_hits_total", "Count of times checkpoint cache fulfils request");
|
||||
pub static ref CHECKPOINT_CACHE_MISSES: Result<IntCounter> =
|
||||
try_create_int_counter("beacon_checkpoint_cache_misses_total", "Count of times checkpoint cache fulfils request");
|
||||
pub static ref DEFAULT_ETH1_VOTES: Result<IntCounter> =
|
||||
try_create_int_counter("beacon_eth1_default_votes", "Count of times we have voted default value for eth1 data");
|
||||
|
||||
/*
|
||||
* Chain Head
|
||||
@@ -188,14 +288,74 @@ lazy_static! {
|
||||
try_create_int_gauge("beacon_head_state_withdrawn_validators_total", "Sum of all validator balances at the head of the chain");
|
||||
pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain");
|
||||
|
||||
/*
|
||||
* Operation Pool
|
||||
*/
|
||||
pub static ref OP_POOL_NUM_ATTESTATIONS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_attestations_total", "Count of attestations in the op pool");
|
||||
pub static ref OP_POOL_NUM_ATTESTER_SLASHINGS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_attester_slashings_total", "Count of attester slashings in the op pool");
|
||||
pub static ref OP_POOL_NUM_PROPOSER_SLASHINGS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_proposer_slashings_total", "Count of proposer slashings in the op pool");
|
||||
pub static ref OP_POOL_NUM_VOLUNTARY_EXITS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_voluntary_exits_total", "Count of voluntary exits in the op pool");
|
||||
|
||||
/*
|
||||
* Participation Metrics
|
||||
*/
|
||||
pub static ref PARTICIPATION_PREV_EPOCH_ATTESTER: Result<Gauge> = try_create_float_gauge(
|
||||
"beacon_participation_prev_epoch_attester",
|
||||
"Ratio of attesting balances to total balances"
|
||||
);
|
||||
pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTER: Result<Gauge> = try_create_float_gauge(
|
||||
"beacon_participation_prev_epoch_target_attester",
|
||||
"Ratio of target-attesting balances to total balances"
|
||||
);
|
||||
pub static ref PARTICIPATION_PREV_EPOCH_HEAD_ATTESTER: Result<Gauge> = try_create_float_gauge(
|
||||
"beacon_participation_prev_epoch_head_attester",
|
||||
"Ratio of head-attesting balances to total balances"
|
||||
);
|
||||
|
||||
/*
|
||||
* Attestation Observation Metrics
|
||||
*/
|
||||
pub static ref ATTN_OBSERVATION_PREV_EPOCH_ATTESTERS: Result<IntGauge> = try_create_int_gauge(
|
||||
"beacon_attn_observation_epoch_attesters",
|
||||
"Count of attesters that have been seen by the beacon chain in the previous epoch"
|
||||
);
|
||||
pub static ref ATTN_OBSERVATION_PREV_EPOCH_AGGREGATORS: Result<IntGauge> = try_create_int_gauge(
|
||||
"beacon_attn_observation_epoch_aggregators",
|
||||
"Count of aggregators that have been seen by the beacon chain in the previous epoch"
|
||||
);
|
||||
}
|
||||
|
||||
/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
|
||||
/// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`.
|
||||
pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
|
||||
scrape_head_state::<T>(
|
||||
&beacon_chain.head().beacon_state,
|
||||
beacon_chain.head().beacon_state_root,
|
||||
if let Ok(head) = beacon_chain.head() {
|
||||
scrape_head_state::<T>(&head.beacon_state, head.beacon_state_root)
|
||||
}
|
||||
|
||||
if let Some(slot) = beacon_chain.slot_clock.now() {
|
||||
scrape_attestation_observation(slot, beacon_chain);
|
||||
}
|
||||
|
||||
set_gauge_by_usize(
|
||||
&OP_POOL_NUM_ATTESTATIONS,
|
||||
beacon_chain.op_pool.num_attestations(),
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&OP_POOL_NUM_ATTESTER_SLASHINGS,
|
||||
beacon_chain.op_pool.num_attester_slashings(),
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&OP_POOL_NUM_PROPOSER_SLASHINGS,
|
||||
beacon_chain.op_pool.num_proposer_slashings(),
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&OP_POOL_NUM_VOLUNTARY_EXITS,
|
||||
beacon_chain.op_pool.num_voluntary_exits(),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -253,6 +413,24 @@ fn scrape_head_state<T: BeaconChainTypes>(state: &BeaconState<T::EthSpec>, state
|
||||
set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index);
|
||||
}
|
||||
|
||||
fn scrape_attestation_observation<T: BeaconChainTypes>(slot_now: Slot, chain: &BeaconChain<T>) {
|
||||
let prev_epoch = slot_now.epoch(T::EthSpec::slots_per_epoch()) - 1;
|
||||
|
||||
if let Some(count) = chain
|
||||
.observed_attesters
|
||||
.observed_validator_count(prev_epoch)
|
||||
{
|
||||
set_gauge_by_usize(&ATTN_OBSERVATION_PREV_EPOCH_ATTESTERS, count);
|
||||
}
|
||||
|
||||
if let Some(count) = chain
|
||||
.observed_aggregators
|
||||
.observed_validator_count(prev_epoch)
|
||||
{
|
||||
set_gauge_by_usize(&ATTN_OBSERVATION_PREV_EPOCH_AGGREGATORS, count);
|
||||
}
|
||||
}
|
||||
|
||||
fn set_gauge_by_slot(gauge: &Result<IntGauge>, value: Slot) {
|
||||
set_gauge(gauge, value.as_u64() as i64);
|
||||
}
|
||||
|
||||
441
beacon_node/beacon_chain/src/migrate.rs
Normal file
441
beacon_node/beacon_chain/src/migrate.rs
Normal file
@@ -0,0 +1,441 @@
|
||||
use crate::errors::BeaconChainError;
|
||||
use crate::head_tracker::HeadTracker;
|
||||
use parking_lot::Mutex;
|
||||
use slog::{debug, warn, Logger};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::mem;
|
||||
use std::sync::mpsc;
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use store::hot_cold_store::{migrate_database, HotColdDBError};
|
||||
use store::iter::RootsIterator;
|
||||
use store::{Error, ItemStore, StoreOp};
|
||||
pub use store::{HotColdDB, MemoryStore};
|
||||
use types::{
|
||||
BeaconState, BeaconStateError, BeaconStateHash, Checkpoint, EthSpec, Hash256,
|
||||
SignedBeaconBlockHash, Slot,
|
||||
};
|
||||
|
||||
/// Logic errors that can occur during pruning, none of these should ever happen.
|
||||
#[derive(Debug)]
|
||||
pub enum PruningError {
|
||||
IncorrectFinalizedState {
|
||||
state_slot: Slot,
|
||||
new_finalized_slot: Slot,
|
||||
},
|
||||
MissingInfoForCanonicalChain {
|
||||
slot: Slot,
|
||||
},
|
||||
UnexpectedEqualStateRoots,
|
||||
UnexpectedUnequalStateRoots,
|
||||
}
|
||||
|
||||
/// Trait for migration processes that update the database upon finalization.
|
||||
pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
|
||||
Send + Sync + 'static
|
||||
{
|
||||
fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self;
|
||||
|
||||
fn process_finalization(
|
||||
&self,
|
||||
_finalized_state_root: BeaconStateHash,
|
||||
_new_finalized_state: BeaconState<E>,
|
||||
_head_tracker: Arc<HeadTracker>,
|
||||
_old_finalized_checkpoint: Checkpoint,
|
||||
_new_finalized_checkpoint: Checkpoint,
|
||||
) -> Result<(), BeaconChainError> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Traverses live heads and prunes blocks and states of chains that we know can't be built
|
||||
/// upon because finalization would prohibit it. This is an optimisation intended to save disk
|
||||
/// space.
|
||||
///
|
||||
/// Assumptions:
|
||||
/// * It is called after every finalization.
|
||||
fn prune_abandoned_forks(
|
||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
head_tracker: Arc<HeadTracker>,
|
||||
new_finalized_state_hash: BeaconStateHash,
|
||||
new_finalized_state: &BeaconState<E>,
|
||||
old_finalized_checkpoint: Checkpoint,
|
||||
new_finalized_checkpoint: Checkpoint,
|
||||
log: &Logger,
|
||||
) -> Result<(), BeaconChainError> {
|
||||
// There will never be any blocks to prune if there is only a single head in the chain.
|
||||
if head_tracker.heads().len() == 1 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let old_finalized_slot = old_finalized_checkpoint
|
||||
.epoch
|
||||
.start_slot(E::slots_per_epoch());
|
||||
let new_finalized_slot = new_finalized_checkpoint
|
||||
.epoch
|
||||
.start_slot(E::slots_per_epoch());
|
||||
let new_finalized_block_hash = new_finalized_checkpoint.root.into();
|
||||
|
||||
// The finalized state must be for the epoch boundary slot, not the slot of the finalized
|
||||
// block.
|
||||
if new_finalized_state.slot != new_finalized_slot {
|
||||
return Err(PruningError::IncorrectFinalizedState {
|
||||
state_slot: new_finalized_state.slot,
|
||||
new_finalized_slot,
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
debug!(
|
||||
log,
|
||||
"Starting database pruning";
|
||||
"old_finalized_epoch" => old_finalized_checkpoint.epoch,
|
||||
"old_finalized_root" => format!("{:?}", old_finalized_checkpoint.root),
|
||||
"new_finalized_epoch" => new_finalized_checkpoint.epoch,
|
||||
"new_finalized_root" => format!("{:?}", new_finalized_checkpoint.root),
|
||||
);
|
||||
|
||||
// For each slot between the new finalized checkpoint and the old finalized checkpoint,
|
||||
// collect the beacon block root and state root of the canonical chain.
|
||||
let newly_finalized_chain: HashMap<Slot, (SignedBeaconBlockHash, BeaconStateHash)> =
|
||||
std::iter::once(Ok((
|
||||
new_finalized_slot,
|
||||
(new_finalized_block_hash, new_finalized_state_hash),
|
||||
)))
|
||||
.chain(
|
||||
RootsIterator::new(store.clone(), new_finalized_state).map(|res| {
|
||||
res.map(|(block_root, state_root, slot)| {
|
||||
(slot, (block_root.into(), state_root.into()))
|
||||
})
|
||||
}),
|
||||
)
|
||||
.take_while(|res| {
|
||||
res.as_ref()
|
||||
.map_or(true, |(slot, _)| *slot >= old_finalized_slot)
|
||||
})
|
||||
.collect::<Result<_, _>>()?;
|
||||
|
||||
// We don't know which blocks are shared among abandoned chains, so we buffer and delete
|
||||
// everything in one fell swoop.
|
||||
let mut abandoned_blocks: HashSet<SignedBeaconBlockHash> = HashSet::new();
|
||||
let mut abandoned_states: HashSet<(Slot, BeaconStateHash)> = HashSet::new();
|
||||
let mut abandoned_heads: HashSet<Hash256> = HashSet::new();
|
||||
|
||||
for (head_hash, head_slot) in head_tracker.heads() {
|
||||
let mut potentially_abandoned_head = Some(head_hash);
|
||||
let mut potentially_abandoned_blocks = vec![];
|
||||
|
||||
let head_state_hash = store
|
||||
.get_block(&head_hash)?
|
||||
.ok_or_else(|| BeaconStateError::MissingBeaconBlock(head_hash.into()))?
|
||||
.state_root();
|
||||
|
||||
// Iterate backwards from this head, staging blocks and states for deletion.
|
||||
let iter = std::iter::once(Ok((head_hash, head_state_hash, head_slot)))
|
||||
.chain(RootsIterator::from_block(store.clone(), head_hash)?);
|
||||
|
||||
for maybe_tuple in iter {
|
||||
let (block_root, state_root, slot) = maybe_tuple?;
|
||||
let block_root = SignedBeaconBlockHash::from(block_root);
|
||||
let state_root = BeaconStateHash::from(state_root);
|
||||
|
||||
match newly_finalized_chain.get(&slot) {
|
||||
// If there's no information about a slot on the finalized chain, then
|
||||
// it should be because it's ahead of the new finalized slot. Stage
|
||||
// the fork's block and state for possible deletion.
|
||||
None => {
|
||||
if slot > new_finalized_slot {
|
||||
potentially_abandoned_blocks.push((
|
||||
slot,
|
||||
Some(block_root),
|
||||
Some(state_root),
|
||||
));
|
||||
} else if slot >= old_finalized_slot {
|
||||
return Err(PruningError::MissingInfoForCanonicalChain { slot }.into());
|
||||
} else {
|
||||
// We must assume here any candidate chains include the old finalized
|
||||
// checkpoint, i.e. there aren't any forks starting at a block that is a
|
||||
// strict ancestor of old_finalized_checkpoint.
|
||||
warn!(
|
||||
log,
|
||||
"Found a chain that should already have been pruned";
|
||||
"head_block_root" => format!("{:?}", head_hash),
|
||||
"head_slot" => head_slot,
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
Some((finalized_block_root, finalized_state_root)) => {
|
||||
// This fork descends from a newly finalized block, we can stop.
|
||||
if block_root == *finalized_block_root {
|
||||
// Sanity check: if the slot and block root match, then the
|
||||
// state roots should match too.
|
||||
if state_root != *finalized_state_root {
|
||||
return Err(PruningError::UnexpectedUnequalStateRoots.into());
|
||||
}
|
||||
|
||||
// If the fork descends from the whole finalized chain,
|
||||
// do not prune it. Otherwise continue to delete all
|
||||
// of the blocks and states that have been staged for
|
||||
// deletion so far.
|
||||
if slot == new_finalized_slot {
|
||||
potentially_abandoned_blocks.clear();
|
||||
potentially_abandoned_head.take();
|
||||
}
|
||||
// If there are skipped slots on the fork to be pruned, then
|
||||
// we will have just staged the common block for deletion.
|
||||
// Unstage it.
|
||||
else {
|
||||
for (_, block_root, _) in
|
||||
potentially_abandoned_blocks.iter_mut().rev()
|
||||
{
|
||||
if block_root.as_ref() == Some(finalized_block_root) {
|
||||
*block_root = None;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
} else {
|
||||
if state_root == *finalized_state_root {
|
||||
return Err(PruningError::UnexpectedEqualStateRoots.into());
|
||||
}
|
||||
potentially_abandoned_blocks.push((
|
||||
slot,
|
||||
Some(block_root),
|
||||
Some(state_root),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(abandoned_head) = potentially_abandoned_head {
|
||||
debug!(
|
||||
log,
|
||||
"Pruning head";
|
||||
"head_block_root" => format!("{:?}", abandoned_head),
|
||||
"head_slot" => head_slot,
|
||||
);
|
||||
abandoned_heads.insert(abandoned_head);
|
||||
abandoned_blocks.extend(
|
||||
potentially_abandoned_blocks
|
||||
.iter()
|
||||
.filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash),
|
||||
);
|
||||
abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map(
|
||||
|(slot, _, maybe_state_hash)| maybe_state_hash.map(|sr| (*slot, sr)),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let batch: Vec<StoreOp<E>> = abandoned_blocks
|
||||
.into_iter()
|
||||
.map(StoreOp::DeleteBlock)
|
||||
.chain(
|
||||
abandoned_states
|
||||
.into_iter()
|
||||
.map(|(slot, state_hash)| StoreOp::DeleteState(state_hash, slot)),
|
||||
)
|
||||
.collect();
|
||||
|
||||
store.do_atomically(batch)?;
|
||||
for head_hash in abandoned_heads.into_iter() {
|
||||
head_tracker.remove_head(head_hash);
|
||||
}
|
||||
|
||||
debug!(log, "Database pruning complete");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Migrator that does nothing, for stores that don't need migration.
pub struct NullMigrator;

impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold> for NullMigrator {
    /// No-op: discards the finalized state and always reports success.
    fn process_finalization(
        &self,
        _finalized_state_root: BeaconStateHash,
        _new_finalized_state: BeaconState<E>,
        _head_tracker: Arc<HeadTracker>,
        _old_finalized_checkpoint: Checkpoint,
        _new_finalized_checkpoint: Checkpoint,
    ) -> Result<(), BeaconChainError> {
        Ok(())
    }

    /// The database handle and logger are ignored; no resources are held.
    fn new(_: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
        NullMigrator
    }
}
|
||||
|
||||
/// Migrator that immediately calls the store's migration function, blocking the current execution.
///
/// Mostly useful for tests.
pub struct BlockingMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
    // Handle to the split hot/cold database being migrated.
    db: Arc<HotColdDB<E, Hot, Cold>>,
    log: Logger,
}

impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
    for BlockingMigrator<E, Hot, Cold>
{
    fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self {
        BlockingMigrator { db, log }
    }

    /// Prune abandoned forks and then migrate finalized data from the hot to the cold
    /// database, synchronously on the calling thread.
    fn process_finalization(
        &self,
        finalized_state_root: BeaconStateHash,
        new_finalized_state: BeaconState<E>,
        head_tracker: Arc<HeadTracker>,
        old_finalized_checkpoint: Checkpoint,
        new_finalized_checkpoint: Checkpoint,
    ) -> Result<(), BeaconChainError> {
        Self::prune_abandoned_forks(
            self.db.clone(),
            head_tracker,
            finalized_state_root,
            &new_finalized_state,
            old_finalized_checkpoint,
            new_finalized_checkpoint,
            &self.log,
        )?;

        match migrate_database(
            self.db.clone(),
            finalized_state_root.into(),
            &new_finalized_state,
        ) {
            Ok(()) => Ok(()),
            // An unaligned finalized block merely postpones migration until a later,
            // aligned finalization; it is not treated as a failure.
            Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
                debug!(
                    self.log,
                    "Database migration postponed, unaligned finalized block";
                    "slot" => slot.as_u64()
                );
                Ok(())
            }
            Err(e) => Err(e.into()),
        }
    }
}
|
||||
|
||||
// The tuple sent to the background migration thread: the finalized state root, the
// finalized state itself, the head tracker, and the old/new finalized checkpoints
// (in the same order as the `process_finalization` parameters).
type MpscSender<E> = mpsc::Sender<(
    BeaconStateHash,
    BeaconState<E>,
    Arc<HeadTracker>,
    Checkpoint,
    Checkpoint,
)>;

/// Migrator that runs a background thread to migrate state from the hot to the cold database.
pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
    db: Arc<HotColdDB<E, Hot, Cold>>,
    // The channel to the worker thread and its join handle, guarded together so the
    // pair can be atomically replaced if the thread dies.
    tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
    log: Logger,
}
|
||||
|
||||
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
    for BackgroundMigrator<E, Hot, Cold>
{
    /// Spawn the worker thread immediately so it is ready for the first finalization.
    fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self {
        let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone()));
        Self { db, tx_thread, log }
    }

    /// Hand the newly finalized state to the background thread for pruning and migration.
    ///
    /// If the worker thread has died (detected via a failed channel send), a replacement
    /// thread is spawned and the message is re-sent exactly once.
    fn process_finalization(
        &self,
        finalized_state_root: BeaconStateHash,
        new_finalized_state: BeaconState<E>,
        head_tracker: Arc<HeadTracker>,
        old_finalized_checkpoint: Checkpoint,
        new_finalized_checkpoint: Checkpoint,
    ) -> Result<(), BeaconChainError> {
        let (ref mut tx, ref mut thread) = *self.tx_thread.lock();

        if let Err(tx_err) = tx.send((
            finalized_state_root,
            new_finalized_state,
            head_tracker,
            old_finalized_checkpoint,
            new_finalized_checkpoint,
        )) {
            // The receiver is gone, meaning the worker exited. Spawn a replacement
            // before attempting redelivery.
            let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone());

            *tx = new_tx;
            let old_thread = mem::replace(thread, new_thread);

            // Join the old thread, which will probably have panicked, or may have
            // halted normally just now as a result of us dropping the old `mpsc::Sender`.
            if let Err(thread_err) = old_thread.join() {
                warn!(
                    self.log,
                    "Migration thread died, so it was restarted";
                    "reason" => format!("{:?}", thread_err)
                );
            }

            // Retry at most once, we could recurse but that would risk overflowing the stack.
            let _ = tx.send(tx_err.0);
        }

        Ok(())
    }
}
|
||||
|
||||
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Hot, Cold> {
    /// Spawn a new child thread to run the migration process.
    ///
    /// Return a channel handle for sending new finalized states to the thread.
    ///
    /// The thread loops until all senders are dropped (at which point `recv` errors and
    /// the loop exits). Errors from pruning or migration are logged, never propagated,
    /// so one bad finalization does not kill the worker.
    fn spawn_thread(
        db: Arc<HotColdDB<E, Hot, Cold>>,
        log: Logger,
    ) -> (MpscSender<E>, thread::JoinHandle<()>) {
        let (tx, rx) = mpsc::channel();
        let thread = thread::spawn(move || {
            while let Ok((
                state_root,
                state,
                head_tracker,
                old_finalized_checkpoint,
                new_finalized_checkpoint,
            )) = rx.recv()
            {
                // Prune abandoned forks first, so the subsequent hot->cold migration only
                // has to deal with canonical data.
                match Self::prune_abandoned_forks(
                    db.clone(),
                    head_tracker,
                    state_root,
                    &state,
                    old_finalized_checkpoint,
                    new_finalized_checkpoint,
                    &log,
                ) {
                    Ok(()) => {}
                    Err(e) => warn!(log, "Block pruning failed: {:?}", e),
                }

                match migrate_database(db.clone(), state_root.into(), &state) {
                    Ok(()) => {}
                    // Unaligned finalized block: migration is simply deferred, not failed.
                    Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
                        debug!(
                            log,
                            "Database migration postponed, unaligned finalized block";
                            "slot" => slot.as_u64()
                        );
                    }
                    Err(e) => {
                        warn!(
                            log,
                            "Database migration failed";
                            "error" => format!("{:?}", e)
                        );
                    }
                };
            }
        });

        (tx, thread)
    }
}
|
||||
494
beacon_node/beacon_chain/src/naive_aggregation_pool.rs
Normal file
494
beacon_node/beacon_chain/src/naive_aggregation_pool.rs
Normal file
@@ -0,0 +1,494 @@
|
||||
use crate::metrics;
|
||||
use std::collections::HashMap;
|
||||
use types::{Attestation, AttestationData, EthSpec, Slot};
|
||||
|
||||
/// The number of slots that will be stored in the pool.
///
/// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all attestations
/// at slots less than `4` will be dropped and any future attestation with a slot less than `4`
/// will be refused.
const SLOTS_RETAINED: usize = 3;

/// The maximum number of distinct `AttestationData` that will be stored in each slot.
///
/// This is a DoS protection measure.
const MAX_ATTESTATIONS_PER_SLOT: usize = 16_384;

/// Returned upon successfully inserting an attestation into the pool.
///
/// In all variants, `committee_index` is the position of the single set bit in the
/// inserted attestation's `aggregation_bits`.
#[derive(Debug, PartialEq)]
pub enum InsertOutcome {
    /// The `attestation.data` had not been seen before and was added to the pool.
    NewAttestationData { committee_index: usize },
    /// A validator signature for the given `attestation.data` was already known. No changes were
    /// made.
    SignatureAlreadyKnown { committee_index: usize },
    /// The `attestation.data` was known, but a signature for the given validator was not yet
    /// known. The signature was aggregated into the pool.
    SignatureAggregated { committee_index: usize },
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
pub enum Error {
    /// The given `attestation.data.slot` was too low to be stored. No changes were made.
    SlotTooLow {
        /// The slot of the rejected attestation.
        slot: Slot,
        /// The lowest slot the pool currently accepts.
        lowest_permissible_slot: Slot,
    },
    /// The given `attestation.aggregation_bits` field was empty.
    NoAggregationBitsSet,
    /// The given `attestation.aggregation_bits` field had more than one signature. The number of
    /// signatures found is included.
    MoreThanOneAggregationBitSet(usize),
    /// We have reached the maximum number of unique `AttestationData` that can be stored in a
    /// slot. This is a DoS protection function.
    ReachedMaxAttestationsPerSlot(usize),
    /// The given `attestation.aggregation_bits` field had a different length to the one currently
    /// stored. This indicates a fairly serious error somewhere in the code that called this
    /// function.
    InconsistentBitfieldLengths,
    /// The given `attestation` was for the incorrect slot. This is an internal error.
    IncorrectSlot { expected: Slot, attestation: Slot },
}
|
||||
|
||||
/// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all
|
||||
/// `attestation` are from the same slot.
|
||||
struct AggregatedAttestationMap<E: EthSpec> {
|
||||
map: HashMap<AttestationData, Attestation<E>>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AggregatedAttestationMap<E> {
|
||||
/// Create an empty collection with the given `initial_capacity`.
|
||||
pub fn new(initial_capacity: usize) -> Self {
|
||||
Self {
|
||||
map: HashMap::with_capacity(initial_capacity),
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert an attestation into `self`, aggregating it into the pool.
|
||||
///
|
||||
/// The given attestation (`a`) must only have one signature.
|
||||
pub fn insert(&mut self, a: &Attestation<E>) -> Result<InsertOutcome, Error> {
|
||||
let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT);
|
||||
|
||||
let set_bits = a
|
||||
.aggregation_bits
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(_i, bit)| *bit)
|
||||
.map(|(i, _bit)| i)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let committee_index = set_bits
|
||||
.first()
|
||||
.copied()
|
||||
.ok_or_else(|| Error::NoAggregationBitsSet)?;
|
||||
|
||||
if set_bits.len() > 1 {
|
||||
return Err(Error::MoreThanOneAggregationBitSet(set_bits.len()));
|
||||
}
|
||||
|
||||
if let Some(existing_attestation) = self.map.get_mut(&a.data) {
|
||||
if existing_attestation
|
||||
.aggregation_bits
|
||||
.get(committee_index)
|
||||
.map_err(|_| Error::InconsistentBitfieldLengths)?
|
||||
{
|
||||
Ok(InsertOutcome::SignatureAlreadyKnown { committee_index })
|
||||
} else {
|
||||
let _timer =
|
||||
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_AGGREGATION);
|
||||
existing_attestation.aggregate(a);
|
||||
Ok(InsertOutcome::SignatureAggregated { committee_index })
|
||||
}
|
||||
} else {
|
||||
if self.map.len() >= MAX_ATTESTATIONS_PER_SLOT {
|
||||
return Err(Error::ReachedMaxAttestationsPerSlot(
|
||||
MAX_ATTESTATIONS_PER_SLOT,
|
||||
));
|
||||
}
|
||||
|
||||
self.map.insert(a.data.clone(), a.clone());
|
||||
Ok(InsertOutcome::NewAttestationData { committee_index })
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an aggregated `Attestation` with the given `data`, if any.
|
||||
///
|
||||
/// The given `a.data.slot` must match the slot that `self` was initialized with.
|
||||
pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> {
|
||||
Ok(self.map.get(data).cloned())
|
||||
}
|
||||
|
||||
/// Iterate all attestations in `self`.
|
||||
pub fn iter(&self) -> impl Iterator<Item = &Attestation<E>> {
|
||||
self.map.iter().map(|(_key, attestation)| attestation)
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.map.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// A pool of `Attestation` that is specially designed to store "unaggregated" attestations from
/// the native aggregation scheme.
///
/// **The `NaiveAggregationPool` does not do any signature or attestation verification. It assumes
/// that all `Attestation` objects provided are valid.**
///
/// ## Details
///
/// The pool sorts the `Attestation` by `attestation.data.slot`, then by `attestation.data`.
///
/// As each unaggregated attestation is added it is aggregated with any existing `attestation` with
/// the same `AttestationData`. Considering that the pool only accepts attestations with a single
/// signature, there should only ever be a single aggregated `Attestation` for any given
/// `AttestationData`.
///
/// The pool has a capacity for `SLOTS_RETAINED` slots, when a new `attestation.data.slot` is
/// provided, the oldest slot is dropped and replaced with the new slot. The pool can also be
/// pruned by supplying a `current_slot`; all existing attestations with a slot lower than
/// `current_slot - SLOTS_RETAINED` will be removed and any future attestation with a slot lower
/// than that will also be refused. Pruning is done automatically based upon the attestations it
/// receives and it can be triggered manually.
pub struct NaiveAggregationPool<E: EthSpec> {
    // Attestations with a slot below this are rejected; advanced by `prune`.
    lowest_permissible_slot: Slot,
    // One aggregation map per retained slot, keyed by the slot.
    maps: HashMap<Slot, AggregatedAttestationMap<E>>,
}

impl<E: EthSpec> Default for NaiveAggregationPool<E> {
    /// An empty pool that accepts any slot (lowest permissible slot of zero).
    fn default() -> Self {
        Self {
            lowest_permissible_slot: Slot::new(0),
            maps: HashMap::new(),
        }
    }
}
|
||||
|
||||
impl<E: EthSpec> NaiveAggregationPool<E> {
|
||||
/// Insert an attestation into `self`, aggregating it into the pool.
|
||||
///
|
||||
/// The given attestation (`a`) must only have one signature and have an
|
||||
/// `attestation.data.slot` that is not lower than `self.lowest_permissible_slot`.
|
||||
///
|
||||
/// The pool may be pruned if the given `attestation.data` has a slot higher than any
|
||||
/// previously seen.
|
||||
pub fn insert(&mut self, attestation: &Attestation<E>) -> Result<InsertOutcome, Error> {
|
||||
let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_INSERT);
|
||||
let slot = attestation.data.slot;
|
||||
let lowest_permissible_slot = self.lowest_permissible_slot;
|
||||
|
||||
// Reject any attestations that are too old.
|
||||
if slot < lowest_permissible_slot {
|
||||
return Err(Error::SlotTooLow {
|
||||
slot,
|
||||
lowest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
let lock_timer =
|
||||
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_MAPS_WRITE_LOCK);
|
||||
drop(lock_timer);
|
||||
|
||||
let outcome = if let Some(map) = self.maps.get_mut(&slot) {
|
||||
map.insert(attestation)
|
||||
} else {
|
||||
let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP);
|
||||
// To avoid re-allocations, try and determine a rough initial capacity for the new item
|
||||
// by obtaining the mean size of all items in earlier epoch.
|
||||
let (count, sum) = self
|
||||
.maps
|
||||
.iter()
|
||||
// Only include epochs that are less than the given slot in the average. This should
|
||||
// generally avoid including recent epochs that are still "filling up".
|
||||
.filter(|(map_slot, _item)| **map_slot < slot)
|
||||
.map(|(_slot, map)| map.len())
|
||||
.fold((0, 0), |(count, sum), len| (count + 1, sum + len));
|
||||
|
||||
// Use the mainnet default committee size if we can't determine an average.
|
||||
let initial_capacity = sum.checked_div(count).unwrap_or(128);
|
||||
|
||||
let mut item = AggregatedAttestationMap::new(initial_capacity);
|
||||
let outcome = item.insert(attestation);
|
||||
self.maps.insert(slot, item);
|
||||
|
||||
outcome
|
||||
};
|
||||
|
||||
self.prune(slot);
|
||||
|
||||
outcome
|
||||
}
|
||||
|
||||
/// Returns an aggregated `Attestation` with the given `data`, if any.
|
||||
pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> {
|
||||
self.maps
|
||||
.iter()
|
||||
.find(|(slot, _map)| **slot == data.slot)
|
||||
.map(|(_slot, map)| map.get(data))
|
||||
.unwrap_or_else(|| Ok(None))
|
||||
}
|
||||
|
||||
/// Iterate all attestations in all slots of `self`.
|
||||
pub fn iter(&self) -> impl Iterator<Item = &Attestation<E>> {
|
||||
self.maps.iter().map(|(_slot, map)| map.iter()).flatten()
|
||||
}
|
||||
|
||||
/// Removes any attestations with a slot lower than `current_slot` and bars any future
|
||||
/// attestations with a slot lower than `current_slot - SLOTS_RETAINED`.
|
||||
pub fn prune(&mut self, current_slot: Slot) {
|
||||
let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_PRUNE);
|
||||
|
||||
// Taking advantage of saturating subtraction on `Slot`.
|
||||
let lowest_permissible_slot = current_slot - Slot::from(SLOTS_RETAINED);
|
||||
|
||||
// No need to prune if the lowest permissible slot has not changed and the queue length is
|
||||
// less than the maximum
|
||||
if self.lowest_permissible_slot == lowest_permissible_slot
|
||||
&& self.maps.len() <= SLOTS_RETAINED
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
self.lowest_permissible_slot = lowest_permissible_slot;
|
||||
|
||||
// Remove any maps that are definitely expired.
|
||||
self.maps
|
||||
.retain(|slot, _map| *slot >= lowest_permissible_slot);
|
||||
|
||||
// If we have too many maps, remove the lowest amount to ensure we only have
|
||||
// `SLOTS_RETAINED` left.
|
||||
if self.maps.len() > SLOTS_RETAINED {
|
||||
let mut slots = self
|
||||
.maps
|
||||
.iter()
|
||||
.map(|(slot, _map)| *slot)
|
||||
.collect::<Vec<_>>();
|
||||
// Sort is generally pretty slow, however `SLOTS_RETAINED` is quite low so it should be
|
||||
// negligible.
|
||||
slots.sort_unstable();
|
||||
slots
|
||||
.into_iter()
|
||||
.take(self.maps.len().saturating_sub(SLOTS_RETAINED))
|
||||
.for_each(|slot| {
|
||||
self.maps.remove(&slot);
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ssz_types::BitList;
    use types::{
        test_utils::{generate_deterministic_keypair, test_random_instance},
        Fork, Hash256,
    };

    type E = types::MainnetEthSpec;

    /// Build a random attestation at `slot` with an empty (all-zero) 4-bit aggregation bitfield.
    fn get_attestation(slot: Slot) -> Attestation<E> {
        let mut a: Attestation<E> = test_random_instance();
        a.data.slot = slot;
        a.aggregation_bits = BitList::with_capacity(4).expect("should create bitlist");
        a
    }

    /// Sign `a` with the deterministic keypair `i`, which also sets aggregation bit `i`.
    fn sign(a: &mut Attestation<E>, i: usize, genesis_validators_root: Hash256) {
        a.sign(
            &generate_deterministic_keypair(i).sk,
            i,
            &Fork::default(),
            genesis_validators_root,
            &E::default_spec(),
        )
        .expect("should sign attestation");
    }

    /// Clear aggregation bit `i` on `a`.
    fn unset_bit(a: &mut Attestation<E>, i: usize) {
        a.aggregation_bits
            .set(i, false)
            .expect("should unset aggregation bit")
    }

    #[test]
    fn single_attestation() {
        let mut a = get_attestation(Slot::new(0));

        let mut pool = NaiveAggregationPool::default();

        // An attestation with zero set bits must be rejected.
        assert_eq!(
            pool.insert(&a),
            Err(Error::NoAggregationBitsSet),
            "should not accept attestation without any signatures"
        );

        sign(&mut a, 0, Hash256::random());

        assert_eq!(
            pool.insert(&a),
            Ok(InsertOutcome::NewAttestationData { committee_index: 0 }),
            "should accept new attestation"
        );
        assert_eq!(
            pool.insert(&a),
            Ok(InsertOutcome::SignatureAlreadyKnown { committee_index: 0 }),
            "should acknowledge duplicate signature"
        );

        let retrieved = pool
            .get(&a.data)
            .expect("should not error while getting attestation")
            .expect("should get an attestation");
        assert_eq!(
            retrieved, a,
            "retrieved attestation should equal the one inserted"
        );

        sign(&mut a, 1, Hash256::random());

        // Two set bits violates the single-signature invariant.
        assert_eq!(
            pool.insert(&a),
            Err(Error::MoreThanOneAggregationBitSet(2)),
            "should not accept attestation with multiple signatures"
        );
    }

    #[test]
    fn multiple_attestations() {
        let mut a_0 = get_attestation(Slot::new(0));
        let mut a_1 = a_0.clone();

        let genesis_validators_root = Hash256::random();
        sign(&mut a_0, 0, genesis_validators_root);
        sign(&mut a_1, 1, genesis_validators_root);

        let mut pool = NaiveAggregationPool::default();

        assert_eq!(
            pool.insert(&a_0),
            Ok(InsertOutcome::NewAttestationData { committee_index: 0 }),
            "should accept a_0"
        );
        assert_eq!(
            pool.insert(&a_1),
            Ok(InsertOutcome::SignatureAggregated { committee_index: 1 }),
            "should accept a_1"
        );

        let retrieved = pool
            .get(&a_0.data)
            .expect("should not error while getting attestation")
            .expect("should get an attestation");

        // Expect the pool's stored attestation to equal the manual aggregation of a_0 and a_1.
        let mut a_01 = a_0.clone();
        a_01.aggregate(&a_1);

        assert_eq!(
            retrieved, a_01,
            "retrieved attestation should be aggregated"
        );

        /*
         * Throw a different attestation data in there and ensure it isn't aggregated
         */

        let mut a_different = a_0.clone();
        let different_root = Hash256::from_low_u64_be(1337);
        unset_bit(&mut a_different, 0);
        sign(&mut a_different, 2, genesis_validators_root);
        assert_ne!(a_different.data.beacon_block_root, different_root);
        a_different.data.beacon_block_root = different_root;

        assert_eq!(
            pool.insert(&a_different),
            Ok(InsertOutcome::NewAttestationData { committee_index: 2 }),
            "should accept a_different"
        );

        assert_eq!(
            pool.get(&a_0.data)
                .expect("should not error while getting attestation")
                .expect("should get an attestation"),
            retrieved,
            "should not have aggregated different attestation data"
        );
    }

    #[test]
    fn auto_pruning() {
        let mut base = get_attestation(Slot::new(0));
        sign(&mut base, 0, Hash256::random());

        let mut pool = NaiveAggregationPool::default();

        // Insert attestations at increasing slots; once more than `SLOTS_RETAINED` slots have
        // been seen, the pool should keep only the newest `SLOTS_RETAINED` maps.
        for i in 0..SLOTS_RETAINED * 2 {
            let slot = Slot::from(i);
            let mut a = base.clone();
            a.data.slot = slot;

            assert_eq!(
                pool.insert(&a),
                Ok(InsertOutcome::NewAttestationData { committee_index: 0 }),
                "should accept new attestation"
            );

            if i < SLOTS_RETAINED {
                let len = i + 1;
                assert_eq!(pool.maps.len(), len, "the pool should have length {}", len);
            } else {
                assert_eq!(
                    pool.maps.len(),
                    SLOTS_RETAINED,
                    "the pool should have length SLOTS_RETAINED"
                );

                let mut pool_slots = pool
                    .maps
                    .iter()
                    .map(|(slot, _map)| *slot)
                    .collect::<Vec<_>>();

                pool_slots.sort_unstable();

                // The retained slots should be exactly the most recent `SLOTS_RETAINED`.
                for (j, pool_slot) in pool_slots.iter().enumerate() {
                    let expected_slot = slot - (SLOTS_RETAINED - 1 - j) as u64;
                    assert_eq!(
                        *pool_slot, expected_slot,
                        "the slot of the map should be {}",
                        expected_slot
                    )
                }
            }
        }
    }

    #[test]
    fn max_attestations() {
        let mut base = get_attestation(Slot::new(0));
        sign(&mut base, 0, Hash256::random());

        let mut pool = NaiveAggregationPool::default();

        // Fill the slot to the DoS limit, then verify the next distinct data is refused.
        for i in 0..=MAX_ATTESTATIONS_PER_SLOT {
            let mut a = base.clone();
            a.data.beacon_block_root = Hash256::from_low_u64_be(i as u64);

            if i < MAX_ATTESTATIONS_PER_SLOT {
                assert_eq!(
                    pool.insert(&a),
                    Ok(InsertOutcome::NewAttestationData { committee_index: 0 }),
                    "should accept attestation below limit"
                );
            } else {
                assert_eq!(
                    pool.insert(&a),
                    Err(Error::ReachedMaxAttestationsPerSlot(
                        MAX_ATTESTATIONS_PER_SLOT
                    )),
                    "should not accept attestation above limit"
                );
            }
        }
    }
}
|
||||
443
beacon_node/beacon_chain/src/observed_attestations.rs
Normal file
443
beacon_node/beacon_chain/src/observed_attestations.rs
Normal file
@@ -0,0 +1,443 @@
|
||||
//! Provides an `ObservedAttestations` struct which allows us to reject aggregated attestations if
|
||||
//! we've already seen the aggregated attestation.
|
||||
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::HashSet;
|
||||
use std::marker::PhantomData;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{Attestation, EthSpec, Hash256, Slot};
|
||||
|
||||
/// As a DoS protection measure, the maximum number of distinct `Attestations` that will be
/// recorded for each slot.
///
/// Currently this is set to ~524k. If we say that each entry is 40 bytes (Hash256 (32 bytes) + an
/// 8 byte hash) then this comes to about 20mb per slot. If we're storing 34 of these slots, then
/// we're at 680mb. This is a lot of memory usage, but probably not a show-stopper for most
/// reasonable hardware.
///
/// Upstream conditions should strongly restrict the amount of attestations that can show up in
/// this pool. The maximum size with respect to upstream restrictions is more likely on the order
/// of the number of validators.
const MAX_OBSERVATIONS_PER_SLOT: usize = 1 << 19; // 524,288

/// The result of observing an attestation.
#[derive(Debug, PartialEq)]
pub enum ObserveOutcome {
    /// This attestation was already known.
    AlreadyKnown,
    /// This was the first time this attestation was observed.
    New,
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
pub enum Error {
    /// The attestation's slot is below the lowest slot still tracked; it cannot be observed.
    SlotTooLow {
        slot: Slot,
        lowest_permissible_slot: Slot,
    },
    /// The function to obtain a set index failed, this is an internal error.
    InvalidSetIndex(usize),
    /// We have reached the maximum number of unique `Attestation` that can be observed in a slot.
    /// This is a DoS protection function.
    ReachedMaxObservationsPerSlot(usize),
    /// The attestation's slot did not match the slot of the set it was checked against.
    /// This is an internal error.
    IncorrectSlot {
        expected: Slot,
        attestation: Slot,
    },
}
|
||||
|
||||
/// A `HashSet` that contains entries related to some `Slot`.
struct SlotHashSet {
    // Roots of attestations observed at `slot`.
    set: HashSet<Hash256>,
    // The single slot this set records observations for.
    slot: Slot,
}

impl SlotHashSet {
    pub fn new(slot: Slot, initial_capacity: usize) -> Self {
        Self {
            slot,
            set: HashSet::with_capacity(initial_capacity),
        }
    }

    /// Store the attestation in self so future observations recognise its existence.
    ///
    /// `root` must be the root of `a`; errors if `a.data.slot` does not match this set's slot.
    pub fn observe_attestation<E: EthSpec>(
        &mut self,
        a: &Attestation<E>,
        root: Hash256,
    ) -> Result<ObserveOutcome, Error> {
        if a.data.slot != self.slot {
            return Err(Error::IncorrectSlot {
                expected: self.slot,
                attestation: a.data.slot,
            });
        }

        if self.set.contains(&root) {
            Ok(ObserveOutcome::AlreadyKnown)
        } else {
            // Here we check to see if this slot has reached the maximum observation count.
            //
            // The resulting behaviour is that we are no longer able to successfully observe new
            // attestations, however we will continue to return `is_known` values. We could also
            // disable `is_known`, however then we would stop forwarding attestations across the
            // gossip network and I think that this is a worse case than sending some invalid ones.
            // The underlying libp2p network is responsible for removing duplicate messages, so
            // this doesn't risk a broadcast loop.
            if self.set.len() >= MAX_OBSERVATIONS_PER_SLOT {
                return Err(Error::ReachedMaxObservationsPerSlot(
                    MAX_OBSERVATIONS_PER_SLOT,
                ));
            }

            self.set.insert(root);

            Ok(ObserveOutcome::New)
        }
    }

    /// Indicates if `a` has been observed before.
    ///
    /// Errors if `a.data.slot` does not match this set's slot.
    pub fn is_known<E: EthSpec>(&self, a: &Attestation<E>, root: Hash256) -> Result<bool, Error> {
        if a.data.slot != self.slot {
            return Err(Error::IncorrectSlot {
                expected: self.slot,
                attestation: a.data.slot,
            });
        }

        Ok(self.set.contains(&root))
    }

    /// The number of observed attestations in `self`.
    pub fn len(&self) -> usize {
        self.set.len()
    }
}
|
||||
|
||||
/// Stores the roots of `Attestation` objects for some number of `Slots`, so we can determine if
/// these have previously been seen on the network.
pub struct ObservedAttestations<E: EthSpec> {
    // Attestations with a slot below this are rejected; advanced by `prune`.
    lowest_permissible_slot: RwLock<Slot>,
    // One hash-set per tracked slot; the slot lives inside each `SlotHashSet`.
    sets: RwLock<Vec<SlotHashSet>>,
    // Marker only: ties the attestation's `EthSpec` parameter to this type without storing one.
    _phantom: PhantomData<E>,
}

impl<E: EthSpec> Default for ObservedAttestations<E> {
    /// An empty store that accepts any slot (lowest permissible slot of zero).
    fn default() -> Self {
        Self {
            lowest_permissible_slot: RwLock::new(Slot::new(0)),
            sets: RwLock::new(vec![]),
            _phantom: PhantomData,
        }
    }
}
|
||||
|
||||
impl<E: EthSpec> ObservedAttestations<E> {
|
||||
    /// Store the root of `a` in `self`.
    ///
    /// `root` must equal `a.tree_hash_root()`; if `root_opt` is `None` the root is computed here.
    pub fn observe_attestation(
        &self,
        a: &Attestation<E>,
        root_opt: Option<Hash256>,
    ) -> Result<ObserveOutcome, Error> {
        let index = self.get_set_index(a.data.slot)?;
        let root = root_opt.unwrap_or_else(|| a.tree_hash_root());

        self.sets
            .write()
            .get_mut(index)
            // `get_set_index` just returned this index, so a miss here is an internal error.
            .ok_or_else(|| Error::InvalidSetIndex(index))
            .and_then(|set| set.observe_attestation(a, root))
    }
|
||||
|
||||
    /// Check to see if the `root` of `a` is in self.
    ///
    /// `root` must equal `a.tree_hash_root()`.
    pub fn is_known(&self, a: &Attestation<E>, root: Hash256) -> Result<bool, Error> {
        let index = self.get_set_index(a.data.slot)?;

        self.sets
            .read()
            .get(index)
            // `get_set_index` just returned this index, so a miss here is an internal error.
            .ok_or_else(|| Error::InvalidSetIndex(index))
            .and_then(|set| set.is_known(a, root))
    }
|
||||
|
||||
    /// The maximum number of slots that attestations are stored for.
    fn max_capacity(&self) -> u64 {
        // We add `2` in order to account for one slot either side of the range due to
        // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
        E::slots_per_epoch() + 2
    }
|
||||
|
||||
/// Removes any attestations with a slot lower than `current_slot` and bars any future
|
||||
/// attestations with a slot lower than `current_slot - SLOTS_RETAINED`.
|
||||
pub fn prune(&self, current_slot: Slot) {
|
||||
// Taking advantage of saturating subtraction on `Slot`.
|
||||
let lowest_permissible_slot = current_slot - (self.max_capacity() - 1);
|
||||
|
||||
self.sets
|
||||
.write()
|
||||
.retain(|set| set.slot >= lowest_permissible_slot);
|
||||
|
||||
*self.lowest_permissible_slot.write() = lowest_permissible_slot;
|
||||
}
|
||||
|
||||
/// Returns the index of `self.set` that matches `slot`.
|
||||
///
|
||||
/// If there is no existing set for this slot one will be created. If `self.sets.len() >=
|
||||
/// Self::max_capacity()`, the set with the lowest slot will be replaced.
|
||||
fn get_set_index(&self, slot: Slot) -> Result<usize, Error> {
|
||||
let lowest_permissible_slot: Slot = *self.lowest_permissible_slot.read();
|
||||
|
||||
if slot < lowest_permissible_slot {
|
||||
return Err(Error::SlotTooLow {
|
||||
slot,
|
||||
lowest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
// Prune the pool if this attestation indicates that the current slot has advanced.
|
||||
if lowest_permissible_slot + self.max_capacity() < slot + 1 {
|
||||
self.prune(slot)
|
||||
}
|
||||
|
||||
let mut sets = self.sets.write();
|
||||
|
||||
if let Some(index) = sets.iter().position(|set| set.slot == slot) {
|
||||
return Ok(index);
|
||||
}
|
||||
|
||||
// To avoid re-allocations, try and determine a rough initial capacity for the new set
|
||||
// by obtaining the mean size of all items in earlier epoch.
|
||||
let (count, sum) = sets
|
||||
.iter()
|
||||
// Only include slots that are less than the given slot in the average. This should
|
||||
// generally avoid including recent slots that are still "filling up".
|
||||
.filter(|set| set.slot < slot)
|
||||
.map(|set| set.len())
|
||||
.fold((0, 0), |(count, sum), len| (count + 1, sum + len));
|
||||
// If we are unable to determine an average, just use 128 as it's the target committee
|
||||
// size for the mainnet spec. This is perhaps a little wasteful for the minimal spec,
|
||||
// but considering it's approx. 128 * 32 bytes we're not wasting much.
|
||||
let initial_capacity = sum.checked_div(count).unwrap_or(128);
|
||||
|
||||
if sets.len() < self.max_capacity() as usize || sets.is_empty() {
|
||||
let index = sets.len();
|
||||
sets.push(SlotHashSet::new(slot, initial_capacity));
|
||||
return Ok(index);
|
||||
}
|
||||
|
||||
let index = sets
|
||||
.iter()
|
||||
.enumerate()
|
||||
.min_by_key(|(_i, set)| set.slot)
|
||||
.map(|(i, _set)| i)
|
||||
.expect("sets cannot be empty due to previous .is_empty() check");
|
||||
|
||||
sets[index] = SlotHashSet::new(slot, initial_capacity);
|
||||
|
||||
Ok(index)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[cfg(not(debug_assertions))]
mod tests {
    use super::*;
    use tree_hash::TreeHash;
    use types::{test_utils::test_random_instance, Hash256};

    type E = types::MainnetEthSpec;

    // Number of distinct attestations inserted per slot in these tests.
    const NUM_ELEMENTS: usize = 8;

    /// Returns a random attestation re-targeted at `slot` with a distinct block root.
    fn get_attestation(slot: Slot, beacon_block_root: u64) -> Attestation<E> {
        let mut a: Attestation<E> = test_random_instance();
        a.data.slot = slot;
        a.data.beacon_block_root = Hash256::from_low_u64_be(beacon_block_root);
        a
    }

    /// Inserts `NUM_ELEMENTS` attestations for `slot`, checking the unknown -> known
    /// transition for each.
    fn single_slot_test(store: &ObservedAttestations<E>, slot: Slot) {
        let attestations = (0..NUM_ELEMENTS as u64)
            .map(|i| get_attestation(slot, i))
            .collect::<Vec<_>>();

        for a in &attestations {
            assert_eq!(
                store.is_known(a, a.tree_hash_root()),
                Ok(false),
                "should indicate an unknown attestation is unknown"
            );
            assert_eq!(
                store.observe_attestation(a, None),
                Ok(ObserveOutcome::New),
                "should observe new attestation"
            );
        }

        for a in &attestations {
            assert_eq!(
                store.is_known(a, a.tree_hash_root()),
                Ok(true),
                "should indicate a known attestation is known"
            );
            assert_eq!(
                store.observe_attestation(a, Some(a.tree_hash_root())),
                Ok(ObserveOutcome::AlreadyKnown),
                "should acknowledge an existing attestation"
            );
        }
    }

    #[test]
    fn single_slot() {
        let store = ObservedAttestations::default();

        single_slot_test(&store, Slot::new(0));

        assert_eq!(
            store.sets.read().len(),
            1,
            "should have a single set stored"
        );
        assert_eq!(
            store.sets.read()[0].len(),
            NUM_ELEMENTS,
            "set should have NUM_ELEMENTS elements"
        );
    }

    // NOTE: "mulitple" is a typo for "multiple"; the name is retained here since a
    // documentation pass must not rename identifiers.
    #[test]
    fn mulitple_contiguous_slots() {
        let store = ObservedAttestations::default();
        let max_cap = store.max_capacity();

        for i in 0..max_cap * 3 {
            let slot = Slot::new(i);

            single_slot_test(&store, slot);

            /*
             * Ensure that the number of sets is correct.
             */

            if i < max_cap {
                assert_eq!(
                    store.sets.read().len(),
                    i as usize + 1,
                    "should have a {} sets stored",
                    i + 1
                );
            } else {
                assert_eq!(
                    store.sets.read().len(),
                    max_cap as usize,
                    "should have max_capacity sets stored"
                );
            }

            /*
             * Ensure that each set contains the correct number of elements.
             */

            for set in &store.sets.read()[..] {
                assert_eq!(
                    set.len(),
                    NUM_ELEMENTS,
                    "each store should have NUM_ELEMENTS elements"
                )
            }

            /*
             * Ensure that all the sets have the expected slots
             */

            let mut store_slots = store
                .sets
                .read()
                .iter()
                .map(|set| set.slot)
                .collect::<Vec<_>>();

            assert!(
                store_slots.len() <= store.max_capacity() as usize,
                "store size should not exceed max"
            );

            store_slots.sort_unstable();

            // The retained window is the `max_cap` most recent slots ending at `i`.
            let expected_slots = (i.saturating_sub(max_cap - 1)..=i)
                .map(Slot::new)
                .collect::<Vec<_>>();

            assert_eq!(expected_slots, store_slots, "should have expected slots");
        }
    }

    #[test]
    fn mulitple_non_contiguous_slots() {
        let store = ObservedAttestations::default();
        let max_cap = store.max_capacity();

        let to_skip = vec![1_u64, 2, 3, 5, 6, 29, 30, 31, 32, 64];
        let slots = (0..max_cap * 3)
            .into_iter()
            .filter(|i| !to_skip.contains(i))
            .collect::<Vec<_>>();

        for &i in &slots {
            if to_skip.contains(&i) {
                continue;
            }

            let slot = Slot::from(i);

            single_slot_test(&store, slot);

            /*
             * Ensure that each set contains the correct number of elements.
             */

            for set in &store.sets.read()[..] {
                assert_eq!(
                    set.len(),
                    NUM_ELEMENTS,
                    "each store should have NUM_ELEMENTS elements"
                )
            }

            /*
             * Ensure that all the sets have the expected slots
             */

            let mut store_slots = store
                .sets
                .read()
                .iter()
                .map(|set| set.slot)
                .collect::<Vec<_>>();

            store_slots.sort_unstable();

            assert!(
                store_slots.len() <= store.max_capacity() as usize,
                "store size should not exceed max"
            );

            // Expected contents: all non-skipped slots in the retained window.
            let lowest = store.lowest_permissible_slot.read().as_u64();
            let highest = slot.as_u64();
            let expected_slots = (lowest..=highest)
                .filter(|i| !to_skip.contains(i))
                .map(Slot::new)
                .collect::<Vec<_>>();

            assert_eq!(
                expected_slots,
                &store_slots[..],
                "should have expected slots"
            );
        }
    }
}
|
||||
461
beacon_node/beacon_chain/src/observed_attesters.rs
Normal file
461
beacon_node/beacon_chain/src/observed_attesters.rs
Normal file
@@ -0,0 +1,461 @@
|
||||
//! Provides two structs that help us filter out attestation gossip from validators that have
|
||||
//! already published attestations:
|
||||
//!
|
||||
//! - `ObservedAttesters`: allows filtering unaggregated attestations from the same validator in
|
||||
//! the same epoch.
|
||||
//! - `ObservedAggregators`: allows filtering aggregated attestations from the same aggregators in
|
||||
//! the same epoch
|
||||
|
||||
use bitvec::vec::BitVec;
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::marker::PhantomData;
|
||||
use types::{Attestation, Epoch, EthSpec, Unsigned};
|
||||
|
||||
pub type ObservedAttesters<E> = AutoPruningContainer<EpochBitfield, E>;
|
||||
pub type ObservedAggregators<E> = AutoPruningContainer<EpochHashSet, E>;
|
||||
|
||||
#[derive(Debug, PartialEq)]
pub enum Error {
    /// The attestation's target epoch is earlier than the lowest epoch still tracked; it has
    /// already been pruned away.
    EpochTooLow {
        epoch: Epoch,
        lowest_permissible_epoch: Epoch,
    },
    /// We have reached the maximum number of unique `Attestation` that can be observed in a slot.
    /// This is a DoS protection function.
    ReachedMaxObservationsPerSlot(usize),
    /// The supplied validator index exceeds `E::ValidatorRegistryLimit`; this is an internal
    /// error since indices should be validated before reaching this cache.
    ValidatorIndexTooHigh(usize),
}
|
||||
|
||||
/// Implemented on an item in an `AutoPruningContainer`.
pub trait Item {
    /// Instantiate `Self` with the given `capacity`.
    fn with_capacity(capacity: usize) -> Self;

    /// The default capacity for self. Used when we can't guess a reasonable size.
    fn default_capacity() -> usize;

    /// Returns the allocated size of `self`, measured by validator indices.
    fn len(&self) -> usize;

    /// Returns the number of validators that have been observed by `self`.
    fn validator_count(&self) -> usize;

    /// Store `validator_index` in `self`.
    ///
    /// Returns `true` if `validator_index` had already been stored.
    fn insert(&mut self, validator_index: usize) -> bool;

    /// Returns `true` if `validator_index` has been stored in `self`.
    fn contains(&self, validator_index: usize) -> bool;
}
|
||||
|
||||
/// Stores a `BitVec` that represents which validator indices have attested during an epoch.
pub struct EpochBitfield {
    // Bit `i` is set once validator `i` has been observed. The vector grows lazily as higher
    // indices are inserted.
    bitfield: BitVec,
}

impl Item for EpochBitfield {
    fn with_capacity(capacity: usize) -> Self {
        Self {
            bitfield: BitVec::with_capacity(capacity),
        }
    }

    /// Uses a default size that equals the number of genesis validators.
    fn default_capacity() -> usize {
        16_384
    }

    fn len(&self) -> usize {
        self.bitfield.len()
    }

    fn validator_count(&self) -> usize {
        // Count the set bits; each corresponds to one observed validator.
        self.bitfield.iter().filter(|bit| **bit).count()
    }

    fn insert(&mut self, validator_index: usize) -> bool {
        self.bitfield
            .get_mut(validator_index)
            // Index already within the allocated bitfield: read-modify-write, returning
            // whether the bit was already set.
            .map(|mut bit| {
                if *bit {
                    true
                } else {
                    *bit = true;
                    false
                }
            })
            // Index beyond the current length: grow the bitfield (new bits default to
            // `false`) then set the bit. A fresh bit can never have been observed before.
            .unwrap_or_else(|| {
                self.bitfield
                    .resize(validator_index.saturating_add(1), false);
                if let Some(mut bit) = self.bitfield.get_mut(validator_index) {
                    *bit = true;
                }
                false
            })
    }

    fn contains(&self, validator_index: usize) -> bool {
        self.bitfield.get(validator_index).map_or(false, |bit| *bit)
    }
}
|
||||
|
||||
/// Stores a `HashSet` of which validator indices have created an aggregate attestation during an
|
||||
/// epoch.
|
||||
pub struct EpochHashSet {
|
||||
set: HashSet<usize>,
|
||||
}
|
||||
|
||||
impl Item for EpochHashSet {
|
||||
fn with_capacity(capacity: usize) -> Self {
|
||||
Self {
|
||||
set: HashSet::with_capacity(capacity),
|
||||
}
|
||||
}
|
||||
|
||||
/// Defaults to the target number of aggregators per committee (16) multiplied by the expected
|
||||
/// max committee count (64).
|
||||
fn default_capacity() -> usize {
|
||||
16 * 64
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.set.len()
|
||||
}
|
||||
|
||||
fn validator_count(&self) -> usize {
|
||||
self.set.len()
|
||||
}
|
||||
|
||||
/// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was
|
||||
/// already in the set.
|
||||
fn insert(&mut self, validator_index: usize) -> bool {
|
||||
!self.set.insert(validator_index)
|
||||
}
|
||||
|
||||
/// Returns `true` if the `validator_index` is in the set.
|
||||
fn contains(&self, validator_index: usize) -> bool {
|
||||
self.set.contains(&validator_index)
|
||||
}
|
||||
}
|
||||
|
||||
/// A container that stores some number of `T` items.
///
/// This container is "auto-pruning" since it gets an idea of the current slot by which
/// attestations are provided to it and prunes old entries based upon that. For example, if
/// `Self::max_capacity == 32` and an attestation with `a.data.target.epoch` is supplied, then all
/// attestations with an epoch prior to `a.data.target.epoch - 32` will be cleared from the cache.
///
/// `T` should be set to a `EpochBitfield` or `EpochHashSet`.
pub struct AutoPruningContainer<T, E: EthSpec> {
    // Epochs earlier than this are rejected; raised each time `prune` runs.
    lowest_permissible_epoch: RwLock<Epoch>,
    // One `T` per retained epoch.
    items: RwLock<HashMap<Epoch, T>>,
    // Ties the container to a specific `EthSpec` without storing one.
    _phantom: PhantomData<E>,
}
|
||||
|
||||
impl<T, E: EthSpec> Default for AutoPruningContainer<T, E> {
    /// Instantiates an empty container that accepts observations from epoch 0 onwards.
    fn default() -> Self {
        Self {
            lowest_permissible_epoch: RwLock::new(Epoch::new(0)),
            items: RwLock::new(HashMap::new()),
            _phantom: PhantomData,
        }
    }
}
|
||||
|
||||
impl<T: Item, E: EthSpec> AutoPruningContainer<T, E> {
    /// Observe that `validator_index` has produced attestation `a`. Returns `Ok(true)` if `a` has
    /// previously been observed for `validator_index`.
    ///
    /// ## Errors
    ///
    /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`.
    /// - `a.data.target.epoch` is earlier than `self.lowest_permissible_epoch`.
    pub fn observe_validator(
        &self,
        a: &Attestation<E>,
        validator_index: usize,
    ) -> Result<bool, Error> {
        self.sanitize_request(a, validator_index)?;

        let epoch = a.data.target.epoch;

        // Advance the pruning window before taking the write lock on `items`.
        self.prune(epoch);

        let mut items = self.items.write();

        if let Some(item) = items.get_mut(&epoch) {
            Ok(item.insert(validator_index))
        } else {
            // To avoid re-allocations, try and determine a rough initial capacity for the new item
            // by obtaining the mean size of all items in earlier epoch.
            let (count, sum) = items
                .iter()
                // Only include epochs that are less than the given slot in the average. This should
                // generally avoid including recent epochs that are still "filling up".
                .filter(|(item_epoch, _item)| **item_epoch < epoch)
                .map(|(_epoch, item)| item.len())
                .fold((0, 0), |(count, sum), len| (count + 1, sum + len));

            // `checked_div` returns `None` when `count == 0`; fall back to the item's default.
            let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity);

            let mut item = T::with_capacity(initial_capacity);
            item.insert(validator_index);
            items.insert(epoch, item);

            Ok(false)
        }
    }

    /// Returns `Ok(true)` if the `validator_index` has produced an attestation conflicting with
    /// `a`.
    ///
    /// ## Errors
    ///
    /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`.
    /// - `a.data.target.epoch` is earlier than `self.lowest_permissible_epoch`.
    pub fn validator_has_been_observed(
        &self,
        a: &Attestation<E>,
        validator_index: usize,
    ) -> Result<bool, Error> {
        self.sanitize_request(a, validator_index)?;

        // A missing epoch entry means nothing has been observed for that epoch.
        let exists = self
            .items
            .read()
            .get(&a.data.target.epoch)
            .map_or(false, |item| item.contains(validator_index));

        Ok(exists)
    }

    /// Returns the number of validators that have been observed at the given `epoch`. Returns
    /// `None` if `self` does not have a cache for that epoch.
    pub fn observed_validator_count(&self, epoch: Epoch) -> Option<usize> {
        self.items
            .read()
            .get(&epoch)
            .map(|item| item.validator_count())
    }

    /// Rejects requests with an out-of-range validator index or an already-pruned target epoch.
    fn sanitize_request(&self, a: &Attestation<E>, validator_index: usize) -> Result<(), Error> {
        if validator_index > E::ValidatorRegistryLimit::to_usize() {
            return Err(Error::ValidatorIndexTooHigh(validator_index));
        }

        let epoch = a.data.target.epoch;
        let lowest_permissible_epoch: Epoch = *self.lowest_permissible_epoch.read();
        if epoch < lowest_permissible_epoch {
            return Err(Error::EpochTooLow {
                epoch,
                lowest_permissible_epoch,
            });
        }

        Ok(())
    }

    /// The maximum number of epochs stored in `self`.
    fn max_capacity(&self) -> u64 {
        // The current epoch and the previous epoch. This is sufficient whilst
        // GOSSIP_CLOCK_DISPARITY is 1/2 a slot or less:
        //
        // https://github.com/ethereum/eth2.0-specs/pull/1706#issuecomment-610151808
        2
    }

    /// Updates `self` with the current epoch, removing all attestations that become expired
    /// relative to `Self::max_capacity`.
    ///
    /// Also sets `self.lowest_permissible_epoch` with relation to `current_epoch` and
    /// `Self::max_capacity`.
    pub fn prune(&self, current_epoch: Epoch) {
        // Taking advantage of saturating subtraction on `Epoch`.
        let lowest_permissible_epoch = current_epoch - (self.max_capacity().saturating_sub(1));

        *self.lowest_permissible_epoch.write() = lowest_permissible_epoch;

        self.items
            .write()
            .retain(|epoch, _item| *epoch >= lowest_permissible_epoch);
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Generates an identical test suite for each concrete container type, since
    // `ObservedAttesters` and `ObservedAggregators` share the same public API.
    //
    // NOTE: "mulitple" in the test names below is a typo for "multiple"; retained since a
    // documentation pass must not rename identifiers.
    macro_rules! test_suite {
        ($mod_name: ident, $type: ident) => {
            #[cfg(test)]
            mod $mod_name {
                use super::*;
                use types::test_utils::test_random_instance;

                type E = types::MainnetEthSpec;

                /// Returns a random attestation re-targeted at `epoch`.
                fn get_attestation(epoch: Epoch) -> Attestation<E> {
                    let mut a: Attestation<E> = test_random_instance();
                    a.data.target.epoch = epoch;
                    a
                }

                /// Observes a fixed collection of validator indices at `epoch`, checking the
                /// unobserved -> observed transition for each.
                fn single_epoch_test(store: &$type<E>, epoch: Epoch) {
                    let attesters = [0, 1, 2, 3, 5, 6, 7, 18, 22];
                    let a = &get_attestation(epoch);

                    for &i in &attesters {
                        assert_eq!(
                            store.validator_has_been_observed(a, i),
                            Ok(false),
                            "should indicate an unknown attestation is unknown"
                        );
                        assert_eq!(
                            store.observe_validator(a, i),
                            Ok(false),
                            "should observe new attestation"
                        );
                    }

                    for &i in &attesters {
                        assert_eq!(
                            store.validator_has_been_observed(a, i),
                            Ok(true),
                            "should indicate a known attestation is known"
                        );
                        assert_eq!(
                            store.observe_validator(a, i),
                            Ok(true),
                            "should acknowledge an existing attestation"
                        );
                    }
                }

                #[test]
                fn single_epoch() {
                    let store = $type::default();

                    single_epoch_test(&store, Epoch::new(0));

                    assert_eq!(
                        store.items.read().len(),
                        1,
                        "should have a single bitfield stored"
                    );
                }

                #[test]
                fn mulitple_contiguous_epochs() {
                    let store = $type::default();
                    let max_cap = store.max_capacity();

                    for i in 0..max_cap * 3 {
                        let epoch = Epoch::new(i);

                        single_epoch_test(&store, epoch);

                        /*
                         * Ensure that the number of sets is correct.
                         */

                        if i < max_cap {
                            assert_eq!(
                                store.items.read().len(),
                                i as usize + 1,
                                "should have a {} items stored",
                                i + 1
                            );
                        } else {
                            assert_eq!(
                                store.items.read().len(),
                                max_cap as usize,
                                "should have max_capacity items stored"
                            );
                        }

                        /*
                         * Ensure that all the sets have the expected slots
                         */

                        let mut store_epochs = store
                            .items
                            .read()
                            .iter()
                            .map(|(epoch, _set)| *epoch)
                            .collect::<Vec<_>>();

                        assert!(
                            store_epochs.len() <= store.max_capacity() as usize,
                            "store size should not exceed max"
                        );

                        store_epochs.sort_unstable();

                        // The retained window is the `max_cap` most recent epochs ending at `i`.
                        let expected_epochs = (i.saturating_sub(max_cap - 1)..=i)
                            .map(Epoch::new)
                            .collect::<Vec<_>>();

                        assert_eq!(expected_epochs, store_epochs, "should have expected slots");
                    }
                }

                #[test]
                fn mulitple_non_contiguous_epochs() {
                    let store = $type::default();
                    let max_cap = store.max_capacity();

                    let to_skip = vec![1_u64, 3, 4, 5];
                    let epochs = (0..max_cap * 3)
                        .into_iter()
                        .filter(|i| !to_skip.contains(i))
                        .collect::<Vec<_>>();

                    for &i in &epochs {
                        if to_skip.contains(&i) {
                            continue;
                        }

                        let epoch = Epoch::from(i);

                        single_epoch_test(&store, epoch);

                        /*
                         * Ensure that all the sets have the expected slots
                         */

                        let mut store_epochs = store
                            .items
                            .read()
                            .iter()
                            .map(|(epoch, _)| *epoch)
                            .collect::<Vec<_>>();

                        store_epochs.sort_unstable();

                        assert!(
                            store_epochs.len() <= store.max_capacity() as usize,
                            "store size should not exceed max"
                        );

                        // Expected contents: all non-skipped epochs in the retained window.
                        let lowest = store.lowest_permissible_epoch.read().as_u64();
                        let highest = epoch.as_u64();
                        let expected_epochs = (lowest..=highest)
                            .filter(|i| !to_skip.contains(i))
                            .map(Epoch::new)
                            .collect::<Vec<_>>();

                        assert_eq!(
                            expected_epochs,
                            &store_epochs[..],
                            "should have expected epochs"
                        );
                    }
                }
            }
        };
    }

    test_suite!(observed_attesters, ObservedAttesters);
    test_suite!(observed_aggregators, ObservedAggregators);
}
|
||||
428
beacon_node/beacon_chain/src/observed_block_producers.rs
Normal file
428
beacon_node/beacon_chain/src/observed_block_producers.rs
Normal file
@@ -0,0 +1,428 @@
|
||||
//! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from
|
||||
//! validators that have already produced a block.
|
||||
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::marker::PhantomData;
|
||||
use types::{BeaconBlock, EthSpec, Slot, Unsigned};
|
||||
|
||||
#[derive(Debug, PartialEq)]
pub enum Error {
    /// The slot of the provided block is prior to finalization and should not have been provided
    /// to this function. This is an internal error.
    FinalizedBlock { slot: Slot, finalized_slot: Slot },
    /// The block's proposer index exceeds `E::ValidatorRegistryLimit`; this is an internal
    /// error since blocks should be validated before reaching this cache.
    ValidatorIndexTooHigh(u64),
}
|
||||
|
||||
/// Maintains a cache of observed `(block.slot, block.proposer)`.
///
/// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you
/// must call `Self::prune` manually.
///
/// The maximum size of the cache is determined by `slots_since_finality *
/// VALIDATOR_REGISTRY_LIMIT`. This is quite a large size, so it's important that upstream
/// functions only use this cache for blocks with a valid signature. Only allowing valid signed
/// blocks reduces the theoretical maximum size of this cache to `slots_since_finality *
/// active_validator_count`, however in reality that is more like `slots_since_finality *
/// known_distinct_shufflings` which is much smaller.
pub struct ObservedBlockProducers<E: EthSpec> {
    // Blocks at or below this slot are rejected; raised by `prune`.
    finalized_slot: RwLock<Slot>,
    // Per-slot set of proposer indices that have already produced a block.
    items: RwLock<HashMap<Slot, HashSet<u64>>>,
    // Ties the cache to a specific `EthSpec` without storing one.
    _phantom: PhantomData<E>,
}
|
||||
|
||||
impl<E: EthSpec> Default for ObservedBlockProducers<E> {
|
||||
/// Instantiates `Self` with `finalized_slot == 0`.
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
finalized_slot: RwLock::new(Slot::new(0)),
|
||||
items: RwLock::new(HashMap::new()),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ObservedBlockProducers<E> {
    /// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will
    /// update `self` so future calls to it indicate that this block is known.
    ///
    /// The supplied `block` **MUST** be signature verified (see struct-level documentation).
    ///
    /// ## Errors
    ///
    /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
    /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
    pub fn observe_proposer(&self, block: &BeaconBlock<E>) -> Result<bool, Error> {
        self.sanitize_block(block)?;

        // `HashSet::insert` returns `true` when the value was not already present.
        let did_not_exist = self
            .items
            .write()
            .entry(block.slot)
            .or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize()))
            .insert(block.proposer_index);

        Ok(!did_not_exist)
    }

    /// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not
    /// update the cache, so calling this function multiple times will continue to return
    /// `Ok(false)`, until `Self::observe_proposer` is called.
    ///
    /// ## Errors
    ///
    /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
    /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
    pub fn proposer_has_been_observed(&self, block: &BeaconBlock<E>) -> Result<bool, Error> {
        self.sanitize_block(block)?;

        // A missing slot entry means no proposer has been observed for that slot.
        let exists = self
            .items
            .read()
            .get(&block.slot)
            .map_or(false, |set| set.contains(&block.proposer_index));

        Ok(exists)
    }

    /// Returns `Ok(())` if the given `block` is sane.
    fn sanitize_block(&self, block: &BeaconBlock<E>) -> Result<(), Error> {
        if block.proposer_index > E::ValidatorRegistryLimit::to_u64() {
            return Err(Error::ValidatorIndexTooHigh(block.proposer_index));
        }

        // The `finalized_slot > 0` guard mirrors the `prune` no-op at genesis: before any
        // pruning has happened, blocks at any slot (including 0) are acceptable.
        let finalized_slot = *self.finalized_slot.read();
        if finalized_slot > 0 && block.slot <= finalized_slot {
            return Err(Error::FinalizedBlock {
                slot: block.slot,
                finalized_slot,
            });
        }

        Ok(())
    }

    /// Removes all observations of blocks equal to or earlier than `finalized_slot`.
    ///
    /// Stores `finalized_slot` in `self`, so that `self` will reject any block that has a slot
    /// equal to or less than `finalized_slot`.
    ///
    /// No-op if `finalized_slot == 0`.
    pub fn prune(&self, finalized_slot: Slot) {
        if finalized_slot == 0 {
            return;
        }

        *self.finalized_slot.write() = finalized_slot;
        self.items
            .write()
            .retain(|slot, _set| *slot > finalized_slot);
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use types::MainnetEthSpec;
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
|
||||
fn get_block(slot: u64, proposer: u64) -> BeaconBlock<E> {
|
||||
let mut block = BeaconBlock::empty(&E::default_spec());
|
||||
block.slot = slot.into();
|
||||
block.proposer_index = proposer;
|
||||
block
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pruning() {
|
||||
let cache = ObservedBlockProducers::default();
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.read().len(), 0, "no slots should be present");
|
||||
|
||||
// Slot 0, proposer 0
|
||||
let block_a = &get_block(0, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_a),
|
||||
Ok(false),
|
||||
"can observe proposer, indicates proposer unobserved"
|
||||
);
|
||||
|
||||
/*
|
||||
* Preconditions.
|
||||
*/
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present"
|
||||
);
|
||||
|
||||
/*
|
||||
* Check that a prune at the genesis slot does nothing.
|
||||
*/
|
||||
|
||||
cache.prune(Slot::new(0));
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present"
|
||||
);
|
||||
|
||||
/*
|
||||
* Check that a prune empties the cache
|
||||
*/
|
||||
|
||||
cache.prune(E::slots_per_epoch().into());
|
||||
assert_eq!(
|
||||
*cache.finalized_slot.read(),
|
||||
Slot::from(E::slots_per_epoch()),
|
||||
"finalized slot is updated"
|
||||
);
|
||||
assert_eq!(cache.items.read().len(), 0, "no items left");
|
||||
|
||||
/*
|
||||
* Check that we can't insert a finalized block
|
||||
*/
|
||||
|
||||
// First slot of finalized epoch, proposer 0
|
||||
let block_b = &get_block(E::slots_per_epoch(), 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_b),
|
||||
Err(Error::FinalizedBlock {
|
||||
slot: E::slots_per_epoch().into(),
|
||||
finalized_slot: E::slots_per_epoch().into(),
|
||||
}),
|
||||
"cant insert finalized block"
|
||||
);
|
||||
|
||||
assert_eq!(cache.items.read().len(), 0, "block was not added");
|
||||
|
||||
/*
|
||||
* Check that we _can_ insert a non-finalized block
|
||||
*/
|
||||
|
||||
let three_epochs = E::slots_per_epoch() * 3;
|
||||
|
||||
// First slot of finalized epoch, proposer 0
|
||||
let block_b = &get_block(three_epochs, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_b),
|
||||
Ok(false),
|
||||
"can insert non-finalized block"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(three_epochs))
|
||||
.expect("the three epochs slot should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present"
|
||||
);
|
||||
|
||||
/*
|
||||
* Check that a prune doesnt wipe later blocks
|
||||
*/
|
||||
|
||||
let two_epochs = E::slots_per_epoch() * 2;
|
||||
cache.prune(two_epochs.into());
|
||||
|
||||
assert_eq!(
|
||||
*cache.finalized_slot.read(),
|
||||
Slot::from(two_epochs),
|
||||
"finalized slot is updated"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(three_epochs))
|
||||
.expect("the three epochs slot should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn simple_observations() {
|
||||
let cache = ObservedBlockProducers::default();
|
||||
|
||||
// Slot 0, proposer 0
|
||||
let block_a = &get_block(0, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_a),
|
||||
Ok(false),
|
||||
"no observation in empty cache"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_a),
|
||||
Ok(false),
|
||||
"can observe proposer, indicates proposer unobserved"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_a),
|
||||
Ok(true),
|
||||
"observed block is indicated as true"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_a),
|
||||
Ok(true),
|
||||
"observing again indicates true"
|
||||
);
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present"
|
||||
);
|
||||
|
||||
// Slot 1, proposer 0
|
||||
let block_b = &get_block(1, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_b),
|
||||
Ok(false),
|
||||
"no observation for new slot"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_b),
|
||||
Ok(false),
|
||||
"can observe proposer for new slot, indicates proposer unobserved"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_b),
|
||||
Ok(true),
|
||||
"observed block in slot 1 is indicated as true"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_b),
|
||||
Ok(true),
|
||||
"observing slot 1 again indicates true"
|
||||
);
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.read().len(), 2, "two slots should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present in slot 0"
|
||||
);
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(1))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present in slot 1"
|
||||
);
|
||||
|
||||
// Slot 0, proposer 1
|
||||
let block_c = &get_block(0, 1);
|
||||
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_c),
|
||||
Ok(false),
|
||||
"no observation for new proposer"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_c),
|
||||
Ok(false),
|
||||
"can observe new proposer, indicates proposer unobserved"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_c),
|
||||
Ok(true),
|
||||
"observed new proposer block is indicated as true"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_c),
|
||||
Ok(true),
|
||||
"observing new proposer again indicates true"
|
||||
);
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.read().len(), 2, "two slots should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
2,
|
||||
"two proposers should be present in slot 0"
|
||||
);
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(1))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
1,
|
||||
"only one proposer should be present in slot 1"
|
||||
);
|
||||
}
|
||||
}
|
||||
104
beacon_node/beacon_chain/src/observed_operations.rs
Normal file
104
beacon_node/beacon_chain/src/observed_operations.rs
Normal file
@@ -0,0 +1,104 @@
|
||||
use derivative::Derivative;
|
||||
use parking_lot::Mutex;
|
||||
use smallvec::SmallVec;
|
||||
use state_processing::{SigVerifiedOp, VerifyOperation};
|
||||
use std::collections::HashSet;
|
||||
use std::iter::FromIterator;
|
||||
use std::marker::PhantomData;
|
||||
use types::{
|
||||
AttesterSlashing, BeaconState, ChainSpec, EthSpec, ProposerSlashing, SignedVoluntaryExit,
|
||||
};
|
||||
|
||||
/// Number of validator indices to store on the stack in `observed_validators`.
|
||||
pub const SMALL_VEC_SIZE: usize = 8;
|
||||
|
||||
/// Stateful tracker for exit/slashing operations seen on the network.
|
||||
///
|
||||
/// Implements the conditions for gossip verification of exits and slashings from the P2P spec.
|
||||
#[derive(Debug, Derivative)]
|
||||
#[derivative(Default(bound = "T: ObservableOperation<E>, E: EthSpec"))]
|
||||
pub struct ObservedOperations<T: ObservableOperation<E>, E: EthSpec> {
|
||||
/// Indices of validators for whom we have already seen an instance of an operation `T`.
|
||||
///
|
||||
/// For voluntary exits, this is the set of all `signed_voluntary_exit.message.validator_index`.
|
||||
/// For proposer slashings, this is the set of all `proposer_slashing.index`.
|
||||
/// For attester slashings, this is the set of all validators who would be slashed by
|
||||
/// previously seen attester slashings, i.e. those validators in the intersection of
|
||||
/// `attestation_1.attester_indices` and `attestation_2.attester_indices`.
|
||||
observed_validator_indices: Mutex<HashSet<u64>>,
|
||||
_phantom: PhantomData<(T, E)>,
|
||||
}
|
||||
|
||||
/// Was the observed operation new and valid for further processing, or a useless duplicate?
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub enum ObservationOutcome<T> {
|
||||
New(SigVerifiedOp<T>),
|
||||
AlreadyKnown,
|
||||
}
|
||||
|
||||
/// Trait for exits and slashings which can be observed using `ObservedOperations`.
|
||||
pub trait ObservableOperation<E: EthSpec>: VerifyOperation<E> + Sized {
|
||||
/// The set of validator indices involved in this operation.
|
||||
///
|
||||
/// See the comment on `observed_validator_indices` above for detail.
|
||||
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]>;
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ObservableOperation<E> for SignedVoluntaryExit {
|
||||
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
|
||||
std::iter::once(self.message.validator_index).collect()
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ObservableOperation<E> for ProposerSlashing {
|
||||
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
|
||||
std::iter::once(self.signed_header_1.message.proposer_index).collect()
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
|
||||
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
|
||||
let attestation_1_indices =
|
||||
HashSet::<u64>::from_iter(self.attestation_1.attesting_indices.iter().copied());
|
||||
let attestation_2_indices =
|
||||
HashSet::<u64>::from_iter(self.attestation_2.attesting_indices.iter().copied());
|
||||
attestation_1_indices
|
||||
.intersection(&attestation_2_indices)
|
||||
.copied()
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> {
|
||||
pub fn verify_and_observe(
|
||||
&self,
|
||||
op: T,
|
||||
head_state: &BeaconState<E>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<ObservationOutcome<T>, T::Error> {
|
||||
let mut observed_validator_indices = self.observed_validator_indices.lock();
|
||||
let new_validator_indices = op.observed_validators();
|
||||
|
||||
// If all of the new validator indices have been previously observed, short-circuit
|
||||
// the validation. This implements the uniqueness check part of the spec, which for attester
|
||||
// slashings reads:
|
||||
//
|
||||
// At least one index in the intersection of the attesting indices of each attestation has
|
||||
// not yet been seen in any prior attester_slashing.
|
||||
if new_validator_indices
|
||||
.iter()
|
||||
.all(|index| observed_validator_indices.contains(index))
|
||||
{
|
||||
return Ok(ObservationOutcome::AlreadyKnown);
|
||||
}
|
||||
|
||||
// Validate the op using operation-specific logic (`verify_attester_slashing`, etc).
|
||||
let verified_op = op.validate(head_state, spec)?;
|
||||
|
||||
// Add the relevant indices to the set of known indices to prevent processing of duplicates
|
||||
// in the future.
|
||||
observed_validator_indices.extend(new_validator_indices);
|
||||
|
||||
Ok(ObservationOutcome::New(verified_op))
|
||||
}
|
||||
}
|
||||
@@ -1,26 +1,24 @@
|
||||
use crate::fork_choice::SszForkChoice;
|
||||
use crate::head_tracker::SszHeadTracker;
|
||||
use crate::{BeaconChainTypes, CheckPoint};
|
||||
use operation_pool::PersistedOperationPool;
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use store::{DBColumn, Error as StoreError, SimpleStoreItem};
|
||||
use store::{DBColumn, Error as StoreError, StoreItem};
|
||||
use types::Hash256;
|
||||
|
||||
/// 32-byte key for accessing the `PersistedBeaconChain`.
|
||||
pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA";
|
||||
|
||||
#[derive(Clone, Encode, Decode)]
|
||||
pub struct PersistedBeaconChain<T: BeaconChainTypes> {
|
||||
pub canonical_head: CheckPoint<T::EthSpec>,
|
||||
pub finalized_checkpoint: CheckPoint<T::EthSpec>,
|
||||
pub op_pool: PersistedOperationPool<T::EthSpec>,
|
||||
pub struct PersistedBeaconChain {
|
||||
/// This value is ignored to resolve the issue described here:
|
||||
///
|
||||
/// https://github.com/sigp/lighthouse/pull/1639
|
||||
///
|
||||
/// The following PR will clean-up and remove this field:
|
||||
///
|
||||
/// https://github.com/sigp/lighthouse/pull/1638
|
||||
pub canonical_head_block_root: Hash256,
|
||||
pub genesis_block_root: Hash256,
|
||||
pub ssz_head_tracker: SszHeadTracker,
|
||||
pub fork_choice: SszForkChoice,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> SimpleStoreItem for PersistedBeaconChain<T> {
|
||||
impl StoreItem for PersistedBeaconChain {
|
||||
fn db_column() -> DBColumn {
|
||||
DBColumn::BeaconChain
|
||||
}
|
||||
|
||||
25
beacon_node/beacon_chain/src/persisted_fork_choice.rs
Normal file
25
beacon_node/beacon_chain/src/persisted_fork_choice.rs
Normal file
@@ -0,0 +1,25 @@
|
||||
use crate::beacon_fork_choice_store::PersistedForkChoiceStore as ForkChoiceStore;
|
||||
use fork_choice::PersistedForkChoice as ForkChoice;
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use store::{DBColumn, Error, StoreItem};
|
||||
|
||||
#[derive(Encode, Decode)]
|
||||
pub struct PersistedForkChoice {
|
||||
pub fork_choice: ForkChoice,
|
||||
pub fork_choice_store: ForkChoiceStore,
|
||||
}
|
||||
|
||||
impl StoreItem for PersistedForkChoice {
|
||||
fn db_column() -> DBColumn {
|
||||
DBColumn::ForkChoice
|
||||
}
|
||||
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
self.as_ssz_bytes()
|
||||
}
|
||||
|
||||
fn from_store_bytes(bytes: &[u8]) -> std::result::Result<Self, Error> {
|
||||
Self::from_ssz_bytes(bytes).map_err(Into::into)
|
||||
}
|
||||
}
|
||||
46
beacon_node/beacon_chain/src/shuffling_cache.rs
Normal file
46
beacon_node/beacon_chain/src/shuffling_cache.rs
Normal file
@@ -0,0 +1,46 @@
|
||||
use crate::metrics;
|
||||
use lru::LruCache;
|
||||
use types::{beacon_state::CommitteeCache, Epoch, Hash256};
|
||||
|
||||
/// The size of the LRU cache that stores committee caches for quicker verification.
|
||||
///
|
||||
/// Each entry should be `8 + 800,000 = 800,008` bytes in size with 100k validators. (8-byte hash +
|
||||
/// 100k indices). Therefore, this cache should be approx `16 * 800,008 = 12.8 MB`. (Note: this
|
||||
/// ignores a few extra bytes in the caches that should be insignificant compared to the indices).
|
||||
const CACHE_SIZE: usize = 16;
|
||||
|
||||
/// Provides an LRU cache for `CommitteeCache`.
|
||||
///
|
||||
/// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like
|
||||
/// a find/replace error.
|
||||
pub struct ShufflingCache {
|
||||
cache: LruCache<(Epoch, Hash256), CommitteeCache>,
|
||||
}
|
||||
|
||||
impl ShufflingCache {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
cache: LruCache::new(CACHE_SIZE),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get(&mut self, epoch: Epoch, root: Hash256) -> Option<&CommitteeCache> {
|
||||
let opt = self.cache.get(&(epoch, root));
|
||||
|
||||
if opt.is_some() {
|
||||
metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS);
|
||||
} else {
|
||||
metrics::inc_counter(&metrics::SHUFFLING_CACHE_MISSES);
|
||||
}
|
||||
|
||||
opt
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, epoch: Epoch, root: Hash256, committee_cache: &CommitteeCache) {
|
||||
let key = (epoch, root);
|
||||
|
||||
if !self.cache.contains(&key) {
|
||||
self.cache.put(key, committee_cache.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
219
beacon_node/beacon_chain/src/snapshot_cache.rs
Normal file
219
beacon_node/beacon_chain/src/snapshot_cache.rs
Normal file
@@ -0,0 +1,219 @@
|
||||
use crate::BeaconSnapshot;
|
||||
use std::cmp;
|
||||
use types::{Epoch, EthSpec, Hash256};
|
||||
|
||||
/// The default size of the cache.
|
||||
pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4;
|
||||
|
||||
/// Provides a cache of `BeaconSnapshot` that is intended primarily for block processing.
|
||||
///
|
||||
/// ## Cache Queuing
|
||||
///
|
||||
/// The cache has a non-standard queue mechanism (specifically, it is not LRU).
|
||||
///
|
||||
/// The cache has a max number of elements (`max_len`). Until `max_len` is achieved, all snapshots
|
||||
/// are simply added to the queue. Once `max_len` is achieved, adding a new snapshot will cause an
|
||||
/// existing snapshot to be ejected. The ejected snapshot will:
|
||||
///
|
||||
/// - Never be the `head_block_root`.
|
||||
/// - Be the snapshot with the lowest `state.slot` (ties broken arbitrarily).
|
||||
pub struct SnapshotCache<T: EthSpec> {
|
||||
max_len: usize,
|
||||
head_block_root: Hash256,
|
||||
snapshots: Vec<BeaconSnapshot<T>>,
|
||||
}
|
||||
|
||||
impl<T: EthSpec> SnapshotCache<T> {
|
||||
/// Instantiate a new cache which contains the `head` snapshot.
|
||||
///
|
||||
/// Setting `max_len = 0` is equivalent to setting `max_len = 1`.
|
||||
pub fn new(max_len: usize, head: BeaconSnapshot<T>) -> Self {
|
||||
Self {
|
||||
max_len: cmp::max(max_len, 1),
|
||||
head_block_root: head.beacon_block_root,
|
||||
snapshots: vec![head],
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a snapshot, potentially removing an existing snapshot if `self` is at capacity (see
|
||||
/// struct-level documentation for more info).
|
||||
pub fn insert(&mut self, snapshot: BeaconSnapshot<T>) {
|
||||
if self.snapshots.len() < self.max_len {
|
||||
self.snapshots.push(snapshot);
|
||||
} else {
|
||||
let insert_at = self
|
||||
.snapshots
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(i, snapshot)| {
|
||||
if snapshot.beacon_block_root != self.head_block_root {
|
||||
Some((i, snapshot.beacon_state.slot))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.min_by_key(|(_i, slot)| *slot)
|
||||
.map(|(i, _slot)| i);
|
||||
|
||||
if let Some(i) = insert_at {
|
||||
self.snapshots[i] = snapshot;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// If there is a snapshot with `block_root`, remove and return it.
|
||||
pub fn try_remove(&mut self, block_root: Hash256) -> Option<BeaconSnapshot<T>> {
|
||||
self.snapshots
|
||||
.iter()
|
||||
.position(|snapshot| snapshot.beacon_block_root == block_root)
|
||||
.map(|i| self.snapshots.remove(i))
|
||||
}
|
||||
|
||||
/// If there is a snapshot with `block_root`, clone it (with only the committee caches) and
|
||||
/// return the clone.
|
||||
pub fn get_cloned(&self, block_root: Hash256) -> Option<BeaconSnapshot<T>> {
|
||||
self.snapshots
|
||||
.iter()
|
||||
.find(|snapshot| snapshot.beacon_block_root == block_root)
|
||||
.map(|snapshot| snapshot.clone_with_only_committee_caches())
|
||||
}
|
||||
|
||||
/// Removes all snapshots from the queue that are less than or equal to the finalized epoch.
|
||||
pub fn prune(&mut self, finalized_epoch: Epoch) {
|
||||
self.snapshots.retain(|snapshot| {
|
||||
snapshot.beacon_state.slot > finalized_epoch.start_slot(T::slots_per_epoch())
|
||||
})
|
||||
}
|
||||
|
||||
/// Inform the cache that the head of the beacon chain has changed.
|
||||
///
|
||||
/// The snapshot that matches this `head_block_root` will never be ejected from the cache
|
||||
/// during `Self::insert`.
|
||||
pub fn update_head(&mut self, head_block_root: Hash256) {
|
||||
self.head_block_root = head_block_root
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use types::{
|
||||
test_utils::{generate_deterministic_keypair, TestingBeaconStateBuilder},
|
||||
BeaconBlock, Epoch, MainnetEthSpec, SignedBeaconBlock, Slot,
|
||||
};
|
||||
|
||||
const CACHE_SIZE: usize = 4;
|
||||
|
||||
fn get_snapshot(i: u64) -> BeaconSnapshot<MainnetEthSpec> {
|
||||
let spec = MainnetEthSpec::default_spec();
|
||||
|
||||
let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(1, &spec);
|
||||
let (beacon_state, _keypairs) = state_builder.build();
|
||||
|
||||
BeaconSnapshot {
|
||||
beacon_state,
|
||||
beacon_state_root: Hash256::from_low_u64_be(i),
|
||||
beacon_block: SignedBeaconBlock {
|
||||
message: BeaconBlock::empty(&spec),
|
||||
signature: generate_deterministic_keypair(0)
|
||||
.sk
|
||||
.sign(Hash256::from_low_u64_be(42)),
|
||||
},
|
||||
beacon_block_root: Hash256::from_low_u64_be(i),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn insert_get_prune_update() {
|
||||
let mut cache = SnapshotCache::new(CACHE_SIZE, get_snapshot(0));
|
||||
|
||||
// Insert a bunch of entries in the cache. It should look like this:
|
||||
//
|
||||
// Index Root
|
||||
// 0 0 <--head
|
||||
// 1 1
|
||||
// 2 2
|
||||
// 3 3
|
||||
for i in 1..CACHE_SIZE as u64 {
|
||||
let mut snapshot = get_snapshot(i);
|
||||
|
||||
// Each snapshot should be one slot into an epoch, with each snapshot one epoch apart.
|
||||
snapshot.beacon_state.slot = Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1);
|
||||
|
||||
cache.insert(snapshot);
|
||||
|
||||
assert_eq!(
|
||||
cache.snapshots.len(),
|
||||
i as usize + 1,
|
||||
"cache length should be as expected"
|
||||
);
|
||||
assert_eq!(cache.head_block_root, Hash256::from_low_u64_be(0));
|
||||
}
|
||||
|
||||
// Insert a new value in the cache. Afterwards it should look like:
|
||||
//
|
||||
// Index Root
|
||||
// 0 0 <--head
|
||||
// 1 42
|
||||
// 2 2
|
||||
// 3 3
|
||||
assert_eq!(cache.snapshots.len(), CACHE_SIZE);
|
||||
cache.insert(get_snapshot(42));
|
||||
assert_eq!(cache.snapshots.len(), CACHE_SIZE);
|
||||
|
||||
assert!(
|
||||
cache.try_remove(Hash256::from_low_u64_be(1)).is_none(),
|
||||
"the snapshot with the lowest slot should have been removed during the insert function"
|
||||
);
|
||||
assert!(cache.get_cloned(Hash256::from_low_u64_be(1)).is_none());
|
||||
|
||||
assert!(
|
||||
cache
|
||||
.get_cloned(Hash256::from_low_u64_be(0))
|
||||
.expect("the head should still be in the cache")
|
||||
.beacon_block_root
|
||||
== Hash256::from_low_u64_be(0),
|
||||
"get_cloned should get the correct snapshot"
|
||||
);
|
||||
assert!(
|
||||
cache
|
||||
.try_remove(Hash256::from_low_u64_be(0))
|
||||
.expect("the head should still be in the cache")
|
||||
.beacon_block_root
|
||||
== Hash256::from_low_u64_be(0),
|
||||
"try_remove should get the correct snapshot"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
cache.snapshots.len(),
|
||||
CACHE_SIZE - 1,
|
||||
"try_remove should shorten the cache"
|
||||
);
|
||||
|
||||
// Prune the cache. Afterwards it should look like:
|
||||
//
|
||||
// Index Root
|
||||
// 0 2
|
||||
// 1 3
|
||||
cache.prune(Epoch::new(2));
|
||||
|
||||
assert_eq!(cache.snapshots.len(), 2);
|
||||
|
||||
cache.update_head(Hash256::from_low_u64_be(2));
|
||||
|
||||
// Over-fill the cache so it needs to eject some old values on insert.
|
||||
for i in 0..CACHE_SIZE as u64 {
|
||||
cache.insert(get_snapshot(u64::max_value() - i));
|
||||
}
|
||||
|
||||
// Ensure that the new head value was not removed from the cache.
|
||||
assert!(
|
||||
cache
|
||||
.try_remove(Hash256::from_low_u64_be(2))
|
||||
.expect("the new head should still be in the cache")
|
||||
.beacon_block_root
|
||||
== Hash256::from_low_u64_be(2),
|
||||
"try_remove should get the correct snapshot"
|
||||
);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
20
beacon_node/beacon_chain/src/timeout_rw_lock.rs
Normal file
20
beacon_node/beacon_chain/src/timeout_rw_lock.rs
Normal file
@@ -0,0 +1,20 @@
|
||||
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
use std::time::Duration;
|
||||
|
||||
/// A simple wrapper around `parking_lot::RwLock` that only permits read/write access with a
|
||||
/// time-out (i.e., no indefinitely-blocking operations).
|
||||
pub struct TimeoutRwLock<T>(RwLock<T>);
|
||||
|
||||
impl<T> TimeoutRwLock<T> {
|
||||
pub fn new(inner: T) -> Self {
|
||||
Self(RwLock::new(inner))
|
||||
}
|
||||
|
||||
pub fn try_read_for(&self, timeout: Duration) -> Option<RwLockReadGuard<T>> {
|
||||
self.0.try_read_for(timeout)
|
||||
}
|
||||
|
||||
pub fn try_write_for(&self, timeout: Duration) -> Option<RwLockWriteGuard<T>> {
|
||||
self.0.try_write_for(timeout)
|
||||
}
|
||||
}
|
||||
363
beacon_node/beacon_chain/src/validator_pubkey_cache.rs
Normal file
363
beacon_node/beacon_chain/src/validator_pubkey_cache.rs
Normal file
@@ -0,0 +1,363 @@
|
||||
use crate::errors::BeaconChainError;
|
||||
use ssz::{Decode, DecodeError, Encode};
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryInto;
|
||||
use std::fs::{File, OpenOptions};
|
||||
use std::io::{self, Read, Write};
|
||||
use std::path::Path;
|
||||
use types::{BeaconState, EthSpec, PublicKey, PublicKeyBytes, Validator};
|
||||
|
||||
/// Provides a mapping of `validator_index -> validator_publickey`.
|
||||
///
|
||||
/// This cache exists for two reasons:
|
||||
///
|
||||
/// 1. To avoid reading a `BeaconState` from disk each time we need a public key.
|
||||
/// 2. To reduce the amount of public key _decompression_ required. A `BeaconState` stores public
|
||||
/// keys in compressed form and they are needed in decompressed form for signature verification.
|
||||
/// Decompression is expensive when many keys are involved.
|
||||
///
|
||||
/// The cache has a `persistence_file` that it uses to maintain a persistent, on-disk
|
||||
/// copy of itself. This allows it to be restored between process invocations.
|
||||
pub struct ValidatorPubkeyCache {
|
||||
pubkeys: Vec<PublicKey>,
|
||||
indices: HashMap<PublicKeyBytes, usize>,
|
||||
persitence_file: ValidatorPubkeyCacheFile,
|
||||
}
|
||||
|
||||
impl ValidatorPubkeyCache {
|
||||
pub fn load_from_file<P: AsRef<Path>>(path: P) -> Result<Self, BeaconChainError> {
|
||||
ValidatorPubkeyCacheFile::open(&path)
|
||||
.and_then(ValidatorPubkeyCacheFile::into_cache)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Create a new public key cache using the keys in `state.validators`.
|
||||
///
|
||||
/// Also creates a new persistence file, returning an error if there is already a file at
|
||||
/// `persistence_path`.
|
||||
pub fn new<T: EthSpec, P: AsRef<Path>>(
|
||||
state: &BeaconState<T>,
|
||||
persistence_path: P,
|
||||
) -> Result<Self, BeaconChainError> {
|
||||
if persistence_path.as_ref().exists() {
|
||||
return Err(BeaconChainError::ValidatorPubkeyCacheFileError(format!(
|
||||
"Persistence file already exists: {:?}",
|
||||
persistence_path.as_ref()
|
||||
)));
|
||||
}
|
||||
|
||||
let mut cache = Self {
|
||||
persitence_file: ValidatorPubkeyCacheFile::create(persistence_path)?,
|
||||
pubkeys: vec![],
|
||||
indices: HashMap::new(),
|
||||
};
|
||||
|
||||
cache.import_new_pubkeys(state)?;
|
||||
|
||||
Ok(cache)
|
||||
}
|
||||
|
||||
/// Scan the given `state` and add any new validator public keys.
|
||||
///
|
||||
/// Does not delete any keys from `self` if they don't appear in `state`.
|
||||
pub fn import_new_pubkeys<T: EthSpec>(
|
||||
&mut self,
|
||||
state: &BeaconState<T>,
|
||||
) -> Result<(), BeaconChainError> {
|
||||
if state.validators.len() > self.pubkeys.len() {
|
||||
self.import(&state.validators[self.pubkeys.len()..])
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds zero or more validators to `self`.
|
||||
fn import(&mut self, validators: &[Validator]) -> Result<(), BeaconChainError> {
|
||||
self.pubkeys.reserve(validators.len());
|
||||
self.indices.reserve(validators.len());
|
||||
|
||||
for v in validators.iter() {
|
||||
let i = self.pubkeys.len();
|
||||
|
||||
if self.indices.contains_key(&v.pubkey) {
|
||||
return Err(BeaconChainError::DuplicateValidatorPublicKey);
|
||||
}
|
||||
|
||||
// The item is written to disk (the persistence file) _before_ it is written into
|
||||
// the local struct.
|
||||
//
|
||||
// This means that a pubkey cache read from disk will always be equivalent to or
|
||||
// _later than_ the cache that was running in the previous instance of Lighthouse.
|
||||
//
|
||||
// The motivation behind this ordering is that we do not want to have states that
|
||||
// reference a pubkey that is not in our cache. However, it's fine to have pubkeys
|
||||
// that are never referenced in a state.
|
||||
self.persitence_file.append(i, &v.pubkey)?;
|
||||
|
||||
self.pubkeys.push(
|
||||
(&v.pubkey)
|
||||
.try_into()
|
||||
.map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?,
|
||||
);
|
||||
|
||||
self.indices.insert(v.pubkey.clone(), i);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the public key for a validator with index `i`.
|
||||
pub fn get(&self, i: usize) -> Option<&PublicKey> {
|
||||
self.pubkeys.get(i)
|
||||
}
|
||||
|
||||
/// Get the index of a validator with `pubkey`.
|
||||
pub fn get_index(&self, pubkey: &PublicKeyBytes) -> Option<usize> {
|
||||
self.indices.get(pubkey).copied()
|
||||
}
|
||||
|
||||
/// Returns the number of validators in the cache.
|
||||
pub fn len(&self) -> usize {
|
||||
self.indices.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Allows for maintaining an on-disk copy of the `ValidatorPubkeyCache`. The file is raw SSZ bytes
|
||||
/// (not ASCII encoded).
|
||||
///
|
||||
/// ## Writes
|
||||
///
|
||||
/// Each entry is simply appended to the file.
|
||||
///
|
||||
/// ## Reads
|
||||
///
|
||||
/// The whole file is parsed as an SSZ "variable list" of objects.
|
||||
///
|
||||
/// This parsing method is possible because the items in the list are fixed-length SSZ objects.
|
||||
struct ValidatorPubkeyCacheFile(File);
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Error {
|
||||
Io(io::Error),
|
||||
Ssz(DecodeError),
|
||||
PubkeyDecode(bls::Error),
|
||||
/// The file read from disk does not have a contiguous list of validator public keys. The file
|
||||
/// has become corrupted.
|
||||
InconsistentIndex {
|
||||
expected: Option<usize>,
|
||||
found: usize,
|
||||
},
|
||||
}
|
||||
|
||||
impl From<Error> for BeaconChainError {
|
||||
fn from(e: Error) -> BeaconChainError {
|
||||
BeaconChainError::ValidatorPubkeyCacheFileError(format!("{:?}", e))
|
||||
}
|
||||
}
|
||||
|
||||
impl ValidatorPubkeyCacheFile {
|
||||
/// Creates a file for reading and writing.
|
||||
pub fn create<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
|
||||
OpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.open(path)
|
||||
.map(Self)
|
||||
.map_err(Error::Io)
|
||||
}
|
||||
|
||||
/// Opens an existing file for reading and writing.
|
||||
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
|
||||
OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create(false)
|
||||
.append(true)
|
||||
.open(path)
|
||||
.map(Self)
|
||||
.map_err(Error::Io)
|
||||
}
|
||||
|
||||
/// Append a public key to file.
|
||||
///
|
||||
/// The provided `index` should each be one greater than the previous and start at 0.
|
||||
/// Otherwise, the file will become corrupted and unable to be converted into a cache .
|
||||
pub fn append(&mut self, index: usize, pubkey: &PublicKeyBytes) -> Result<(), Error> {
|
||||
append_to_file(&mut self.0, index, pubkey)
|
||||
}
|
||||
|
||||
/// Creates a `ValidatorPubkeyCache` by reading and parsing the underlying file.
|
||||
pub fn into_cache(mut self) -> Result<ValidatorPubkeyCache, Error> {
|
||||
let mut bytes = vec![];
|
||||
self.0.read_to_end(&mut bytes).map_err(Error::Io)?;
|
||||
|
||||
let list: Vec<(usize, PublicKeyBytes)> = Vec::from_ssz_bytes(&bytes).map_err(Error::Ssz)?;
|
||||
|
||||
let mut last = None;
|
||||
let mut pubkeys = Vec::with_capacity(list.len());
|
||||
let mut indices = HashMap::new();
|
||||
|
||||
for (index, pubkey) in list {
|
||||
let expected = last.map(|n| n + 1);
|
||||
if expected.map_or(true, |expected| index == expected) {
|
||||
last = Some(index);
|
||||
pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?);
|
||||
indices.insert(pubkey, index);
|
||||
} else {
|
||||
return Err(Error::InconsistentIndex {
|
||||
expected,
|
||||
found: index,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ValidatorPubkeyCache {
|
||||
pubkeys,
|
||||
indices,
|
||||
persitence_file: self,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn append_to_file(file: &mut File, index: usize, pubkey: &PublicKeyBytes) -> Result<(), Error> {
|
||||
let mut line = Vec::with_capacity(index.ssz_bytes_len() + pubkey.ssz_bytes_len());
|
||||
|
||||
index.ssz_append(&mut line);
|
||||
pubkey.ssz_append(&mut line);
|
||||
|
||||
file.write_all(&line).map_err(Error::Io)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use tempfile::tempdir;
|
||||
use types::{
|
||||
test_utils::{generate_deterministic_keypair, TestingBeaconStateBuilder},
|
||||
BeaconState, EthSpec, Keypair, MainnetEthSpec,
|
||||
};
|
||||
|
||||
fn get_state(validator_count: usize) -> (BeaconState<MainnetEthSpec>, Vec<Keypair>) {
|
||||
let spec = MainnetEthSpec::default_spec();
|
||||
let builder =
|
||||
TestingBeaconStateBuilder::from_deterministic_keypairs(validator_count, &spec);
|
||||
builder.build()
|
||||
}
|
||||
|
||||
fn check_cache_get(cache: &ValidatorPubkeyCache, keypairs: &[Keypair]) {
|
||||
let validator_count = keypairs.len();
|
||||
|
||||
for i in 0..validator_count + 1 {
|
||||
if i < validator_count {
|
||||
let pubkey = cache.get(i).expect("pubkey should be present");
|
||||
assert_eq!(pubkey, &keypairs[i].pk, "pubkey should match cache");
|
||||
|
||||
let pubkey_bytes: PublicKeyBytes = pubkey.clone().into();
|
||||
|
||||
assert_eq!(
|
||||
i,
|
||||
cache
|
||||
.get_index(&pubkey_bytes)
|
||||
.expect("should resolve index"),
|
||||
"index should match cache"
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
cache.get(i),
|
||||
None,
|
||||
"should not get pubkey for out of bounds index",
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_operation() {
|
||||
let (state, keypairs) = get_state(8);
|
||||
|
||||
let dir = tempdir().expect("should create tempdir");
|
||||
let path = dir.path().join("cache.ssz");
|
||||
|
||||
let mut cache = ValidatorPubkeyCache::new(&state, path).expect("should create cache");
|
||||
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
|
||||
// Try adding a state with the same number of keypairs.
|
||||
let (state, keypairs) = get_state(8);
|
||||
cache
|
||||
.import_new_pubkeys(&state)
|
||||
.expect("should import pubkeys");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
|
||||
// Try adding a state with less keypairs.
|
||||
let (state, _) = get_state(1);
|
||||
cache
|
||||
.import_new_pubkeys(&state)
|
||||
.expect("should import pubkeys");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
|
||||
// Try adding a state with more keypairs.
|
||||
let (state, keypairs) = get_state(12);
|
||||
cache
|
||||
.import_new_pubkeys(&state)
|
||||
.expect("should import pubkeys");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn persistence() {
|
||||
let (state, keypairs) = get_state(8);
|
||||
|
||||
let dir = tempdir().expect("should create tempdir");
|
||||
let path = dir.path().join("cache.ssz");
|
||||
|
||||
// Create a new cache.
|
||||
let cache = ValidatorPubkeyCache::new(&state, &path).expect("should create cache");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
drop(cache);
|
||||
|
||||
// Re-init the cache from the file.
|
||||
let mut cache = ValidatorPubkeyCache::load_from_file(&path).expect("should open cache");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
|
||||
// Add some more keypairs.
|
||||
let (state, keypairs) = get_state(12);
|
||||
cache
|
||||
.import_new_pubkeys(&state)
|
||||
.expect("should import pubkeys");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
drop(cache);
|
||||
|
||||
// Re-init the cache from the file.
|
||||
let cache = ValidatorPubkeyCache::load_from_file(&path).expect("should open cache");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_persisted_file() {
|
||||
let dir = tempdir().expect("should create tempdir");
|
||||
let path = dir.path().join("cache.ssz");
|
||||
let pubkey = generate_deterministic_keypair(0).pk.into();
|
||||
|
||||
let mut file = File::create(&path).expect("should create file");
|
||||
append_to_file(&mut file, 0, &pubkey).expect("should write to file");
|
||||
drop(file);
|
||||
|
||||
let cache = ValidatorPubkeyCache::load_from_file(&path).expect("should open cache");
|
||||
drop(cache);
|
||||
|
||||
let mut file = OpenOptions::new()
|
||||
.write(true)
|
||||
.append(true)
|
||||
.open(&path)
|
||||
.expect("should open file");
|
||||
|
||||
append_to_file(&mut file, 42, &pubkey).expect("should write bad data to file");
|
||||
drop(file);
|
||||
|
||||
assert!(
|
||||
ValidatorPubkeyCache::load_from_file(&path).is_err(),
|
||||
"should not parse invalid file"
|
||||
);
|
||||
}
|
||||
}
|
||||
132
beacon_node/beacon_chain/tests/attestation_production.rs
Normal file
132
beacon_node/beacon_chain/tests/attestation_production.rs
Normal file
@@ -0,0 +1,132 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use beacon_chain::{
|
||||
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
|
||||
StateSkipConfig,
|
||||
};
|
||||
use store::config::StoreConfig;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot};
|
||||
|
||||
pub const VALIDATOR_COUNT: usize = 16;
|
||||
|
||||
lazy_static! {
|
||||
/// A cached set of keys.
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
/// This test builds a chain that is just long enough to finalize an epoch then it produces an
|
||||
/// attestation at each slot from genesis through to three epochs past the head.
|
||||
///
|
||||
/// It checks the produced attestation against some locally computed values.
|
||||
#[test]
|
||||
fn produces_attestations() {
|
||||
let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4;
|
||||
|
||||
let mut harness = BeaconChainHarness::new_with_store_config(
|
||||
MainnetEthSpec,
|
||||
KEYPAIRS[..].to_vec(),
|
||||
StoreConfig::default(),
|
||||
);
|
||||
|
||||
// Skip past the genesis slot.
|
||||
harness.advance_slot();
|
||||
|
||||
harness.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
let chain = &harness.chain;
|
||||
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
assert_eq!(state.slot, num_blocks_produced, "head should have updated");
|
||||
assert_ne!(
|
||||
state.finalized_checkpoint.epoch, 0,
|
||||
"head should have updated"
|
||||
);
|
||||
|
||||
let current_slot = chain.slot().expect("should get slot");
|
||||
|
||||
// Test all valid committee indices for all slots in the chain.
|
||||
for slot in 0..=current_slot.as_u64() + MainnetEthSpec::slots_per_epoch() * 3 {
|
||||
let slot = Slot::from(slot);
|
||||
let state = chain
|
||||
.state_at_slot(slot, StateSkipConfig::WithStateRoots)
|
||||
.expect("should get state");
|
||||
|
||||
let block_slot = if slot > current_slot {
|
||||
current_slot
|
||||
} else {
|
||||
slot
|
||||
};
|
||||
let block = chain
|
||||
.block_at_slot(block_slot)
|
||||
.expect("should get block")
|
||||
.expect("block should not be skipped");
|
||||
let block_root = block.message.tree_hash_root();
|
||||
|
||||
let epoch_boundary_slot = state
|
||||
.current_epoch()
|
||||
.start_slot(MainnetEthSpec::slots_per_epoch());
|
||||
let target_root = if state.slot == epoch_boundary_slot {
|
||||
block_root
|
||||
} else {
|
||||
*state
|
||||
.get_block_root(epoch_boundary_slot)
|
||||
.expect("should get target block root")
|
||||
};
|
||||
|
||||
let committee_cache = state
|
||||
.committee_cache(RelativeEpoch::Current)
|
||||
.expect("should get committee_cache");
|
||||
|
||||
let committee_count = committee_cache.committees_per_slot();
|
||||
|
||||
for index in 0..committee_count {
|
||||
let committee_len = committee_cache
|
||||
.get_beacon_committee(slot, index)
|
||||
.expect("should get committee for slot")
|
||||
.committee
|
||||
.len();
|
||||
|
||||
let attestation = chain
|
||||
.produce_unaggregated_attestation(slot, index)
|
||||
.expect("should produce attestation");
|
||||
|
||||
let data = &attestation.data;
|
||||
|
||||
assert_eq!(
|
||||
attestation.aggregation_bits.len(),
|
||||
committee_len,
|
||||
"bad committee len"
|
||||
);
|
||||
assert!(
|
||||
attestation.aggregation_bits.is_zero(),
|
||||
"some committee bits are set"
|
||||
);
|
||||
assert_eq!(
|
||||
attestation.signature,
|
||||
AggregateSignature::empty(),
|
||||
"bad signature"
|
||||
);
|
||||
assert_eq!(data.index, index, "bad index");
|
||||
assert_eq!(data.slot, slot, "bad slot");
|
||||
assert_eq!(data.beacon_block_root, block_root, "bad block root");
|
||||
assert_eq!(
|
||||
data.source, state.current_justified_checkpoint,
|
||||
"bad source"
|
||||
);
|
||||
assert_eq!(
|
||||
data.source, state.current_justified_checkpoint,
|
||||
"bad source"
|
||||
);
|
||||
assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");
|
||||
assert_eq!(data.target.root, target_root, "bad target root");
|
||||
}
|
||||
}
|
||||
}
|
||||
836
beacon_node/beacon_chain/tests/attestation_verification.rs
Normal file
836
beacon_node/beacon_chain/tests/attestation_verification.rs
Normal file
@@ -0,0 +1,836 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use beacon_chain::{
|
||||
attestation_verification::Error as AttnError,
|
||||
test_utils::{
|
||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
|
||||
},
|
||||
BeaconChain, BeaconChainTypes,
|
||||
};
|
||||
use int_to_bytes::int_to_bytes32;
|
||||
use state_processing::per_slot_processing;
|
||||
use store::config::StoreConfig;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{
|
||||
test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, EthSpec, Hash256,
|
||||
Keypair, MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock,
|
||||
SubnetId, Unsigned,
|
||||
};
|
||||
|
||||
pub type E = MainnetEthSpec;
|
||||
|
||||
/// The validator count needs to be relatively high compared to other tests to ensure that we can
|
||||
/// have committees where _some_ validators are aggregators but not _all_.
|
||||
pub const VALIDATOR_COUNT: usize = 256;
|
||||
|
||||
lazy_static! {
|
||||
/// A cached set of keys.
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
/// Returns a beacon chain harness.
|
||||
fn get_harness(validator_count: usize) -> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
|
||||
let harness = BeaconChainHarness::new_with_target_aggregators(
|
||||
MainnetEthSpec,
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
// A kind-of arbitrary number that ensures that _some_ validators are aggregators, but
|
||||
// not all.
|
||||
4,
|
||||
StoreConfig::default(),
|
||||
);
|
||||
|
||||
harness.advance_slot();
|
||||
|
||||
harness
|
||||
}
|
||||
|
||||
/// Returns an attestation that is valid for some slot in the given `chain`.
|
||||
///
|
||||
/// Also returns some info about who created it.
|
||||
fn get_valid_unaggregated_attestation<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
) -> (Attestation<T::EthSpec>, usize, usize, SecretKey, SubnetId) {
|
||||
let head = chain.head().expect("should get head");
|
||||
let current_slot = chain.slot().expect("should get slot");
|
||||
|
||||
let mut valid_attestation = chain
|
||||
.produce_unaggregated_attestation(current_slot, 0)
|
||||
.expect("should not error while producing attestation");
|
||||
|
||||
let validator_committee_index = 0;
|
||||
let validator_index = *head
|
||||
.beacon_state
|
||||
.get_beacon_committee(current_slot, valid_attestation.data.index)
|
||||
.expect("should get committees")
|
||||
.committee
|
||||
.get(validator_committee_index)
|
||||
.expect("there should be an attesting validator");
|
||||
|
||||
let validator_sk = generate_deterministic_keypair(validator_index).sk;
|
||||
|
||||
valid_attestation
|
||||
.sign(
|
||||
&validator_sk,
|
||||
validator_committee_index,
|
||||
&head.beacon_state.fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
.expect("should sign attestation");
|
||||
|
||||
let subnet_id = SubnetId::compute_subnet_for_attestation_data::<E>(
|
||||
&valid_attestation.data,
|
||||
head.beacon_state
|
||||
.get_committee_count_at_slot(current_slot)
|
||||
.expect("should get committee count"),
|
||||
&chain.spec,
|
||||
)
|
||||
.expect("should get subnet_id");
|
||||
|
||||
(
|
||||
valid_attestation,
|
||||
validator_index,
|
||||
validator_committee_index,
|
||||
validator_sk,
|
||||
subnet_id,
|
||||
)
|
||||
}
|
||||
|
||||
fn get_valid_aggregated_attestation<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
aggregate: Attestation<T::EthSpec>,
|
||||
) -> (SignedAggregateAndProof<T::EthSpec>, usize, SecretKey) {
|
||||
let state = &chain.head().expect("should get head").beacon_state;
|
||||
let current_slot = chain.slot().expect("should get slot");
|
||||
|
||||
let committee = state
|
||||
.get_beacon_committee(current_slot, aggregate.data.index)
|
||||
.expect("should get committees");
|
||||
let committee_len = committee.committee.len();
|
||||
|
||||
let (aggregator_index, aggregator_sk) = committee
|
||||
.committee
|
||||
.iter()
|
||||
.find_map(|&val_index| {
|
||||
let aggregator_sk = generate_deterministic_keypair(val_index).sk;
|
||||
|
||||
let proof = SelectionProof::new::<T::EthSpec>(
|
||||
aggregate.data.slot,
|
||||
&aggregator_sk,
|
||||
&state.fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
);
|
||||
|
||||
if proof.is_aggregator(committee_len, &chain.spec).unwrap() {
|
||||
Some((val_index, aggregator_sk))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.expect("should find aggregator for committee");
|
||||
|
||||
let signed_aggregate = SignedAggregateAndProof::from_aggregate(
|
||||
aggregator_index as u64,
|
||||
aggregate,
|
||||
None,
|
||||
&aggregator_sk,
|
||||
&state.fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
);
|
||||
|
||||
(signed_aggregate, aggregator_index, aggregator_sk)
|
||||
}
|
||||
|
||||
/// Returns a proof and index for a validator that is **not** an aggregator for the given
|
||||
/// attestation.
|
||||
fn get_non_aggregator<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
aggregate: &Attestation<T::EthSpec>,
|
||||
) -> (usize, SecretKey) {
|
||||
let state = &chain.head().expect("should get head").beacon_state;
|
||||
let current_slot = chain.slot().expect("should get slot");
|
||||
|
||||
let committee = state
|
||||
.get_beacon_committee(current_slot, aggregate.data.index)
|
||||
.expect("should get committees");
|
||||
let committee_len = committee.committee.len();
|
||||
|
||||
committee
|
||||
.committee
|
||||
.iter()
|
||||
.find_map(|&val_index| {
|
||||
let aggregator_sk = generate_deterministic_keypair(val_index).sk;
|
||||
|
||||
let proof = SelectionProof::new::<T::EthSpec>(
|
||||
aggregate.data.slot,
|
||||
&aggregator_sk,
|
||||
&state.fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
);
|
||||
|
||||
if proof.is_aggregator(committee_len, &chain.spec).unwrap() {
|
||||
None
|
||||
} else {
|
||||
Some((val_index, aggregator_sk))
|
||||
}
|
||||
})
|
||||
.expect("should find non-aggregator for committee")
|
||||
}
|
||||
|
||||
/// Tests verification of `SignedAggregateAndProof` from the gossip network.
|
||||
#[test]
|
||||
fn aggregated_gossip_verification() {
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
// Extend the chain out a few epochs so we have some chain depth to play with.
|
||||
harness.extend_chain(
|
||||
MainnetEthSpec::slots_per_epoch() as usize * 3 - 1,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
// Advance into a slot where there have not been blocks or attestations produced.
|
||||
harness.advance_slot();
|
||||
|
||||
let current_slot = harness.chain.slot().expect("should get slot");
|
||||
|
||||
assert_eq!(
|
||||
current_slot % E::slots_per_epoch(),
|
||||
0,
|
||||
"the test requires a new epoch to avoid already-seen errors"
|
||||
);
|
||||
|
||||
let (valid_attestation, _attester_index, _attester_committee_index, validator_sk, _subnet_id) =
|
||||
get_valid_unaggregated_attestation(&harness.chain);
|
||||
let (valid_aggregate, aggregator_index, aggregator_sk) =
|
||||
get_valid_aggregated_attestation(&harness.chain, valid_attestation);
|
||||
|
||||
macro_rules! assert_invalid {
|
||||
($desc: tt, $attn_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => {
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_aggregated_attestation_for_gossip($attn_getter)
|
||||
.err()
|
||||
.expect(&format!(
|
||||
"{} should error during verify_aggregated_attestation_for_gossip",
|
||||
$desc
|
||||
)),
|
||||
$( $error ) |+ $( if $guard )?
|
||||
),
|
||||
"case: {}",
|
||||
$desc,
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
/*
|
||||
* The following two tests ensure:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* aggregate.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (with a
|
||||
* MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. aggregate.data.slot +
|
||||
* ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot (a client MAY
|
||||
* queue future aggregates for processing at the appropriate slot).
|
||||
*/
|
||||
|
||||
let future_slot = current_slot + 1;
|
||||
assert_invalid!(
|
||||
"aggregate from future slot",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.aggregate.data.slot = future_slot;
|
||||
a
|
||||
},
|
||||
AttnError::FutureSlot { attestation_slot, latest_permissible_slot }
|
||||
if attestation_slot == future_slot && latest_permissible_slot == current_slot
|
||||
);
|
||||
|
||||
let early_slot = current_slot
|
||||
.as_u64()
|
||||
.checked_sub(E::slots_per_epoch() + 2)
|
||||
.expect("chain is not sufficiently deep for test")
|
||||
.into();
|
||||
assert_invalid!(
|
||||
"aggregate from past slot",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.aggregate.data.slot = early_slot;
|
||||
a
|
||||
},
|
||||
AttnError::PastSlot {
|
||||
attestation_slot,
|
||||
// Subtract an additional slot since the harness will be exactly on the start of the
|
||||
// slot and the propagation tolerance will allow an extra slot.
|
||||
earliest_permissible_slot
|
||||
}
|
||||
if attestation_slot == early_slot
|
||||
&& earliest_permissible_slot == current_slot - E::slots_per_epoch() - 1
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The block being voted for (aggregate.data.beacon_block_root) passes validation.
|
||||
*/
|
||||
|
||||
let unknown_root = Hash256::from_low_u64_le(424242);
|
||||
assert_invalid!(
|
||||
"aggregate with unknown head block",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.aggregate.data.beacon_block_root = unknown_root;
|
||||
a
|
||||
},
|
||||
AttnError::UnknownHeadBlock {
|
||||
beacon_block_root
|
||||
}
|
||||
if beacon_block_root == unknown_root
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The attestation has participants.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate with no participants",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
let aggregation_bits = &mut a.message.aggregate.aggregation_bits;
|
||||
aggregation_bits.difference_inplace(&aggregation_bits.clone());
|
||||
assert!(aggregation_bits.is_zero());
|
||||
a.message.aggregate.signature = AggregateSignature::infinity();
|
||||
a
|
||||
},
|
||||
AttnError::EmptyAggregationBitfield
|
||||
);
|
||||
|
||||
/*
|
||||
* This test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The aggregator signature, signed_aggregate_and_proof.signature, is valid.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate with bad signature",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
|
||||
a.signature = validator_sk.sign(Hash256::from_low_u64_be(42));
|
||||
|
||||
a
|
||||
},
|
||||
AttnError::InvalidSignature
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The aggregate_and_proof.selection_proof is a valid signature of the aggregate.data.slot by
|
||||
* the validator with index aggregate_and_proof.aggregator_index.
|
||||
*/
|
||||
|
||||
let committee_len = harness
|
||||
.chain
|
||||
.head()
|
||||
.unwrap()
|
||||
.beacon_state
|
||||
.get_beacon_committee(
|
||||
harness.chain.slot().unwrap(),
|
||||
valid_aggregate.message.aggregate.data.index,
|
||||
)
|
||||
.expect("should get committees")
|
||||
.committee
|
||||
.len();
|
||||
assert_invalid!(
|
||||
"aggregate with bad selection proof signature",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
|
||||
// Generate some random signature until happens to be a valid selection proof. We need
|
||||
// this in order to reach the signature verification code.
|
||||
//
|
||||
// Could run for ever, but that seems _really_ improbable.
|
||||
let mut i: u64 = 0;
|
||||
a.message.selection_proof = loop {
|
||||
i += 1;
|
||||
let proof: SelectionProof = validator_sk
|
||||
.sign(Hash256::from_slice(&int_to_bytes32(i)))
|
||||
.into();
|
||||
if proof
|
||||
.is_aggregator(committee_len, &harness.chain.spec)
|
||||
.unwrap()
|
||||
{
|
||||
break proof.into();
|
||||
}
|
||||
};
|
||||
|
||||
a
|
||||
},
|
||||
AttnError::InvalidSignature
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The signature of aggregate is valid.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate with bad aggregate signature",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
|
||||
let mut agg_sig = AggregateSignature::infinity();
|
||||
agg_sig.add_assign(&aggregator_sk.sign(Hash256::from_low_u64_be(42)));
|
||||
a.message.aggregate.signature = agg_sig;
|
||||
|
||||
a
|
||||
},
|
||||
AttnError::InvalidSignature
|
||||
);
|
||||
|
||||
let too_high_index = <E as EthSpec>::ValidatorRegistryLimit::to_u64() + 1;
|
||||
assert_invalid!(
|
||||
"aggregate with too-high aggregator index",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.aggregator_index = too_high_index;
|
||||
a
|
||||
},
|
||||
AttnError::ValidatorIndexTooHigh(index)
|
||||
if index == too_high_index as usize
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The aggregator's validator index is within the committee -- i.e.
|
||||
* aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot,
|
||||
* aggregate.data.index).
|
||||
*/
|
||||
|
||||
let unknown_validator = VALIDATOR_COUNT as u64;
|
||||
assert_invalid!(
|
||||
"aggregate with unknown aggregator index",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.aggregator_index = unknown_validator;
|
||||
a
|
||||
},
|
||||
// Naively we should think this condition would trigger this error:
|
||||
//
|
||||
// AttnError::AggregatorPubkeyUnknown(unknown_validator)
|
||||
//
|
||||
// However the following error is triggered first:
|
||||
AttnError::AggregatorNotInCommittee {
|
||||
aggregator_index
|
||||
}
|
||||
if aggregator_index == unknown_validator
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot --
|
||||
* i.e. is_aggregator(state, aggregate.data.slot, aggregate.data.index,
|
||||
* aggregate_and_proof.selection_proof) returns True.
|
||||
*/
|
||||
|
||||
let (non_aggregator_index, non_aggregator_sk) =
|
||||
get_non_aggregator(&harness.chain, &valid_aggregate.message.aggregate);
|
||||
assert_invalid!(
|
||||
"aggregate from non-aggregator",
|
||||
{
|
||||
SignedAggregateAndProof::from_aggregate(
|
||||
non_aggregator_index as u64,
|
||||
valid_aggregate.message.aggregate.clone(),
|
||||
None,
|
||||
&non_aggregator_sk,
|
||||
&harness.chain.head_info().unwrap().fork,
|
||||
harness.chain.genesis_validators_root,
|
||||
&harness.chain.spec,
|
||||
)
|
||||
},
|
||||
AttnError::InvalidSelectionProof {
|
||||
aggregator_index: index
|
||||
}
|
||||
if index == non_aggregator_index as u64
|
||||
);
|
||||
|
||||
// NOTE: from here on, the tests are stateful, and rely on the valid attestation having been
|
||||
// seen. A refactor to give each test case its own state might be nice at some point
|
||||
assert!(
|
||||
harness
|
||||
.chain
|
||||
.verify_aggregated_attestation_for_gossip(valid_aggregate.clone())
|
||||
.is_ok(),
|
||||
"valid aggregate should be verified"
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The valid aggregate attestation defined by hash_tree_root(aggregate) has not already been
|
||||
* seen (via aggregate gossip, within a block, or through the creation of an equivalent
|
||||
* aggregate locally).
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate that has already been seen",
|
||||
valid_aggregate.clone(),
|
||||
AttnError::AttestationAlreadyKnown(hash)
|
||||
if hash == valid_aggregate.message.aggregate.tree_hash_root()
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The aggregate is the first valid aggregate received for the aggregator with index
|
||||
* aggregate_and_proof.aggregator_index for the epoch aggregate.data.target.epoch.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate from aggregator that has already been seen",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.aggregate.data.beacon_block_root = Hash256::from_low_u64_le(42);
|
||||
a
|
||||
},
|
||||
AttnError::AggregatorAlreadyKnown(index)
|
||||
if index == aggregator_index as u64
|
||||
);
|
||||
}
|
||||
|
||||
/// Tests the verification conditions for an unaggregated attestation on the gossip network.
|
||||
#[test]
|
||||
fn unaggregated_gossip_verification() {
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
// Extend the chain out a few epochs so we have some chain depth to play with.
|
||||
harness.extend_chain(
|
||||
MainnetEthSpec::slots_per_epoch() as usize * 3 - 1,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
// Advance into a slot where there have not been blocks or attestations produced.
|
||||
harness.advance_slot();
|
||||
|
||||
let current_slot = harness.chain.slot().expect("should get slot");
|
||||
let current_epoch = harness.chain.epoch().expect("should get epoch");
|
||||
|
||||
assert_eq!(
|
||||
current_slot % E::slots_per_epoch(),
|
||||
0,
|
||||
"the test requires a new epoch to avoid already-seen errors"
|
||||
);
|
||||
|
||||
let (
|
||||
valid_attestation,
|
||||
expected_validator_index,
|
||||
validator_committee_index,
|
||||
validator_sk,
|
||||
subnet_id,
|
||||
) = get_valid_unaggregated_attestation(&harness.chain);
|
||||
|
||||
macro_rules! assert_invalid {
|
||||
($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => {
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_unaggregated_attestation_for_gossip($attn_getter, $subnet_getter)
|
||||
.err()
|
||||
.expect(&format!(
|
||||
"{} should error during verify_unaggregated_attestation_for_gossip",
|
||||
$desc
|
||||
)),
|
||||
$( $error ) |+ $( if $guard )?
|
||||
),
|
||||
"case: {}",
|
||||
$desc,
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The attestation is for the correct subnet (i.e. compute_subnet_for_attestation(state,
|
||||
* attestation.data.slot, attestation.data.index) == subnet_id).
|
||||
*/
|
||||
let id: u64 = subnet_id.into();
|
||||
let invalid_subnet_id = SubnetId::new(id + 1);
|
||||
assert_invalid!(
|
||||
"attestation from future slot",
|
||||
{
|
||||
valid_attestation.clone()
|
||||
},
|
||||
invalid_subnet_id,
|
||||
AttnError::InvalidSubnetId {
|
||||
received,
|
||||
expected,
|
||||
}
|
||||
if received == invalid_subnet_id && expected == subnet_id
|
||||
);
|
||||
|
||||
/*
|
||||
* The following two tests ensure:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a
|
||||
* MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. attestation.data.slot +
|
||||
* ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot (a client MAY
|
||||
* queue future attestations for processing at the appropriate slot).
|
||||
*/
|
||||
|
||||
let future_slot = current_slot + 1;
|
||||
assert_invalid!(
|
||||
"attestation from future slot",
|
||||
{
|
||||
let mut a = valid_attestation.clone();
|
||||
a.data.slot = future_slot;
|
||||
a
|
||||
},
|
||||
subnet_id,
|
||||
AttnError::FutureSlot {
|
||||
attestation_slot,
|
||||
latest_permissible_slot,
|
||||
}
|
||||
if attestation_slot == future_slot && latest_permissible_slot == current_slot
|
||||
);
|
||||
|
||||
let early_slot = current_slot
|
||||
.as_u64()
|
||||
.checked_sub(E::slots_per_epoch() + 2)
|
||||
.expect("chain is not sufficiently deep for test")
|
||||
.into();
|
||||
assert_invalid!(
|
||||
"attestation from past slot",
|
||||
{
|
||||
let mut a = valid_attestation.clone();
|
||||
a.data.slot = early_slot;
|
||||
a
|
||||
},
|
||||
subnet_id,
|
||||
AttnError::PastSlot {
|
||||
attestation_slot,
|
||||
// Subtract an additional slot since the harness will be exactly on the start of the
|
||||
// slot and the propagation tolerance will allow an extra slot.
|
||||
earliest_permissible_slot,
|
||||
}
|
||||
if attestation_slot == early_slot && earliest_permissible_slot == current_slot - E::slots_per_epoch() - 1
|
||||
);
|
||||
|
||||
/*
|
||||
* The following two tests ensure:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The attestation is unaggregated -- that is, it has exactly one participating validator
|
||||
* (len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1).
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"attestation without any aggregation bits set",
|
||||
{
|
||||
let mut a = valid_attestation.clone();
|
||||
a.aggregation_bits
|
||||
.set(validator_committee_index, false)
|
||||
.expect("should unset aggregation bit");
|
||||
assert_eq!(
|
||||
a.aggregation_bits.num_set_bits(),
|
||||
0,
|
||||
"test requires no set bits"
|
||||
);
|
||||
a
|
||||
},
|
||||
subnet_id,
|
||||
AttnError::NotExactlyOneAggregationBitSet(0)
|
||||
);
|
||||
|
||||
assert_invalid!(
|
||||
"attestation with two aggregation bits set",
|
||||
{
|
||||
let mut a = valid_attestation.clone();
|
||||
a.aggregation_bits
|
||||
.set(validator_committee_index + 1, true)
|
||||
.expect("should set second aggregation bit");
|
||||
a
|
||||
},
|
||||
subnet_id,
|
||||
AttnError::NotExactlyOneAggregationBitSet(2)
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures that:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The block being voted for (attestation.data.beacon_block_root) passes validation.
|
||||
*/
|
||||
|
||||
let unknown_root = Hash256::from_low_u64_le(424242); // No one wants one of these
|
||||
assert_invalid!(
|
||||
"attestation with unknown head block",
|
||||
{
|
||||
let mut a = valid_attestation.clone();
|
||||
a.data.beacon_block_root = unknown_root;
|
||||
a
|
||||
},
|
||||
subnet_id,
|
||||
AttnError::UnknownHeadBlock {
|
||||
beacon_block_root,
|
||||
}
|
||||
if beacon_block_root == unknown_root
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures that:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The signature of attestation is valid.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"attestation with bad signature",
|
||||
{
|
||||
let mut a = valid_attestation.clone();
|
||||
|
||||
let mut agg_sig = AggregateSignature::infinity();
|
||||
agg_sig.add_assign(&validator_sk.sign(Hash256::from_low_u64_be(42)));
|
||||
a.signature = agg_sig;
|
||||
|
||||
a
|
||||
},
|
||||
subnet_id,
|
||||
AttnError::InvalidSignature
|
||||
);
|
||||
|
||||
harness
|
||||
.chain
|
||||
.verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), subnet_id)
|
||||
.expect("valid attestation should be verified");
|
||||
|
||||
/*
|
||||
* The following test ensures that:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
*
|
||||
* There has been no other valid attestation seen on an attestation subnet that has an
|
||||
* identical attestation.data.target.epoch and participating validator index.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"attestation that has already been seen",
|
||||
valid_attestation.clone(),
|
||||
subnet_id,
|
||||
AttnError::PriorAttestationKnown {
|
||||
validator_index,
|
||||
epoch,
|
||||
}
|
||||
if validator_index == expected_validator_index as u64 && epoch == current_epoch
|
||||
);
|
||||
}
|
||||
|
||||
/// Ensures that an attestation that skips epochs can still be processed.
|
||||
///
|
||||
/// This also checks that we can do a state lookup if we don't get a hit from the shuffling cache.
|
||||
#[test]
|
||||
fn attestation_that_skips_epochs() {
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
// Extend the chain out a few epochs so we have some chain depth to play with.
|
||||
harness.extend_chain(
|
||||
MainnetEthSpec::slots_per_epoch() as usize * 3 + 1,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::SomeValidators(vec![]),
|
||||
);
|
||||
|
||||
let current_slot = harness.chain.slot().expect("should get slot");
|
||||
let current_epoch = harness.chain.epoch().expect("should get epoch");
|
||||
|
||||
let earlier_slot = (current_epoch - 2).start_slot(MainnetEthSpec::slots_per_epoch());
|
||||
let earlier_block = harness
|
||||
.chain
|
||||
.block_at_slot(earlier_slot)
|
||||
.expect("should not error getting block at slot")
|
||||
.expect("should find block at slot");
|
||||
|
||||
let mut state = harness
|
||||
.chain
|
||||
.get_state(&earlier_block.state_root(), Some(earlier_slot))
|
||||
.expect("should not error getting state")
|
||||
.expect("should find state");
|
||||
|
||||
while state.slot < current_slot {
|
||||
per_slot_processing(&mut state, None, &harness.spec).expect("should process slot");
|
||||
}
|
||||
|
||||
let (attestation, subnet_id) = harness
|
||||
.get_unaggregated_attestations(
|
||||
&AttestationStrategy::AllValidators,
|
||||
&state,
|
||||
earlier_block.canonical_root(),
|
||||
current_slot,
|
||||
)
|
||||
.first()
|
||||
.expect("should have at least one committee")
|
||||
.first()
|
||||
.cloned()
|
||||
.expect("should have at least one attestation in committee");
|
||||
|
||||
let block_root = attestation.data.beacon_block_root;
|
||||
let block_slot = harness
|
||||
.chain
|
||||
.store
|
||||
.get_item::<SignedBeaconBlock<E>>(&block_root)
|
||||
.expect("should not error getting block")
|
||||
.expect("should find attestation block")
|
||||
.message
|
||||
.slot;
|
||||
|
||||
assert!(
|
||||
attestation.data.slot - block_slot > E::slots_per_epoch() * 2,
|
||||
"the attestation must skip more than two epochs"
|
||||
);
|
||||
|
||||
harness
|
||||
.chain
|
||||
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
|
||||
.expect("should gossip verify attestation that skips slots");
|
||||
}
|
||||
798
beacon_node/beacon_chain/tests/block_verification.rs
Normal file
798
beacon_node/beacon_chain/tests/block_verification.rs
Normal file
@@ -0,0 +1,798 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use beacon_chain::{
|
||||
test_utils::{
|
||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
|
||||
},
|
||||
BeaconSnapshot, BlockError,
|
||||
};
|
||||
use store::config::StoreConfig;
|
||||
use types::{
|
||||
test_utils::generate_deterministic_keypair, AggregateSignature, AttestationData,
|
||||
AttesterSlashing, Checkpoint, Deposit, DepositData, Epoch, EthSpec, Hash256,
|
||||
IndexedAttestation, Keypair, MainnetEthSpec, ProposerSlashing, Signature, SignedBeaconBlock,
|
||||
SignedBeaconBlockHeader, SignedVoluntaryExit, Slot, VoluntaryExit, DEPOSIT_TREE_DEPTH,
|
||||
};
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
|
||||
// Should ideally be divisible by 3.
|
||||
const VALIDATOR_COUNT: usize = 24;
|
||||
const CHAIN_SEGMENT_LENGTH: usize = 64 * 5;
|
||||
const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGTH - 1];
|
||||
|
||||
lazy_static! {
|
||||
/// A cached set of keys.
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
|
||||
/// A cached set of valid blocks
|
||||
static ref CHAIN_SEGMENT: Vec<BeaconSnapshot<E>> = get_chain_segment();
|
||||
}
|
||||
|
||||
fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness.extend_chain(
|
||||
CHAIN_SEGMENT_LENGTH,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
harness
|
||||
.chain
|
||||
.chain_dump()
|
||||
.expect("should dump chain")
|
||||
.into_iter()
|
||||
.skip(1)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn get_harness(validator_count: usize) -> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
|
||||
let harness = BeaconChainHarness::new_with_store_config(
|
||||
MainnetEthSpec,
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
StoreConfig::default(),
|
||||
);
|
||||
|
||||
harness.advance_slot();
|
||||
|
||||
harness
|
||||
}
|
||||
|
||||
fn chain_segment_blocks() -> Vec<SignedBeaconBlock<E>> {
|
||||
CHAIN_SEGMENT
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn junk_signature() -> Signature {
|
||||
let kp = generate_deterministic_keypair(VALIDATOR_COUNT);
|
||||
let message = Hash256::from_slice(&[42; 32]);
|
||||
kp.sk.sign(message)
|
||||
}
|
||||
|
||||
fn junk_aggregate_signature() -> AggregateSignature {
|
||||
let mut agg_sig = AggregateSignature::empty();
|
||||
agg_sig.add_assign(&junk_signature());
|
||||
agg_sig
|
||||
}
|
||||
|
||||
fn update_proposal_signatures(
|
||||
snapshots: &mut [BeaconSnapshot<E>],
|
||||
harness: &BeaconChainHarness<NullMigratorEphemeralHarnessType<E>>,
|
||||
) {
|
||||
for snapshot in snapshots {
|
||||
let spec = &harness.chain.spec;
|
||||
let slot = snapshot.beacon_block.slot();
|
||||
let state = &snapshot.beacon_state;
|
||||
let proposer_index = state
|
||||
.get_beacon_proposer_index(slot, spec)
|
||||
.expect("should find proposer index");
|
||||
let keypair = harness
|
||||
.validators_keypairs
|
||||
.get(proposer_index)
|
||||
.expect("proposer keypair should be available");
|
||||
|
||||
snapshot.beacon_block = snapshot.beacon_block.message.clone().sign(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn update_parent_roots(snapshots: &mut [BeaconSnapshot<E>]) {
|
||||
for i in 0..snapshots.len() {
|
||||
let root = snapshots[i].beacon_block.canonical_root();
|
||||
if let Some(child) = snapshots.get_mut(i + 1) {
|
||||
child.beacon_block.message.parent_root = root
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chain_segment_full_segment() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let blocks = chain_segment_blocks();
|
||||
|
||||
harness
|
||||
.chain
|
||||
.slot_clock
|
||||
.set_slot(blocks.last().unwrap().slot().as_u64());
|
||||
|
||||
// Sneak in a little check to ensure we can process empty chain segments.
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(vec![])
|
||||
.into_block_error()
|
||||
.expect("should import empty chain segment");
|
||||
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(blocks.clone())
|
||||
.into_block_error()
|
||||
.expect("should import chain segment");
|
||||
|
||||
harness.chain.fork_choice().expect("should run fork choice");
|
||||
|
||||
assert_eq!(
|
||||
harness
|
||||
.chain
|
||||
.head_info()
|
||||
.expect("should get harness b head")
|
||||
.block_root,
|
||||
blocks.last().unwrap().canonical_root(),
|
||||
"harness should have last block as head"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chain_segment_varying_chunk_size() {
|
||||
for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let blocks = chain_segment_blocks();
|
||||
|
||||
harness
|
||||
.chain
|
||||
.slot_clock
|
||||
.set_slot(blocks.last().unwrap().slot().as_u64());
|
||||
|
||||
for chunk in blocks.chunks(*chunk_size) {
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(chunk.to_vec())
|
||||
.into_block_error()
|
||||
.expect(&format!(
|
||||
"should import chain segment of len {}",
|
||||
chunk_size
|
||||
));
|
||||
}
|
||||
|
||||
harness.chain.fork_choice().expect("should run fork choice");
|
||||
|
||||
assert_eq!(
|
||||
harness
|
||||
.chain
|
||||
.head_info()
|
||||
.expect("should get harness b head")
|
||||
.block_root,
|
||||
blocks.last().unwrap().canonical_root(),
|
||||
"harness should have last block as head"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chain_segment_non_linear_parent_roots() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
harness
|
||||
.chain
|
||||
.slot_clock
|
||||
.set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64());
|
||||
|
||||
/*
|
||||
* Test with a block removed.
|
||||
*/
|
||||
let mut blocks = chain_segment_blocks();
|
||||
blocks.remove(2);
|
||||
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(blocks.clone())
|
||||
.into_block_error(),
|
||||
Err(BlockError::NonLinearParentRoots)
|
||||
),
|
||||
"should not import chain with missing parent"
|
||||
);
|
||||
|
||||
/*
|
||||
* Test with a modified parent root.
|
||||
*/
|
||||
let mut blocks = chain_segment_blocks();
|
||||
blocks[3].message.parent_root = Hash256::zero();
|
||||
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(blocks.clone())
|
||||
.into_block_error(),
|
||||
Err(BlockError::NonLinearParentRoots)
|
||||
),
|
||||
"should not import chain with a broken parent root link"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chain_segment_non_linear_slots() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
harness
|
||||
.chain
|
||||
.slot_clock
|
||||
.set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64());
|
||||
|
||||
/*
|
||||
* Test where a child is lower than the parent.
|
||||
*/
|
||||
|
||||
let mut blocks = chain_segment_blocks();
|
||||
blocks[3].message.slot = Slot::new(0);
|
||||
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(blocks.clone())
|
||||
.into_block_error(),
|
||||
Err(BlockError::NonLinearSlots)
|
||||
),
|
||||
"should not import chain with a parent that has a lower slot than its child"
|
||||
);
|
||||
|
||||
/*
|
||||
* Test where a child is equal to the parent.
|
||||
*/
|
||||
|
||||
let mut blocks = chain_segment_blocks();
|
||||
blocks[3].message.slot = blocks[2].message.slot;
|
||||
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(blocks.clone())
|
||||
.into_block_error(),
|
||||
Err(BlockError::NonLinearSlots)
|
||||
),
|
||||
"should not import chain with a parent that has an equal slot to its child"
|
||||
);
|
||||
}
|
||||
|
||||
fn assert_invalid_signature(
|
||||
harness: &BeaconChainHarness<NullMigratorEphemeralHarnessType<E>>,
|
||||
block_index: usize,
|
||||
snapshots: &[BeaconSnapshot<E>],
|
||||
item: &str,
|
||||
) {
|
||||
let blocks = snapshots
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.collect();
|
||||
|
||||
// Ensure the block will be rejected if imported in a chain segment.
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(blocks)
|
||||
.into_block_error(),
|
||||
Err(BlockError::InvalidSignature)
|
||||
),
|
||||
"should not import chain segment with an invalid {} signature",
|
||||
item
|
||||
);
|
||||
|
||||
// Ensure the block will be rejected if imported on its own (without gossip checking).
|
||||
let ancestor_blocks = CHAIN_SEGMENT
|
||||
.iter()
|
||||
.take(block_index)
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.collect();
|
||||
// We don't care if this fails, we just call this to ensure that all prior blocks have been
|
||||
// imported prior to this test.
|
||||
let _ = harness.chain.process_chain_segment(ancestor_blocks);
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_block(snapshots[block_index].beacon_block.clone()),
|
||||
Err(BlockError::InvalidSignature)
|
||||
),
|
||||
"should not import individual block with an invalid {} signature",
|
||||
item
|
||||
);
|
||||
|
||||
// NOTE: we choose not to check gossip verification here. It only checks one signature
|
||||
// (proposal) and that is already tested elsewhere in this file.
|
||||
//
|
||||
// It's not trivial to just check gossip verification since it will start refusing
|
||||
// blocks as soon as it has seen one valid proposal signature for a given (validator,
|
||||
// slot) tuple.
|
||||
}
|
||||
|
||||
fn get_invalid_sigs_harness() -> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
harness
|
||||
.chain
|
||||
.slot_clock
|
||||
.set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64());
|
||||
harness
|
||||
}
|
||||
#[test]
|
||||
fn invalid_signature_gossip_block() {
|
||||
for &block_index in BLOCK_INDICES {
|
||||
// Ensure the block will be rejected if imported on its own (without gossip checking).
|
||||
let harness = get_invalid_sigs_harness();
|
||||
let mut snapshots = CHAIN_SEGMENT.clone();
|
||||
snapshots[block_index].beacon_block.signature = junk_signature();
|
||||
// Import all the ancestors before the `block_index` block.
|
||||
let ancestor_blocks = CHAIN_SEGMENT
|
||||
.iter()
|
||||
.take(block_index)
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.collect();
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(ancestor_blocks)
|
||||
.into_block_error()
|
||||
.expect("should import all blocks prior to the one being tested");
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_block(snapshots[block_index].beacon_block.clone()),
|
||||
Err(BlockError::InvalidSignature)
|
||||
),
|
||||
"should not import individual block with an invalid gossip signature",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_signature_block_proposal() {
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness();
|
||||
let mut snapshots = CHAIN_SEGMENT.clone();
|
||||
snapshots[block_index].beacon_block.signature = junk_signature();
|
||||
let blocks = snapshots
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.collect::<Vec<_>>();
|
||||
// Ensure the block will be rejected if imported in a chain segment.
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(blocks)
|
||||
.into_block_error(),
|
||||
Err(BlockError::InvalidSignature)
|
||||
),
|
||||
"should not import chain segment with an invalid block signature",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_signature_randao_reveal() {
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness();
|
||||
let mut snapshots = CHAIN_SEGMENT.clone();
|
||||
snapshots[block_index]
|
||||
.beacon_block
|
||||
.message
|
||||
.body
|
||||
.randao_reveal = junk_signature();
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(&harness, block_index, &snapshots, "randao");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_signature_proposer_slashing() {
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness();
|
||||
let mut snapshots = CHAIN_SEGMENT.clone();
|
||||
let proposer_slashing = ProposerSlashing {
|
||||
signed_header_1: SignedBeaconBlockHeader {
|
||||
message: snapshots[block_index].beacon_block.message.block_header(),
|
||||
signature: junk_signature(),
|
||||
},
|
||||
signed_header_2: SignedBeaconBlockHeader {
|
||||
message: snapshots[block_index].beacon_block.message.block_header(),
|
||||
signature: junk_signature(),
|
||||
},
|
||||
};
|
||||
snapshots[block_index]
|
||||
.beacon_block
|
||||
.message
|
||||
.body
|
||||
.proposer_slashings
|
||||
.push(proposer_slashing)
|
||||
.expect("should update proposer slashing");
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(&harness, block_index, &snapshots, "proposer slashing");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_signature_attester_slashing() {
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness();
|
||||
let mut snapshots = CHAIN_SEGMENT.clone();
|
||||
let indexed_attestation = IndexedAttestation {
|
||||
attesting_indices: vec![0].into(),
|
||||
data: AttestationData {
|
||||
slot: Slot::new(0),
|
||||
index: 0,
|
||||
beacon_block_root: Hash256::zero(),
|
||||
source: Checkpoint {
|
||||
epoch: Epoch::new(0),
|
||||
root: Hash256::zero(),
|
||||
},
|
||||
target: Checkpoint {
|
||||
epoch: Epoch::new(0),
|
||||
root: Hash256::zero(),
|
||||
},
|
||||
},
|
||||
signature: junk_aggregate_signature(),
|
||||
};
|
||||
let attester_slashing = AttesterSlashing {
|
||||
attestation_1: indexed_attestation.clone(),
|
||||
attestation_2: indexed_attestation,
|
||||
};
|
||||
snapshots[block_index]
|
||||
.beacon_block
|
||||
.message
|
||||
.body
|
||||
.attester_slashings
|
||||
.push(attester_slashing)
|
||||
.expect("should update attester slashing");
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(&harness, block_index, &snapshots, "attester slashing");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_signature_attestation() {
|
||||
let mut checked_attestation = false;
|
||||
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness();
|
||||
let mut snapshots = CHAIN_SEGMENT.clone();
|
||||
if let Some(attestation) = snapshots[block_index]
|
||||
.beacon_block
|
||||
.message
|
||||
.body
|
||||
.attestations
|
||||
.get_mut(0)
|
||||
{
|
||||
attestation.signature = junk_aggregate_signature();
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(&harness, block_index, &snapshots, "attestation");
|
||||
checked_attestation = true;
|
||||
}
|
||||
}
|
||||
|
||||
assert!(
|
||||
checked_attestation,
|
||||
"the test should check an attestation signature"
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_signature_deposit() {
|
||||
for &block_index in BLOCK_INDICES {
|
||||
// Note: an invalid deposit signature is permitted!
|
||||
let harness = get_invalid_sigs_harness();
|
||||
let mut snapshots = CHAIN_SEGMENT.clone();
|
||||
let deposit = Deposit {
|
||||
proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1].into(),
|
||||
data: DepositData {
|
||||
pubkey: Keypair::random().pk.into(),
|
||||
withdrawal_credentials: Hash256::zero(),
|
||||
amount: 0,
|
||||
signature: junk_signature().into(),
|
||||
},
|
||||
};
|
||||
snapshots[block_index]
|
||||
.beacon_block
|
||||
.message
|
||||
.body
|
||||
.deposits
|
||||
.push(deposit)
|
||||
.expect("should update deposit");
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
let blocks = snapshots
|
||||
.iter()
|
||||
.map(|snapshot| snapshot.beacon_block.clone())
|
||||
.collect();
|
||||
assert!(
|
||||
!matches!(
|
||||
harness
|
||||
.chain
|
||||
.process_chain_segment(blocks)
|
||||
.into_block_error(),
|
||||
Err(BlockError::InvalidSignature)
|
||||
),
|
||||
"should not throw an invalid signature error for a bad deposit signature"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_signature_exit() {
|
||||
for &block_index in BLOCK_INDICES {
|
||||
let harness = get_invalid_sigs_harness();
|
||||
let mut snapshots = CHAIN_SEGMENT.clone();
|
||||
let epoch = snapshots[block_index].beacon_state.current_epoch();
|
||||
snapshots[block_index]
|
||||
.beacon_block
|
||||
.message
|
||||
.body
|
||||
.voluntary_exits
|
||||
.push(SignedVoluntaryExit {
|
||||
message: VoluntaryExit {
|
||||
epoch,
|
||||
validator_index: 0,
|
||||
},
|
||||
signature: junk_signature(),
|
||||
})
|
||||
.expect("should update deposit");
|
||||
update_parent_roots(&mut snapshots);
|
||||
update_proposal_signatures(&mut snapshots, &harness);
|
||||
assert_invalid_signature(&harness, block_index, &snapshots, "voluntary exit");
|
||||
}
|
||||
}
|
||||
|
||||
fn unwrap_err<T, E>(result: Result<T, E>) -> E {
|
||||
match result {
|
||||
Ok(_) => panic!("called unwrap_err on Ok"),
|
||||
Err(e) => e,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn block_gossip_verification() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
let block_index = CHAIN_SEGMENT_LENGTH - 2;
|
||||
|
||||
harness
|
||||
.chain
|
||||
.slot_clock
|
||||
.set_slot(CHAIN_SEGMENT[block_index].beacon_block.slot().as_u64());
|
||||
|
||||
// Import the ancestors prior to the block we're testing.
|
||||
for snapshot in &CHAIN_SEGMENT[0..block_index] {
|
||||
let gossip_verified = harness
|
||||
.chain
|
||||
.verify_block_for_gossip(snapshot.beacon_block.clone())
|
||||
.expect("should obtain gossip verified block");
|
||||
|
||||
harness
|
||||
.chain
|
||||
.process_block(gossip_verified)
|
||||
.expect("should import valid gossip verified block");
|
||||
}
|
||||
|
||||
/*
|
||||
* This test ensures that:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The block is not from a future slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) --
|
||||
* i.e. validate that signed_beacon_block.message.slot <= current_slot (a client MAY queue
|
||||
* future blocks for processing at the appropriate slot).
|
||||
*/
|
||||
|
||||
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
|
||||
let expected_block_slot = block.message.slot + 1;
|
||||
block.message.slot = expected_block_slot;
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(block)),
|
||||
BlockError::FutureSlot {
|
||||
present_slot,
|
||||
block_slot,
|
||||
}
|
||||
if present_slot == expected_block_slot - 1 && block_slot == expected_block_slot
|
||||
),
|
||||
"should not import a block with a future slot"
|
||||
);
|
||||
|
||||
/*
|
||||
* This test ensure that:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The block is from a slot greater than the latest finalized slot -- i.e. validate that
|
||||
* signed_beacon_block.message.slot >
|
||||
* compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) (a client MAY choose to
|
||||
* validate and store such blocks for additional purposes -- e.g. slashing detection, archive
|
||||
* nodes, etc).
|
||||
*/
|
||||
|
||||
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
|
||||
let expected_finalized_slot = harness
|
||||
.chain
|
||||
.head_info()
|
||||
.expect("should get head info")
|
||||
.finalized_checkpoint
|
||||
.epoch
|
||||
.start_slot(E::slots_per_epoch());
|
||||
block.message.slot = expected_finalized_slot;
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(block)),
|
||||
BlockError::WouldRevertFinalizedSlot {
|
||||
block_slot,
|
||||
finalized_slot,
|
||||
}
|
||||
if block_slot == expected_finalized_slot && finalized_slot == expected_finalized_slot
|
||||
),
|
||||
"should not import a block with a finalized slot"
|
||||
);
|
||||
|
||||
/*
|
||||
* This test ensures that:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The proposer signature, signed_beacon_block.signature, is valid with respect to the
|
||||
* proposer_index pubkey.
|
||||
*/
|
||||
|
||||
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
|
||||
block.signature = junk_signature();
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(block)),
|
||||
BlockError::ProposalSignatureInvalid
|
||||
),
|
||||
"should not import a block with an invalid proposal signature"
|
||||
);
|
||||
|
||||
/*
|
||||
* This test ensures that:
|
||||
*
|
||||
* Spec v0.12.2
|
||||
*
|
||||
* The block's parent (defined by block.parent_root) passes validation.
|
||||
*/
|
||||
|
||||
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
|
||||
let parent_root = Hash256::from_low_u64_be(42);
|
||||
block.message.parent_root = parent_root;
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(block)),
|
||||
BlockError::ParentUnknown(block)
|
||||
if block.parent_root() == parent_root
|
||||
),
|
||||
"should not import a block for an unknown parent"
|
||||
);
|
||||
|
||||
/*
|
||||
* This test ensures that:
|
||||
*
|
||||
* Spec v0.12.2
|
||||
*
|
||||
* The current finalized_checkpoint is an ancestor of block -- i.e. get_ancestor(store,
|
||||
* block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) ==
|
||||
* store.finalized_checkpoint.root
|
||||
*/
|
||||
|
||||
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
|
||||
let parent_root = CHAIN_SEGMENT[0].beacon_block_root;
|
||||
block.message.parent_root = parent_root;
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(block)),
|
||||
BlockError::NotFinalizedDescendant { block_parent_root }
|
||||
if block_parent_root == parent_root
|
||||
),
|
||||
"should not import a block that conflicts with finality"
|
||||
);
|
||||
|
||||
/*
|
||||
* This test ensures that:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The block is proposed by the expected proposer_index for the block's slot in the context of
|
||||
* the current shuffling (defined by parent_root/slot). If the proposer_index cannot
|
||||
* immediately be verified against the expected shuffling, the block MAY be queued for later
|
||||
* processing while proposers for the block's branch are calculated.
|
||||
*/
|
||||
|
||||
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
|
||||
let expected_proposer = block.message.proposer_index;
|
||||
let other_proposer = (0..VALIDATOR_COUNT as u64)
|
||||
.into_iter()
|
||||
.find(|i| *i != block.message.proposer_index)
|
||||
.expect("there must be more than one validator in this test");
|
||||
block.message.proposer_index = other_proposer;
|
||||
let block = block.message.clone().sign(
|
||||
&generate_deterministic_keypair(other_proposer as usize).sk,
|
||||
&harness.chain.head_info().unwrap().fork,
|
||||
harness.chain.genesis_validators_root,
|
||||
&harness.chain.spec,
|
||||
);
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(block.clone())),
|
||||
BlockError::IncorrectBlockProposer {
|
||||
block,
|
||||
local_shuffling,
|
||||
}
|
||||
if block == other_proposer && local_shuffling == expected_proposer
|
||||
),
|
||||
"should not import a block with the wrong proposer index"
|
||||
);
|
||||
// Check to ensure that we registered this is a valid block from this proposer.
|
||||
assert!(
|
||||
matches!(
|
||||
unwrap_err(harness.chain.verify_block_for_gossip(block.clone())),
|
||||
BlockError::RepeatProposal {
|
||||
proposer,
|
||||
slot,
|
||||
}
|
||||
if proposer == other_proposer && slot == block.message.slot
|
||||
),
|
||||
"should register any valid signature against the proposer, even if the block failed later verification"
|
||||
);
|
||||
|
||||
let block = CHAIN_SEGMENT[block_index].beacon_block.clone();
|
||||
assert!(
|
||||
harness.chain.verify_block_for_gossip(block).is_ok(),
|
||||
"the valid block should be processed"
|
||||
);
|
||||
|
||||
/*
|
||||
* This test ensures that:
|
||||
*
|
||||
* Spec v0.12.1
|
||||
*
|
||||
* The block is the first block with valid signature received for the proposer for the slot,
|
||||
* signed_beacon_block.message.slot.
|
||||
*/
|
||||
|
||||
let block = CHAIN_SEGMENT[block_index].beacon_block.clone();
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_block_for_gossip(block.clone())
|
||||
.err()
|
||||
.expect("should error when processing known block"),
|
||||
BlockError::RepeatProposal {
|
||||
proposer,
|
||||
slot,
|
||||
}
|
||||
if proposer == block.message.proposer_index && slot == block.message.slot
|
||||
),
|
||||
"the second proposal by this validator should be rejected"
|
||||
);
|
||||
}
|
||||
275
beacon_node/beacon_chain/tests/op_verification.rs
Normal file
275
beacon_node/beacon_chain/tests/op_verification.rs
Normal file
@@ -0,0 +1,275 @@
|
||||
//! Tests for gossip verification of voluntary exits, propser slashings and attester slashings.
|
||||
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use beacon_chain::observed_operations::ObservationOutcome;
|
||||
use beacon_chain::test_utils::{
|
||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, BlockingMigratorDiskHarnessType,
|
||||
};
|
||||
use sloggers::{null::NullLoggerBuilder, Build};
|
||||
use std::sync::Arc;
|
||||
use store::{LevelDB, StoreConfig};
|
||||
use tempfile::{tempdir, TempDir};
|
||||
use types::test_utils::{
|
||||
AttesterSlashingTestTask, ProposerSlashingTestTask, TestingAttesterSlashingBuilder,
|
||||
TestingProposerSlashingBuilder, TestingVoluntaryExitBuilder,
|
||||
};
|
||||
use types::*;
|
||||
|
||||
pub const VALIDATOR_COUNT: usize = 24;
|
||||
|
||||
lazy_static! {
|
||||
/// A cached set of keys.
|
||||
static ref KEYPAIRS: Vec<Keypair> =
|
||||
types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
type E = MinimalEthSpec;
|
||||
type TestHarness = BeaconChainHarness<BlockingMigratorDiskHarnessType<E>>;
|
||||
type HotColdDB = store::HotColdDB<E, LevelDB<E>, LevelDB<E>>;
|
||||
|
||||
fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
|
||||
let spec = E::default_spec();
|
||||
let hot_path = db_path.path().join("hot_db");
|
||||
let cold_path = db_path.path().join("cold_db");
|
||||
let config = StoreConfig::default();
|
||||
let log = NullLoggerBuilder.build().expect("logger should build");
|
||||
Arc::new(
|
||||
HotColdDB::open(&hot_path, &cold_path, config, spec, log)
|
||||
.expect("disk store should initialize"),
|
||||
)
|
||||
}
|
||||
|
||||
fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {
|
||||
let harness = BeaconChainHarness::new_with_disk_store(
|
||||
MinimalEthSpec,
|
||||
store,
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
);
|
||||
harness.advance_slot();
|
||||
harness
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn voluntary_exit() {
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
let mut harness = get_harness(store.clone(), VALIDATOR_COUNT);
|
||||
let spec = &harness.chain.spec.clone();
|
||||
|
||||
harness.extend_chain(
|
||||
(E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
let head_info = harness.chain.head_info().unwrap();
|
||||
|
||||
let make_exit = |validator_index: usize, exit_epoch: u64| {
|
||||
TestingVoluntaryExitBuilder::new(Epoch::new(exit_epoch), validator_index as u64).build(
|
||||
&KEYPAIRS[validator_index].sk,
|
||||
&head_info.fork,
|
||||
head_info.genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
};
|
||||
|
||||
let validator_index1 = VALIDATOR_COUNT - 1;
|
||||
let validator_index2 = VALIDATOR_COUNT - 2;
|
||||
|
||||
let exit1 = make_exit(validator_index1, spec.shard_committee_period);
|
||||
|
||||
// First verification should show it to be fresh.
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_voluntary_exit_for_gossip(exit1.clone())
|
||||
.unwrap(),
|
||||
ObservationOutcome::New(_)
|
||||
));
|
||||
|
||||
// Second should not.
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_voluntary_exit_for_gossip(exit1.clone()),
|
||||
Ok(ObservationOutcome::AlreadyKnown)
|
||||
));
|
||||
|
||||
// A different exit for the same validator should also be detected as a duplicate.
|
||||
let exit2 = make_exit(validator_index1, spec.shard_committee_period + 1);
|
||||
assert!(matches!(
|
||||
harness.chain.verify_voluntary_exit_for_gossip(exit2),
|
||||
Ok(ObservationOutcome::AlreadyKnown)
|
||||
));
|
||||
|
||||
// Exit for a different validator should be fine.
|
||||
let exit3 = make_exit(validator_index2, spec.shard_committee_period);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_voluntary_exit_for_gossip(exit3)
|
||||
.unwrap(),
|
||||
ObservationOutcome::New(_)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn proposer_slashing() {
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
|
||||
let spec = &harness.chain.spec;
|
||||
|
||||
let head_info = harness.chain.head_info().unwrap();
|
||||
|
||||
let validator_index1 = VALIDATOR_COUNT - 1;
|
||||
let validator_index2 = VALIDATOR_COUNT - 2;
|
||||
|
||||
let make_slashing = |validator_index: usize| {
|
||||
TestingProposerSlashingBuilder::double_vote::<E>(
|
||||
ProposerSlashingTestTask::Valid,
|
||||
validator_index as u64,
|
||||
&KEYPAIRS[validator_index].sk,
|
||||
&head_info.fork,
|
||||
head_info.genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
};
|
||||
|
||||
let slashing1 = make_slashing(validator_index1);
|
||||
|
||||
// First slashing for this proposer should be allowed.
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_proposer_slashing_for_gossip(slashing1.clone())
|
||||
.unwrap(),
|
||||
ObservationOutcome::New(_)
|
||||
));
|
||||
// Duplicate slashing should be detected.
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_proposer_slashing_for_gossip(slashing1.clone())
|
||||
.unwrap(),
|
||||
ObservationOutcome::AlreadyKnown
|
||||
));
|
||||
|
||||
// Different slashing for the same index should be rejected
|
||||
let slashing2 = ProposerSlashing {
|
||||
signed_header_1: slashing1.signed_header_2,
|
||||
signed_header_2: slashing1.signed_header_1,
|
||||
};
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_proposer_slashing_for_gossip(slashing2)
|
||||
.unwrap(),
|
||||
ObservationOutcome::AlreadyKnown
|
||||
));
|
||||
|
||||
// Proposer slashing for a different index should be accepted
|
||||
let slashing3 = make_slashing(validator_index2);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_proposer_slashing_for_gossip(slashing3)
|
||||
.unwrap(),
|
||||
ObservationOutcome::New(_)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn attester_slashing() {
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
|
||||
let spec = &harness.chain.spec;
|
||||
|
||||
let head_info = harness.chain.head_info().unwrap();
|
||||
|
||||
// First third of the validators
|
||||
let first_third = (0..VALIDATOR_COUNT as u64 / 3).collect::<Vec<_>>();
|
||||
// First half of the validators
|
||||
let first_half = (0..VALIDATOR_COUNT as u64 / 2).collect::<Vec<_>>();
|
||||
// Last third of the validators
|
||||
let last_third = (2 * VALIDATOR_COUNT as u64 / 3..VALIDATOR_COUNT as u64).collect::<Vec<_>>();
|
||||
// Last half of the validators
|
||||
let second_half = (VALIDATOR_COUNT as u64 / 2..VALIDATOR_COUNT as u64).collect::<Vec<_>>();
|
||||
|
||||
let signer = |idx: u64, message: &[u8]| {
|
||||
KEYPAIRS[idx as usize]
|
||||
.sk
|
||||
.sign(Hash256::from_slice(&message))
|
||||
};
|
||||
|
||||
let make_slashing = |validators| {
|
||||
TestingAttesterSlashingBuilder::double_vote::<_, E>(
|
||||
AttesterSlashingTestTask::Valid,
|
||||
validators,
|
||||
signer,
|
||||
&head_info.fork,
|
||||
head_info.genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
};
|
||||
|
||||
// Slashing for first third of validators should be accepted.
|
||||
let slashing1 = make_slashing(&first_third);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_attester_slashing_for_gossip(slashing1.clone())
|
||||
.unwrap(),
|
||||
ObservationOutcome::New(_)
|
||||
));
|
||||
|
||||
// Overlapping slashing for first half of validators should also be accepted.
|
||||
let slashing2 = make_slashing(&first_half);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_attester_slashing_for_gossip(slashing2.clone())
|
||||
.unwrap(),
|
||||
ObservationOutcome::New(_)
|
||||
));
|
||||
|
||||
// Repeating slashing1 or slashing2 should be rejected
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_attester_slashing_for_gossip(slashing1.clone())
|
||||
.unwrap(),
|
||||
ObservationOutcome::AlreadyKnown
|
||||
));
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_attester_slashing_for_gossip(slashing2.clone())
|
||||
.unwrap(),
|
||||
ObservationOutcome::AlreadyKnown
|
||||
));
|
||||
|
||||
// Slashing for last half of validators should be accepted (distinct from all existing)
|
||||
let slashing3 = make_slashing(&second_half);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_attester_slashing_for_gossip(slashing3)
|
||||
.unwrap(),
|
||||
ObservationOutcome::New(_)
|
||||
));
|
||||
// Slashing for last third (contained in last half) should be rejected.
|
||||
let slashing4 = make_slashing(&last_third);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_attester_slashing_for_gossip(slashing4)
|
||||
.unwrap(),
|
||||
ObservationOutcome::AlreadyKnown
|
||||
));
|
||||
}
|
||||
@@ -9,7 +9,7 @@ use beacon_chain::{
|
||||
};
|
||||
use sloggers::{null::NullLoggerBuilder, Build};
|
||||
use std::sync::Arc;
|
||||
use store::DiskStore;
|
||||
use store::{HotColdDB, LevelDB, StoreConfig};
|
||||
use tempfile::{tempdir, TempDir};
|
||||
use types::{EthSpec, Keypair, MinimalEthSpec};
|
||||
|
||||
@@ -23,14 +23,14 @@ lazy_static! {
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
fn get_store(db_path: &TempDir) -> Arc<DiskStore<E>> {
|
||||
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
|
||||
let spec = E::default_spec();
|
||||
let hot_path = db_path.path().join("hot_db");
|
||||
let cold_path = db_path.path().join("cold_db");
|
||||
let slots_per_restore_point = MinimalEthSpec::slots_per_historical_root() as u64;
|
||||
let config = StoreConfig::default();
|
||||
let log = NullLoggerBuilder.build().expect("logger should build");
|
||||
Arc::new(
|
||||
DiskStore::open(&hot_path, &cold_path, slots_per_restore_point, spec, log)
|
||||
HotColdDB::open(&hot_path, &cold_path, config, spec, log)
|
||||
.expect("disk store should initialize"),
|
||||
)
|
||||
}
|
||||
@@ -44,7 +44,7 @@ fn finalizes_after_resuming_from_db() {
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
|
||||
let harness = BeaconChainHarness::new_with_disk_store(
|
||||
let mut harness = BeaconChainHarness::new_with_disk_store(
|
||||
MinimalEthSpec,
|
||||
store.clone(),
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
@@ -59,27 +59,43 @@ fn finalizes_after_resuming_from_db() {
|
||||
);
|
||||
|
||||
assert!(
|
||||
harness.chain.head().beacon_state.finalized_checkpoint.epoch > 0,
|
||||
harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should read head")
|
||||
.beacon_state
|
||||
.finalized_checkpoint
|
||||
.epoch
|
||||
> 0,
|
||||
"the chain should have already finalized"
|
||||
);
|
||||
|
||||
let latest_slot = harness.chain.slot().expect("should have a slot");
|
||||
|
||||
harness.chain.persist().expect("should persist the chain");
|
||||
harness
|
||||
.chain
|
||||
.persist_head_and_fork_choice()
|
||||
.expect("should persist the head and fork choice");
|
||||
harness
|
||||
.chain
|
||||
.persist_op_pool()
|
||||
.expect("should persist the op pool");
|
||||
harness
|
||||
.chain
|
||||
.persist_eth1_cache()
|
||||
.expect("should persist the eth1 cache");
|
||||
|
||||
let resumed_harness = BeaconChainHarness::resume_from_disk_store(
|
||||
let data_dir = harness.data_dir;
|
||||
let original_chain = harness.chain;
|
||||
|
||||
let mut resumed_harness = BeaconChainHarness::resume_from_disk_store(
|
||||
MinimalEthSpec,
|
||||
store,
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
data_dir,
|
||||
);
|
||||
|
||||
assert_chains_pretty_much_the_same(&harness.chain, &resumed_harness.chain);
|
||||
|
||||
// Ensures we don't accidentally use it again.
|
||||
//
|
||||
// Note: this will persist the chain again, but that shouldn't matter since nothing has
|
||||
// changed.
|
||||
drop(harness);
|
||||
assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain);
|
||||
|
||||
// Set the slot clock of the resumed harness to be in the slot following the previous harness.
|
||||
//
|
||||
@@ -95,7 +111,11 @@ fn finalizes_after_resuming_from_db() {
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
let state = &resumed_harness.chain.head().beacon_state;
|
||||
let state = &resumed_harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should read head")
|
||||
.beacon_state;
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
"head should be at the current slot"
|
||||
@@ -123,14 +143,21 @@ fn finalizes_after_resuming_from_db() {
|
||||
fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b: &BeaconChain<T>) {
|
||||
assert_eq!(a.spec, b.spec, "spec should be equal");
|
||||
assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal");
|
||||
assert_eq!(a.head(), b.head(), "head() should be equal");
|
||||
assert_eq!(
|
||||
a.head().unwrap(),
|
||||
b.head().unwrap(),
|
||||
"head() should be equal"
|
||||
);
|
||||
assert_eq!(a.heads(), b.heads(), "heads() should be equal");
|
||||
assert_eq!(
|
||||
a.genesis_block_root, b.genesis_block_root,
|
||||
"genesis_block_root should be equal"
|
||||
);
|
||||
|
||||
let slot = a.slot().unwrap();
|
||||
assert!(
|
||||
a.fork_choice == b.fork_choice,
|
||||
"fork_choice should be equal"
|
||||
a.fork_choice.write().get_head(slot).unwrap()
|
||||
== b.fork_choice.write().get_head(slot).unwrap(),
|
||||
"fork_choice heads should be equal"
|
||||
);
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -3,23 +3,19 @@
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use beacon_chain::AttestationProcessingOutcome;
|
||||
use beacon_chain::{
|
||||
attestation_verification::Error as AttnError,
|
||||
test_utils::{
|
||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType, PersistedBeaconChain,
|
||||
BEACON_CHAIN_DB_KEY,
|
||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
|
||||
OP_POOL_DB_KEY,
|
||||
},
|
||||
BlockProcessingOutcome,
|
||||
};
|
||||
use rand::Rng;
|
||||
use operation_pool::PersistedOperationPool;
|
||||
use state_processing::{
|
||||
per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError,
|
||||
};
|
||||
use store::Store;
|
||||
use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
|
||||
use types::{
|
||||
BeaconStateError, Deposit, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot,
|
||||
};
|
||||
use store::config::StoreConfig;
|
||||
use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot};
|
||||
|
||||
// Should ideally be divisible by 3.
|
||||
pub const VALIDATOR_COUNT: usize = 24;
|
||||
@@ -29,8 +25,14 @@ lazy_static! {
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<MinimalEthSpec>> {
|
||||
let harness = BeaconChainHarness::new(MinimalEthSpec, KEYPAIRS[0..validator_count].to_vec());
|
||||
fn get_harness(
|
||||
validator_count: usize,
|
||||
) -> BeaconChainHarness<NullMigratorEphemeralHarnessType<MinimalEthSpec>> {
|
||||
let harness = BeaconChainHarness::new_with_store_config(
|
||||
MinimalEthSpec,
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
StoreConfig::default(),
|
||||
);
|
||||
|
||||
harness.advance_slot();
|
||||
|
||||
@@ -41,11 +43,11 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<Minimal
|
||||
fn massive_skips() {
|
||||
let harness = get_harness(8);
|
||||
let spec = &MinimalEthSpec::default_spec();
|
||||
let mut state = harness.chain.head().beacon_state;
|
||||
let mut state = harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
// Run per_slot_processing until it returns an error.
|
||||
let error = loop {
|
||||
match per_slot_processing(&mut state, spec) {
|
||||
match per_slot_processing(&mut state, None, spec) {
|
||||
Ok(_) => continue,
|
||||
Err(e) => break e,
|
||||
}
|
||||
@@ -65,7 +67,7 @@ fn massive_skips() {
|
||||
fn iterators() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
@@ -74,8 +76,18 @@ fn iterators() {
|
||||
AttestationStrategy::SomeValidators(vec![]),
|
||||
);
|
||||
|
||||
let block_roots: Vec<(Hash256, Slot)> = harness.chain.rev_iter_block_roots().collect();
|
||||
let state_roots: Vec<(Hash256, Slot)> = harness.chain.rev_iter_state_roots().collect();
|
||||
let block_roots: Vec<(Hash256, Slot)> = harness
|
||||
.chain
|
||||
.rev_iter_block_roots()
|
||||
.expect("should get iter")
|
||||
.map(Result::unwrap)
|
||||
.collect();
|
||||
let state_roots: Vec<(Hash256, Slot)> = harness
|
||||
.chain
|
||||
.rev_iter_state_roots()
|
||||
.expect("should get iter")
|
||||
.map(Result::unwrap)
|
||||
.collect();
|
||||
|
||||
assert_eq!(
|
||||
block_roots.len(),
|
||||
@@ -113,11 +125,11 @@ fn iterators() {
|
||||
)
|
||||
});
|
||||
|
||||
let head = &harness.chain.head();
|
||||
let head = &harness.chain.head().expect("should get head");
|
||||
|
||||
assert_eq!(
|
||||
*block_roots.first().expect("should have some block roots"),
|
||||
(head.beacon_block_root, head.beacon_block.slot),
|
||||
(head.beacon_block_root, head.beacon_block.slot()),
|
||||
"first block root and slot should be for the head block"
|
||||
);
|
||||
|
||||
@@ -130,7 +142,7 @@ fn iterators() {
|
||||
|
||||
#[test]
|
||||
fn chooses_fork() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
|
||||
let delay = MinimalEthSpec::default_spec().min_attestation_inclusion_delay as usize;
|
||||
@@ -156,9 +168,9 @@ fn chooses_fork() {
|
||||
faulty_fork_blocks,
|
||||
);
|
||||
|
||||
assert!(honest_head != faulty_head, "forks should be distinct");
|
||||
assert_ne!(honest_head, faulty_head, "forks should be distinct");
|
||||
|
||||
let state = &harness.chain.head().beacon_state;
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot,
|
||||
@@ -167,7 +179,11 @@ fn chooses_fork() {
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
harness.chain.head().beacon_block_root,
|
||||
harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block_root,
|
||||
honest_head,
|
||||
"the honest chain should be the canonical chain"
|
||||
);
|
||||
@@ -177,7 +193,7 @@ fn chooses_fork() {
|
||||
fn finalizes_with_full_participation() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
@@ -185,7 +201,7 @@ fn finalizes_with_full_participation() {
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
let state = &harness.chain.head().beacon_state;
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
@@ -212,7 +228,7 @@ fn finalizes_with_full_participation() {
|
||||
fn finalizes_with_two_thirds_participation() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
|
||||
let attesters = (0..two_thirds).collect();
|
||||
@@ -223,7 +239,7 @@ fn finalizes_with_two_thirds_participation() {
|
||||
AttestationStrategy::SomeValidators(attesters),
|
||||
);
|
||||
|
||||
let state = &harness.chain.head().beacon_state;
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
@@ -255,7 +271,7 @@ fn finalizes_with_two_thirds_participation() {
|
||||
fn does_not_finalize_with_less_than_two_thirds_participation() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
|
||||
let less_than_two_thirds = two_thirds - 1;
|
||||
@@ -267,7 +283,7 @@ fn does_not_finalize_with_less_than_two_thirds_participation() {
|
||||
AttestationStrategy::SomeValidators(attesters),
|
||||
);
|
||||
|
||||
let state = &harness.chain.head().beacon_state;
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
@@ -292,7 +308,7 @@ fn does_not_finalize_with_less_than_two_thirds_participation() {
|
||||
fn does_not_finalize_without_attestation() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
@@ -300,7 +316,7 @@ fn does_not_finalize_without_attestation() {
|
||||
AttestationStrategy::SomeValidators(vec![]),
|
||||
);
|
||||
|
||||
let state = &harness.chain.head().beacon_state;
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
@@ -325,7 +341,7 @@ fn does_not_finalize_without_attestation() {
|
||||
fn roundtrip_operation_pool() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
// Add some attestations
|
||||
harness.extend_chain(
|
||||
@@ -335,34 +351,29 @@ fn roundtrip_operation_pool() {
|
||||
);
|
||||
assert!(harness.chain.op_pool.num_attestations() > 0);
|
||||
|
||||
// Add some deposits
|
||||
let rng = &mut XorShiftRng::from_seed([66; 16]);
|
||||
for i in 0..rng.gen_range(1, VALIDATOR_COUNT) {
|
||||
harness
|
||||
.chain
|
||||
.process_deposit(i as u64, Deposit::random_for_test(rng))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// TODO: could add some other operations
|
||||
harness.chain.persist().unwrap();
|
||||
harness
|
||||
.chain
|
||||
.persist_op_pool()
|
||||
.expect("should persist op pool");
|
||||
|
||||
let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes());
|
||||
let p: PersistedBeaconChain<HarnessType<MinimalEthSpec>> =
|
||||
harness.chain.store.get(&key).unwrap().unwrap();
|
||||
|
||||
let restored_op_pool = p
|
||||
.op_pool
|
||||
.into_operation_pool(&p.canonical_head.beacon_state, &harness.spec);
|
||||
let key = Hash256::from_slice(&OP_POOL_DB_KEY);
|
||||
let restored_op_pool = harness
|
||||
.chain
|
||||
.store
|
||||
.get_item::<PersistedOperationPool<MinimalEthSpec>>(&key)
|
||||
.expect("should read db")
|
||||
.expect("should find op pool")
|
||||
.into_operation_pool();
|
||||
|
||||
assert_eq!(harness.chain.op_pool, restored_op_pool);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn free_attestations_added_to_fork_choice_some_none() {
|
||||
fn unaggregated_attestations_added_to_fork_choice_some_none() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
@@ -370,8 +381,14 @@ fn free_attestations_added_to_fork_choice_some_none() {
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
let state = &harness.chain.head().beacon_state;
|
||||
let fork_choice = &harness.chain.fork_choice;
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
let mut fork_choice = harness.chain.fork_choice.write();
|
||||
|
||||
// Move forward a slot so all queued attestations can be processed.
|
||||
harness.advance_slot();
|
||||
fork_choice
|
||||
.update_time(harness.chain.slot().unwrap())
|
||||
.unwrap();
|
||||
|
||||
let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT)
|
||||
.into_iter()
|
||||
@@ -392,8 +409,8 @@ fn free_attestations_added_to_fork_choice_some_none() {
|
||||
if slot <= num_blocks_produced && slot != 0 {
|
||||
assert_eq!(
|
||||
latest_message.unwrap().1,
|
||||
slot,
|
||||
"Latest message slot for {} should be equal to slot {}.",
|
||||
slot.epoch(MinimalEthSpec::slots_per_epoch()),
|
||||
"Latest message epoch for {} should be equal to epoch {}.",
|
||||
validator,
|
||||
slot
|
||||
)
|
||||
@@ -410,7 +427,7 @@ fn free_attestations_added_to_fork_choice_some_none() {
|
||||
fn attestations_with_increasing_slots() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
let mut attestations = vec![];
|
||||
|
||||
@@ -422,29 +439,57 @@ fn attestations_with_increasing_slots() {
|
||||
AttestationStrategy::SomeValidators(vec![]),
|
||||
);
|
||||
|
||||
attestations.append(&mut harness.get_free_attestations(
|
||||
&AttestationStrategy::AllValidators,
|
||||
&harness.chain.head().beacon_state,
|
||||
harness.chain.head().beacon_block_root,
|
||||
harness.chain.head().beacon_block.slot,
|
||||
));
|
||||
attestations.append(
|
||||
&mut harness.get_unaggregated_attestations(
|
||||
&AttestationStrategy::AllValidators,
|
||||
&harness.chain.head().expect("should get head").beacon_state,
|
||||
harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block_root,
|
||||
harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block
|
||||
.slot(),
|
||||
),
|
||||
);
|
||||
|
||||
harness.advance_slot();
|
||||
}
|
||||
|
||||
for attestation in attestations {
|
||||
assert_eq!(
|
||||
harness.chain.process_attestation(attestation),
|
||||
Ok(AttestationProcessingOutcome::Processed)
|
||||
)
|
||||
for (attestation, subnet_id) in attestations.into_iter().flatten() {
|
||||
let res = harness
|
||||
.chain
|
||||
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id);
|
||||
|
||||
let current_slot = harness.chain.slot().expect("should get slot");
|
||||
let expected_attestation_slot = attestation.data.slot;
|
||||
let expected_earliest_permissible_slot =
|
||||
current_slot - MinimalEthSpec::slots_per_epoch() - 1;
|
||||
|
||||
if expected_attestation_slot < expected_earliest_permissible_slot {
|
||||
assert!(matches!(
|
||||
res.err().unwrap(),
|
||||
AttnError::PastSlot {
|
||||
attestation_slot,
|
||||
earliest_permissible_slot,
|
||||
}
|
||||
if attestation_slot == expected_attestation_slot && earliest_permissible_slot == expected_earliest_permissible_slot
|
||||
))
|
||||
} else {
|
||||
res.expect("should process attestation");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn free_attestations_added_to_fork_choice_all_updated() {
|
||||
fn unaggregated_attestations_added_to_fork_choice_all_updated() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let mut harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
@@ -452,8 +497,14 @@ fn free_attestations_added_to_fork_choice_all_updated() {
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
let state = &harness.chain.head().beacon_state;
|
||||
let fork_choice = &harness.chain.fork_choice;
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
let mut fork_choice = harness.chain.fork_choice.write();
|
||||
|
||||
// Move forward a slot so all queued attestations can be processed.
|
||||
harness.advance_slot();
|
||||
fork_choice
|
||||
.update_time(harness.chain.slot().unwrap())
|
||||
.unwrap();
|
||||
|
||||
let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
|
||||
let slots: Vec<Slot> = validators
|
||||
@@ -473,7 +524,7 @@ fn free_attestations_added_to_fork_choice_all_updated() {
|
||||
|
||||
assert_eq!(
|
||||
latest_message.unwrap().1,
|
||||
slot,
|
||||
slot.epoch(MinimalEthSpec::slots_per_epoch()),
|
||||
"Latest message slot should be equal to attester duty."
|
||||
);
|
||||
|
||||
@@ -493,7 +544,7 @@ fn free_attestations_added_to_fork_choice_all_updated() {
|
||||
|
||||
fn run_skip_slot_test(skip_slots: u64) {
|
||||
let num_validators = 8;
|
||||
let harness_a = get_harness(num_validators);
|
||||
let mut harness_a = get_harness(num_validators);
|
||||
let harness_b = get_harness(num_validators);
|
||||
|
||||
for _ in 0..skip_slots {
|
||||
@@ -509,18 +560,41 @@ fn run_skip_slot_test(skip_slots: u64) {
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
harness_a.chain.head().beacon_block.slot,
|
||||
harness_a
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block
|
||||
.slot(),
|
||||
Slot::new(skip_slots + 1)
|
||||
);
|
||||
assert_eq!(harness_b.chain.head().beacon_block.slot, Slot::new(0));
|
||||
assert_eq!(
|
||||
harness_b
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block
|
||||
.slot(),
|
||||
Slot::new(0)
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
harness_b
|
||||
.chain
|
||||
.process_block(harness_a.chain.head().beacon_block.clone()),
|
||||
Ok(BlockProcessingOutcome::Processed {
|
||||
block_root: harness_a.chain.head().beacon_block_root
|
||||
})
|
||||
.process_block(
|
||||
harness_a
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block
|
||||
.clone(),
|
||||
)
|
||||
.unwrap(),
|
||||
harness_a
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block_root
|
||||
);
|
||||
|
||||
harness_b
|
||||
@@ -529,7 +603,12 @@ fn run_skip_slot_test(skip_slots: u64) {
|
||||
.expect("should run fork choice");
|
||||
|
||||
assert_eq!(
|
||||
harness_b.chain.head().beacon_block.slot,
|
||||
harness_b
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block
|
||||
.slot(),
|
||||
Slot::new(skip_slots + 1)
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,41 +1,43 @@
|
||||
[package]
|
||||
name = "client"
|
||||
version = "0.1.0"
|
||||
authors = ["Age Manning <Age@AgeManning.com>"]
|
||||
version = "0.2.0"
|
||||
authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dev-dependencies]
|
||||
sloggers = "0.3.4"
|
||||
toml = "^0.5"
|
||||
sloggers = "1.0.0"
|
||||
toml = "0.5.6"
|
||||
|
||||
[dependencies]
|
||||
beacon_chain = { path = "../beacon_chain" }
|
||||
store = { path = "../store" }
|
||||
network = { path = "../network" }
|
||||
eth2-libp2p = { path = "../eth2-libp2p" }
|
||||
timer = { path = "../timer" }
|
||||
eth2_libp2p = { path = "../eth2_libp2p" }
|
||||
rest_api = { path = "../rest_api" }
|
||||
parking_lot = "0.9.0"
|
||||
parking_lot = "0.11.0"
|
||||
websocket_server = { path = "../websocket_server" }
|
||||
prometheus = "0.7.0"
|
||||
types = { path = "../../eth2/types" }
|
||||
prometheus = "0.9.0"
|
||||
types = { path = "../../consensus/types" }
|
||||
tree_hash = "0.1.0"
|
||||
eth2_config = { path = "../../eth2/utils/eth2_config" }
|
||||
slot_clock = { path = "../../eth2/utils/slot_clock" }
|
||||
serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
error-chain = "0.12.1"
|
||||
eth2_config = { path = "../../common/eth2_config" }
|
||||
slot_clock = { path = "../../common/slot_clock" }
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.110"
|
||||
error-chain = "0.12.2"
|
||||
serde_yaml = "0.8.11"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
slog-async = "2.3.0"
|
||||
tokio = "0.1.22"
|
||||
slog-async = "2.5.0"
|
||||
tokio = "0.2.21"
|
||||
dirs = "2.0.2"
|
||||
exit-future = "0.1.4"
|
||||
futures = "0.1.29"
|
||||
reqwest = "0.9.22"
|
||||
url = "2.1.0"
|
||||
lmd_ghost = { path = "../../eth2/lmd_ghost" }
|
||||
futures = "0.3.5"
|
||||
reqwest = { version = "0.10.4", features = ["native-tls-vendored"] }
|
||||
url = "2.1.1"
|
||||
eth1 = { path = "../eth1" }
|
||||
genesis = { path = "../genesis" }
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
lighthouse_bootstrap = { path = "../../eth2/utils/lighthouse_bootstrap" }
|
||||
eth2_ssz = { path = "../../eth2/utils/ssz" }
|
||||
eth2_ssz = "0.1.2"
|
||||
lazy_static = "1.4.0"
|
||||
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
|
||||
time = "0.2.16"
|
||||
bus = "2.2.3"
|
||||
|
||||
@@ -1,36 +1,35 @@
|
||||
use crate::config::{ClientGenesis, Config as ClientConfig};
|
||||
use crate::notifier::spawn_notifier;
|
||||
use crate::Client;
|
||||
use beacon_chain::events::TeeEventHandler;
|
||||
use beacon_chain::{
|
||||
builder::{BeaconChainBuilder, Witness},
|
||||
eth1_chain::CachingEth1Backend,
|
||||
lmd_ghost::ThreadSafeReducedTree,
|
||||
eth1_chain::{CachingEth1Backend, Eth1Chain},
|
||||
migrate::{BackgroundMigrator, Migrate},
|
||||
slot_clock::{SlotClock, SystemTimeSlotClock},
|
||||
store::{
|
||||
migrate::{BackgroundMigrator, Migrate, NullMigrator},
|
||||
DiskStore, MemoryStore, SimpleDiskStore, Store,
|
||||
},
|
||||
store::{HotColdDB, ItemStore, LevelDB, StoreConfig},
|
||||
BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler,
|
||||
};
|
||||
use bus::Bus;
|
||||
use environment::RuntimeContext;
|
||||
use eth1::{Config as Eth1Config, Service as Eth1Service};
|
||||
use eth2_config::Eth2Config;
|
||||
use exit_future::Signal;
|
||||
use futures::{future, Future, IntoFuture};
|
||||
use genesis::{
|
||||
generate_deterministic_keypairs, interop_genesis_state, state_from_ssz_file, Eth1GenesisService,
|
||||
};
|
||||
use lighthouse_bootstrap::Bootstrapper;
|
||||
use lmd_ghost::LmdGhost;
|
||||
use network::{NetworkConfig, NetworkMessage, Service as NetworkService};
|
||||
use eth2_libp2p::NetworkGlobals;
|
||||
use genesis::{interop_genesis_state, Eth1GenesisService};
|
||||
use network::{NetworkConfig, NetworkMessage, NetworkService};
|
||||
use parking_lot::Mutex;
|
||||
use slog::info;
|
||||
use ssz::Decode;
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use timer::spawn_timer;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use types::{BeaconState, ChainSpec, EthSpec};
|
||||
use types::{
|
||||
test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec,
|
||||
SignedBeaconBlockHash,
|
||||
};
|
||||
use websocket_server::{Config as WebSocketConfig, WebSocketSender};
|
||||
|
||||
/// Interval between polling the eth1 node for genesis information.
|
||||
@@ -51,42 +50,42 @@ pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000;
|
||||
/// `self.memory_store(..)` has been called.
|
||||
pub struct ClientBuilder<T: BeaconChainTypes> {
|
||||
slot_clock: Option<T::SlotClock>,
|
||||
store: Option<Arc<T::Store>>,
|
||||
#[allow(clippy::type_complexity)]
|
||||
store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
|
||||
store_migrator: Option<T::StoreMigrator>,
|
||||
runtime_context: Option<RuntimeContext<T::EthSpec>>,
|
||||
chain_spec: Option<ChainSpec>,
|
||||
beacon_chain_builder: Option<BeaconChainBuilder<T>>,
|
||||
beacon_chain: Option<Arc<BeaconChain<T>>>,
|
||||
eth1_service: Option<Eth1Service>,
|
||||
exit_signals: Vec<Signal>,
|
||||
event_handler: Option<T::EventHandler>,
|
||||
libp2p_network: Option<Arc<NetworkService<T>>>,
|
||||
libp2p_network_send: Option<UnboundedSender<NetworkMessage>>,
|
||||
network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
|
||||
network_send: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>,
|
||||
http_listen_addr: Option<SocketAddr>,
|
||||
websocket_listen_addr: Option<SocketAddr>,
|
||||
eth_spec_instance: T::EthSpec,
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
ClientBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec>,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
|
||||
TSlotClock: SlotClock + Clone + 'static,
|
||||
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
{
|
||||
/// Instantiates a new, empty builder.
|
||||
///
|
||||
@@ -101,10 +100,9 @@ where
|
||||
beacon_chain_builder: None,
|
||||
beacon_chain: None,
|
||||
eth1_service: None,
|
||||
exit_signals: vec![],
|
||||
event_handler: None,
|
||||
libp2p_network: None,
|
||||
libp2p_network_send: None,
|
||||
network_globals: None,
|
||||
network_send: None,
|
||||
http_listen_addr: None,
|
||||
websocket_listen_addr: None,
|
||||
eth_spec_instance,
|
||||
@@ -125,155 +123,159 @@ where
|
||||
|
||||
/// Initializes the `BeaconChainBuilder`. The `build_beacon_chain` method will need to be
|
||||
/// called later in order to actually instantiate the `BeaconChain`.
|
||||
pub fn beacon_chain_builder(
|
||||
pub async fn beacon_chain_builder(
|
||||
mut self,
|
||||
client_genesis: ClientGenesis,
|
||||
config: Eth1Config,
|
||||
) -> impl Future<Item = Self, Error = String> {
|
||||
config: ClientConfig,
|
||||
) -> Result<Self, String> {
|
||||
let store = self.store.clone();
|
||||
let store_migrator = self.store_migrator.take();
|
||||
let chain_spec = self.chain_spec.clone();
|
||||
let runtime_context = self.runtime_context.clone();
|
||||
let eth_spec_instance = self.eth_spec_instance.clone();
|
||||
let data_dir = config.data_dir.clone();
|
||||
let disabled_forks = config.disabled_forks.clone();
|
||||
let chain_config = config.chain.clone();
|
||||
let graffiti = config.graffiti;
|
||||
|
||||
future::ok(())
|
||||
.and_then(move |()| {
|
||||
let store = store
|
||||
.ok_or_else(|| "beacon_chain_start_method requires a store".to_string())?;
|
||||
let store_migrator = store_migrator.ok_or_else(|| {
|
||||
"beacon_chain_start_method requires a store migrator".to_string()
|
||||
})?;
|
||||
let context = runtime_context
|
||||
.ok_or_else(|| "beacon_chain_start_method requires a log".to_string())?
|
||||
.service_context("beacon".into());
|
||||
let spec = chain_spec
|
||||
.ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?;
|
||||
let store =
|
||||
store.ok_or_else(|| "beacon_chain_start_method requires a store".to_string())?;
|
||||
let store_migrator = store_migrator
|
||||
.ok_or_else(|| "beacon_chain_start_method requires a store migrator".to_string())?;
|
||||
let context = runtime_context
|
||||
.ok_or_else(|| "beacon_chain_start_method requires a runtime context".to_string())?
|
||||
.service_context("beacon".into());
|
||||
let spec = chain_spec
|
||||
.ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?;
|
||||
|
||||
let builder = BeaconChainBuilder::new(eth_spec_instance)
|
||||
.logger(context.log.clone())
|
||||
.store(store.clone())
|
||||
.store_migrator(store_migrator)
|
||||
.custom_spec(spec.clone());
|
||||
let builder = BeaconChainBuilder::new(eth_spec_instance)
|
||||
.logger(context.log().clone())
|
||||
.store(store)
|
||||
.store_migrator(store_migrator)
|
||||
.data_dir(data_dir)
|
||||
.custom_spec(spec.clone())
|
||||
.chain_config(chain_config)
|
||||
.disabled_forks(disabled_forks)
|
||||
.graffiti(graffiti);
|
||||
|
||||
Ok((builder, spec, context))
|
||||
})
|
||||
.and_then(move |(builder, spec, context)| {
|
||||
let genesis_state_future: Box<dyn Future<Item = _, Error = _> + Send> =
|
||||
match client_genesis {
|
||||
ClientGenesis::Interop {
|
||||
validator_count,
|
||||
genesis_time,
|
||||
} => {
|
||||
let keypairs = generate_deterministic_keypairs(validator_count);
|
||||
let result = interop_genesis_state(&keypairs, genesis_time, &spec);
|
||||
let chain_exists = builder
|
||||
.store_contains_beacon_chain()
|
||||
.unwrap_or_else(|_| false);
|
||||
|
||||
let future = result
|
||||
.and_then(move |genesis_state| builder.genesis_state(genesis_state))
|
||||
.into_future()
|
||||
.map(|v| (v, None));
|
||||
// If the client is expect to resume but there's no beacon chain in the database,
|
||||
// use the `DepositContract` method. This scenario is quite common when the client
|
||||
// is shutdown before finding genesis via eth1.
|
||||
//
|
||||
// Alternatively, if there's a beacon chain in the database then always resume
|
||||
// using it.
|
||||
let client_genesis = if client_genesis == ClientGenesis::FromStore && !chain_exists {
|
||||
info!(context.log(), "Defaulting to deposit contract genesis");
|
||||
|
||||
Box::new(future)
|
||||
}
|
||||
ClientGenesis::SszFile { path } => {
|
||||
let result = state_from_ssz_file(path);
|
||||
ClientGenesis::DepositContract
|
||||
} else if chain_exists {
|
||||
ClientGenesis::FromStore
|
||||
} else {
|
||||
client_genesis
|
||||
};
|
||||
|
||||
let future = result
|
||||
.and_then(move |genesis_state| builder.genesis_state(genesis_state))
|
||||
.into_future()
|
||||
.map(|v| (v, None));
|
||||
let (beacon_chain_builder, eth1_service_option) = match client_genesis {
|
||||
ClientGenesis::Interop {
|
||||
validator_count,
|
||||
genesis_time,
|
||||
} => {
|
||||
let keypairs = generate_deterministic_keypairs(validator_count);
|
||||
let genesis_state = interop_genesis_state(&keypairs, genesis_time, &spec)?;
|
||||
builder.genesis_state(genesis_state).map(|v| (v, None))?
|
||||
}
|
||||
ClientGenesis::SszBytes {
|
||||
genesis_state_bytes,
|
||||
} => {
|
||||
info!(
|
||||
context.log(),
|
||||
"Starting from known genesis state";
|
||||
);
|
||||
|
||||
Box::new(future)
|
||||
}
|
||||
ClientGenesis::SszBytes {
|
||||
genesis_state_bytes,
|
||||
} => {
|
||||
info!(
|
||||
context.log,
|
||||
"Starting from known genesis state";
|
||||
);
|
||||
let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes)
|
||||
.map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?;
|
||||
|
||||
let result = BeaconState::from_ssz_bytes(&genesis_state_bytes)
|
||||
.map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e));
|
||||
builder.genesis_state(genesis_state).map(|v| (v, None))?
|
||||
}
|
||||
ClientGenesis::DepositContract => {
|
||||
info!(
|
||||
context.log(),
|
||||
"Waiting for eth2 genesis from eth1";
|
||||
"eth1_endpoint" => &config.eth1.endpoint,
|
||||
"contract_deploy_block" => config.eth1.deposit_contract_deploy_block,
|
||||
"deposit_contract" => &config.eth1.deposit_contract_address
|
||||
);
|
||||
|
||||
let future = result
|
||||
.and_then(move |genesis_state| builder.genesis_state(genesis_state))
|
||||
.into_future()
|
||||
.map(|v| (v, None));
|
||||
let genesis_service = Eth1GenesisService::new(
|
||||
config.eth1,
|
||||
context.log().clone(),
|
||||
context.eth2_config().spec.clone(),
|
||||
);
|
||||
|
||||
Box::new(future)
|
||||
}
|
||||
ClientGenesis::DepositContract => {
|
||||
info!(
|
||||
context.log,
|
||||
"Waiting for eth2 genesis from eth1";
|
||||
"eth1_node" => &config.endpoint
|
||||
);
|
||||
let genesis_state = genesis_service
|
||||
.wait_for_genesis_state(
|
||||
Duration::from_millis(ETH1_GENESIS_UPDATE_INTERVAL_MILLIS),
|
||||
context.eth2_config().spec.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let genesis_service =
|
||||
Eth1GenesisService::new(config, context.log.clone());
|
||||
builder
|
||||
.genesis_state(genesis_state)
|
||||
.map(|v| (v, Some(genesis_service.into_core_service())))?
|
||||
}
|
||||
ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?,
|
||||
};
|
||||
|
||||
let future = genesis_service
|
||||
.wait_for_genesis_state(
|
||||
Duration::from_millis(ETH1_GENESIS_UPDATE_INTERVAL_MILLIS),
|
||||
context.eth2_config().spec.clone(),
|
||||
)
|
||||
.and_then(move |genesis_state| builder.genesis_state(genesis_state))
|
||||
.map(|v| (v, Some(genesis_service.into_core_service())));
|
||||
|
||||
Box::new(future)
|
||||
}
|
||||
ClientGenesis::RemoteNode { server, .. } => {
|
||||
let future = Bootstrapper::connect(server.to_string(), &context.log)
|
||||
.map_err(|e| {
|
||||
format!("Failed to initialize bootstrap client: {}", e)
|
||||
})
|
||||
.into_future()
|
||||
.and_then(|bootstrapper| {
|
||||
let (genesis_state, _genesis_block) =
|
||||
bootstrapper.genesis().map_err(|e| {
|
||||
format!("Failed to bootstrap genesis state: {}", e)
|
||||
})?;
|
||||
|
||||
builder.genesis_state(genesis_state)
|
||||
})
|
||||
.map(|v| (v, None));
|
||||
|
||||
Box::new(future)
|
||||
}
|
||||
ClientGenesis::Resume => {
|
||||
let future = builder.resume_from_db().into_future().map(|v| (v, None));
|
||||
|
||||
Box::new(future)
|
||||
}
|
||||
};
|
||||
|
||||
genesis_state_future
|
||||
})
|
||||
.map(move |(beacon_chain_builder, eth1_service_option)| {
|
||||
self.eth1_service = eth1_service_option;
|
||||
self.beacon_chain_builder = Some(beacon_chain_builder);
|
||||
self
|
||||
})
|
||||
self.eth1_service = eth1_service_option;
|
||||
self.beacon_chain_builder = Some(beacon_chain_builder);
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Immediately starts the libp2p networking stack.
|
||||
pub fn libp2p_network(mut self, config: &NetworkConfig) -> Result<Self, String> {
|
||||
/// Starts the networking stack.
|
||||
pub async fn network(mut self, config: &NetworkConfig) -> Result<Self, String> {
|
||||
let beacon_chain = self
|
||||
.beacon_chain
|
||||
.clone()
|
||||
.ok_or_else(|| "libp2p_network requires a beacon chain")?;
|
||||
.ok_or_else(|| "network requires a beacon chain")?;
|
||||
let context = self
|
||||
.runtime_context
|
||||
.as_ref()
|
||||
.ok_or_else(|| "libp2p_network requires a runtime_context")?
|
||||
.service_context("network".into());
|
||||
.ok_or_else(|| "network requires a runtime_context")?
|
||||
.clone();
|
||||
|
||||
let (network, network_send) =
|
||||
NetworkService::new(beacon_chain, config, &context.executor, context.log)
|
||||
.map_err(|e| format!("Failed to start libp2p network: {:?}", e))?;
|
||||
let (network_globals, network_send) =
|
||||
NetworkService::start(beacon_chain, config, context.executor)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to start network: {:?}", e))?;
|
||||
|
||||
self.libp2p_network = Some(network);
|
||||
self.libp2p_network_send = Some(network_send);
|
||||
self.network_globals = Some(network_globals);
|
||||
self.network_send = Some(network_send);
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Immediately starts the timer service.
|
||||
fn timer(self) -> Result<Self, String> {
|
||||
let context = self
|
||||
.runtime_context
|
||||
.as_ref()
|
||||
.ok_or_else(|| "node timer requires a runtime_context")?
|
||||
.service_context("node_timer".into());
|
||||
let beacon_chain = self
|
||||
.beacon_chain
|
||||
.clone()
|
||||
.ok_or_else(|| "node timer requires a beacon chain")?;
|
||||
let milliseconds_per_slot = self
|
||||
.chain_spec
|
||||
.as_ref()
|
||||
.ok_or_else(|| "node timer requires a chain spec".to_string())?
|
||||
.milliseconds_per_slot;
|
||||
|
||||
spawn_timer(context.executor, beacon_chain, milliseconds_per_slot)
|
||||
.map_err(|e| format!("Unable to start node timer: {}", e))?;
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
@@ -283,6 +285,7 @@ where
|
||||
mut self,
|
||||
client_config: &ClientConfig,
|
||||
eth2_config: &Eth2Config,
|
||||
events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
|
||||
) -> Result<Self, String> {
|
||||
let beacon_chain = self
|
||||
.beacon_chain
|
||||
@@ -293,41 +296,43 @@ where
|
||||
.as_ref()
|
||||
.ok_or_else(|| "http_server requires a runtime_context")?
|
||||
.service_context("http".into());
|
||||
let network = self
|
||||
.libp2p_network
|
||||
let network_globals = self
|
||||
.network_globals
|
||||
.clone()
|
||||
.ok_or_else(|| "http_server requires a libp2p network")?;
|
||||
let network_send = self
|
||||
.libp2p_network_send
|
||||
.network_send
|
||||
.clone()
|
||||
.ok_or_else(|| "http_server requires a libp2p network sender")?;
|
||||
|
||||
let network_info = rest_api::NetworkInfo {
|
||||
network_service: network.clone(),
|
||||
network_chan: network_send.clone(),
|
||||
network_globals,
|
||||
network_chan: network_send,
|
||||
};
|
||||
|
||||
let (exit_signal, listening_addr) = rest_api::start_server(
|
||||
let listening_addr = rest_api::start_server(
|
||||
context.executor,
|
||||
&client_config.rest_api,
|
||||
&context.executor,
|
||||
beacon_chain.clone(),
|
||||
beacon_chain,
|
||||
network_info,
|
||||
client_config
|
||||
.create_db_path()
|
||||
.expect("unable to read datadir"),
|
||||
.map_err(|_| "unable to read data dir")?,
|
||||
client_config
|
||||
.create_freezer_db_path()
|
||||
.map_err(|_| "unable to read freezer DB dir")?,
|
||||
eth2_config.clone(),
|
||||
context.log,
|
||||
events,
|
||||
)
|
||||
.map_err(|e| format!("Failed to start HTTP API: {:?}", e))?;
|
||||
|
||||
self.exit_signals.push(exit_signal);
|
||||
self.http_listen_addr = Some(listening_addr);
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Immediately starts the service that periodically logs information each slot.
|
||||
pub fn notifier(mut self) -> Result<Self, String> {
|
||||
pub fn notifier(self) -> Result<Self, String> {
|
||||
let context = self
|
||||
.runtime_context
|
||||
.as_ref()
|
||||
@@ -337,8 +342,8 @@ where
|
||||
.beacon_chain
|
||||
.clone()
|
||||
.ok_or_else(|| "slot_notifier requires a beacon chain")?;
|
||||
let network = self
|
||||
.libp2p_network
|
||||
let network_globals = self
|
||||
.network_globals
|
||||
.clone()
|
||||
.ok_or_else(|| "slot_notifier requires a libp2p network")?;
|
||||
let milliseconds_per_slot = self
|
||||
@@ -347,10 +352,13 @@ where
|
||||
.ok_or_else(|| "slot_notifier requires a chain spec".to_string())?
|
||||
.milliseconds_per_slot;
|
||||
|
||||
let exit_signal = spawn_notifier(context, beacon_chain, network, milliseconds_per_slot)
|
||||
.map_err(|e| format!("Unable to start slot notifier: {}", e))?;
|
||||
|
||||
self.exit_signals.push(exit_signal);
|
||||
spawn_notifier(
|
||||
context.executor,
|
||||
beacon_chain,
|
||||
network_globals,
|
||||
milliseconds_per_slot,
|
||||
)
|
||||
.map_err(|e| format!("Unable to start slot notifier: {}", e))?;
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
@@ -363,44 +371,44 @@ where
|
||||
self,
|
||||
) -> Client<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
> {
|
||||
Client {
|
||||
beacon_chain: self.beacon_chain,
|
||||
libp2p_network: self.libp2p_network,
|
||||
network_globals: self.network_globals,
|
||||
http_listen_addr: self.http_listen_addr,
|
||||
websocket_listen_addr: self.websocket_listen_addr,
|
||||
_exit_signals: self.exit_signals,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
ClientBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
ThreadSafeReducedTree<TStore, TEthSpec>,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec>,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
|
||||
TSlotClock: SlotClock + Clone + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
{
|
||||
/// Consumes the internal `BeaconChainBuilder`, attaching the resulting `BeaconChain` to self.
|
||||
pub fn build_beacon_chain(mut self) -> Result<Self, String> {
|
||||
@@ -416,8 +424,6 @@ where
|
||||
.clone()
|
||||
.ok_or_else(|| "beacon_chain requires a slot clock")?,
|
||||
)
|
||||
.reduced_tree_fork_choice()
|
||||
.map_err(|e| format!("Failed to init fork choice: {}", e))?
|
||||
.build()
|
||||
.map_err(|e| format!("Failed to build beacon chain: {}", e))?;
|
||||
|
||||
@@ -425,86 +431,84 @@ where
|
||||
self.beacon_chain_builder = None;
|
||||
self.event_handler = None;
|
||||
|
||||
Ok(self)
|
||||
// a beacon chain requires a timer
|
||||
self.timer()
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec>
|
||||
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>
|
||||
ClientBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
WebSocketSender<TEthSpec>,
|
||||
TeeEventHandler<TEthSpec>,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec>,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
{
|
||||
#[allow(clippy::type_complexity)]
|
||||
/// Specifies that the `BeaconChain` should publish events using the WebSocket server.
|
||||
pub fn websocket_event_handler(mut self, config: WebSocketConfig) -> Result<Self, String> {
|
||||
pub fn tee_event_handler(
|
||||
mut self,
|
||||
config: WebSocketConfig,
|
||||
) -> Result<(Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>), String> {
|
||||
let context = self
|
||||
.runtime_context
|
||||
.as_ref()
|
||||
.ok_or_else(|| "websocket_event_handler requires a runtime_context")?
|
||||
.ok_or_else(|| "tee_event_handler requires a runtime_context")?
|
||||
.service_context("ws".into());
|
||||
|
||||
let (sender, exit_signal, listening_addr): (
|
||||
WebSocketSender<TEthSpec>,
|
||||
Option<_>,
|
||||
Option<_>,
|
||||
) = if config.enabled {
|
||||
let (sender, exit, listening_addr) =
|
||||
websocket_server::start_server(&config, &context.executor, &context.log)?;
|
||||
(sender, Some(exit), Some(listening_addr))
|
||||
let log = context.log().clone();
|
||||
let (sender, listening_addr): (WebSocketSender<TEthSpec>, Option<_>) = if config.enabled {
|
||||
let (sender, listening_addr) =
|
||||
websocket_server::start_server(context.executor, &config)?;
|
||||
(sender, Some(listening_addr))
|
||||
} else {
|
||||
(WebSocketSender::dummy(), None, None)
|
||||
(WebSocketSender::dummy(), None)
|
||||
};
|
||||
|
||||
if let Some(signal) = exit_signal {
|
||||
self.exit_signals.push(signal);
|
||||
}
|
||||
self.event_handler = Some(sender);
|
||||
self.websocket_listen_addr = listening_addr;
|
||||
|
||||
Ok(self)
|
||||
let (tee_event_handler, bus) = TeeEventHandler::new(log, sender)?;
|
||||
self.event_handler = Some(tee_event_handler);
|
||||
Ok((self, bus))
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
|
||||
ClientBuilder<
|
||||
Witness<
|
||||
DiskStore<TEthSpec>,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
LevelDB<TEthSpec>,
|
||||
LevelDB<TEthSpec>,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TStoreMigrator: store::Migrate<DiskStore<TEthSpec>, TEthSpec> + 'static,
|
||||
TLmdGhost: LmdGhost<DiskStore<TEthSpec>, TEthSpec> + 'static,
|
||||
TStoreMigrator: Migrate<TEthSpec, LevelDB<TEthSpec>, LevelDB<TEthSpec>> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
{
|
||||
/// Specifies that the `Client` should use a `DiskStore` database.
|
||||
/// Specifies that the `Client` should use a `HotColdDB` database.
|
||||
pub fn disk_store(
|
||||
mut self,
|
||||
hot_path: &Path,
|
||||
cold_path: &Path,
|
||||
slots_per_restore_point: u64,
|
||||
config: StoreConfig,
|
||||
) -> Result<Self, String> {
|
||||
let context = self
|
||||
.runtime_context
|
||||
@@ -516,130 +520,71 @@ where
|
||||
.clone()
|
||||
.ok_or_else(|| "disk_store requires a chain spec".to_string())?;
|
||||
|
||||
let store = DiskStore::open(
|
||||
hot_path,
|
||||
cold_path,
|
||||
slots_per_restore_point,
|
||||
spec,
|
||||
context.log,
|
||||
)
|
||||
.map_err(|e| format!("Unable to open database: {:?}", e).to_string())?;
|
||||
let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log().clone())
|
||||
.map_err(|e| format!("Unable to open database: {:?}", e))?;
|
||||
self.store = Some(Arc::new(store));
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
ClientBuilder<
|
||||
Witness<
|
||||
SimpleDiskStore<TEthSpec>,
|
||||
TStoreMigrator,
|
||||
BackgroundMigrator<TEthSpec, THotStore, TColdStore>,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TStoreMigrator: store::Migrate<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
|
||||
TLmdGhost: LmdGhost<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
{
|
||||
/// Specifies that the `Client` should use a `DiskStore` database.
|
||||
pub fn simple_disk_store(mut self, path: &Path) -> Result<Self, String> {
|
||||
let store = SimpleDiskStore::open(path)
|
||||
.map_err(|e| format!("Unable to open database: {:?}", e).to_string())?;
|
||||
self.store = Some(Arc::new(store));
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
|
||||
ClientBuilder<
|
||||
Witness<
|
||||
MemoryStore<TEthSpec>,
|
||||
NullMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TLmdGhost: LmdGhost<MemoryStore<TEthSpec>, TEthSpec> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
{
|
||||
/// Specifies that the `Client` should use a `MemoryStore` database.
|
||||
///
|
||||
/// Also sets the `store_migrator` to the `NullMigrator`, as that's the only viable choice.
|
||||
pub fn memory_store(mut self) -> Self {
|
||||
let store = MemoryStore::open();
|
||||
self.store = Some(Arc::new(store));
|
||||
self.store_migrator = Some(NullMigrator);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
|
||||
ClientBuilder<
|
||||
Witness<
|
||||
DiskStore<TEthSpec>,
|
||||
BackgroundMigrator<TEthSpec>,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TLmdGhost: LmdGhost<DiskStore<TEthSpec>, TEthSpec> + 'static,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
{
|
||||
pub fn background_migrator(mut self) -> Result<Self, String> {
|
||||
let context = self
|
||||
.runtime_context
|
||||
.as_ref()
|
||||
.ok_or_else(|| "disk_store requires a log".to_string())?
|
||||
.service_context("freezer_db".into());
|
||||
let store = self.store.clone().ok_or_else(|| {
|
||||
"background_migrator requires the store to be initialized".to_string()
|
||||
})?;
|
||||
self.store_migrator = Some(BackgroundMigrator::new(store));
|
||||
self.store_migrator = Some(BackgroundMigrator::new(store, context.log().clone()));
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TSlotClock, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
ClientBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
TSlotClock,
|
||||
TLmdGhost,
|
||||
CachingEth1Backend<TEthSpec, TStore>,
|
||||
CachingEth1Backend<TEthSpec>,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec>,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
|
||||
TSlotClock: SlotClock + 'static,
|
||||
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
{
|
||||
/// Specifies that the `BeaconChain` should cache eth1 blocks/logs from a remote eth1 node
|
||||
/// (e.g., Parity/Geth) and refer to that cache when collecting deposits or eth1 votes during
|
||||
/// block production.
|
||||
pub fn caching_eth1_backend(mut self, config: Eth1Config) -> Result<Self, String> {
|
||||
pub async fn caching_eth1_backend(mut self, config: Eth1Config) -> Result<Self, String> {
|
||||
let context = self
|
||||
.runtime_context
|
||||
.as_ref()
|
||||
@@ -648,13 +593,24 @@ where
|
||||
let beacon_chain_builder = self
|
||||
.beacon_chain_builder
|
||||
.ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?;
|
||||
let store = self
|
||||
.store
|
||||
let spec = self
|
||||
.chain_spec
|
||||
.clone()
|
||||
.ok_or_else(|| "caching_eth1_backend requires a store".to_string())?;
|
||||
.ok_or_else(|| "caching_eth1_backend requires a chain spec".to_string())?;
|
||||
|
||||
// Check if the eth1 endpoint we connect to is on the correct network id.
|
||||
let network_id =
|
||||
eth1::http::get_network_id(&config.endpoint, Duration::from_millis(15_000)).await?;
|
||||
|
||||
if network_id != config.network_id {
|
||||
return Err(format!(
|
||||
"Invalid eth1 network id. Expected {:?}, got {:?}",
|
||||
config.network_id, network_id
|
||||
));
|
||||
}
|
||||
|
||||
let backend = if let Some(eth1_service_from_genesis) = self.eth1_service {
|
||||
eth1_service_from_genesis.update_config(config.clone())?;
|
||||
eth1_service_from_genesis.update_config(config)?;
|
||||
|
||||
// This cache is not useful because it's first (earliest) block likely the block that
|
||||
// triggered genesis.
|
||||
@@ -666,21 +622,32 @@ where
|
||||
// adding earlier blocks too.
|
||||
eth1_service_from_genesis.drop_block_cache();
|
||||
|
||||
CachingEth1Backend::from_service(eth1_service_from_genesis, store)
|
||||
CachingEth1Backend::from_service(eth1_service_from_genesis)
|
||||
} else {
|
||||
CachingEth1Backend::new(config, context.log, store)
|
||||
beacon_chain_builder
|
||||
.get_persisted_eth1_backend()?
|
||||
.map(|persisted| {
|
||||
Eth1Chain::from_ssz_container(
|
||||
&persisted,
|
||||
config.clone(),
|
||||
&context.log().clone(),
|
||||
spec.clone(),
|
||||
)
|
||||
.map(|chain| chain.into_backend())
|
||||
})
|
||||
.unwrap_or_else(|| {
|
||||
Ok(CachingEth1Backend::new(
|
||||
config,
|
||||
context.log().clone(),
|
||||
spec.clone(),
|
||||
))
|
||||
})?
|
||||
};
|
||||
|
||||
self.eth1_service = None;
|
||||
|
||||
let exit = {
|
||||
let (tx, rx) = exit_future::signal();
|
||||
self.exit_signals.push(tx);
|
||||
rx
|
||||
};
|
||||
|
||||
// Starts the service that connects to an eth1 node and periodically updates caches.
|
||||
context.executor.spawn(backend.start(exit));
|
||||
backend.start(context.executor);
|
||||
|
||||
self.beacon_chain_builder = Some(beacon_chain_builder.eth1_backend(Some(backend)));
|
||||
|
||||
@@ -718,25 +685,25 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<TStore, TStoreMigrator, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
|
||||
impl<TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
|
||||
ClientBuilder<
|
||||
Witness<
|
||||
TStore,
|
||||
TStoreMigrator,
|
||||
SystemTimeSlotClock,
|
||||
TLmdGhost,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>
|
||||
where
|
||||
TStore: Store<TEthSpec> + 'static,
|
||||
TStoreMigrator: store::Migrate<TStore, TEthSpec>,
|
||||
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
|
||||
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
|
||||
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
|
||||
TEthSpec: EthSpec + 'static,
|
||||
TEventHandler: EventHandler<TEthSpec> + 'static,
|
||||
THotStore: ItemStore<TEthSpec> + 'static,
|
||||
TColdStore: ItemStore<TEthSpec> + 'static,
|
||||
{
|
||||
/// Specifies that the slot clock should read the time from the computers system clock.
|
||||
pub fn system_time_slot_clock(mut self) -> Result<Self, String> {
|
||||
@@ -746,11 +713,8 @@ where
|
||||
.ok_or_else(|| "system_time_slot_clock requires a beacon_chain_builder")?;
|
||||
|
||||
let genesis_time = beacon_chain_builder
|
||||
.finalized_checkpoint
|
||||
.as_ref()
|
||||
.ok_or_else(|| "system_time_slot_clock requires an initialized beacon state")?
|
||||
.beacon_state
|
||||
.genesis_time;
|
||||
.genesis_time
|
||||
.ok_or_else(|| "system_time_slot_clock requires an initialized beacon state")?;
|
||||
|
||||
let spec = self
|
||||
.chain_spec
|
||||
|
||||
@@ -2,33 +2,34 @@ use network::NetworkConfig;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use types::Graffiti;
|
||||
|
||||
pub const DEFAULT_DATADIR: &str = ".lighthouse";
|
||||
|
||||
/// The number initial validators when starting the `Minimal`.
|
||||
const TESTNET_SPEC_CONSTANTS: &str = "minimal";
|
||||
|
||||
/// Default directory name for the freezer database under the top-level data dir.
|
||||
const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db";
|
||||
|
||||
/// Defines how the client should initialize the `BeaconChain` and other components.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum ClientGenesis {
|
||||
/// Reads the genesis state and other persisted data from the `Store`.
|
||||
Resume,
|
||||
/// Creates a genesis state as per the 2019 Canada interop specifications.
|
||||
Interop {
|
||||
validator_count: usize,
|
||||
genesis_time: u64,
|
||||
},
|
||||
/// Reads the genesis state and other persisted data from the `Store`.
|
||||
FromStore,
|
||||
/// Connects to an eth1 node and waits until it can create the genesis state from the deposit
|
||||
/// contract.
|
||||
DepositContract,
|
||||
/// Loads the genesis state from a SSZ-encoded `BeaconState` file.
|
||||
SszFile { path: PathBuf },
|
||||
/// Loads the genesis state from SSZ-encoded `BeaconState` bytes.
|
||||
///
|
||||
/// We include the bytes instead of the `BeaconState<E>` because the `EthSpec` type
|
||||
/// parameter would be very annoying.
|
||||
SszBytes { genesis_state_bytes: Vec<u8> },
|
||||
/// Connects to another Lighthouse instance and reads the genesis state and other data via the
|
||||
/// HTTP API.
|
||||
RemoteNode { server: String, port: Option<u16> },
|
||||
}
|
||||
|
||||
impl Default for ClientGenesis {
|
||||
@@ -41,7 +42,10 @@ impl Default for ClientGenesis {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Config {
|
||||
pub data_dir: PathBuf,
|
||||
pub testnet_dir: Option<PathBuf>,
|
||||
/// Name of the directory inside the data directory where the main "hot" DB is located.
|
||||
pub db_name: String,
|
||||
/// Path where the freezer database will be located.
|
||||
pub freezer_db_path: Option<PathBuf>,
|
||||
pub log_file: PathBuf,
|
||||
pub spec_constants: String,
|
||||
/// If true, the node will use co-ordinated junk for eth1 values.
|
||||
@@ -49,6 +53,10 @@ pub struct Config {
|
||||
/// This is the method used for the 2019 client interop in Canada.
|
||||
pub dummy_eth1_backend: bool,
|
||||
pub sync_eth1_chain: bool,
|
||||
/// A list of hard-coded forks that will be disabled.
|
||||
pub disabled_forks: Vec<String>,
|
||||
/// Graffiti to be inserted everytime we create a block.
|
||||
pub graffiti: Graffiti,
|
||||
#[serde(skip)]
|
||||
/// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined
|
||||
/// via the CLI at runtime, instead of from a configuration file saved to disk.
|
||||
@@ -56,6 +64,7 @@ pub struct Config {
|
||||
pub store: store::StoreConfig,
|
||||
pub network: network::NetworkConfig,
|
||||
pub rest_api: rest_api::Config,
|
||||
pub chain: beacon_chain::ChainConfig,
|
||||
pub websocket_server: websocket_server::Config,
|
||||
pub eth1: eth1::Config,
|
||||
}
|
||||
@@ -63,18 +72,22 @@ pub struct Config {
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
data_dir: PathBuf::from(".lighthouse"),
|
||||
testnet_dir: None,
|
||||
data_dir: PathBuf::from(DEFAULT_DATADIR),
|
||||
db_name: "chain_db".to_string(),
|
||||
freezer_db_path: None,
|
||||
log_file: PathBuf::from(""),
|
||||
genesis: <_>::default(),
|
||||
store: <_>::default(),
|
||||
network: NetworkConfig::default(),
|
||||
chain: <_>::default(),
|
||||
rest_api: <_>::default(),
|
||||
websocket_server: <_>::default(),
|
||||
spec_constants: TESTNET_SPEC_CONSTANTS.into(),
|
||||
dummy_eth1_backend: false,
|
||||
sync_eth1_chain: false,
|
||||
eth1: <_>::default(),
|
||||
disabled_forks: Vec::new(),
|
||||
graffiti: Graffiti::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -83,7 +96,7 @@ impl Config {
|
||||
/// Get the database path without initialising it.
|
||||
pub fn get_db_path(&self) -> Option<PathBuf> {
|
||||
self.get_data_dir()
|
||||
.map(|data_dir| data_dir.join(&self.store.db_name))
|
||||
.map(|data_dir| data_dir.join(&self.db_name))
|
||||
}
|
||||
|
||||
/// Get the database path, creating it if necessary.
|
||||
@@ -97,7 +110,7 @@ impl Config {
|
||||
/// Fetch default path to use for the freezer database.
|
||||
fn default_freezer_db_path(&self) -> Option<PathBuf> {
|
||||
self.get_data_dir()
|
||||
.map(|data_dir| data_dir.join(self.store.default_freezer_db_dir()))
|
||||
.map(|data_dir| data_dir.join(DEFAULT_FREEZER_DB_DIR))
|
||||
}
|
||||
|
||||
/// Returns the path to which the client may initialize the on-disk freezer database.
|
||||
@@ -105,8 +118,7 @@ impl Config {
|
||||
/// Will attempt to use the user-supplied path from e.g. the CLI, or will default
|
||||
/// to a directory in the data_dir if no path is provided.
|
||||
pub fn get_freezer_db_path(&self) -> Option<PathBuf> {
|
||||
self.store
|
||||
.freezer_db_path
|
||||
self.freezer_db_path
|
||||
.clone()
|
||||
.or_else(|| self.default_freezer_db_path())
|
||||
}
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
use network;
|
||||
|
||||
use error_chain::error_chain;
|
||||
|
||||
error_chain! {
|
||||
|
||||
@@ -1,15 +1,14 @@
|
||||
extern crate slog;
|
||||
|
||||
mod config;
|
||||
pub mod config;
|
||||
mod metrics;
|
||||
mod notifier;
|
||||
|
||||
pub mod builder;
|
||||
pub mod error;
|
||||
|
||||
use beacon_chain::BeaconChain;
|
||||
use eth2_libp2p::{Enr, Multiaddr};
|
||||
use exit_future::Signal;
|
||||
use network::Service as NetworkService;
|
||||
use eth2_libp2p::{Enr, Multiaddr, NetworkGlobals};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -23,11 +22,9 @@ pub use eth2_config::Eth2Config;
|
||||
/// Holds references to running services, cleanly shutting them down when dropped.
|
||||
pub struct Client<T: BeaconChainTypes> {
|
||||
beacon_chain: Option<Arc<BeaconChain<T>>>,
|
||||
libp2p_network: Option<Arc<NetworkService<T>>>,
|
||||
network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
|
||||
http_listen_addr: Option<SocketAddr>,
|
||||
websocket_listen_addr: Option<SocketAddr>,
|
||||
/// Exit signals will "fire" when dropped, causing each service to exit gracefully.
|
||||
_exit_signals: Vec<Signal>,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> Client<T> {
|
||||
@@ -48,24 +45,16 @@ impl<T: BeaconChainTypes> Client<T> {
|
||||
|
||||
/// Returns the port of the client's libp2p stack, if it was started.
|
||||
pub fn libp2p_listen_port(&self) -> Option<u16> {
|
||||
self.libp2p_network.as_ref().map(|n| n.listen_port())
|
||||
self.network_globals.as_ref().map(|n| n.listen_port_tcp())
|
||||
}
|
||||
|
||||
/// Returns the list of libp2p addresses the client is listening to.
|
||||
pub fn libp2p_listen_addresses(&self) -> Option<Vec<Multiaddr>> {
|
||||
self.libp2p_network.as_ref().map(|n| n.listen_multiaddrs())
|
||||
self.network_globals.as_ref().map(|n| n.listen_multiaddrs())
|
||||
}
|
||||
|
||||
/// Returns the local libp2p ENR of this node, for network discovery.
|
||||
pub fn enr(&self) -> Option<Enr> {
|
||||
self.libp2p_network.as_ref().map(|n| n.local_enr())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> Drop for Client<T> {
|
||||
fn drop(&mut self) {
|
||||
if let Some(beacon_chain) = &self.beacon_chain {
|
||||
let _result = beacon_chain.persist();
|
||||
}
|
||||
self.network_globals.as_ref().map(|n| n.local_enr())
|
||||
}
|
||||
}
|
||||
|
||||
9
beacon_node/client/src/metrics.rs
Normal file
9
beacon_node/client/src/metrics.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
use lazy_static::lazy_static;
|
||||
pub use lighthouse_metrics::*;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref SYNC_SLOTS_PER_SECOND: Result<IntGauge> = try_create_int_gauge(
|
||||
"sync_slots_per_second",
|
||||
"The number of blocks being imported per second"
|
||||
);
|
||||
}
|
||||
@@ -1,43 +1,32 @@
|
||||
use crate::metrics;
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use environment::RuntimeContext;
|
||||
use exit_future::Signal;
|
||||
use futures::{Future, Stream};
|
||||
use network::Service as NetworkService;
|
||||
use eth2_libp2p::NetworkGlobals;
|
||||
use futures::prelude::*;
|
||||
use parking_lot::Mutex;
|
||||
use slog::{debug, error, info, warn};
|
||||
use slot_clock::SlotClock;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::timer::Interval;
|
||||
use tokio::time::delay_for;
|
||||
use types::{EthSpec, Slot};
|
||||
|
||||
/// Create a warning log whenever the peer count is at or below this value.
|
||||
pub const WARN_PEER_COUNT: usize = 1;
|
||||
|
||||
const SECS_PER_MINUTE: f64 = 60.0;
|
||||
const SECS_PER_HOUR: f64 = 3600.0;
|
||||
const SECS_PER_DAY: f64 = 86400.0; // non-leap
|
||||
const SECS_PER_WEEK: f64 = 604800.0; // non-leap
|
||||
const DAYS_PER_WEEK: f64 = 7.0;
|
||||
const HOURS_PER_DAY: f64 = 24.0;
|
||||
const MINUTES_PER_HOUR: f64 = 60.0;
|
||||
|
||||
/// How long to wait for the lock on `network.libp2p_service()` before we give up.
|
||||
const LIBP2P_LOCK_TIMEOUT: Duration = Duration::from_millis(50);
|
||||
const DAYS_PER_WEEK: i64 = 7;
|
||||
const HOURS_PER_DAY: i64 = 24;
|
||||
const MINUTES_PER_HOUR: i64 = 60;
|
||||
|
||||
/// The number of historical observations that should be used to determine the average sync time.
|
||||
const SPEEDO_OBSERVATIONS: usize = 4;
|
||||
|
||||
/// Spawns a notifier service which periodically logs information about the node.
|
||||
pub fn spawn_notifier<T: BeaconChainTypes>(
|
||||
context: RuntimeContext<T::EthSpec>,
|
||||
executor: environment::TaskExecutor,
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
network: Arc<NetworkService<T>>,
|
||||
network: Arc<NetworkGlobals<T::EthSpec>>,
|
||||
milliseconds_per_slot: u64,
|
||||
) -> Result<Signal, String> {
|
||||
let log_1 = context.log.clone();
|
||||
let log_2 = context.log.clone();
|
||||
|
||||
) -> Result<(), String> {
|
||||
let slot_duration = Duration::from_millis(milliseconds_per_slot);
|
||||
let duration_to_next_slot = beacon_chain
|
||||
.slot_clock
|
||||
@@ -45,34 +34,48 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
|
||||
.ok_or_else(|| "slot_notifier unable to determine time to next slot")?;
|
||||
|
||||
// Run this half way through each slot.
|
||||
let start_instant = Instant::now() + duration_to_next_slot + (slot_duration / 2);
|
||||
let start_instant = tokio::time::Instant::now() + duration_to_next_slot + (slot_duration / 2);
|
||||
|
||||
// Run this each slot.
|
||||
let interval_duration = slot_duration;
|
||||
|
||||
let speedo = Mutex::new(Speedo::default());
|
||||
let log = executor.log().clone();
|
||||
let mut interval = tokio::time::interval_at(start_instant, interval_duration);
|
||||
|
||||
let interval_future = Interval::new(start_instant, interval_duration)
|
||||
.map_err(
|
||||
move |e| error!(log_1, "Slot notifier timer failed"; "error" => format!("{:?}", e)),
|
||||
)
|
||||
.for_each(move |_| {
|
||||
let log = log_2.clone();
|
||||
let interval_future = async move {
|
||||
// Perform pre-genesis logging.
|
||||
loop {
|
||||
match beacon_chain.slot_clock.duration_to_next_slot() {
|
||||
// If the duration to the next slot is greater than the slot duration, then we are
|
||||
// waiting for genesis.
|
||||
Some(next_slot) if next_slot > slot_duration => {
|
||||
info!(
|
||||
log,
|
||||
"Waiting for genesis";
|
||||
"peers" => peer_count_pretty(network.connected_peers()),
|
||||
"wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)),
|
||||
);
|
||||
delay_for(slot_duration).await;
|
||||
}
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
|
||||
let connected_peer_count = if let Some(libp2p) = network
|
||||
.libp2p_service()
|
||||
.try_lock_until(Instant::now() + LIBP2P_LOCK_TIMEOUT)
|
||||
{
|
||||
libp2p.swarm.connected_peers()
|
||||
} else {
|
||||
// Use max_value here and we'll print something pretty later.
|
||||
usize::max_value()
|
||||
};
|
||||
// Perform post-genesis logging.
|
||||
while interval.next().await.is_some() {
|
||||
let connected_peer_count = network.connected_peers();
|
||||
let sync_state = network.sync_state();
|
||||
|
||||
let head = beacon_chain.head();
|
||||
let head_info = beacon_chain.head_info().map_err(|e| {
|
||||
error!(
|
||||
log,
|
||||
"Failed to get beacon chain head info";
|
||||
"error" => format!("{:?}", e)
|
||||
)
|
||||
})?;
|
||||
|
||||
let head_slot = head.beacon_block.slot;
|
||||
let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch());
|
||||
let head_slot = head_info.slot;
|
||||
let current_slot = beacon_chain.slot().map_err(|e| {
|
||||
error!(
|
||||
log,
|
||||
@@ -81,13 +84,18 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
|
||||
)
|
||||
})?;
|
||||
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
|
||||
let finalized_epoch = head.beacon_state.finalized_checkpoint.epoch;
|
||||
let finalized_root = head.beacon_state.finalized_checkpoint.root;
|
||||
let head_root = head.beacon_block_root;
|
||||
let finalized_epoch = head_info.finalized_checkpoint.epoch;
|
||||
let finalized_root = head_info.finalized_checkpoint.root;
|
||||
let head_root = head_info.block_root;
|
||||
|
||||
let mut speedo = speedo.lock();
|
||||
speedo.observe(head_slot, Instant::now());
|
||||
|
||||
metrics::set_gauge(
|
||||
&metrics::SYNC_SLOTS_PER_SECOND,
|
||||
speedo.slots_per_second().unwrap_or_else(|| 0_f64) as i64,
|
||||
);
|
||||
|
||||
// The next two lines take advantage of saturating subtraction on `Slot`.
|
||||
let head_distance = current_slot - head_slot;
|
||||
|
||||
@@ -104,66 +112,73 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
|
||||
"head_block" => format!("{}", head_root),
|
||||
"head_slot" => head_slot,
|
||||
"current_slot" => current_slot,
|
||||
"sync_state" =>format!("{}", sync_state)
|
||||
);
|
||||
|
||||
if head_epoch + 1 < current_epoch {
|
||||
// Log if we are syncing
|
||||
if sync_state.is_syncing() {
|
||||
let distance = format!(
|
||||
"{} slots ({})",
|
||||
head_distance.as_u64(),
|
||||
slot_distance_pretty(head_distance, slot_duration)
|
||||
);
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Syncing";
|
||||
"peers" => peer_count_pretty(connected_peer_count),
|
||||
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)),
|
||||
"speed" => sync_speed_pretty(speedo.slots_per_second()),
|
||||
"distance" => distance
|
||||
);
|
||||
let speed = speedo.slots_per_second();
|
||||
let display_speed = speed.map_or(false, |speed| speed != 0.0);
|
||||
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
macro_rules! not_quite_synced_log {
|
||||
($message: expr) => {
|
||||
if display_speed {
|
||||
info!(
|
||||
log_2,
|
||||
$message;
|
||||
log,
|
||||
"Syncing";
|
||||
"peers" => peer_count_pretty(connected_peer_count),
|
||||
"finalized_root" => format!("{}", finalized_root),
|
||||
"finalized_epoch" => finalized_epoch,
|
||||
"head_slot" => head_slot,
|
||||
"current_slot" => current_slot,
|
||||
"distance" => distance,
|
||||
"speed" => sync_speed_pretty(speed),
|
||||
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)),
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Syncing";
|
||||
"peers" => peer_count_pretty(connected_peer_count),
|
||||
"distance" => distance,
|
||||
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if head_epoch + 1 == current_epoch {
|
||||
not_quite_synced_log!("Synced to previous epoch")
|
||||
} else if head_slot != current_slot {
|
||||
not_quite_synced_log!("Synced to current epoch")
|
||||
} else {
|
||||
} else if sync_state.is_synced() {
|
||||
let block_info = if current_slot > head_slot {
|
||||
" … empty".to_string()
|
||||
} else {
|
||||
head_root.to_string()
|
||||
};
|
||||
info!(
|
||||
log_2,
|
||||
log,
|
||||
"Synced";
|
||||
"peers" => peer_count_pretty(connected_peer_count),
|
||||
"finalized_root" => format!("{}", finalized_root),
|
||||
"finalized_epoch" => finalized_epoch,
|
||||
"epoch" => current_epoch,
|
||||
"block" => block_info,
|
||||
"slot" => current_slot,
|
||||
);
|
||||
};
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Searching for peers";
|
||||
"peers" => peer_count_pretty(connected_peer_count),
|
||||
"finalized_root" => format!("{}", finalized_root),
|
||||
"finalized_epoch" => finalized_epoch,
|
||||
"head_slot" => head_slot,
|
||||
"current_slot" => current_slot,
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok::<(), ()>(())
|
||||
};
|
||||
|
||||
Ok(())
|
||||
});
|
||||
// run the notifier on the current executor
|
||||
executor.spawn(interval_future.unwrap_or_else(|_| ()), "notifier");
|
||||
|
||||
let (exit_signal, exit) = exit_future::signal();
|
||||
context
|
||||
.executor
|
||||
.spawn(exit.until(interval_future).map(|_| ()));
|
||||
|
||||
Ok(exit_signal)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the peer count, returning something helpful if it's `usize::max_value` (effectively a
|
||||
@@ -212,31 +227,44 @@ fn seconds_pretty(secs: f64) -> String {
|
||||
return "--".into();
|
||||
}
|
||||
|
||||
let weeks = secs / SECS_PER_WEEK;
|
||||
let days = secs / SECS_PER_DAY;
|
||||
let hours = secs / SECS_PER_HOUR;
|
||||
let minutes = secs / SECS_PER_MINUTE;
|
||||
let d = time::Duration::seconds_f64(secs);
|
||||
|
||||
if weeks.floor() > 0.0 {
|
||||
let weeks = d.whole_weeks();
|
||||
let days = d.whole_days();
|
||||
let hours = d.whole_hours();
|
||||
let minutes = d.whole_minutes();
|
||||
|
||||
let week_string = if weeks == 1 { "week" } else { "weeks" };
|
||||
let day_string = if days == 1 { "day" } else { "days" };
|
||||
let hour_string = if hours == 1 { "hr" } else { "hrs" };
|
||||
let min_string = if minutes == 1 { "min" } else { "mins" };
|
||||
|
||||
if weeks > 0 {
|
||||
format!(
|
||||
"{:.0} weeks {:.0} days",
|
||||
"{:.0} {} {:.0} {}",
|
||||
weeks,
|
||||
(days % DAYS_PER_WEEK).round()
|
||||
week_string,
|
||||
days % DAYS_PER_WEEK,
|
||||
day_string
|
||||
)
|
||||
} else if days.floor() > 0.0 {
|
||||
} else if days > 0 {
|
||||
format!(
|
||||
"{:.0} days {:.0} hrs",
|
||||
"{:.0} {} {:.0} {}",
|
||||
days,
|
||||
(hours % HOURS_PER_DAY).round()
|
||||
day_string,
|
||||
hours % HOURS_PER_DAY,
|
||||
hour_string
|
||||
)
|
||||
} else if hours.floor() > 0.0 {
|
||||
} else if hours > 0 {
|
||||
format!(
|
||||
"{:.0} hrs {:.0} mins",
|
||||
"{:.0} {} {:.0} {}",
|
||||
hours,
|
||||
(minutes % MINUTES_PER_HOUR).round()
|
||||
hour_string,
|
||||
minutes % MINUTES_PER_HOUR,
|
||||
min_string
|
||||
)
|
||||
} else {
|
||||
format!("{:.0} mins", minutes.round())
|
||||
format!("{:.0} {}", minutes, min_string)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -297,7 +325,7 @@ impl Speedo {
|
||||
let (prev_slot, _) = self.0.last()?;
|
||||
let slots_per_second = self.slots_per_second()?;
|
||||
|
||||
if target_slot > *prev_slot {
|
||||
if target_slot > *prev_slot && slots_per_second > 0.0 {
|
||||
let distance = (target_slot - *prev_slot).as_u64() as f64;
|
||||
Some(distance / slots_per_second)
|
||||
} else {
|
||||
|
||||
@@ -1,29 +1,32 @@
|
||||
[package]
|
||||
name = "eth1"
|
||||
version = "0.1.0"
|
||||
version = "0.2.0"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
||||
edition = "2018"
|
||||
|
||||
[dev-dependencies]
|
||||
eth1_test_rig = { path = "../../tests/eth1_test_rig" }
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
toml = "^0.5"
|
||||
web3 = "0.8.0"
|
||||
eth1_test_rig = { path = "../../testing/eth1_test_rig" }
|
||||
toml = "0.5.6"
|
||||
web3 = "0.11.0"
|
||||
sloggers = "1.0.0"
|
||||
|
||||
[dependencies]
|
||||
reqwest = "0.9"
|
||||
futures = "0.1.25"
|
||||
serde_json = "1.0"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
hex = "0.3"
|
||||
types = { path = "../../eth2/types"}
|
||||
merkle_proof = { path = "../../eth2/utils/merkle_proof"}
|
||||
eth2_ssz = { path = "../../eth2/utils/ssz"}
|
||||
tree_hash = { path = "../../eth2/utils/tree_hash"}
|
||||
eth2_hashing = { path = "../../eth2/utils/eth2_hashing"}
|
||||
parking_lot = "0.7"
|
||||
slog = "^2.2.3"
|
||||
tokio = "0.1.22"
|
||||
state_processing = { path = "../../eth2/state_processing" }
|
||||
exit-future = "0.1.4"
|
||||
libflate = "0.1"
|
||||
reqwest = { version = "0.10.4", features = ["native-tls-vendored"] }
|
||||
futures = { version = "0.3.5", features = ["compat"] }
|
||||
serde_json = "1.0.52"
|
||||
serde = { version = "1.0.110", features = ["derive"] }
|
||||
hex = "0.4.2"
|
||||
types = { path = "../../consensus/types"}
|
||||
merkle_proof = { path = "../../consensus/merkle_proof"}
|
||||
eth2_ssz = "0.1.2"
|
||||
eth2_ssz_derive = "0.1.0"
|
||||
tree_hash = "0.1.0"
|
||||
eth2_hashing = "0.1.0"
|
||||
parking_lot = "0.11.0"
|
||||
slog = "2.5.2"
|
||||
tokio = { version = "0.2.21", features = ["full"] }
|
||||
state_processing = { path = "../../consensus/state_processing" }
|
||||
libflate = "1.0.0"
|
||||
lighthouse_metrics = { path = "../../common/lighthouse_metrics"}
|
||||
lazy_static = "1.4.0"
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use std::ops::RangeInclusive;
|
||||
use types::{Eth1Data, Hash256};
|
||||
|
||||
@@ -17,7 +18,7 @@ pub enum Error {
|
||||
/// A block of the eth1 chain.
|
||||
///
|
||||
/// Contains all information required to add a `BlockCache` entry.
|
||||
#[derive(Debug, PartialEq, Clone, Eq, Hash)]
|
||||
#[derive(Debug, PartialEq, Clone, Eq, Hash, Encode, Decode)]
|
||||
pub struct Eth1Block {
|
||||
pub hash: Hash256,
|
||||
pub timestamp: u64,
|
||||
@@ -38,7 +39,7 @@ impl Eth1Block {
|
||||
|
||||
/// Stores block and deposit contract information and provides queries based upon the block
|
||||
/// timestamp.
|
||||
#[derive(Debug, PartialEq, Clone, Default)]
|
||||
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
|
||||
pub struct BlockCache {
|
||||
blocks: Vec<Eth1Block>,
|
||||
}
|
||||
@@ -192,10 +193,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn get_blocks(n: usize, interval_secs: u64) -> Vec<Eth1Block> {
|
||||
(0..n as u64)
|
||||
.into_iter()
|
||||
.map(|i| get_block(i, interval_secs))
|
||||
.collect()
|
||||
(0..n as u64).map(|i| get_block(i, interval_secs)).collect()
|
||||
}
|
||||
|
||||
fn insert(cache: &mut BlockCache, s: Eth1Block) -> Result<(), Error> {
|
||||
@@ -213,20 +211,20 @@ mod tests {
|
||||
insert(&mut cache, block.clone()).expect("should add consecutive blocks");
|
||||
}
|
||||
|
||||
for len in vec![0, 1, 2, 3, 4, 8, 15, 16] {
|
||||
for len in &[0, 1, 2, 3, 4, 8, 15, 16] {
|
||||
let mut cache = cache.clone();
|
||||
|
||||
cache.truncate(len);
|
||||
cache.truncate(*len);
|
||||
|
||||
assert_eq!(
|
||||
cache.blocks.len(),
|
||||
len,
|
||||
*len,
|
||||
"should truncate to length: {}",
|
||||
len
|
||||
*len
|
||||
);
|
||||
}
|
||||
|
||||
let mut cache_2 = cache.clone();
|
||||
let mut cache_2 = cache;
|
||||
cache_2.truncate(17);
|
||||
assert_eq!(
|
||||
cache_2.blocks.len(),
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
use crate::DepositLog;
|
||||
use eth2_hashing::hash;
|
||||
use std::ops::Range;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use state_processing::common::DepositDataTree;
|
||||
use std::cmp::Ordering;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{Deposit, Hash256};
|
||||
use types::{Deposit, Hash256, DEPOSIT_TREE_DEPTH};
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
/// A deposit log was added when a prior deposit was not already in the cache.
|
||||
///
|
||||
@@ -24,61 +25,93 @@ pub enum Error {
|
||||
///
|
||||
/// E.g., you cannot request deposit 10 when the deposit count is 9.
|
||||
DepositCountInvalid { deposit_count: u64, range_end: u64 },
|
||||
/// Error with the merkle tree for deposits.
|
||||
DepositTreeError(merkle_proof::MerkleTreeError),
|
||||
/// An unexpected condition was encountered.
|
||||
InternalError(String),
|
||||
}
|
||||
|
||||
/// Emulates the eth1 deposit contract merkle tree.
|
||||
pub struct DepositDataTree {
|
||||
tree: merkle_proof::MerkleTree,
|
||||
mix_in_length: usize,
|
||||
depth: usize,
|
||||
#[derive(Encode, Decode, Clone)]
|
||||
pub struct SszDepositCache {
|
||||
logs: Vec<DepositLog>,
|
||||
leaves: Vec<Hash256>,
|
||||
deposit_contract_deploy_block: u64,
|
||||
deposit_roots: Vec<Hash256>,
|
||||
}
|
||||
|
||||
impl DepositDataTree {
|
||||
/// Create a new Merkle tree from a list of leaves (`DepositData::tree_hash_root`) and a fixed depth.
|
||||
pub fn create(leaves: &[Hash256], mix_in_length: usize, depth: usize) -> Self {
|
||||
impl SszDepositCache {
|
||||
pub fn from_deposit_cache(cache: &DepositCache) -> Self {
|
||||
Self {
|
||||
tree: merkle_proof::MerkleTree::create(leaves, depth),
|
||||
mix_in_length,
|
||||
depth,
|
||||
logs: cache.logs.clone(),
|
||||
leaves: cache.leaves.clone(),
|
||||
deposit_contract_deploy_block: cache.deposit_contract_deploy_block,
|
||||
deposit_roots: cache.deposit_roots.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns 32 bytes representing the "mix in length" for the merkle root of this tree.
|
||||
fn length_bytes(&self) -> Vec<u8> {
|
||||
int_to_bytes32(self.mix_in_length)
|
||||
}
|
||||
|
||||
/// Retrieve the root hash of this Merkle tree with the length mixed in.
|
||||
pub fn root(&self) -> Hash256 {
|
||||
let mut preimage = [0; 64];
|
||||
preimage[0..32].copy_from_slice(&self.tree.hash()[..]);
|
||||
preimage[32..64].copy_from_slice(&self.length_bytes());
|
||||
Hash256::from_slice(&hash(&preimage))
|
||||
}
|
||||
|
||||
/// Return the leaf at `index` and a Merkle proof of its inclusion.
|
||||
///
|
||||
/// The Merkle proof is in "bottom-up" order, starting with a leaf node
|
||||
/// and moving up the tree. Its length will be exactly equal to `depth + 1`.
|
||||
pub fn generate_proof(&self, index: usize) -> (Hash256, Vec<Hash256>) {
|
||||
let (root, mut proof) = self.tree.generate_proof(index, self.depth);
|
||||
proof.push(Hash256::from_slice(&self.length_bytes()));
|
||||
(root, proof)
|
||||
pub fn to_deposit_cache(&self) -> Result<DepositCache, String> {
|
||||
let deposit_tree =
|
||||
DepositDataTree::create(&self.leaves, self.leaves.len(), DEPOSIT_TREE_DEPTH);
|
||||
// Check for invalid SszDepositCache conditions
|
||||
if self.leaves.len() != self.logs.len() {
|
||||
return Err("Invalid SszDepositCache: logs and leaves should have equal length".into());
|
||||
}
|
||||
// `deposit_roots` also includes the zero root
|
||||
if self.leaves.len() + 1 != self.deposit_roots.len() {
|
||||
return Err(
|
||||
"Invalid SszDepositCache: deposit_roots length must be only one more than leaves"
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
Ok(DepositCache {
|
||||
logs: self.logs.clone(),
|
||||
leaves: self.leaves.clone(),
|
||||
deposit_contract_deploy_block: self.deposit_contract_deploy_block,
|
||||
deposit_tree,
|
||||
deposit_roots: self.deposit_roots.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Mirrors the merkle tree of deposits in the eth1 deposit contract.
|
||||
///
|
||||
/// Provides `Deposit` objects with merkle proofs included.
|
||||
#[derive(Default)]
|
||||
pub struct DepositCache {
|
||||
logs: Vec<DepositLog>,
|
||||
roots: Vec<Hash256>,
|
||||
leaves: Vec<Hash256>,
|
||||
deposit_contract_deploy_block: u64,
|
||||
/// An incremental merkle tree which represents the current state of the
|
||||
/// deposit contract tree.
|
||||
deposit_tree: DepositDataTree,
|
||||
/// Vector of deposit roots. `deposit_roots[i]` denotes `deposit_root` at
|
||||
/// `deposit_index` `i`.
|
||||
deposit_roots: Vec<Hash256>,
|
||||
}
|
||||
|
||||
impl Default for DepositCache {
|
||||
fn default() -> Self {
|
||||
let deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH);
|
||||
let deposit_roots = vec![deposit_tree.root()];
|
||||
DepositCache {
|
||||
logs: Vec::new(),
|
||||
leaves: Vec::new(),
|
||||
deposit_contract_deploy_block: 1,
|
||||
deposit_tree,
|
||||
deposit_roots,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DepositCache {
|
||||
/// Create new `DepositCache` given block number at which deposit
|
||||
/// contract was deployed.
|
||||
pub fn new(deposit_contract_deploy_block: u64) -> Self {
|
||||
DepositCache {
|
||||
deposit_contract_deploy_block,
|
||||
..Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of deposits available in the cache.
|
||||
pub fn len(&self) -> usize {
|
||||
self.logs.len()
|
||||
@@ -114,23 +147,28 @@ impl DepositCache {
|
||||
/// - If a log with index `log.index - 1` is not already present in `self` (ignored when empty).
|
||||
/// - If a log with `log.index` is already known, but the given `log` is distinct to it.
|
||||
pub fn insert_log(&mut self, log: DepositLog) -> Result<(), Error> {
|
||||
if log.index == self.logs.len() as u64 {
|
||||
self.roots
|
||||
.push(Hash256::from_slice(&log.deposit_data.tree_hash_root()));
|
||||
self.logs.push(log);
|
||||
|
||||
Ok(())
|
||||
} else if log.index < self.logs.len() as u64 {
|
||||
if self.logs[log.index as usize] == log {
|
||||
match log.index.cmp(&(self.logs.len() as u64)) {
|
||||
Ordering::Equal => {
|
||||
let deposit = log.deposit_data.tree_hash_root();
|
||||
self.leaves.push(deposit);
|
||||
self.logs.push(log);
|
||||
self.deposit_tree
|
||||
.push_leaf(deposit)
|
||||
.map_err(Error::DepositTreeError)?;
|
||||
self.deposit_roots.push(self.deposit_tree.root());
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::DuplicateDistinctLog(log.index))
|
||||
}
|
||||
} else {
|
||||
Err(Error::NonConsecutive {
|
||||
Ordering::Less => {
|
||||
if self.logs[log.index as usize] == log {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::DuplicateDistinctLog(log.index))
|
||||
}
|
||||
}
|
||||
Ordering::Greater => Err(Error::NonConsecutive {
|
||||
log_index: log.index,
|
||||
expected: self.logs.len(),
|
||||
})
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,27 +181,28 @@ impl DepositCache {
|
||||
///
|
||||
/// ## Errors
|
||||
///
|
||||
/// - If `deposit_count` is larger than `range.end`.
|
||||
/// - If `deposit_count` is larger than `end`.
|
||||
/// - There are not sufficient deposits in the tree to generate the proof.
|
||||
pub fn get_deposits(
|
||||
&self,
|
||||
range: Range<u64>,
|
||||
start: u64,
|
||||
end: u64,
|
||||
deposit_count: u64,
|
||||
tree_depth: usize,
|
||||
) -> Result<(Hash256, Vec<Deposit>), Error> {
|
||||
if deposit_count < range.end {
|
||||
if deposit_count < end {
|
||||
// It's invalid to ask for more deposits than should exist.
|
||||
Err(Error::DepositCountInvalid {
|
||||
deposit_count,
|
||||
range_end: range.end,
|
||||
range_end: end,
|
||||
})
|
||||
} else if range.end > self.logs.len() as u64 {
|
||||
} else if end > self.logs.len() as u64 {
|
||||
// The range of requested deposits exceeds the deposits stored locally.
|
||||
Err(Error::InsufficientDeposits {
|
||||
requested: range.end,
|
||||
requested: end,
|
||||
known_deposits: self.logs.len(),
|
||||
})
|
||||
} else if deposit_count > self.roots.len() as u64 {
|
||||
} else if deposit_count > self.leaves.len() as u64 {
|
||||
// There are not `deposit_count` known deposit roots, so we can't build the merkle tree
|
||||
// to prove into.
|
||||
Err(Error::InsufficientDeposits {
|
||||
@@ -171,10 +210,10 @@ impl DepositCache {
|
||||
known_deposits: self.logs.len(),
|
||||
})
|
||||
} else {
|
||||
let roots = self
|
||||
.roots
|
||||
let leaves = self
|
||||
.leaves
|
||||
.get(0..deposit_count as usize)
|
||||
.ok_or_else(|| Error::InternalError("Unable to get known root".into()))?;
|
||||
.ok_or_else(|| Error::InternalError("Unable to get known leaves".into()))?;
|
||||
|
||||
// Note: there is likely a more optimal solution than recreating the `DepositDataTree`
|
||||
// each time this function is called.
|
||||
@@ -183,11 +222,11 @@ impl DepositCache {
|
||||
// last finalized eth1 deposit count. Then, that tree could be cloned and extended for
|
||||
// each of these calls.
|
||||
|
||||
let tree = DepositDataTree::create(roots, deposit_count as usize, tree_depth);
|
||||
let tree = DepositDataTree::create(leaves, deposit_count as usize, tree_depth);
|
||||
|
||||
let deposits = self
|
||||
.logs
|
||||
.get(range.start as usize..range.end as usize)
|
||||
.get(start as usize..end as usize)
|
||||
.ok_or_else(|| Error::InternalError("Unable to get known log".into()))?
|
||||
.iter()
|
||||
.map(|deposit_log| {
|
||||
@@ -203,13 +242,50 @@ impl DepositCache {
|
||||
Ok((tree.root(), deposits))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `int` as little-endian bytes with a length of 32.
|
||||
fn int_to_bytes32(int: usize) -> Vec<u8> {
|
||||
let mut vec = int.to_le_bytes().to_vec();
|
||||
vec.resize(32, 0);
|
||||
vec
|
||||
/// Returns the number of deposits with valid signatures that have been observed up to and
|
||||
/// including the block at `block_number`.
|
||||
///
|
||||
/// Returns `None` if the `block_number` is zero or prior to contract deployment.
|
||||
pub fn get_valid_signature_count(&self, block_number: u64) -> Option<usize> {
|
||||
if block_number == 0 || block_number < self.deposit_contract_deploy_block {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
self.logs
|
||||
.iter()
|
||||
.take_while(|deposit| deposit.block_number <= block_number)
|
||||
.filter(|deposit| deposit.signature_is_valid)
|
||||
.count(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of deposits that have been observed up to and
|
||||
/// including the block at `block_number`.
|
||||
///
|
||||
/// Returns `None` if the `block_number` is zero or prior to contract deployment.
|
||||
pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option<u64> {
|
||||
if block_number == 0 || block_number < self.deposit_contract_deploy_block {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
self.logs
|
||||
.iter()
|
||||
.take_while(|deposit| deposit.block_number <= block_number)
|
||||
.count() as u64,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the deposit root at block height = block_number.
|
||||
///
|
||||
/// Fetches the `deposit_count` on or just before the queried `block_number`
|
||||
/// and queries the `deposit_roots` map to get the corresponding `deposit_root`.
|
||||
pub fn get_deposit_root_from_cache(&self, block_number: u64) -> Option<Hash256> {
|
||||
let index = self.get_deposit_count_from_cache(block_number)?;
|
||||
Some(*self.deposit_roots.get(index as usize)?)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -217,15 +293,18 @@ pub mod tests {
|
||||
use super::*;
|
||||
use crate::deposit_log::tests::EXAMPLE_LOG;
|
||||
use crate::http::Log;
|
||||
use types::{EthSpec, MainnetEthSpec};
|
||||
|
||||
pub const TREE_DEPTH: usize = 32;
|
||||
|
||||
fn example_log() -> DepositLog {
|
||||
let spec = MainnetEthSpec::default_spec();
|
||||
|
||||
let log = Log {
|
||||
block_number: 42,
|
||||
data: EXAMPLE_LOG.to_vec(),
|
||||
};
|
||||
DepositLog::from_log(&log).expect("should decode log")
|
||||
DepositLog::from_log(&log, &spec).expect("should decode log")
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -281,31 +360,31 @@ pub mod tests {
|
||||
|
||||
// Get 0 deposits, with max deposit count.
|
||||
let (_, deposits) = tree
|
||||
.get_deposits(0..0, n, TREE_DEPTH)
|
||||
.get_deposits(0, 0, n, TREE_DEPTH)
|
||||
.expect("should get the full tree");
|
||||
assert_eq!(deposits.len(), 0, "should return no deposits");
|
||||
|
||||
// Get 0 deposits, with 0 deposit count.
|
||||
let (_, deposits) = tree
|
||||
.get_deposits(0..0, 0, TREE_DEPTH)
|
||||
.get_deposits(0, 0, 0, TREE_DEPTH)
|
||||
.expect("should get the full tree");
|
||||
assert_eq!(deposits.len(), 0, "should return no deposits");
|
||||
|
||||
// Get 0 deposits, with 0 deposit count, tree depth 0.
|
||||
let (_, deposits) = tree
|
||||
.get_deposits(0..0, 0, 0)
|
||||
.get_deposits(0, 0, 0, 0)
|
||||
.expect("should get the full tree");
|
||||
assert_eq!(deposits.len(), 0, "should return no deposits");
|
||||
|
||||
// Get all deposits, with max deposit count.
|
||||
let (full_root, deposits) = tree
|
||||
.get_deposits(0..n, n, TREE_DEPTH)
|
||||
.get_deposits(0, n, n, TREE_DEPTH)
|
||||
.expect("should get the full tree");
|
||||
assert_eq!(deposits.len(), n as usize, "should return all deposits");
|
||||
|
||||
// Get 4 deposits, with max deposit count.
|
||||
let (root, deposits) = tree
|
||||
.get_deposits(0..4, n, TREE_DEPTH)
|
||||
.get_deposits(0, 4, n, TREE_DEPTH)
|
||||
.expect("should get the four from the full tree");
|
||||
assert_eq!(
|
||||
deposits.len(),
|
||||
@@ -318,18 +397,15 @@ pub mod tests {
|
||||
);
|
||||
|
||||
// Get half of the deposits, with half deposit count.
|
||||
let half = n / 2;
|
||||
let (half_root, deposits) = tree
|
||||
.get_deposits(0..n / 2, n / 2, TREE_DEPTH)
|
||||
.get_deposits(0, half, half, TREE_DEPTH)
|
||||
.expect("should get the half tree");
|
||||
assert_eq!(
|
||||
deposits.len(),
|
||||
n as usize / 2,
|
||||
"should return half deposits"
|
||||
);
|
||||
assert_eq!(deposits.len(), half as usize, "should return half deposits");
|
||||
|
||||
// Get 4 deposits, with half deposit count.
|
||||
let (root, deposits) = tree
|
||||
.get_deposits(0..4, n / 2, TREE_DEPTH)
|
||||
.get_deposits(0, 4, n / 2, TREE_DEPTH)
|
||||
.expect("should get the half tree");
|
||||
assert_eq!(
|
||||
deposits.len(),
|
||||
@@ -360,12 +436,12 @@ pub mod tests {
|
||||
}
|
||||
|
||||
// Range too high.
|
||||
assert!(tree.get_deposits(0..n + 1, n, TREE_DEPTH).is_err());
|
||||
assert!(tree.get_deposits(0, n + 1, n, TREE_DEPTH).is_err());
|
||||
|
||||
// Count too high.
|
||||
assert!(tree.get_deposits(0..n, n + 1, TREE_DEPTH).is_err());
|
||||
assert!(tree.get_deposits(0, n, n + 1, TREE_DEPTH).is_err());
|
||||
|
||||
// Range higher than count.
|
||||
assert!(tree.get_deposits(0..4, 2, TREE_DEPTH).is_err());
|
||||
assert!(tree.get_deposits(0, 4, 2, TREE_DEPTH).is_err());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
use super::http::Log;
|
||||
use ssz::Decode;
|
||||
use types::{DepositData, Hash256, PublicKeyBytes, SignatureBytes};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use state_processing::per_block_processing::signature_sets::{
|
||||
deposit_pubkey_signature_message, deposit_signature_set,
|
||||
};
|
||||
use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes};
|
||||
|
||||
/// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The
|
||||
/// event bytes are formatted according to the Ethereum ABI.
|
||||
@@ -16,18 +20,20 @@ const INDEX_START: usize = SIG_START + 96 + 32;
|
||||
const INDEX_LEN: usize = 8;
|
||||
|
||||
/// A fully parsed eth1 deposit contract log.
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
#[derive(Debug, PartialEq, Clone, Encode, Decode)]
|
||||
pub struct DepositLog {
|
||||
pub deposit_data: DepositData,
|
||||
/// The block number of the log that included this `DepositData`.
|
||||
pub block_number: u64,
|
||||
/// The index included with the deposit log.
|
||||
pub index: u64,
|
||||
/// True if the signature is valid.
|
||||
pub signature_is_valid: bool,
|
||||
}
|
||||
|
||||
impl DepositLog {
|
||||
/// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`.
|
||||
pub fn from_log(log: &Log) -> Result<Self, String> {
|
||||
pub fn from_log(log: &Log, spec: &ChainSpec) -> Result<Self, String> {
|
||||
let bytes = &log.data;
|
||||
|
||||
let pubkey = bytes
|
||||
@@ -57,10 +63,14 @@ impl DepositLog {
|
||||
.map_err(|e| format!("Invalid signature ssz: {:?}", e))?,
|
||||
};
|
||||
|
||||
let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec)
|
||||
.map_or(false, |msg| deposit_signature_set(&msg).verify());
|
||||
|
||||
Ok(DepositLog {
|
||||
deposit_data,
|
||||
block_number: log.block_number,
|
||||
index: u64::from_ssz_bytes(index).map_err(|e| format!("Invalid index ssz: {:?}", e))?,
|
||||
signature_is_valid,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -69,6 +79,7 @@ impl DepositLog {
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use crate::http::Log;
|
||||
use types::{EthSpec, MainnetEthSpec};
|
||||
|
||||
/// The data from a deposit event, using the v0.8.3 version of the deposit contract.
|
||||
pub const EXAMPLE_LOG: &[u8] = &[
|
||||
@@ -102,6 +113,6 @@ pub mod tests {
|
||||
block_number: 42,
|
||||
data: EXAMPLE_LOG.to_vec(),
|
||||
};
|
||||
DepositLog::from_log(&log).expect("should decode log");
|
||||
DepositLog::from_log(&log, &MainnetEthSpec::default_spec()).expect("should decode log");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,12 +10,12 @@
|
||||
//!
|
||||
//! There is no ABI parsing here, all function signatures and topics are hard-coded as constants.
|
||||
|
||||
use futures::{Future, Stream};
|
||||
use libflate::gzip::Decoder;
|
||||
use reqwest::{header::CONTENT_TYPE, r#async::ClientBuilder, StatusCode};
|
||||
use futures::future::TryFutureExt;
|
||||
use reqwest::{header::CONTENT_TYPE, ClientBuilder, StatusCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{json, Value};
|
||||
use std::io::prelude::*;
|
||||
use std::ops::Range;
|
||||
use std::str::FromStr;
|
||||
use std::time::Duration;
|
||||
use types::Hash256;
|
||||
|
||||
@@ -32,6 +32,40 @@ pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96;
|
||||
/// Number of bytes in deposit contract deposit root (value only).
|
||||
pub const DEPOSIT_ROOT_BYTES: usize = 32;
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
|
||||
pub enum Eth1NetworkId {
|
||||
Goerli,
|
||||
Mainnet,
|
||||
Custom(u64),
|
||||
}
|
||||
|
||||
impl FromStr for Eth1NetworkId {
|
||||
type Err = String;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
match s {
|
||||
"1" => Ok(Eth1NetworkId::Mainnet),
|
||||
"5" => Ok(Eth1NetworkId::Goerli),
|
||||
custom => {
|
||||
let network_id = u64::from_str_radix(custom, 10)
|
||||
.map_err(|e| format!("Failed to parse eth1 network id {}", e))?;
|
||||
Ok(Eth1NetworkId::Custom(network_id))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the eth1 network id of the given endpoint.
|
||||
pub async fn get_network_id(endpoint: &str, timeout: Duration) -> Result<Eth1NetworkId, String> {
|
||||
let response_body = send_rpc_request(endpoint, "net_version", json!([]), timeout).await?;
|
||||
Eth1NetworkId::from_str(
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result was returned for block number".to_string())?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Data was not string")?,
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub struct Block {
|
||||
pub hash: Hash256,
|
||||
@@ -42,80 +76,73 @@ pub struct Block {
|
||||
/// Returns the current block number.
|
||||
///
|
||||
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
|
||||
pub fn get_block_number(
|
||||
endpoint: &str,
|
||||
timeout: Duration,
|
||||
) -> impl Future<Item = u64, Error = String> {
|
||||
send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout)
|
||||
.and_then(|response_body| {
|
||||
hex_to_u64_be(
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for block number".to_string())?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Data was not string")?,
|
||||
)
|
||||
})
|
||||
.map_err(|e| format!("Failed to get block number: {}", e))
|
||||
pub async fn get_block_number(endpoint: &str, timeout: Duration) -> Result<u64, String> {
|
||||
let response_body = send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout).await?;
|
||||
hex_to_u64_be(
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for block number".to_string())?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Data was not string")?,
|
||||
)
|
||||
.map_err(|e| format!("Failed to get block number: {}", e))
|
||||
}
|
||||
|
||||
/// Gets a block hash by block number.
|
||||
///
|
||||
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
|
||||
pub fn get_block(
|
||||
pub async fn get_block(
|
||||
endpoint: &str,
|
||||
block_number: u64,
|
||||
timeout: Duration,
|
||||
) -> impl Future<Item = Block, Error = String> {
|
||||
) -> Result<Block, String> {
|
||||
let params = json!([
|
||||
format!("0x{:x}", block_number),
|
||||
false // do not return full tx objects.
|
||||
]);
|
||||
|
||||
send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout)
|
||||
.and_then(|response_body| {
|
||||
let hash = hex_to_bytes(
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for block".to_string())?
|
||||
.get("hash")
|
||||
.ok_or_else(|| "No hash for block")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Block hash was not string")?,
|
||||
)?;
|
||||
let hash = if hash.len() == 32 {
|
||||
Ok(Hash256::from_slice(&hash))
|
||||
} else {
|
||||
Err(format!("Block has was not 32 bytes: {:?}", hash))
|
||||
}?;
|
||||
let response_body = send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout).await?;
|
||||
let hash = hex_to_bytes(
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for block".to_string())?
|
||||
.get("hash")
|
||||
.ok_or_else(|| "No hash for block")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Block hash was not string")?,
|
||||
)?;
|
||||
let hash = if hash.len() == 32 {
|
||||
Ok(Hash256::from_slice(&hash))
|
||||
} else {
|
||||
Err(format!("Block has was not 32 bytes: {:?}", hash))
|
||||
}?;
|
||||
|
||||
let timestamp = hex_to_u64_be(
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for timestamp".to_string())?
|
||||
.get("timestamp")
|
||||
.ok_or_else(|| "No timestamp for block")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Block timestamp was not string")?,
|
||||
)?;
|
||||
let timestamp = hex_to_u64_be(
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for timestamp".to_string())?
|
||||
.get("timestamp")
|
||||
.ok_or_else(|| "No timestamp for block")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Block timestamp was not string")?,
|
||||
)?;
|
||||
|
||||
let number = hex_to_u64_be(
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for number".to_string())?
|
||||
.get("number")
|
||||
.ok_or_else(|| "No number for block")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Block number was not string")?,
|
||||
)?;
|
||||
let number = hex_to_u64_be(
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for number".to_string())?
|
||||
.get("number")
|
||||
.ok_or_else(|| "No number for block")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Block number was not string")?,
|
||||
)?;
|
||||
|
||||
if number <= usize::max_value() as u64 {
|
||||
Ok(Block {
|
||||
hash,
|
||||
timestamp,
|
||||
number,
|
||||
})
|
||||
} else {
|
||||
Err(format!("Block number {} is larger than a usize", number))
|
||||
}
|
||||
if number <= usize::max_value() as u64 {
|
||||
Ok(Block {
|
||||
hash,
|
||||
timestamp,
|
||||
number,
|
||||
})
|
||||
.map_err(|e| format!("Failed to get block number: {}", e))
|
||||
} else {
|
||||
Err(format!("Block number {} is larger than a usize", number))
|
||||
}
|
||||
.map_err(|e| format!("Failed to get block number: {}", e))
|
||||
}
|
||||
|
||||
/// Returns the value of the `get_deposit_count()` call at the given `address` for the given
|
||||
@@ -124,21 +151,22 @@ pub fn get_block(
|
||||
/// Assumes that the `address` has the same ABI as the eth2 deposit contract.
|
||||
///
|
||||
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
|
||||
pub fn get_deposit_count(
|
||||
pub async fn get_deposit_count(
|
||||
endpoint: &str,
|
||||
address: &str,
|
||||
block_number: u64,
|
||||
timeout: Duration,
|
||||
) -> impl Future<Item = Option<u64>, Error = String> {
|
||||
call(
|
||||
) -> Result<Option<u64>, String> {
|
||||
let result = call(
|
||||
endpoint,
|
||||
address,
|
||||
DEPOSIT_COUNT_FN_SIGNATURE,
|
||||
block_number,
|
||||
timeout,
|
||||
)
|
||||
.and_then(|result| match result {
|
||||
None => Err(format!("Deposit root response was none")),
|
||||
.await?;
|
||||
match result {
|
||||
None => Err("Deposit root response was none".to_string()),
|
||||
Some(bytes) => {
|
||||
if bytes.is_empty() {
|
||||
Ok(None)
|
||||
@@ -153,7 +181,7 @@ pub fn get_deposit_count(
|
||||
))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the value of the `get_hash_tree_root()` call at the given `block_number`.
|
||||
@@ -161,21 +189,22 @@ pub fn get_deposit_count(
|
||||
/// Assumes that the `address` has the same ABI as the eth2 deposit contract.
|
||||
///
|
||||
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
|
||||
pub fn get_deposit_root(
|
||||
pub async fn get_deposit_root(
|
||||
endpoint: &str,
|
||||
address: &str,
|
||||
block_number: u64,
|
||||
timeout: Duration,
|
||||
) -> impl Future<Item = Option<Hash256>, Error = String> {
|
||||
call(
|
||||
) -> Result<Option<Hash256>, String> {
|
||||
let result = call(
|
||||
endpoint,
|
||||
address,
|
||||
DEPOSIT_ROOT_FN_SIGNATURE,
|
||||
block_number,
|
||||
timeout,
|
||||
)
|
||||
.and_then(|result| match result {
|
||||
None => Err(format!("Deposit root response was none")),
|
||||
.await?;
|
||||
match result {
|
||||
None => Err("Deposit root response was none".to_string()),
|
||||
Some(bytes) => {
|
||||
if bytes.is_empty() {
|
||||
Ok(None)
|
||||
@@ -188,7 +217,7 @@ pub fn get_deposit_root(
|
||||
))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Performs a instant, no-transaction call to the contract `address` with the given `0x`-prefixed
|
||||
@@ -197,13 +226,13 @@ pub fn get_deposit_root(
|
||||
/// Returns bytes, if any.
|
||||
///
|
||||
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
|
||||
fn call(
|
||||
async fn call(
|
||||
endpoint: &str,
|
||||
address: &str,
|
||||
hex_data: &str,
|
||||
block_number: u64,
|
||||
timeout: Duration,
|
||||
) -> impl Future<Item = Option<Vec<u8>>, Error = String> {
|
||||
) -> Result<Option<Vec<u8>>, String> {
|
||||
let params = json! ([
|
||||
{
|
||||
"to": address,
|
||||
@@ -212,19 +241,18 @@ fn call(
|
||||
format!("0x{:x}", block_number)
|
||||
]);
|
||||
|
||||
send_rpc_request(endpoint, "eth_call", params, timeout).and_then(|response_body| {
|
||||
match response_result(&response_body)? {
|
||||
None => Ok(None),
|
||||
Some(result) => {
|
||||
let hex = result
|
||||
.as_str()
|
||||
.map(|s| s.to_string())
|
||||
.ok_or_else(|| "'result' value was not a string".to_string())?;
|
||||
let response_body = send_rpc_request(endpoint, "eth_call", params, timeout).await?;
|
||||
match response_result(&response_body)? {
|
||||
None => Ok(None),
|
||||
Some(result) => {
|
||||
let hex = result
|
||||
.as_str()
|
||||
.map(|s| s.to_string())
|
||||
.ok_or_else(|| "'result' value was not a string".to_string())?;
|
||||
|
||||
Ok(Some(hex_to_bytes(&hex)?))
|
||||
}
|
||||
Ok(Some(hex_to_bytes(&hex)?))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// A reduced set of fields from an Eth1 contract log.
|
||||
@@ -240,12 +268,12 @@ pub struct Log {
|
||||
/// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not.
|
||||
///
|
||||
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
|
||||
pub fn get_deposit_logs_in_range(
|
||||
pub async fn get_deposit_logs_in_range(
|
||||
endpoint: &str,
|
||||
address: &str,
|
||||
block_height_range: Range<u64>,
|
||||
timeout: Duration,
|
||||
) -> impl Future<Item = Vec<Log>, Error = String> {
|
||||
) -> Result<Vec<Log>, String> {
|
||||
let params = json! ([{
|
||||
"address": address,
|
||||
"topics": [DEPOSIT_EVENT_TOPIC],
|
||||
@@ -253,46 +281,44 @@ pub fn get_deposit_logs_in_range(
|
||||
"toBlock": format!("0x{:x}", block_height_range.end),
|
||||
}]);
|
||||
|
||||
send_rpc_request(endpoint, "eth_getLogs", params, timeout)
|
||||
.and_then(|response_body| {
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for deposit logs".to_string())?
|
||||
.as_array()
|
||||
.cloned()
|
||||
.ok_or_else(|| "'result' value was not an array".to_string())?
|
||||
.into_iter()
|
||||
.map(|value| {
|
||||
let block_number = value
|
||||
.get("blockNumber")
|
||||
.ok_or_else(|| "No block number field in log")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Block number was not string")?;
|
||||
let response_body = send_rpc_request(endpoint, "eth_getLogs", params, timeout).await?;
|
||||
response_result(&response_body)?
|
||||
.ok_or_else(|| "No result field was returned for deposit logs".to_string())?
|
||||
.as_array()
|
||||
.cloned()
|
||||
.ok_or_else(|| "'result' value was not an array".to_string())?
|
||||
.into_iter()
|
||||
.map(|value| {
|
||||
let block_number = value
|
||||
.get("blockNumber")
|
||||
.ok_or_else(|| "No block number field in log")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Block number was not string")?;
|
||||
|
||||
let data = value
|
||||
.get("data")
|
||||
.ok_or_else(|| "No block number field in log")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Data was not string")?;
|
||||
let data = value
|
||||
.get("data")
|
||||
.ok_or_else(|| "No block number field in log")?
|
||||
.as_str()
|
||||
.ok_or_else(|| "Data was not string")?;
|
||||
|
||||
Ok(Log {
|
||||
block_number: hex_to_u64_be(&block_number)?,
|
||||
data: hex_to_bytes(data)?,
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<Log>, String>>()
|
||||
Ok(Log {
|
||||
block_number: hex_to_u64_be(&block_number)?,
|
||||
data: hex_to_bytes(data)?,
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<Log>, String>>()
|
||||
.map_err(|e| format!("Failed to get logs in range: {}", e))
|
||||
}
|
||||
|
||||
/// Sends an RPC request to `endpoint`, using a POST with the given `body`.
|
||||
///
|
||||
/// Tries to receive the response and parse the body as a `String`.
|
||||
pub fn send_rpc_request(
|
||||
pub async fn send_rpc_request(
|
||||
endpoint: &str,
|
||||
method: &str,
|
||||
params: Value,
|
||||
timeout: Duration,
|
||||
) -> impl Future<Item = String, Error = String> {
|
||||
) -> Result<String, String> {
|
||||
let body = json! ({
|
||||
"jsonrpc": "2.0",
|
||||
"method": method,
|
||||
@@ -305,7 +331,7 @@ pub fn send_rpc_request(
|
||||
//
|
||||
// A better solution would be to create some struct that contains a built client and pass it
|
||||
// around (similar to the `web3` crate's `Transport` structs).
|
||||
ClientBuilder::new()
|
||||
let response = ClientBuilder::new()
|
||||
.timeout(timeout)
|
||||
.build()
|
||||
.expect("The builder should always build a client")
|
||||
@@ -314,61 +340,32 @@ pub fn send_rpc_request(
|
||||
.body(body)
|
||||
.send()
|
||||
.map_err(|e| format!("Request failed: {:?}", e))
|
||||
.and_then(|response| {
|
||||
if response.status() != StatusCode::OK {
|
||||
Err(format!(
|
||||
"Response HTTP status was not 200 OK: {}.",
|
||||
response.status()
|
||||
))
|
||||
} else {
|
||||
Ok(response)
|
||||
}
|
||||
})
|
||||
.and_then(|response| {
|
||||
response
|
||||
.headers()
|
||||
.get(CONTENT_TYPE)
|
||||
.ok_or_else(|| "No content-type header in response".to_string())
|
||||
.and_then(|encoding| {
|
||||
encoding
|
||||
.to_str()
|
||||
.map(|s| s.to_string())
|
||||
.map_err(|e| format!("Failed to parse content-type header: {}", e))
|
||||
})
|
||||
.map(|encoding| (response, encoding))
|
||||
})
|
||||
.and_then(|(response, encoding)| {
|
||||
response
|
||||
.into_body()
|
||||
.concat2()
|
||||
.map(|chunk| chunk.iter().cloned().collect::<Vec<u8>>())
|
||||
.map_err(|e| format!("Failed to receive body: {:?}", e))
|
||||
.and_then(move |bytes| match encoding.as_str() {
|
||||
"application/json" => Ok(bytes),
|
||||
"application/json; charset=utf-8" => Ok(bytes),
|
||||
// Note: gzip is not presently working because we always seem to get an empty
|
||||
// response from the server.
|
||||
//
|
||||
// I expect this is some simple-to-solve issue for someone who is familiar with
|
||||
// the eth1 JSON RPC.
|
||||
//
|
||||
// Some public-facing web3 servers use gzip to compress their traffic, it would
|
||||
// be good to support this.
|
||||
"application/x-gzip" => {
|
||||
let mut decoder = Decoder::new(&bytes[..])
|
||||
.map_err(|e| format!("Failed to create gzip decoder: {}", e))?;
|
||||
let mut decompressed = vec![];
|
||||
decoder
|
||||
.read_to_end(&mut decompressed)
|
||||
.map_err(|e| format!("Failed to decompress gzip data: {}", e))?;
|
||||
.await?;
|
||||
if response.status() != StatusCode::OK {
|
||||
return Err(format!(
|
||||
"Response HTTP status was not 200 OK: {}.",
|
||||
response.status()
|
||||
));
|
||||
};
|
||||
let encoding = response
|
||||
.headers()
|
||||
.get(CONTENT_TYPE)
|
||||
.ok_or_else(|| "No content-type header in response".to_string())?
|
||||
.to_str()
|
||||
.map(|s| s.to_string())
|
||||
.map_err(|e| format!("Failed to parse content-type header: {}", e))?;
|
||||
|
||||
Ok(decompressed)
|
||||
}
|
||||
other => Err(format!("Unsupported encoding: {}", other)),
|
||||
})
|
||||
.map(|bytes| String::from_utf8_lossy(&bytes).into_owned())
|
||||
.map_err(|e| format!("Failed to receive body: {:?}", e))
|
||||
response
|
||||
.bytes()
|
||||
.map_err(|e| format!("Failed to receive body: {:?}", e))
|
||||
.await
|
||||
.and_then(move |bytes| match encoding.as_str() {
|
||||
"application/json" => Ok(bytes),
|
||||
"application/json; charset=utf-8" => Ok(bytes),
|
||||
other => Err(format!("Unsupported encoding: {}", other)),
|
||||
})
|
||||
.map(|bytes| String::from_utf8_lossy(&bytes).into_owned())
|
||||
.map_err(|e| format!("Failed to receive body: {:?}", e))
|
||||
}
|
||||
|
||||
/// Accepts an entire HTTP body (as a string) and returns the `result` field, as a serde `Value`.
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
use crate::Config;
|
||||
use crate::{block_cache::BlockCache, deposit_cache::DepositCache};
|
||||
use crate::{
|
||||
block_cache::BlockCache,
|
||||
deposit_cache::{DepositCache, SszDepositCache},
|
||||
};
|
||||
use parking_lot::RwLock;
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use types::ChainSpec;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct DepositUpdater {
|
||||
@@ -8,11 +14,22 @@ pub struct DepositUpdater {
|
||||
pub last_processed_block: Option<u64>,
|
||||
}
|
||||
|
||||
impl DepositUpdater {
|
||||
pub fn new(deposit_contract_deploy_block: u64) -> Self {
|
||||
let cache = DepositCache::new(deposit_contract_deploy_block);
|
||||
DepositUpdater {
|
||||
cache,
|
||||
last_processed_block: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Inner {
|
||||
pub block_cache: RwLock<BlockCache>,
|
||||
pub deposit_cache: RwLock<DepositUpdater>,
|
||||
pub config: RwLock<Config>,
|
||||
pub spec: ChainSpec,
|
||||
}
|
||||
|
||||
impl Inner {
|
||||
@@ -24,4 +41,53 @@ impl Inner {
|
||||
self.block_cache.write().truncate(block_cache_truncation);
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode the eth1 block and deposit cache as bytes.
|
||||
pub fn as_bytes(&self) -> Vec<u8> {
|
||||
let ssz_eth1_cache = SszEth1Cache::from_inner(&self);
|
||||
ssz_eth1_cache.as_ssz_bytes()
|
||||
}
|
||||
|
||||
/// Recover `Inner` given byte representation of eth1 deposit and block caches.
|
||||
pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result<Self, String> {
|
||||
let ssz_cache = SszEth1Cache::from_ssz_bytes(bytes)
|
||||
.map_err(|e| format!("Ssz decoding error: {:?}", e))?;
|
||||
Ok(ssz_cache.to_inner(config, spec)?)
|
||||
}
|
||||
|
||||
/// Returns a reference to the specification.
|
||||
pub fn spec(&self) -> &ChainSpec {
|
||||
&self.spec
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Encode, Decode, Clone)]
|
||||
pub struct SszEth1Cache {
|
||||
block_cache: BlockCache,
|
||||
deposit_cache: SszDepositCache,
|
||||
last_processed_block: Option<u64>,
|
||||
}
|
||||
|
||||
impl SszEth1Cache {
|
||||
pub fn from_inner(inner: &Inner) -> Self {
|
||||
let deposit_updater = inner.deposit_cache.read();
|
||||
let block_cache = inner.block_cache.read();
|
||||
Self {
|
||||
block_cache: (*block_cache).clone(),
|
||||
deposit_cache: SszDepositCache::from_deposit_cache(&deposit_updater.cache),
|
||||
last_processed_block: deposit_updater.last_processed_block,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_inner(&self, config: Config, spec: ChainSpec) -> Result<Inner, String> {
|
||||
Ok(Inner {
|
||||
block_cache: RwLock::new(self.block_cache.clone()),
|
||||
deposit_cache: RwLock::new(DepositUpdater {
|
||||
cache: self.deposit_cache.to_deposit_cache()?,
|
||||
last_processed_block: self.last_processed_block,
|
||||
}),
|
||||
config: RwLock::new(config),
|
||||
spec,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,16 @@
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
mod block_cache;
|
||||
mod deposit_cache;
|
||||
mod deposit_log;
|
||||
pub mod http;
|
||||
mod inner;
|
||||
mod metrics;
|
||||
mod service;
|
||||
|
||||
pub use block_cache::{BlockCache, Eth1Block};
|
||||
pub use deposit_cache::DepositCache;
|
||||
pub use deposit_log::DepositLog;
|
||||
pub use inner::SszEth1Cache;
|
||||
pub use service::{BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service};
|
||||
|
||||
19
beacon_node/eth1/src/metrics.rs
Normal file
19
beacon_node/eth1/src/metrics.rs
Normal file
@@ -0,0 +1,19 @@
|
||||
pub use lighthouse_metrics::*;
|
||||
|
||||
lazy_static! {
|
||||
/*
|
||||
* Eth1 blocks
|
||||
*/
|
||||
pub static ref BLOCK_CACHE_LEN: Result<IntGauge> =
|
||||
try_create_int_gauge("eth1_block_cache_len", "Count of eth1 blocks in cache");
|
||||
pub static ref LATEST_CACHED_BLOCK_TIMESTAMP: Result<IntGauge> =
|
||||
try_create_int_gauge("eth1_latest_cached_block_timestamp", "Timestamp of latest block in eth1 cache");
|
||||
|
||||
/*
|
||||
* Eth1 deposits
|
||||
*/
|
||||
pub static ref DEPOSIT_CACHE_LEN: Result<IntGauge> =
|
||||
try_create_int_gauge("eth1_deposit_cache_len", "Number of deposits in the eth1 cache");
|
||||
pub static ref HIGHEST_PROCESSED_DEPOSIT_BLOCK: Result<IntGauge> =
|
||||
try_create_int_gauge("eth1_highest_processed_deposit_block", "Number of the last block checked for deposits");
|
||||
}
|
||||
@@ -1,24 +1,25 @@
|
||||
use crate::metrics;
|
||||
use crate::{
|
||||
block_cache::{BlockCache, Error as BlockCacheError, Eth1Block},
|
||||
deposit_cache::Error as DepositCacheError,
|
||||
http::{
|
||||
get_block, get_block_number, get_deposit_count, get_deposit_logs_in_range, get_deposit_root,
|
||||
get_block, get_block_number, get_deposit_logs_in_range, get_network_id, Eth1NetworkId, Log,
|
||||
},
|
||||
inner::{DepositUpdater, Inner},
|
||||
DepositLog,
|
||||
};
|
||||
use exit_future::Exit;
|
||||
use futures::{
|
||||
future::{loop_fn, Loop},
|
||||
stream, Future, Stream,
|
||||
};
|
||||
use futures::{future::TryFutureExt, stream, stream::TryStreamExt, StreamExt};
|
||||
use parking_lot::{RwLock, RwLockReadGuard};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use slog::{debug, error, trace, Logger};
|
||||
use slog::{debug, error, info, trace, Logger};
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::timer::Delay;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use tokio::time::{interval_at, Duration, Instant};
|
||||
use types::ChainSpec;
|
||||
|
||||
/// Indicates the default eth1 network we use for the deposit contract.
|
||||
pub const DEFAULT_NETWORK_ID: Eth1NetworkId = Eth1NetworkId::Goerli;
|
||||
|
||||
const STANDARD_TIMEOUT_MILLIS: u64 = 15_000;
|
||||
|
||||
@@ -26,14 +27,10 @@ const STANDARD_TIMEOUT_MILLIS: u64 = 15_000;
|
||||
const BLOCK_NUMBER_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS;
|
||||
/// Timeout when doing an eth_getBlockByNumber call.
|
||||
const GET_BLOCK_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS;
|
||||
/// Timeout when doing an eth_call to read the deposit contract root.
|
||||
const GET_DEPOSIT_ROOT_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS;
|
||||
/// Timeout when doing an eth_call to read the deposit contract deposit count.
|
||||
const GET_DEPOSIT_COUNT_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS;
|
||||
/// Timeout when doing an eth_getLogs to read the deposit contract logs.
|
||||
const GET_DEPOSIT_LOG_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS;
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
/// The remote node is less synced that we expect, it is not useful until has done more
|
||||
/// syncing.
|
||||
@@ -67,19 +64,15 @@ pub enum Error {
|
||||
|
||||
/// The success message for an Eth1Data cache update.
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub enum BlockCacheUpdateOutcome {
|
||||
/// The cache was sucessfully updated.
|
||||
Success {
|
||||
blocks_imported: usize,
|
||||
head_block_number: Option<u64>,
|
||||
},
|
||||
pub struct BlockCacheUpdateOutcome {
|
||||
pub blocks_imported: usize,
|
||||
pub head_block_number: Option<u64>,
|
||||
}
|
||||
|
||||
/// The success message for an Eth1 deposit cache update.
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub enum DepositCacheUpdateOutcome {
|
||||
/// The cache was sucessfully updated.
|
||||
Success { logs_imported: usize },
|
||||
pub struct DepositCacheUpdateOutcome {
|
||||
pub logs_imported: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
@@ -88,6 +81,8 @@ pub struct Config {
|
||||
pub endpoint: String,
|
||||
/// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract.
|
||||
pub deposit_contract_address: String,
|
||||
/// The eth1 network id where the deposit contract is deployed (Goerli/Mainnet).
|
||||
pub network_id: Eth1NetworkId,
|
||||
/// Defines the first block that the `DepositCache` will start searching for deposit logs.
|
||||
///
|
||||
/// Setting too high can result in missed logs. Setting too low will result in unnecessary
|
||||
@@ -117,8 +112,9 @@ impl Default for Config {
|
||||
Self {
|
||||
endpoint: "http://localhost:8545".into(),
|
||||
deposit_contract_address: "0x0000000000000000000000000000000000000000".into(),
|
||||
deposit_contract_deploy_block: 0,
|
||||
lowest_cached_block_number: 0,
|
||||
network_id: DEFAULT_NETWORK_ID,
|
||||
deposit_contract_deploy_block: 1,
|
||||
lowest_cached_block_number: 1,
|
||||
follow_distance: 128,
|
||||
block_cache_truncation: Some(4_096),
|
||||
auto_update_interval_millis: 7_000,
|
||||
@@ -143,16 +139,39 @@ pub struct Service {
|
||||
|
||||
impl Service {
|
||||
/// Creates a new service. Does not attempt to connect to the eth1 node.
|
||||
pub fn new(config: Config, log: Logger) -> Self {
|
||||
pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(Inner {
|
||||
block_cache: <_>::default(),
|
||||
deposit_cache: RwLock::new(DepositUpdater::new(
|
||||
config.deposit_contract_deploy_block,
|
||||
)),
|
||||
config: RwLock::new(config),
|
||||
..Inner::default()
|
||||
spec,
|
||||
}),
|
||||
log,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return byte representation of deposit and block caches.
|
||||
pub fn as_bytes(&self) -> Vec<u8> {
|
||||
self.inner.as_bytes()
|
||||
}
|
||||
|
||||
/// Recover the deposit and block caches from encoded bytes.
|
||||
pub fn from_bytes(
|
||||
bytes: &[u8],
|
||||
config: Config,
|
||||
log: Logger,
|
||||
spec: ChainSpec,
|
||||
) -> Result<Self, String> {
|
||||
let inner = Inner::from_bytes(bytes, config, spec)?;
|
||||
Ok(Self {
|
||||
inner: Arc::new(inner),
|
||||
log,
|
||||
})
|
||||
}
|
||||
|
||||
/// Provides access to the block cache.
|
||||
pub fn blocks(&self) -> &RwLock<BlockCache> {
|
||||
&self.inner.block_cache
|
||||
@@ -163,6 +182,13 @@ impl Service {
|
||||
&self.inner.deposit_cache
|
||||
}
|
||||
|
||||
/// Removes all blocks from the cache, except for the latest block.
|
||||
///
|
||||
/// We don't remove the latest blocks so we don't lose track of the latest block.
|
||||
pub fn clear_block_cache(&self) {
|
||||
self.inner.block_cache.write().truncate(1)
|
||||
}
|
||||
|
||||
/// Drop the block cache, replacing it with an empty one.
|
||||
pub fn drop_block_cache(&self) {
|
||||
*(self.inner.block_cache.write()) = BlockCache::default();
|
||||
@@ -183,6 +209,14 @@ impl Service {
|
||||
self.inner.block_cache.read().lowest_block_number()
|
||||
}
|
||||
|
||||
/// Returns the highest block that is present in both the deposit and block caches.
|
||||
pub fn highest_safe_block(&self) -> Option<u64> {
|
||||
let block_cache = self.blocks().read().highest_block_number()?;
|
||||
let deposit_cache = self.deposits().read().last_processed_block?;
|
||||
|
||||
Some(std::cmp::min(block_cache, deposit_cache))
|
||||
}
|
||||
|
||||
/// Returns the number of currently cached blocks.
|
||||
pub fn block_cache_len(&self) -> usize {
|
||||
self.blocks().read().len()
|
||||
@@ -193,6 +227,34 @@ impl Service {
|
||||
self.deposits().read().cache.len()
|
||||
}
|
||||
|
||||
/// Returns the number of deposits with valid signatures that have been observed.
|
||||
pub fn get_valid_signature_count(&self) -> Option<usize> {
|
||||
self.deposits()
|
||||
.read()
|
||||
.cache
|
||||
.get_valid_signature_count(self.highest_safe_block()?)
|
||||
}
|
||||
|
||||
/// Returns the number of deposits with valid signatures that have been observed, without
|
||||
/// respecting the `highest_safe_block`.
|
||||
pub fn get_raw_valid_signature_count(&self) -> Option<usize> {
|
||||
let deposits = self.deposits().read();
|
||||
deposits
|
||||
.cache
|
||||
.get_valid_signature_count(deposits.cache.latest_block_number()?)
|
||||
}
|
||||
|
||||
/// Returns the number of deposits with valid signatures that have been observed up to and
|
||||
/// including the block at `block_number`.
|
||||
///
|
||||
/// Returns `None` if the `block_number` is zero or prior to contract deployment.
|
||||
pub fn get_valid_signature_count_at_block(&self, block_number: u64) -> Option<usize> {
|
||||
self.deposits()
|
||||
.read()
|
||||
.cache
|
||||
.get_valid_signature_count(block_number)
|
||||
}
|
||||
|
||||
/// Read the service's configuration.
|
||||
pub fn config(&self) -> RwLockReadGuard<Config> {
|
||||
self.inner.config.read()
|
||||
@@ -234,62 +296,42 @@ impl Service {
|
||||
/// - Err(_) if there is an error.
|
||||
///
|
||||
/// Emits logs for debugging and errors.
|
||||
pub fn update(
|
||||
pub async fn update(
|
||||
&self,
|
||||
) -> impl Future<Item = (DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), Error = String>
|
||||
{
|
||||
let log_a = self.log.clone();
|
||||
let log_b = self.log.clone();
|
||||
let inner_1 = self.inner.clone();
|
||||
let inner_2 = self.inner.clone();
|
||||
) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> {
|
||||
let update_deposit_cache = async {
|
||||
let outcome = self
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to update eth1 cache: {:?}", e))?;
|
||||
|
||||
let deposit_future = self
|
||||
.update_deposit_cache()
|
||||
.map_err(|e| format!("Failed to update eth1 cache: {:?}", e))
|
||||
.then(move |result| {
|
||||
match &result {
|
||||
Ok(DepositCacheUpdateOutcome::Success { logs_imported }) => trace!(
|
||||
log_a,
|
||||
"Updated eth1 deposit cache";
|
||||
"cached_deposits" => inner_1.deposit_cache.read().cache.len(),
|
||||
"logs_imported" => logs_imported,
|
||||
),
|
||||
Err(e) => error!(
|
||||
log_a,
|
||||
"Failed to update eth1 deposit cache";
|
||||
"error" => e
|
||||
),
|
||||
};
|
||||
trace!(
|
||||
self.log,
|
||||
"Updated eth1 deposit cache";
|
||||
"cached_deposits" => self.inner.deposit_cache.read().cache.len(),
|
||||
"logs_imported" => outcome.logs_imported,
|
||||
"last_processed_eth1_block" => self.inner.deposit_cache.read().last_processed_block,
|
||||
);
|
||||
Ok(outcome)
|
||||
};
|
||||
|
||||
result
|
||||
});
|
||||
let update_block_cache = async {
|
||||
let outcome = self
|
||||
.update_block_cache()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to update eth1 cache: {:?}", e))?;
|
||||
|
||||
let block_future = self
|
||||
.update_block_cache()
|
||||
.map_err(|e| format!("Failed to update eth1 cache: {:?}", e))
|
||||
.then(move |result| {
|
||||
match &result {
|
||||
Ok(BlockCacheUpdateOutcome::Success {
|
||||
blocks_imported,
|
||||
head_block_number,
|
||||
}) => trace!(
|
||||
log_b,
|
||||
"Updated eth1 block cache";
|
||||
"cached_blocks" => inner_2.block_cache.read().len(),
|
||||
"blocks_imported" => blocks_imported,
|
||||
"head_block" => head_block_number,
|
||||
),
|
||||
Err(e) => error!(
|
||||
log_b,
|
||||
"Failed to update eth1 block cache";
|
||||
"error" => e
|
||||
),
|
||||
};
|
||||
trace!(
|
||||
self.log,
|
||||
"Updated eth1 block cache";
|
||||
"cached_blocks" => self.inner.block_cache.read().len(),
|
||||
"blocks_imported" => outcome.blocks_imported,
|
||||
"head_block" => outcome.head_block_number,
|
||||
);
|
||||
Ok(outcome)
|
||||
};
|
||||
|
||||
result
|
||||
});
|
||||
|
||||
deposit_future.join(block_future)
|
||||
futures::try_join!(update_deposit_cache, update_block_cache)
|
||||
}
|
||||
|
||||
/// A looping future that updates the cache, then waits `config.auto_update_interval` before
|
||||
@@ -301,53 +343,61 @@ impl Service {
|
||||
/// - Err(_) if there is an error.
|
||||
///
|
||||
/// Emits logs for debugging and errors.
|
||||
pub fn auto_update(&self, exit: Exit) -> impl Future<Item = (), Error = ()> {
|
||||
let service = self.clone();
|
||||
let log = self.log.clone();
|
||||
pub fn auto_update(self, handle: environment::TaskExecutor) {
|
||||
let update_interval = Duration::from_millis(self.config().auto_update_interval_millis);
|
||||
|
||||
let loop_future = loop_fn((), move |()| {
|
||||
let service = service.clone();
|
||||
let log_a = log.clone();
|
||||
let log_b = log.clone();
|
||||
let mut interval = interval_at(Instant::now(), update_interval);
|
||||
|
||||
service
|
||||
.update()
|
||||
.then(move |update_result| {
|
||||
match update_result {
|
||||
Err(e) => error!(
|
||||
log_a,
|
||||
"Failed to update eth1 genesis cache";
|
||||
"retry_millis" => update_interval.as_millis(),
|
||||
"error" => e,
|
||||
),
|
||||
Ok((deposit, block)) => debug!(
|
||||
log_a,
|
||||
"Updated eth1 genesis cache";
|
||||
"retry_millis" => update_interval.as_millis(),
|
||||
"blocks" => format!("{:?}", block),
|
||||
"deposits" => format!("{:?}", deposit),
|
||||
),
|
||||
};
|
||||
let update_future = async move {
|
||||
while interval.next().await.is_some() {
|
||||
self.do_update(update_interval).await.ok();
|
||||
}
|
||||
};
|
||||
|
||||
// Do not break the loop if there is an update failure.
|
||||
Ok(())
|
||||
})
|
||||
.and_then(move |_| Delay::new(Instant::now() + update_interval))
|
||||
.then(move |timer_result| {
|
||||
if let Err(e) = timer_result {
|
||||
error!(
|
||||
log_b,
|
||||
"Failed to trigger eth1 cache update delay";
|
||||
"error" => format!("{:?}", e),
|
||||
);
|
||||
}
|
||||
// Do not break the loop if there is an timer failure.
|
||||
Ok(Loop::Continue(()))
|
||||
})
|
||||
});
|
||||
handle.spawn(update_future, "eth1");
|
||||
}
|
||||
|
||||
exit.until(loop_future).map(|_: Option<()>| ())
|
||||
async fn do_update(&self, update_interval: Duration) -> Result<(), ()> {
|
||||
let endpoint = self.config().endpoint.clone();
|
||||
let config_network = self.config().network_id.clone();
|
||||
let result =
|
||||
get_network_id(&endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS)).await;
|
||||
match result {
|
||||
Ok(network_id) => {
|
||||
if network_id != config_network {
|
||||
error!(
|
||||
self.log,
|
||||
"Failed to update eth1 cache";
|
||||
"reason" => "Invalid eth1 network id",
|
||||
"expected" => format!("{:?}",DEFAULT_NETWORK_ID),
|
||||
"got" => format!("{:?}",network_id),
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!(self.log, "Failed to get eth1 network id"; "error" => e);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
let update_result = self.update().await;
|
||||
match update_result {
|
||||
Err(e) => error!(
|
||||
self.log,
|
||||
"Failed to update eth1 cache";
|
||||
"retry_millis" => update_interval.as_millis(),
|
||||
"error" => e,
|
||||
),
|
||||
Ok((deposit, block)) => debug!(
|
||||
self.log,
|
||||
"Updated eth1 cache";
|
||||
"retry_millis" => update_interval.as_millis(),
|
||||
"blocks" => format!("{:?}", block),
|
||||
"deposits" => format!("{:?}", deposit),
|
||||
),
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Contacts the remote eth1 node and attempts to import deposit logs up to the configured
|
||||
@@ -362,11 +412,11 @@ impl Service {
|
||||
/// - Err(_) if there is an error.
|
||||
///
|
||||
/// Emits logs for debugging and errors.
|
||||
pub fn update_deposit_cache(
|
||||
&self,
|
||||
) -> impl Future<Item = DepositCacheUpdateOutcome, Error = Error> {
|
||||
let service_1 = self.clone();
|
||||
let service_2 = self.clone();
|
||||
pub async fn update_deposit_cache(&self) -> Result<DepositCacheUpdateOutcome, Error> {
|
||||
let endpoint = self.config().endpoint.clone();
|
||||
let follow_distance = self.config().follow_distance;
|
||||
let deposit_contract_address = self.config().deposit_contract_address.clone();
|
||||
|
||||
let blocks_per_log_query = self.config().blocks_per_log_query;
|
||||
let max_log_requests_per_update = self
|
||||
.config()
|
||||
@@ -380,91 +430,110 @@ impl Service {
|
||||
.map(|n| n + 1)
|
||||
.unwrap_or_else(|| self.config().deposit_contract_deploy_block);
|
||||
|
||||
get_new_block_numbers(
|
||||
&self.config().endpoint,
|
||||
next_required_block,
|
||||
self.config().follow_distance,
|
||||
)
|
||||
.map(move |range| {
|
||||
let range = get_new_block_numbers(&endpoint, next_required_block, follow_distance).await?;
|
||||
|
||||
let block_number_chunks = if let Some(range) = range {
|
||||
range
|
||||
.map(|range| {
|
||||
range
|
||||
.collect::<Vec<u64>>()
|
||||
.chunks(blocks_per_log_query)
|
||||
.take(max_log_requests_per_update)
|
||||
.map(|vec| {
|
||||
let first = vec.first().cloned().unwrap_or_else(|| 0);
|
||||
let last = vec.last().map(|n| n + 1).unwrap_or_else(|| 0);
|
||||
(first..last)
|
||||
})
|
||||
.collect::<Vec<Range<u64>>>()
|
||||
.collect::<Vec<u64>>()
|
||||
.chunks(blocks_per_log_query)
|
||||
.take(max_log_requests_per_update)
|
||||
.map(|vec| {
|
||||
let first = vec.first().cloned().unwrap_or_else(|| 0);
|
||||
let last = vec.last().map(|n| n + 1).unwrap_or_else(|| 0);
|
||||
first..last
|
||||
})
|
||||
.unwrap_or_else(|| vec![])
|
||||
})
|
||||
.and_then(move |block_number_chunks| {
|
||||
stream::unfold(
|
||||
block_number_chunks.into_iter(),
|
||||
move |mut chunks| match chunks.next() {
|
||||
.collect::<Vec<Range<u64>>>()
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
let logs: Vec<(Range<u64>, Vec<Log>)> =
|
||||
stream::try_unfold(block_number_chunks.into_iter(), |mut chunks| async {
|
||||
match chunks.next() {
|
||||
Some(chunk) => {
|
||||
let chunk_1 = chunk.clone();
|
||||
Some(
|
||||
get_deposit_logs_in_range(
|
||||
&service_1.config().endpoint,
|
||||
&service_1.config().deposit_contract_address,
|
||||
chunk,
|
||||
Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS),
|
||||
)
|
||||
.map_err(Error::GetDepositLogsFailed)
|
||||
.map(|logs| (chunk_1, logs))
|
||||
.map(|logs| (logs, chunks)),
|
||||
match get_deposit_logs_in_range(
|
||||
&endpoint,
|
||||
&deposit_contract_address,
|
||||
chunk,
|
||||
Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(logs) => Ok(Some(((chunk_1, logs), chunks))),
|
||||
Err(e) => Err(Error::GetDepositLogsFailed(e)),
|
||||
}
|
||||
}
|
||||
None => None,
|
||||
},
|
||||
)
|
||||
.fold(0, move |mut sum, (block_range, log_chunk)| {
|
||||
let mut cache = service_2.deposits().write();
|
||||
|
||||
log_chunk
|
||||
.into_iter()
|
||||
.map(|raw_log| {
|
||||
DepositLog::from_log(&raw_log).map_err(|error| {
|
||||
Error::FailedToParseDepositLog {
|
||||
block_range: block_range.clone(),
|
||||
error,
|
||||
}
|
||||
})
|
||||
})
|
||||
// Return early if any of the logs cannot be parsed.
|
||||
//
|
||||
// This costs an additional `collect`, however it enforces that no logs are
|
||||
// imported if any one of them cannot be parsed.
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
.into_iter()
|
||||
.map(|deposit_log| {
|
||||
cache
|
||||
.cache
|
||||
.insert_log(deposit_log)
|
||||
.map_err(Error::FailedToInsertDeposit)?;
|
||||
|
||||
sum += 1;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
// Returns if a deposit is unable to be added to the cache.
|
||||
//
|
||||
// If this error occurs, the cache will no longer be guaranteed to hold either
|
||||
// none or all of the logs for each block (i.e., they may exist _some_ logs for
|
||||
// a block, but not _all_ logs for that block). This scenario can cause the
|
||||
// node to choose an invalid genesis state or propose an invalid block.
|
||||
.collect::<Result<_, _>>()?;
|
||||
|
||||
cache.last_processed_block = Some(block_range.end.saturating_sub(1));
|
||||
|
||||
Ok(sum)
|
||||
None => Ok(None),
|
||||
}
|
||||
})
|
||||
.map(|logs_imported| DepositCacheUpdateOutcome::Success { logs_imported })
|
||||
})
|
||||
.try_collect()
|
||||
.await?;
|
||||
|
||||
let mut logs_imported = 0;
|
||||
for (block_range, log_chunk) in logs.iter() {
|
||||
let mut cache = self.deposits().write();
|
||||
log_chunk
|
||||
.iter()
|
||||
.map(|raw_log| {
|
||||
DepositLog::from_log(&raw_log, self.inner.spec()).map_err(|error| {
|
||||
Error::FailedToParseDepositLog {
|
||||
block_range: block_range.clone(),
|
||||
error,
|
||||
}
|
||||
})
|
||||
})
|
||||
// Return early if any of the logs cannot be parsed.
|
||||
//
|
||||
// This costs an additional `collect`, however it enforces that no logs are
|
||||
// imported if any one of them cannot be parsed.
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
.into_iter()
|
||||
.map(|deposit_log| {
|
||||
cache
|
||||
.cache
|
||||
.insert_log(deposit_log)
|
||||
.map_err(Error::FailedToInsertDeposit)?;
|
||||
|
||||
logs_imported += 1;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
// Returns if a deposit is unable to be added to the cache.
|
||||
//
|
||||
// If this error occurs, the cache will no longer be guaranteed to hold either
|
||||
// none or all of the logs for each block (i.e., they may exist _some_ logs for
|
||||
// a block, but not _all_ logs for that block). This scenario can cause the
|
||||
// node to choose an invalid genesis state or propose an invalid block.
|
||||
.collect::<Result<_, _>>()?;
|
||||
|
||||
cache.last_processed_block = Some(block_range.end.saturating_sub(1));
|
||||
|
||||
metrics::set_gauge(&metrics::DEPOSIT_CACHE_LEN, cache.cache.len() as i64);
|
||||
metrics::set_gauge(
|
||||
&metrics::HIGHEST_PROCESSED_DEPOSIT_BLOCK,
|
||||
cache.last_processed_block.unwrap_or_else(|| 0) as i64,
|
||||
);
|
||||
}
|
||||
|
||||
if logs_imported > 0 {
|
||||
info!(
|
||||
self.log,
|
||||
"Imported deposit log(s)";
|
||||
"latest_block" => self.inner.deposit_cache.read().cache.latest_block_number(),
|
||||
"total" => self.deposit_cache_len(),
|
||||
"new" => logs_imported
|
||||
);
|
||||
} else {
|
||||
debug!(
|
||||
self.log,
|
||||
"No new deposits found";
|
||||
"latest_block" => self.inner.deposit_cache.read().cache.latest_block_number(),
|
||||
"total_deposits" => self.deposit_cache_len(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(DepositCacheUpdateOutcome { logs_imported })
|
||||
}
|
||||
|
||||
/// Contacts the remote eth1 node and attempts to import all blocks up to the configured
|
||||
@@ -478,167 +547,217 @@ impl Service {
|
||||
/// - Err(_) if there is an error.
|
||||
///
|
||||
/// Emits logs for debugging and errors.
|
||||
pub fn update_block_cache(&self) -> impl Future<Item = BlockCacheUpdateOutcome, Error = Error> {
|
||||
let cache_1 = self.inner.clone();
|
||||
let cache_2 = self.inner.clone();
|
||||
let cache_3 = self.inner.clone();
|
||||
let cache_4 = self.inner.clone();
|
||||
let cache_5 = self.inner.clone();
|
||||
|
||||
pub async fn update_block_cache(&self) -> Result<BlockCacheUpdateOutcome, Error> {
|
||||
let block_cache_truncation = self.config().block_cache_truncation;
|
||||
let max_blocks_per_update = self
|
||||
.config()
|
||||
.max_blocks_per_update
|
||||
.unwrap_or_else(usize::max_value);
|
||||
|
||||
let next_required_block = cache_1
|
||||
let next_required_block = self
|
||||
.inner
|
||||
.block_cache
|
||||
.read()
|
||||
.highest_block_number()
|
||||
.map(|n| n + 1)
|
||||
.unwrap_or_else(|| self.config().lowest_cached_block_number);
|
||||
|
||||
get_new_block_numbers(
|
||||
&self.config().endpoint,
|
||||
next_required_block,
|
||||
self.config().follow_distance,
|
||||
)
|
||||
let endpoint = self.config().endpoint.clone();
|
||||
let follow_distance = self.config().follow_distance;
|
||||
|
||||
let range = get_new_block_numbers(&endpoint, next_required_block, follow_distance).await?;
|
||||
// Map the range of required blocks into a Vec.
|
||||
//
|
||||
// If the required range is larger than the size of the cache, drop the exiting cache
|
||||
// because it's exipred and just download enough blocks to fill the cache.
|
||||
.and_then(move |range| {
|
||||
range
|
||||
.map(|range| {
|
||||
if range.start() > range.end() {
|
||||
// Note: this check is not strictly necessary, however it remains to safe
|
||||
// guard against any regression which may cause an underflow in a following
|
||||
// subtraction operation.
|
||||
Err(Error::Internal("Range was not increasing".into()))
|
||||
} else {
|
||||
let range_size = range.end() - range.start();
|
||||
let max_size = block_cache_truncation
|
||||
.map(|n| n as u64)
|
||||
.unwrap_or_else(u64::max_value);
|
||||
let required_block_numbers = if let Some(range) = range {
|
||||
if range.start() > range.end() {
|
||||
// Note: this check is not strictly necessary, however it remains to safe
|
||||
// guard against any regression which may cause an underflow in a following
|
||||
// subtraction operation.
|
||||
return Err(Error::Internal("Range was not increasing".into()));
|
||||
} else {
|
||||
let range_size = range.end() - range.start();
|
||||
let max_size = block_cache_truncation
|
||||
.map(|n| n as u64)
|
||||
.unwrap_or_else(u64::max_value);
|
||||
if range_size > max_size {
|
||||
// If the range of required blocks is larger than `max_size`, drop all
|
||||
// existing blocks and download `max_size` count of blocks.
|
||||
let first_block = range.end() - max_size;
|
||||
(*self.inner.block_cache.write()) = BlockCache::default();
|
||||
(first_block..=*range.end()).collect::<Vec<u64>>()
|
||||
} else {
|
||||
range.collect::<Vec<u64>>()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
// Download the range of blocks and sequentially import them into the cache.
|
||||
// Last processed block in deposit cache
|
||||
let latest_in_cache = self
|
||||
.inner
|
||||
.deposit_cache
|
||||
.read()
|
||||
.last_processed_block
|
||||
.unwrap_or(0);
|
||||
|
||||
if range_size > max_size {
|
||||
// If the range of required blocks is larger than `max_size`, drop all
|
||||
// existing blocks and download `max_size` count of blocks.
|
||||
let first_block = range.end() - max_size;
|
||||
(*cache_5.block_cache.write()) = BlockCache::default();
|
||||
Ok((first_block..=*range.end()).collect::<Vec<u64>>())
|
||||
} else {
|
||||
Ok(range.collect::<Vec<u64>>())
|
||||
let required_block_numbers = required_block_numbers
|
||||
.into_iter()
|
||||
.filter(|x| *x <= latest_in_cache)
|
||||
.take(max_blocks_per_update)
|
||||
.collect::<Vec<_>>();
|
||||
// Produce a stream from the list of required block numbers and return a future that
|
||||
// consumes the it.
|
||||
|
||||
let eth1_blocks: Vec<Eth1Block> = stream::try_unfold(
|
||||
required_block_numbers.into_iter(),
|
||||
|mut block_numbers| async {
|
||||
match block_numbers.next() {
|
||||
Some(block_number) => {
|
||||
match download_eth1_block(self.inner.clone(), block_number).await {
|
||||
Ok(eth1_block) => Ok(Some((eth1_block, block_numbers))),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| Ok(vec![]))
|
||||
})
|
||||
// Download the range of blocks and sequentially import them into the cache.
|
||||
.and_then(move |required_block_numbers| {
|
||||
let required_block_numbers = required_block_numbers
|
||||
.into_iter()
|
||||
.take(max_blocks_per_update);
|
||||
None => Ok(None),
|
||||
}
|
||||
},
|
||||
)
|
||||
.try_collect()
|
||||
.await?;
|
||||
|
||||
// Produce a stream from the list of required block numbers and return a future that
|
||||
// consumes the it.
|
||||
stream::unfold(
|
||||
required_block_numbers,
|
||||
move |mut block_numbers| match block_numbers.next() {
|
||||
Some(block_number) => Some(
|
||||
download_eth1_block(cache_2.clone(), block_number)
|
||||
.map(|v| (v, block_numbers)),
|
||||
),
|
||||
None => None,
|
||||
},
|
||||
)
|
||||
.fold(0, move |sum, eth1_block| {
|
||||
cache_3
|
||||
let mut blocks_imported = 0;
|
||||
for eth1_block in eth1_blocks {
|
||||
self.inner
|
||||
.block_cache
|
||||
.write()
|
||||
.insert_root_or_child(eth1_block)
|
||||
.map_err(Error::FailedToInsertEth1Block)?;
|
||||
|
||||
metrics::set_gauge(
|
||||
&metrics::BLOCK_CACHE_LEN,
|
||||
self.inner.block_cache.read().len() as i64,
|
||||
);
|
||||
metrics::set_gauge(
|
||||
&metrics::LATEST_CACHED_BLOCK_TIMESTAMP,
|
||||
self.inner
|
||||
.block_cache
|
||||
.write()
|
||||
.insert_root_or_child(eth1_block)
|
||||
.map_err(Error::FailedToInsertEth1Block)?;
|
||||
.read()
|
||||
.latest_block_timestamp()
|
||||
.unwrap_or_else(|| 0) as i64,
|
||||
);
|
||||
|
||||
Ok(sum + 1)
|
||||
})
|
||||
})
|
||||
.and_then(move |blocks_imported| {
|
||||
// Prune the block cache, preventing it from growing too large.
|
||||
cache_4.prune_blocks();
|
||||
blocks_imported += 1;
|
||||
}
|
||||
|
||||
Ok(BlockCacheUpdateOutcome::Success {
|
||||
blocks_imported,
|
||||
head_block_number: cache_4.clone().block_cache.read().highest_block_number(),
|
||||
// Prune the block cache, preventing it from growing too large.
|
||||
self.inner.prune_blocks();
|
||||
|
||||
metrics::set_gauge(
|
||||
&metrics::BLOCK_CACHE_LEN,
|
||||
self.inner.block_cache.read().len() as i64,
|
||||
);
|
||||
|
||||
let block_cache = self.inner.block_cache.read();
|
||||
let latest_block_mins = block_cache
|
||||
.latest_block_timestamp()
|
||||
.and_then(|timestamp| {
|
||||
SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.ok()
|
||||
.and_then(|now| now.checked_sub(Duration::from_secs(timestamp)))
|
||||
})
|
||||
.map(|duration| format!("{} mins", duration.as_secs() / 60))
|
||||
.unwrap_or_else(|| "n/a".into());
|
||||
|
||||
if blocks_imported > 0 {
|
||||
debug!(
|
||||
self.log,
|
||||
"Imported eth1 block(s)";
|
||||
"latest_block_age" => latest_block_mins,
|
||||
"latest_block" => block_cache.highest_block_number(),
|
||||
"total_cached_blocks" => block_cache.len(),
|
||||
"new" => blocks_imported
|
||||
);
|
||||
} else {
|
||||
debug!(
|
||||
self.log,
|
||||
"No new eth1 blocks imported";
|
||||
"latest_block" => block_cache.highest_block_number(),
|
||||
"cached_blocks" => block_cache.len(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(BlockCacheUpdateOutcome {
|
||||
blocks_imported,
|
||||
head_block_number: self.inner.block_cache.read().highest_block_number(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Determine the range of blocks that need to be downloaded, given the remotes best block and
|
||||
/// the locally stored best block.
|
||||
fn get_new_block_numbers<'a>(
|
||||
async fn get_new_block_numbers<'a>(
|
||||
endpoint: &str,
|
||||
next_required_block: u64,
|
||||
follow_distance: u64,
|
||||
) -> impl Future<Item = Option<RangeInclusive<u64>>, Error = Error> + 'a {
|
||||
get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS))
|
||||
.map_err(Error::GetBlockNumberFailed)
|
||||
.and_then(move |remote_highest_block| {
|
||||
let remote_follow_block = remote_highest_block.saturating_sub(follow_distance);
|
||||
) -> Result<Option<RangeInclusive<u64>>, Error> {
|
||||
let remote_highest_block =
|
||||
get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS))
|
||||
.map_err(Error::GetBlockNumberFailed)
|
||||
.await?;
|
||||
let remote_follow_block = remote_highest_block.saturating_sub(follow_distance);
|
||||
|
||||
if next_required_block <= remote_follow_block {
|
||||
Ok(Some(next_required_block..=remote_follow_block))
|
||||
} else if next_required_block > remote_highest_block + 1 {
|
||||
// If this is the case, the node must have gone "backwards" in terms of it's sync
|
||||
// (i.e., it's head block is lower than it was before).
|
||||
//
|
||||
// We assume that the `follow_distance` should be sufficient to ensure this never
|
||||
// happens, otherwise it is an error.
|
||||
Err(Error::RemoteNotSynced {
|
||||
next_required_block,
|
||||
remote_highest_block,
|
||||
follow_distance,
|
||||
})
|
||||
} else {
|
||||
// Return an empty range.
|
||||
Ok(None)
|
||||
}
|
||||
if next_required_block <= remote_follow_block {
|
||||
Ok(Some(next_required_block..=remote_follow_block))
|
||||
} else if next_required_block > remote_highest_block + 1 {
|
||||
// If this is the case, the node must have gone "backwards" in terms of it's sync
|
||||
// (i.e., it's head block is lower than it was before).
|
||||
//
|
||||
// We assume that the `follow_distance` should be sufficient to ensure this never
|
||||
// happens, otherwise it is an error.
|
||||
Err(Error::RemoteNotSynced {
|
||||
next_required_block,
|
||||
remote_highest_block,
|
||||
follow_distance,
|
||||
})
|
||||
} else {
|
||||
// Return an empty range.
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Downloads the `(block, deposit_root, deposit_count)` tuple from an eth1 node for the given
|
||||
/// `block_number`.
|
||||
///
|
||||
/// Performs three async calls to an Eth1 HTTP JSON RPC endpoint.
|
||||
fn download_eth1_block<'a>(
|
||||
cache: Arc<Inner>,
|
||||
block_number: u64,
|
||||
) -> impl Future<Item = Eth1Block, Error = Error> + 'a {
|
||||
async fn download_eth1_block(cache: Arc<Inner>, block_number: u64) -> Result<Eth1Block, Error> {
|
||||
let endpoint = cache.config.read().endpoint.clone();
|
||||
|
||||
let deposit_root = cache
|
||||
.deposit_cache
|
||||
.read()
|
||||
.cache
|
||||
.get_deposit_root_from_cache(block_number);
|
||||
|
||||
let deposit_count = cache
|
||||
.deposit_cache
|
||||
.read()
|
||||
.cache
|
||||
.get_deposit_count_from_cache(block_number);
|
||||
|
||||
// Performs a `get_blockByNumber` call to an eth1 node.
|
||||
get_block(
|
||||
&cache.config.read().endpoint,
|
||||
let http_block = get_block(
|
||||
&endpoint,
|
||||
block_number,
|
||||
Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS),
|
||||
)
|
||||
.map_err(Error::BlockDownloadFailed)
|
||||
.join3(
|
||||
// Perform 2x `eth_call` via an eth1 node to read the deposit contract root and count.
|
||||
get_deposit_root(
|
||||
&cache.config.read().endpoint,
|
||||
&cache.config.read().deposit_contract_address,
|
||||
block_number,
|
||||
Duration::from_millis(GET_DEPOSIT_ROOT_TIMEOUT_MILLIS),
|
||||
)
|
||||
.map_err(Error::GetDepositRootFailed),
|
||||
get_deposit_count(
|
||||
&cache.config.read().endpoint,
|
||||
&cache.config.read().deposit_contract_address,
|
||||
block_number,
|
||||
Duration::from_millis(GET_DEPOSIT_COUNT_TIMEOUT_MILLIS),
|
||||
)
|
||||
.map_err(Error::GetDepositCountFailed),
|
||||
)
|
||||
.map(|(http_block, deposit_root, deposit_count)| Eth1Block {
|
||||
.await?;
|
||||
|
||||
Ok(Eth1Block {
|
||||
hash: http_block.hash,
|
||||
number: http_block.number,
|
||||
timestamp: http_block.timestamp,
|
||||
|
||||
@@ -4,18 +4,23 @@ use eth1::http::{get_deposit_count, get_deposit_logs_in_range, get_deposit_root,
|
||||
use eth1::{Config, Service};
|
||||
use eth1::{DepositCache, DepositLog};
|
||||
use eth1_test_rig::GanacheEth1Instance;
|
||||
use exit_future;
|
||||
use futures::Future;
|
||||
use futures::compat::Future01CompatExt;
|
||||
use merkle_proof::verify_merkle_proof;
|
||||
use slog::Logger;
|
||||
use sloggers::{null::NullLoggerBuilder, Build};
|
||||
use std::ops::Range;
|
||||
use std::time::Duration;
|
||||
use tokio::runtime::Runtime;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature};
|
||||
use web3::{transports::Http, Web3};
|
||||
|
||||
const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32;
|
||||
|
||||
pub fn null_logger() -> Logger {
|
||||
let log_builder = NullLoggerBuilder;
|
||||
log_builder.build().expect("should build logger")
|
||||
}
|
||||
|
||||
pub fn new_env() -> Environment<MinimalEthSpec> {
|
||||
EnvironmentBuilder::minimal()
|
||||
// Use a single thread, so that when all tests are run in parallel they don't have so many
|
||||
@@ -39,7 +44,7 @@ fn random_deposit_data() -> DepositData {
|
||||
pubkey: keypair.pk.into(),
|
||||
withdrawal_credentials: Hash256::zero(),
|
||||
amount: 32_000_000_000,
|
||||
signature: Signature::empty_signature().into(),
|
||||
signature: Signature::empty().into(),
|
||||
};
|
||||
|
||||
deposit.signature = deposit.create_signature(&keypair.sk, &MainnetEthSpec::default_spec());
|
||||
@@ -48,160 +53,66 @@ fn random_deposit_data() -> DepositData {
|
||||
}
|
||||
|
||||
/// Blocking operation to get the deposit logs from the `deposit_contract`.
|
||||
fn blocking_deposit_logs(
|
||||
runtime: &mut Runtime,
|
||||
eth1: &GanacheEth1Instance,
|
||||
range: Range<u64>,
|
||||
) -> Vec<Log> {
|
||||
runtime
|
||||
.block_on(get_deposit_logs_in_range(
|
||||
ð1.endpoint(),
|
||||
ð1.deposit_contract.address(),
|
||||
range,
|
||||
timeout(),
|
||||
))
|
||||
.expect("should get logs")
|
||||
async fn blocking_deposit_logs(eth1: &GanacheEth1Instance, range: Range<u64>) -> Vec<Log> {
|
||||
get_deposit_logs_in_range(
|
||||
ð1.endpoint(),
|
||||
ð1.deposit_contract.address(),
|
||||
range,
|
||||
timeout(),
|
||||
)
|
||||
.await
|
||||
.expect("should get logs")
|
||||
}
|
||||
|
||||
/// Blocking operation to get the deposit root from the `deposit_contract`.
|
||||
fn blocking_deposit_root(
|
||||
runtime: &mut Runtime,
|
||||
eth1: &GanacheEth1Instance,
|
||||
block_number: u64,
|
||||
) -> Option<Hash256> {
|
||||
runtime
|
||||
.block_on(get_deposit_root(
|
||||
ð1.endpoint(),
|
||||
ð1.deposit_contract.address(),
|
||||
block_number,
|
||||
timeout(),
|
||||
))
|
||||
.expect("should get deposit root")
|
||||
async fn blocking_deposit_root(eth1: &GanacheEth1Instance, block_number: u64) -> Option<Hash256> {
|
||||
get_deposit_root(
|
||||
ð1.endpoint(),
|
||||
ð1.deposit_contract.address(),
|
||||
block_number,
|
||||
timeout(),
|
||||
)
|
||||
.await
|
||||
.expect("should get deposit root")
|
||||
}
|
||||
|
||||
/// Blocking operation to get the deposit count from the `deposit_contract`.
|
||||
fn blocking_deposit_count(
|
||||
runtime: &mut Runtime,
|
||||
eth1: &GanacheEth1Instance,
|
||||
block_number: u64,
|
||||
) -> Option<u64> {
|
||||
runtime
|
||||
.block_on(get_deposit_count(
|
||||
ð1.endpoint(),
|
||||
ð1.deposit_contract.address(),
|
||||
block_number,
|
||||
timeout(),
|
||||
))
|
||||
.expect("should get deposit count")
|
||||
async fn blocking_deposit_count(eth1: &GanacheEth1Instance, block_number: u64) -> Option<u64> {
|
||||
get_deposit_count(
|
||||
ð1.endpoint(),
|
||||
ð1.deposit_contract.address(),
|
||||
block_number,
|
||||
timeout(),
|
||||
)
|
||||
.await
|
||||
.expect("should get deposit count")
|
||||
}
|
||||
|
||||
fn get_block_number(runtime: &mut Runtime, web3: &Web3<Http>) -> u64 {
|
||||
runtime
|
||||
.block_on(web3.eth().block_number().map(|v| v.as_u64()))
|
||||
async fn get_block_number(web3: &Web3<Http>) -> u64 {
|
||||
web3.eth()
|
||||
.block_number()
|
||||
.compat()
|
||||
.await
|
||||
.map(|v| v.as_u64())
|
||||
.expect("should get block number")
|
||||
}
|
||||
|
||||
mod auto_update {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn can_auto_update() {
|
||||
let mut env = new_env();
|
||||
let log = env.core_context().log;
|
||||
let runtime = env.runtime();
|
||||
|
||||
let eth1 = runtime
|
||||
.block_on(GanacheEth1Instance::new())
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
|
||||
let now = get_block_number(runtime, &web3);
|
||||
|
||||
let service = Service::new(
|
||||
Config {
|
||||
endpoint: eth1.endpoint(),
|
||||
deposit_contract_address: deposit_contract.address(),
|
||||
deposit_contract_deploy_block: now,
|
||||
lowest_cached_block_number: now,
|
||||
follow_distance: 0,
|
||||
block_cache_truncation: None,
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
);
|
||||
|
||||
// NOTE: this test is sensitive to the response speed of the external web3 server. If
|
||||
// you're experiencing failures, try increasing the update_interval.
|
||||
let update_interval = Duration::from_millis(2_000);
|
||||
|
||||
assert_eq!(
|
||||
service.block_cache_len(),
|
||||
0,
|
||||
"should have imported no blocks"
|
||||
);
|
||||
assert_eq!(
|
||||
service.deposit_cache_len(),
|
||||
0,
|
||||
"should have imported no deposits"
|
||||
);
|
||||
|
||||
let (_exit, signal) = exit_future::signal();
|
||||
|
||||
runtime.executor().spawn(service.auto_update(signal));
|
||||
|
||||
let n = 4;
|
||||
|
||||
for _ in 0..n {
|
||||
deposit_contract
|
||||
.deposit(runtime, random_deposit_data())
|
||||
.expect("should do first deposits");
|
||||
}
|
||||
|
||||
std::thread::sleep(update_interval * 5);
|
||||
|
||||
assert!(
|
||||
service.deposit_cache_len() >= n,
|
||||
"should have imported n deposits"
|
||||
);
|
||||
|
||||
for _ in 0..n {
|
||||
deposit_contract
|
||||
.deposit(runtime, random_deposit_data())
|
||||
.expect("should do second deposits");
|
||||
}
|
||||
|
||||
std::thread::sleep(update_interval * 4);
|
||||
|
||||
assert!(
|
||||
service.block_cache_len() >= n * 2,
|
||||
"should have imported all blocks"
|
||||
);
|
||||
assert!(
|
||||
service.deposit_cache_len() >= n * 2,
|
||||
"should have imported all deposits, not {}",
|
||||
service.deposit_cache_len()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
mod eth1_cache {
|
||||
use super::*;
|
||||
use types::{EthSpec, MainnetEthSpec};
|
||||
|
||||
#[test]
|
||||
fn simple_scenario() {
|
||||
let mut env = new_env();
|
||||
let log = env.core_context().log;
|
||||
let runtime = env.runtime();
|
||||
#[tokio::test]
|
||||
async fn simple_scenario() {
|
||||
let log = null_logger();
|
||||
|
||||
for follow_distance in 0..2 {
|
||||
let eth1 = runtime
|
||||
.block_on(GanacheEth1Instance::new())
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
|
||||
let initial_block_number = get_block_number(runtime, &web3);
|
||||
let initial_block_number = get_block_number(&web3).await;
|
||||
|
||||
let service = Service::new(
|
||||
Config {
|
||||
@@ -212,6 +123,7 @@ mod eth1_cache {
|
||||
..Config::default()
|
||||
},
|
||||
log.clone(),
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
// Create some blocks and then consume them, performing the test `rounds` times.
|
||||
@@ -230,17 +142,21 @@ mod eth1_cache {
|
||||
};
|
||||
|
||||
for _ in 0..blocks {
|
||||
runtime
|
||||
.block_on(eth1.ganache.evm_mine())
|
||||
.expect("should mine block");
|
||||
eth1.ganache.evm_mine().await.expect("should mine block");
|
||||
}
|
||||
|
||||
runtime
|
||||
.block_on(service.update_block_cache())
|
||||
.expect("should update cache");
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should update deposit cache");
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should update block cache");
|
||||
|
||||
runtime
|
||||
.block_on(service.update_block_cache())
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should update cache when nothing has changed");
|
||||
|
||||
assert_eq!(
|
||||
@@ -260,14 +176,13 @@ mod eth1_cache {
|
||||
}
|
||||
|
||||
/// Tests the case where we attempt to download more blocks than will fit in the cache.
|
||||
#[test]
|
||||
fn big_skip() {
|
||||
let mut env = new_env();
|
||||
let log = env.core_context().log;
|
||||
let runtime = env.runtime();
|
||||
|
||||
let eth1 = runtime
|
||||
.block_on(GanacheEth1Instance::new())
|
||||
#[tokio::test]
|
||||
async fn big_skip() {
|
||||
let log = null_logger();
|
||||
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
@@ -278,25 +193,29 @@ mod eth1_cache {
|
||||
Config {
|
||||
endpoint: eth1.endpoint(),
|
||||
deposit_contract_address: deposit_contract.address(),
|
||||
lowest_cached_block_number: get_block_number(runtime, &web3),
|
||||
lowest_cached_block_number: get_block_number(&web3).await,
|
||||
follow_distance: 0,
|
||||
block_cache_truncation: Some(cache_len),
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
let blocks = cache_len * 2;
|
||||
|
||||
for _ in 0..blocks {
|
||||
runtime
|
||||
.block_on(eth1.ganache.evm_mine())
|
||||
.expect("should mine block")
|
||||
eth1.ganache.evm_mine().await.expect("should mine block")
|
||||
}
|
||||
|
||||
runtime
|
||||
.block_on(service.update_block_cache())
|
||||
.expect("should update cache");
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should update deposit cache");
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should update block cache");
|
||||
|
||||
assert_eq!(
|
||||
service.block_cache_len(),
|
||||
@@ -307,14 +226,12 @@ mod eth1_cache {
|
||||
|
||||
/// Tests to ensure that the cache gets pruned when doing multiple downloads smaller than the
|
||||
/// cache size.
|
||||
#[test]
|
||||
fn pruning() {
|
||||
let mut env = new_env();
|
||||
let log = env.core_context().log;
|
||||
let runtime = env.runtime();
|
||||
#[tokio::test]
|
||||
async fn pruning() {
|
||||
let log = null_logger();
|
||||
|
||||
let eth1 = runtime
|
||||
.block_on(GanacheEth1Instance::new())
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
@@ -325,23 +242,27 @@ mod eth1_cache {
|
||||
Config {
|
||||
endpoint: eth1.endpoint(),
|
||||
deposit_contract_address: deposit_contract.address(),
|
||||
lowest_cached_block_number: get_block_number(runtime, &web3),
|
||||
lowest_cached_block_number: get_block_number(&web3).await,
|
||||
follow_distance: 0,
|
||||
block_cache_truncation: Some(cache_len),
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
for _ in 0..4 {
|
||||
for _ in 0..4u8 {
|
||||
for _ in 0..cache_len / 2 {
|
||||
runtime
|
||||
.block_on(eth1.ganache.evm_mine())
|
||||
.expect("should mine block")
|
||||
eth1.ganache.evm_mine().await.expect("should mine block")
|
||||
}
|
||||
runtime
|
||||
.block_on(service.update_block_cache())
|
||||
.expect("should update cache");
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should update deposit cache");
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should update block cache");
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
@@ -351,16 +272,14 @@ mod eth1_cache {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn double_update() {
|
||||
let mut env = new_env();
|
||||
let log = env.core_context().log;
|
||||
let runtime = env.runtime();
|
||||
#[tokio::test]
|
||||
async fn double_update() {
|
||||
let log = null_logger();
|
||||
|
||||
let n = 16;
|
||||
|
||||
let eth1 = runtime
|
||||
.block_on(GanacheEth1Instance::new())
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
@@ -369,26 +288,24 @@ mod eth1_cache {
|
||||
Config {
|
||||
endpoint: eth1.endpoint(),
|
||||
deposit_contract_address: deposit_contract.address(),
|
||||
lowest_cached_block_number: get_block_number(runtime, &web3),
|
||||
lowest_cached_block_number: get_block_number(&web3).await,
|
||||
follow_distance: 0,
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
for _ in 0..n {
|
||||
runtime
|
||||
.block_on(eth1.ganache.evm_mine())
|
||||
.expect("should mine block")
|
||||
eth1.ganache.evm_mine().await.expect("should mine block")
|
||||
}
|
||||
|
||||
runtime
|
||||
.block_on(
|
||||
service
|
||||
.update_block_cache()
|
||||
.join(service.update_block_cache()),
|
||||
)
|
||||
.expect("should perform two simultaneous updates");
|
||||
futures::try_join!(
|
||||
service.update_deposit_cache(),
|
||||
service.update_deposit_cache()
|
||||
)
|
||||
.expect("should perform two simultaneous updates of deposit cache");
|
||||
futures::try_join!(service.update_block_cache(), service.update_block_cache())
|
||||
.expect("should perform two simultaneous updates of block cache");
|
||||
|
||||
assert!(service.block_cache_len() >= n, "should grow the cache");
|
||||
}
|
||||
@@ -397,21 +314,19 @@ mod eth1_cache {
|
||||
mod deposit_tree {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn updating() {
|
||||
let mut env = new_env();
|
||||
let log = env.core_context().log;
|
||||
let runtime = env.runtime();
|
||||
#[tokio::test]
|
||||
async fn updating() {
|
||||
let log = null_logger();
|
||||
|
||||
let n = 4;
|
||||
|
||||
let eth1 = runtime
|
||||
.block_on(GanacheEth1Instance::new())
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
|
||||
let start_block = get_block_number(runtime, &web3);
|
||||
let start_block = get_block_number(&web3).await;
|
||||
|
||||
let service = Service::new(
|
||||
Config {
|
||||
@@ -422,23 +337,27 @@ mod deposit_tree {
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
for round in 0..3 {
|
||||
let deposits: Vec<_> = (0..n).into_iter().map(|_| random_deposit_data()).collect();
|
||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||
|
||||
for deposit in &deposits {
|
||||
deposit_contract
|
||||
.deposit(runtime, deposit.clone())
|
||||
.deposit(deposit.clone())
|
||||
.await
|
||||
.expect("should perform a deposit");
|
||||
}
|
||||
|
||||
runtime
|
||||
.block_on(service.update_deposit_cache())
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should perform update");
|
||||
|
||||
runtime
|
||||
.block_on(service.update_deposit_cache())
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should perform update when nothing has changed");
|
||||
|
||||
let first = n * round;
|
||||
@@ -448,8 +367,8 @@ mod deposit_tree {
|
||||
.deposits()
|
||||
.read()
|
||||
.cache
|
||||
.get_deposits(first..last, last, 32)
|
||||
.expect(&format!("should get deposits in round {}", round));
|
||||
.get_deposits(first, last, last, 32)
|
||||
.unwrap_or_else(|_| panic!("should get deposits in round {}", round));
|
||||
|
||||
assert_eq!(
|
||||
local_deposits.len(),
|
||||
@@ -470,21 +389,19 @@ mod deposit_tree {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn double_update() {
|
||||
let mut env = new_env();
|
||||
let log = env.core_context().log;
|
||||
let runtime = env.runtime();
|
||||
#[tokio::test]
|
||||
async fn double_update() {
|
||||
let log = null_logger();
|
||||
|
||||
let n = 8;
|
||||
|
||||
let eth1 = runtime
|
||||
.block_on(GanacheEth1Instance::new())
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
|
||||
let start_block = get_block_number(runtime, &web3);
|
||||
let start_block = get_block_number(&web3).await;
|
||||
|
||||
let service = Service::new(
|
||||
Config {
|
||||
@@ -496,38 +413,37 @@ mod deposit_tree {
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
|
||||
let deposits: Vec<_> = (0..n).into_iter().map(|_| random_deposit_data()).collect();
|
||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||
|
||||
for deposit in &deposits {
|
||||
deposit_contract
|
||||
.deposit(runtime, deposit.clone())
|
||||
.deposit(deposit.clone())
|
||||
.await
|
||||
.expect("should perform a deposit");
|
||||
}
|
||||
|
||||
runtime
|
||||
.block_on(
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.join(service.update_deposit_cache()),
|
||||
)
|
||||
.expect("should perform two updates concurrently");
|
||||
futures::try_join!(
|
||||
service.update_deposit_cache(),
|
||||
service.update_deposit_cache()
|
||||
)
|
||||
.expect("should perform two updates concurrently");
|
||||
|
||||
assert_eq!(service.deposit_cache_len(), n);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cache_consistency() {
|
||||
let mut env = new_env();
|
||||
let runtime = env.runtime();
|
||||
|
||||
#[tokio::test]
|
||||
async fn cache_consistency() {
|
||||
let n = 8;
|
||||
|
||||
let deposits: Vec<_> = (0..n).into_iter().map(|_| random_deposit_data()).collect();
|
||||
let spec = &MainnetEthSpec::default_spec();
|
||||
|
||||
let eth1 = runtime
|
||||
.block_on(GanacheEth1Instance::new())
|
||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
@@ -538,15 +454,18 @@ mod deposit_tree {
|
||||
// Perform deposits to the smart contract, recording it's state along the way.
|
||||
for deposit in &deposits {
|
||||
deposit_contract
|
||||
.deposit(runtime, deposit.clone())
|
||||
.deposit(deposit.clone())
|
||||
.await
|
||||
.expect("should perform a deposit");
|
||||
let block_number = get_block_number(runtime, &web3);
|
||||
let block_number = get_block_number(&web3).await;
|
||||
deposit_roots.push(
|
||||
blocking_deposit_root(runtime, ð1, block_number)
|
||||
blocking_deposit_root(ð1, block_number)
|
||||
.await
|
||||
.expect("should get root if contract exists"),
|
||||
);
|
||||
deposit_counts.push(
|
||||
blocking_deposit_count(runtime, ð1, block_number)
|
||||
blocking_deposit_count(ð1, block_number)
|
||||
.await
|
||||
.expect("should get count if contract exists"),
|
||||
);
|
||||
}
|
||||
@@ -554,10 +473,11 @@ mod deposit_tree {
|
||||
let mut tree = DepositCache::default();
|
||||
|
||||
// Pull all the deposit logs from the contract.
|
||||
let block_number = get_block_number(runtime, &web3);
|
||||
let logs: Vec<_> = blocking_deposit_logs(runtime, ð1, 0..block_number)
|
||||
let block_number = get_block_number(&web3).await;
|
||||
let logs: Vec<_> = blocking_deposit_logs(ð1, 0..block_number)
|
||||
.await
|
||||
.iter()
|
||||
.map(|raw| DepositLog::from_log(raw).expect("should parse deposit log"))
|
||||
.map(|raw| DepositLog::from_log(raw, spec).expect("should parse deposit log"))
|
||||
.inspect(|log| {
|
||||
tree.insert_log(log.clone())
|
||||
.expect("should add consecutive logs")
|
||||
@@ -586,7 +506,7 @@ mod deposit_tree {
|
||||
|
||||
// Ensure that the root from the deposit tree matches what the contract reported.
|
||||
let (root, deposits) = tree
|
||||
.get_deposits(0..i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH)
|
||||
.get_deposits(0, i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH)
|
||||
.expect("should get deposits");
|
||||
assert_eq!(
|
||||
root, deposit_roots[i],
|
||||
@@ -599,7 +519,7 @@ mod deposit_tree {
|
||||
for (j, deposit) in deposits.iter().enumerate() {
|
||||
assert!(
|
||||
verify_merkle_proof(
|
||||
Hash256::from_slice(&deposit.data.tree_hash_root()),
|
||||
deposit.data.tree_hash_root(),
|
||||
&deposit.proof,
|
||||
DEPOSIT_CONTRACT_TREE_DEPTH + 1,
|
||||
j,
|
||||
@@ -616,64 +536,59 @@ mod deposit_tree {
|
||||
mod http {
|
||||
use super::*;
|
||||
|
||||
fn get_block(runtime: &mut Runtime, eth1: &GanacheEth1Instance, block_number: u64) -> Block {
|
||||
runtime
|
||||
.block_on(eth1::http::get_block(
|
||||
ð1.endpoint(),
|
||||
block_number,
|
||||
timeout(),
|
||||
))
|
||||
async fn get_block(eth1: &GanacheEth1Instance, block_number: u64) -> Block {
|
||||
eth1::http::get_block(ð1.endpoint(), block_number, timeout())
|
||||
.await
|
||||
.expect("should get block number")
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn incrementing_deposits() {
|
||||
let mut env = new_env();
|
||||
let runtime = env.runtime();
|
||||
|
||||
let eth1 = runtime
|
||||
.block_on(GanacheEth1Instance::new())
|
||||
#[tokio::test]
|
||||
async fn incrementing_deposits() {
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
|
||||
let block_number = get_block_number(runtime, &web3);
|
||||
let logs = blocking_deposit_logs(runtime, ð1, 0..block_number);
|
||||
let block_number = get_block_number(&web3).await;
|
||||
let logs = blocking_deposit_logs(ð1, 0..block_number).await;
|
||||
assert_eq!(logs.len(), 0);
|
||||
|
||||
let mut old_root = blocking_deposit_root(runtime, ð1, block_number);
|
||||
let mut old_block = get_block(runtime, ð1, block_number);
|
||||
let mut old_root = blocking_deposit_root(ð1, block_number).await;
|
||||
let mut old_block = get_block(ð1, block_number).await;
|
||||
let mut old_block_number = block_number;
|
||||
|
||||
assert_eq!(
|
||||
blocking_deposit_count(runtime, ð1, block_number),
|
||||
blocking_deposit_count(ð1, block_number).await,
|
||||
Some(0),
|
||||
"should have deposit count zero"
|
||||
);
|
||||
|
||||
for i in 1..=8 {
|
||||
runtime
|
||||
.block_on(eth1.ganache.increase_time(1))
|
||||
eth1.ganache
|
||||
.increase_time(1)
|
||||
.await
|
||||
.expect("should be able to increase time on ganache");
|
||||
|
||||
deposit_contract
|
||||
.deposit(runtime, random_deposit_data())
|
||||
.deposit(random_deposit_data())
|
||||
.await
|
||||
.expect("should perform a deposit");
|
||||
|
||||
// Check the logs.
|
||||
let block_number = get_block_number(runtime, &web3);
|
||||
let logs = blocking_deposit_logs(runtime, ð1, 0..block_number);
|
||||
let block_number = get_block_number(&web3).await;
|
||||
let logs = blocking_deposit_logs(ð1, 0..block_number).await;
|
||||
assert_eq!(logs.len(), i, "the number of logs should be as expected");
|
||||
|
||||
// Check the deposit count.
|
||||
assert_eq!(
|
||||
blocking_deposit_count(runtime, ð1, block_number),
|
||||
blocking_deposit_count(ð1, block_number).await,
|
||||
Some(i as u64),
|
||||
"should have a correct deposit count"
|
||||
);
|
||||
|
||||
// Check the deposit root.
|
||||
let new_root = blocking_deposit_root(runtime, ð1, block_number);
|
||||
let new_root = blocking_deposit_root(ð1, block_number).await;
|
||||
assert_ne!(
|
||||
new_root, old_root,
|
||||
"deposit root should change with each deposit"
|
||||
@@ -681,7 +596,7 @@ mod http {
|
||||
old_root = new_root;
|
||||
|
||||
// Check the block hash.
|
||||
let new_block = get_block(runtime, ð1, block_number);
|
||||
let new_block = get_block(ð1, block_number).await;
|
||||
assert_ne!(
|
||||
new_block.hash, old_block.hash,
|
||||
"block hash should change with each deposit"
|
||||
@@ -711,3 +626,155 @@ mod http {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mod fast {
|
||||
use super::*;
|
||||
|
||||
// Adds deposits into deposit cache and matches deposit_count and deposit_root
|
||||
// with the deposit count and root computed from the deposit cache.
|
||||
#[tokio::test]
|
||||
async fn deposit_cache_query() {
|
||||
let log = null_logger();
|
||||
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
|
||||
let now = get_block_number(&web3).await;
|
||||
let service = Service::new(
|
||||
Config {
|
||||
endpoint: eth1.endpoint(),
|
||||
deposit_contract_address: deposit_contract.address(),
|
||||
deposit_contract_deploy_block: now,
|
||||
lowest_cached_block_number: now,
|
||||
follow_distance: 0,
|
||||
block_cache_truncation: None,
|
||||
..Config::default()
|
||||
},
|
||||
log,
|
||||
MainnetEthSpec::default_spec(),
|
||||
);
|
||||
let n = 10;
|
||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||
for deposit in &deposits {
|
||||
deposit_contract
|
||||
.deposit(deposit.clone())
|
||||
.await
|
||||
.expect("should perform a deposit");
|
||||
// Mine an extra block between deposits to test for corner cases
|
||||
eth1.ganache.evm_mine().await.expect("should mine block");
|
||||
}
|
||||
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should perform update");
|
||||
|
||||
assert!(
|
||||
service.deposit_cache_len() >= n,
|
||||
"should have imported n deposits"
|
||||
);
|
||||
|
||||
for block_num in 0..=get_block_number(&web3).await {
|
||||
let expected_deposit_count = blocking_deposit_count(ð1, block_num).await;
|
||||
let expected_deposit_root = blocking_deposit_root(ð1, block_num).await;
|
||||
|
||||
let deposit_count = service
|
||||
.deposits()
|
||||
.read()
|
||||
.cache
|
||||
.get_deposit_count_from_cache(block_num);
|
||||
let deposit_root = service
|
||||
.deposits()
|
||||
.read()
|
||||
.cache
|
||||
.get_deposit_root_from_cache(block_num);
|
||||
assert_eq!(
|
||||
expected_deposit_count, deposit_count,
|
||||
"deposit count from cache should match queried"
|
||||
);
|
||||
assert_eq!(
|
||||
expected_deposit_root, deposit_root,
|
||||
"deposit root from cache should match queried"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mod persist {
|
||||
use super::*;
|
||||
#[tokio::test]
|
||||
async fn test_persist_caches() {
|
||||
let log = null_logger();
|
||||
|
||||
let eth1 = GanacheEth1Instance::new()
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
|
||||
let now = get_block_number(&web3).await;
|
||||
let config = Config {
|
||||
endpoint: eth1.endpoint(),
|
||||
deposit_contract_address: deposit_contract.address(),
|
||||
deposit_contract_deploy_block: now,
|
||||
lowest_cached_block_number: now,
|
||||
follow_distance: 0,
|
||||
block_cache_truncation: None,
|
||||
..Config::default()
|
||||
};
|
||||
let service = Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec());
|
||||
let n = 10;
|
||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||
for deposit in &deposits {
|
||||
deposit_contract
|
||||
.deposit(deposit.clone())
|
||||
.await
|
||||
.expect("should perform a deposit");
|
||||
}
|
||||
|
||||
service
|
||||
.update_deposit_cache()
|
||||
.await
|
||||
.expect("should perform update");
|
||||
|
||||
assert!(
|
||||
service.deposit_cache_len() >= n,
|
||||
"should have imported n deposits"
|
||||
);
|
||||
|
||||
let deposit_count = service.deposit_cache_len();
|
||||
|
||||
service
|
||||
.update_block_cache()
|
||||
.await
|
||||
.expect("should perform update");
|
||||
|
||||
assert!(
|
||||
service.block_cache_len() >= n,
|
||||
"should have imported n eth1 blocks"
|
||||
);
|
||||
|
||||
let block_count = service.block_cache_len();
|
||||
|
||||
let eth1_bytes = service.as_bytes();
|
||||
|
||||
// Drop service and recover from bytes
|
||||
drop(service);
|
||||
|
||||
let recovered_service =
|
||||
Service::from_bytes(ð1_bytes, config, log, MainnetEthSpec::default_spec()).unwrap();
|
||||
assert_eq!(
|
||||
recovered_service.block_cache_len(),
|
||||
block_count,
|
||||
"Should have equal cached blocks as before recovery"
|
||||
);
|
||||
assert_eq!(
|
||||
recovered_service.deposit_cache_len(),
|
||||
deposit_count,
|
||||
"Should have equal cached deposits as before recovery"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
[package]
|
||||
name = "eth2-libp2p"
|
||||
version = "0.1.0"
|
||||
authors = ["Age Manning <Age@AgeManning.com>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
hex = "0.3"
|
||||
# rust-libp2p is presently being sourced from a Sigma Prime fork of the
|
||||
# `libp2p/rust-libp2p` repository.
|
||||
libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "3f9b030e29c9b31f9fe6f2ed27be4a813e2b3701" }
|
||||
enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "3f9b030e29c9b31f9fe6f2ed27be4a813e2b3701", features = ["serde"] }
|
||||
types = { path = "../../eth2/types" }
|
||||
serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
eth2_ssz = "0.1.2"
|
||||
eth2_ssz_derive = "0.1.0"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
version = { path = "../version" }
|
||||
tokio = "0.1.22"
|
||||
futures = "0.1.29"
|
||||
error-chain = "0.12.1"
|
||||
dirs = "2.0.2"
|
||||
fnv = "1.0.6"
|
||||
unsigned-varint = "0.2.3"
|
||||
lazy_static = "1.4.0"
|
||||
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
|
||||
tokio-io-timeout = "0.3.1"
|
||||
smallvec = "1.0.0"
|
||||
|
||||
[dev-dependencies]
|
||||
slog-stdlog = "4.0.0"
|
||||
slog-term = "2.4.2"
|
||||
slog-async = "2.3.0"
|
||||
@@ -1,334 +0,0 @@
|
||||
use crate::config::*;
|
||||
use crate::discovery::Discovery;
|
||||
use crate::rpc::{RPCEvent, RPCMessage, RPC};
|
||||
use crate::{error, NetworkConfig};
|
||||
use crate::{Topic, TopicHash};
|
||||
use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC};
|
||||
use futures::prelude::*;
|
||||
use libp2p::{
|
||||
core::identity::Keypair,
|
||||
discv5::Discv5Event,
|
||||
gossipsub::{Gossipsub, GossipsubEvent},
|
||||
identify::{Identify, IdentifyEvent},
|
||||
ping::{Ping, PingConfig, PingEvent},
|
||||
swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess},
|
||||
tokio_io::{AsyncRead, AsyncWrite},
|
||||
NetworkBehaviour, PeerId,
|
||||
};
|
||||
use slog::{debug, o};
|
||||
use std::num::NonZeroU32;
|
||||
use std::time::Duration;
|
||||
|
||||
const MAX_IDENTIFY_ADDRESSES: usize = 20;
|
||||
|
||||
/// Builds the network behaviour that manages the core protocols of eth2.
/// This core behaviour is managed by `Behaviour` which adds peer management to all core
/// behaviours.
///
/// NOTE: the `NetworkBehaviour` derive generates event plumbing for every field that is
/// not marked `#[behaviour(ignore)]`; sub-behaviours are polled in declaration order, so
/// field order here is significant.
#[derive(NetworkBehaviour)]
#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")]
pub struct Behaviour<TSubstream: AsyncRead + AsyncWrite> {
    /// The routing pub-sub mechanism for eth2.
    gossipsub: Gossipsub<TSubstream>,
    /// The Eth2 RPC specified in the wire-0 protocol.
    eth2_rpc: RPC<TSubstream>,
    /// Keep regular connection to peers and disconnect if absent.
    // TODO: Remove Libp2p ping in favour of discv5 ping.
    ping: Ping<TSubstream>,
    // TODO: Using id for initial interop. This will be removed by mainnet.
    /// Provides IP addresses and peer information.
    identify: Identify<TSubstream>,
    /// Discovery behaviour.
    discovery: Discovery<TSubstream>,
    #[behaviour(ignore)]
    /// The events generated by this behaviour to be consumed in the swarm poll.
    // Drained one-at-a-time by the custom `poll` method named above.
    events: Vec<BehaviourEvent>,
    /// Logger for behaviour actions.
    #[behaviour(ignore)]
    log: slog::Logger,
}
|
||||
|
||||
impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
|
||||
pub fn new(
|
||||
local_key: &Keypair,
|
||||
net_conf: &NetworkConfig,
|
||||
log: &slog::Logger,
|
||||
) -> error::Result<Self> {
|
||||
let local_peer_id = local_key.public().clone().into_peer_id();
|
||||
let behaviour_log = log.new(o!());
|
||||
|
||||
let ping_config = PingConfig::new()
|
||||
.with_timeout(Duration::from_secs(30))
|
||||
.with_interval(Duration::from_secs(20))
|
||||
.with_max_failures(NonZeroU32::new(2).expect("2 != 0"))
|
||||
.with_keep_alive(false);
|
||||
|
||||
let identify = Identify::new(
|
||||
"lighthouse/libp2p".into(),
|
||||
version::version(),
|
||||
local_key.public(),
|
||||
);
|
||||
|
||||
Ok(Behaviour {
|
||||
eth2_rpc: RPC::new(log.clone()),
|
||||
gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()),
|
||||
discovery: Discovery::new(local_key, net_conf, log)?,
|
||||
ping: Ping::new(ping_config),
|
||||
identify,
|
||||
events: Vec::new(),
|
||||
log: behaviour_log,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn discovery(&self) -> &Discovery<TSubstream> {
|
||||
&self.discovery
|
||||
}
|
||||
|
||||
pub fn gs(&self) -> &Gossipsub<TSubstream> {
|
||||
&self.gossipsub
|
||||
}
|
||||
}
|
||||
|
||||
// Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour
|
||||
impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubEvent>
|
||||
for Behaviour<TSubstream>
|
||||
{
|
||||
fn inject_event(&mut self, event: GossipsubEvent) {
|
||||
match event {
|
||||
GossipsubEvent::Message(propagation_source, gs_msg) => {
|
||||
let id = gs_msg.id();
|
||||
let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data);
|
||||
|
||||
// Note: We are keeping track here of the peer that sent us the message, not the
|
||||
// peer that originally published the message.
|
||||
self.events.push(BehaviourEvent::GossipMessage {
|
||||
id,
|
||||
source: propagation_source,
|
||||
topics: gs_msg.topics,
|
||||
message: msg,
|
||||
});
|
||||
}
|
||||
GossipsubEvent::Subscribed { peer_id, topic } => {
|
||||
self.events
|
||||
.push(BehaviourEvent::PeerSubscribed(peer_id, topic));
|
||||
}
|
||||
GossipsubEvent::Unsubscribed { .. } => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<RPCMessage>
|
||||
for Behaviour<TSubstream>
|
||||
{
|
||||
fn inject_event(&mut self, event: RPCMessage) {
|
||||
match event {
|
||||
RPCMessage::PeerDialed(peer_id) => {
|
||||
self.events.push(BehaviourEvent::PeerDialed(peer_id))
|
||||
}
|
||||
RPCMessage::PeerDisconnected(peer_id) => {
|
||||
self.events.push(BehaviourEvent::PeerDisconnected(peer_id))
|
||||
}
|
||||
RPCMessage::RPC(peer_id, rpc_event) => {
|
||||
self.events.push(BehaviourEvent::RPC(peer_id, rpc_event))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<PingEvent>
    for Behaviour<TSubstream>
{
    /// Ping events are intentionally dropped; the ping behaviour is only used to keep
    /// liveness information flowing, not to surface events.
    fn inject_event(&mut self, _event: PingEvent) {
        // not interested in ping responses at the moment.
    }
}
|
||||
|
||||
impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
|
||||
/// Consumes the events list when polled.
|
||||
fn poll<TBehaviourIn>(
|
||||
&mut self,
|
||||
) -> Async<NetworkBehaviourAction<TBehaviourIn, BehaviourEvent>> {
|
||||
if !self.events.is_empty() {
|
||||
return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0)));
|
||||
}
|
||||
|
||||
Async::NotReady
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<IdentifyEvent>
|
||||
for Behaviour<TSubstream>
|
||||
{
|
||||
fn inject_event(&mut self, event: IdentifyEvent) {
|
||||
match event {
|
||||
IdentifyEvent::Received {
|
||||
peer_id,
|
||||
mut info,
|
||||
observed_addr,
|
||||
} => {
|
||||
if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES {
|
||||
debug!(
|
||||
self.log,
|
||||
"More than 20 addresses have been identified, truncating"
|
||||
);
|
||||
info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES);
|
||||
}
|
||||
debug!(self.log, "Identified Peer"; "peer" => format!("{}", peer_id),
|
||||
"protocol_version" => info.protocol_version,
|
||||
"agent_version" => info.agent_version,
|
||||
"listening_ addresses" => format!("{:?}", info.listen_addrs),
|
||||
"observed_address" => format!("{:?}", observed_addr),
|
||||
"protocols" => format!("{:?}", info.protocols)
|
||||
);
|
||||
}
|
||||
IdentifyEvent::Sent { .. } => {}
|
||||
IdentifyEvent::Error { .. } => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<Discv5Event>
    for Behaviour<TSubstream>
{
    /// Discv5 events are handled inside the `Discovery` behaviour itself; nothing is
    /// forwarded here.
    fn inject_event(&mut self, _event: Discv5Event) {
        // discv5 has no events to inject
    }
}
|
||||
|
||||
/// Implements the combined behaviour for the libp2p service.
|
||||
impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
|
||||
/* Pubsub behaviour functions */
|
||||
|
||||
/// Subscribes to a gossipsub topic.
|
||||
pub fn subscribe(&mut self, topic: Topic) -> bool {
|
||||
self.gossipsub.subscribe(topic)
|
||||
}
|
||||
|
||||
/// Unsubscribe from a gossipsub topic.
|
||||
pub fn unsubscribe(&mut self, topic: Topic) -> bool {
|
||||
self.gossipsub.unsubscribe(topic)
|
||||
}
|
||||
|
||||
/// Publishes a message on the pubsub (gossipsub) behaviour.
|
||||
pub fn publish(&mut self, topics: &[Topic], message: PubsubMessage) {
|
||||
let message_data = message.into_data();
|
||||
for topic in topics {
|
||||
self.gossipsub.publish(topic, message_data.clone());
|
||||
}
|
||||
}
|
||||
|
||||
/// Forwards a message that is waiting in gossipsub's mcache. Messages are only propagated
|
||||
/// once validated by the beacon chain.
|
||||
pub fn propagate_message(&mut self, propagation_source: &PeerId, message_id: String) {
|
||||
self.gossipsub
|
||||
.propagate_message(&message_id, propagation_source);
|
||||
}
|
||||
|
||||
/* Eth2 RPC behaviour functions */
|
||||
|
||||
/// Sends an RPC Request/Response via the RPC protocol.
|
||||
pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) {
|
||||
self.eth2_rpc.send_rpc(peer_id, rpc_event);
|
||||
}
|
||||
|
||||
/* Discovery / Peer management functions */
|
||||
/// Return the list of currently connected peers.
|
||||
pub fn connected_peers(&self) -> usize {
|
||||
self.discovery.connected_peers()
|
||||
}
|
||||
|
||||
/// Notify discovery that the peer has been banned.
|
||||
pub fn peer_banned(&mut self, peer_id: PeerId) {
|
||||
self.discovery.peer_banned(peer_id);
|
||||
}
|
||||
|
||||
/// Notify discovery that the peer has been unbanned.
|
||||
pub fn peer_unbanned(&mut self, peer_id: &PeerId) {
|
||||
self.discovery.peer_unbanned(peer_id);
|
||||
}
|
||||
|
||||
/// Informs the discovery behaviour if a new IP/Port is set at the application layer
|
||||
pub fn update_local_enr_socket(&mut self, socket: std::net::SocketAddr, is_tcp: bool) {
|
||||
self.discovery.update_local_enr(socket, is_tcp);
|
||||
}
|
||||
}
|
||||
|
||||
/// The types of events than can be obtained from polling the behaviour.
pub enum BehaviourEvent {
    /// A received RPC event and the peer that it was received from.
    RPC(PeerId, RPCEvent),
    /// We have completed an initial connection to a new peer.
    PeerDialed(PeerId),
    /// A peer has disconnected.
    PeerDisconnected(PeerId),
    /// A gossipsub message has been received.
    GossipMessage {
        /// The gossipsub message id. Used when propagating blocks after validation.
        id: String,
        /// The peer from which we received this message, not the peer that published it.
        source: PeerId,
        /// The topics that this message was sent on.
        topics: Vec<TopicHash>,
        /// The message itself, decoded from the topic (see `PubsubMessage::from_topics`).
        message: PubsubMessage,
    },
    /// Subscribed to peer for given topic.
    PeerSubscribed(PeerId, TopicHash),
}
|
||||
|
||||
/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. These are encoded and
/// decoded upstream.
///
/// Each variant carries the raw (SSZ-encoded, per the topic postfix) payload bytes;
/// decoding into concrete types happens in the layer above.
#[derive(Debug, Clone, PartialEq)]
pub enum PubsubMessage {
    /// Gossipsub message providing notification of a new block.
    Block(Vec<u8>),
    /// Gossipsub message providing notification of a new attestation.
    Attestation(Vec<u8>),
    /// Gossipsub message providing notification of a voluntary exit.
    VoluntaryExit(Vec<u8>),
    /// Gossipsub message providing notification of a new proposer slashing.
    ProposerSlashing(Vec<u8>),
    /// Gossipsub message providing notification of a new attester slashing.
    AttesterSlashing(Vec<u8>),
    /// Gossipsub message from an unknown topic.
    Unknown(Vec<u8>),
}
|
||||
|
||||
impl PubsubMessage {
|
||||
/* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will
|
||||
* need to be modified.
|
||||
*
|
||||
* Also note that a message can be associated with many topics. As soon as one of the topics is
|
||||
* known we match. If none of the topics are known we return an unknown state.
|
||||
*/
|
||||
fn from_topics(topics: &[TopicHash], data: Vec<u8>) -> Self {
|
||||
for topic in topics {
|
||||
// compare the prefix and postfix, then match on the topic
|
||||
let topic_parts: Vec<&str> = topic.as_str().split('/').collect();
|
||||
if topic_parts.len() == 4
|
||||
&& topic_parts[1] == TOPIC_PREFIX
|
||||
&& topic_parts[3] == TOPIC_ENCODING_POSTFIX
|
||||
{
|
||||
match topic_parts[2] {
|
||||
BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data),
|
||||
BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data),
|
||||
VOLUNTARY_EXIT_TOPIC => return PubsubMessage::VoluntaryExit(data),
|
||||
PROPOSER_SLASHING_TOPIC => return PubsubMessage::ProposerSlashing(data),
|
||||
ATTESTER_SLASHING_TOPIC => return PubsubMessage::AttesterSlashing(data),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
PubsubMessage::Unknown(data)
|
||||
}
|
||||
|
||||
fn into_data(self) -> Vec<u8> {
|
||||
match self {
|
||||
PubsubMessage::Block(data)
|
||||
| PubsubMessage::Attestation(data)
|
||||
| PubsubMessage::VoluntaryExit(data)
|
||||
| PubsubMessage::ProposerSlashing(data)
|
||||
| PubsubMessage::AttesterSlashing(data)
|
||||
| PubsubMessage::Unknown(data) => data,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
use enr::Enr;
|
||||
use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder};
|
||||
use libp2p::Multiaddr;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
/// The gossipsub topic names.
// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX
// For example /eth2/beacon_block/ssz
pub const TOPIC_PREFIX: &str = "eth2";
/// Encoding segment appended to every topic name.
pub const TOPIC_ENCODING_POSTFIX: &str = "ssz";
/// Topic for newly-proposed beacon blocks.
pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
/// Topic for attestations.
pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation";
/// Topic for voluntary exits.
pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
/// Topic for proposer slashings.
pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing";
/// Topic for attester slashings.
pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
/// Prefix for shard-related topics.
pub const SHARD_TOPIC_PREFIX: &str = "shard";
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
// `serde(default)` lets partially-specified config files fall back to `Default`.
#[serde(default)]
/// Network configuration for lighthouse.
pub struct Config {
    /// Data directory where node's keyfile is stored
    pub network_dir: PathBuf,

    /// IP address to listen on.
    pub listen_address: std::net::IpAddr,

    /// The TCP port that libp2p listens on.
    pub libp2p_port: u16,

    /// The address to broadcast to peers about which address we are listening on.
    pub discovery_address: std::net::IpAddr,

    /// UDP port that discovery listens on.
    pub discovery_port: u16,

    /// Target number of connected peers.
    pub max_peers: usize,

    /// A secp256k1 secret key, as bytes in ASCII-encoded hex.
    ///
    /// With or without `0x` prefix.
    // Skipped by serde: secrets must never be round-tripped through config files.
    #[serde(skip)]
    pub secret_key_hex: Option<String>,

    /// Gossipsub configuration parameters.
    // Not serialisable; always built in code (see `Default`).
    #[serde(skip)]
    pub gs_config: GossipsubConfig,

    /// List of nodes to initially connect to.
    pub boot_nodes: Vec<Enr>,

    /// List of libp2p nodes to initially connect to.
    pub libp2p_nodes: Vec<Multiaddr>,

    /// Client version
    pub client_version: String,

    /// List of extra topics to initially subscribe to as strings.
    pub topics: Vec<String>,

    /// Introduces randomization in network propagation of messages. This should only be set for
    /// testing purposes and will likely be removed in future versions.
    // TODO: Remove this functionality for mainnet
    pub propagation_percentage: Option<u8>,
}
|
||||
|
||||
impl Default for Config {
|
||||
/// Generate a default network configuration.
|
||||
fn default() -> Self {
|
||||
let mut network_dir = dirs::home_dir().unwrap_or_else(|| PathBuf::from("."));
|
||||
network_dir.push(".lighthouse");
|
||||
network_dir.push("network");
|
||||
Config {
|
||||
network_dir,
|
||||
listen_address: "127.0.0.1".parse().expect("valid ip address"),
|
||||
libp2p_port: 9000,
|
||||
discovery_address: "127.0.0.1".parse().expect("valid ip address"),
|
||||
discovery_port: 9000,
|
||||
max_peers: 10,
|
||||
secret_key_hex: None,
|
||||
// Note: The topics by default are sent as plain strings. Hashes are an optional
|
||||
// parameter.
|
||||
gs_config: GossipsubConfigBuilder::new()
|
||||
.max_transmit_size(1_048_576)
|
||||
.heartbeat_interval(Duration::from_secs(20)) // TODO: Reduce for mainnet
|
||||
.manual_propagation(true) // require validation before propagation
|
||||
.build(),
|
||||
boot_nodes: vec![],
|
||||
libp2p_nodes: vec![],
|
||||
client_version: version::version(),
|
||||
topics: Vec::new(),
|
||||
propagation_percentage: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,391 +0,0 @@
|
||||
use crate::metrics;
|
||||
use crate::{error, NetworkConfig};
|
||||
/// This manages the discovery and management of peers.
|
||||
///
|
||||
/// Currently using discv5 for peer discovery.
|
||||
///
|
||||
use futures::prelude::*;
|
||||
use libp2p::core::{identity::Keypair, ConnectedPoint, Multiaddr, PeerId};
|
||||
use libp2p::discv5::{Discv5, Discv5Event};
|
||||
use libp2p::enr::{Enr, EnrBuilder, NodeId};
|
||||
use libp2p::multiaddr::Protocol;
|
||||
use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler};
|
||||
use slog::{debug, info, warn};
|
||||
use std::collections::HashSet;
|
||||
use std::fs::File;
|
||||
use std::io::prelude::*;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio::timer::Delay;
|
||||
|
||||
/// Maximum seconds before searching for extra peers.
const MAX_TIME_BETWEEN_PEER_SEARCHES: u64 = 120;
/// Initial delay between peer searches; doubled after each query round until the
/// maximum above is reached.
const INITIAL_SEARCH_DELAY: u64 = 5;
/// Local ENR storage filename.
const ENR_FILENAME: &str = "enr.dat";
|
||||
|
||||
/// Lighthouse discovery behaviour. This provides peer management and discovery using the Discv5
/// libp2p protocol.
pub struct Discovery<TSubstream> {
    /// The peers currently connected to libp2p streams.
    connected_peers: HashSet<PeerId>,

    /// The currently banned peers.
    // Consulted before dialing newly discovered peers; never connects to these.
    banned_peers: HashSet<PeerId>,

    /// The target number of connected peers on the libp2p interface.
    max_peers: usize,

    /// The directory where the ENR is stored.
    enr_dir: String,

    /// The delay between peer discovery searches.
    peer_discovery_delay: Delay,

    /// Tracks the last discovery delay. The delay is doubled each round until the max
    /// time is reached.
    past_discovery_delay: u64,

    /// The TCP port for libp2p. Used to convert an updated IP address to a multiaddr. Note: This
    /// assumes that the external TCP port is the same as the internal TCP port if behind a NAT.
    //TODO: Improve NAT handling limit the above restriction
    tcp_port: u16,

    /// The discovery behaviour used to discover new peers.
    discovery: Discv5<TSubstream>,

    /// Logger for the discovery behaviour.
    log: slog::Logger,
}
|
||||
|
||||
impl<TSubstream> Discovery<TSubstream> {
    /// Builds the discovery behaviour: loads (or generates) the local ENR from disk,
    /// starts the discv5 service and seeds its routing table with the configured
    /// boot nodes.
    ///
    /// Returns an error if the discv5 service fails to start or the ENR cannot be built.
    pub fn new(
        local_key: &Keypair,
        config: &NetworkConfig,
        log: &slog::Logger,
    ) -> error::Result<Self> {
        let log = log.clone();

        // checks if current ENR matches that found on disk
        let local_enr = load_enr(local_key, config, &log)?;

        // Used later to persist updated ENRs; an unrepresentable path degrades to "".
        let enr_dir = match config.network_dir.to_str() {
            Some(path) => String::from(path),
            None => String::from(""),
        };

        info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq());
        debug!(log, "Discv5 Node ID Initialised"; "node_id" => format!("{}",local_enr.node_id()));

        // the last parameter enables IP limiting. 2 Nodes on the same /24 subnet per bucket and 10
        // nodes on the same /24 subnet per table.
        // TODO: IP filtering is currently disabled for the DHT. Enable for production
        let mut discovery = Discv5::new(local_enr, local_key.clone(), config.listen_address, false)
            .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?;

        // Add bootnodes to routing table
        for bootnode_enr in config.boot_nodes.clone() {
            debug!(
                log,
                "Adding node to routing table";
                "node_id" => format!("{}",
                bootnode_enr.node_id())
            );
            discovery.add_enr(bootnode_enr);
        }

        Ok(Self {
            connected_peers: HashSet::new(),
            banned_peers: HashSet::new(),
            max_peers: config.max_peers,
            // An already-elapsed deadline so the first poll triggers a search immediately.
            peer_discovery_delay: Delay::new(Instant::now()),
            past_discovery_delay: INITIAL_SEARCH_DELAY,
            tcp_port: config.libp2p_port,
            discovery,
            log,
            enr_dir,
        })
    }

    /// Allows the application layer to update the `IP` and `port` of the local ENR. The second
    /// parameter defines whether the port is a TCP port. If false, this is interpreted as a UDP
    /// port.
    pub fn update_local_enr(&mut self, socket: std::net::SocketAddr, is_tcp: bool) {
        // discv5 checks to see if an update is necessary before performing it, so we do not
        // need to check here
        if self.discovery.update_local_enr_socket(socket, is_tcp) {
            let enr = self.discovery.local_enr();
            info!(
            self.log,
            "ENR Updated";
            "enr" => enr.to_base64(),
            "seq" => enr.seq(),
            "address" => format!("{:?}", socket));
            // Persist the updated ENR so the sequence number survives restarts.
            save_enr_to_disc(Path::new(&self.enr_dir), enr, &self.log)
        }
    }

    /// Return the nodes local ENR.
    pub fn local_enr(&self) -> &Enr {
        self.discovery.local_enr()
    }

    /// Manually search for peers. This restarts the discovery round, sparking multiple rapid
    /// queries.
    pub fn discover_peers(&mut self) {
        // Reset the backoff so subsequent automatic searches start fast again.
        self.past_discovery_delay = INITIAL_SEARCH_DELAY;
        self.find_peers();
    }

    /// Add an ENR to the routing table of the discovery mechanism.
    pub fn add_enr(&mut self, enr: Enr) {
        self.discovery.add_enr(enr);
    }

    /// The current number of connected libp2p peers.
    pub fn connected_peers(&self) -> usize {
        self.connected_peers.len()
    }

    /// The set of currently connected libp2p peers.
    pub fn connected_peer_set(&self) -> &HashSet<PeerId> {
        &self.connected_peers
    }

    /// The peer has been banned. Add this peer to the banned list to prevent any future
    /// re-connections.
    // TODO: Remove the peer from the DHT if present
    pub fn peer_banned(&mut self, peer_id: PeerId) {
        self.banned_peers.insert(peer_id);
    }

    /// Removes a peer from the banned list, allowing future dial attempts.
    pub fn peer_unbanned(&mut self, peer_id: &PeerId) {
        self.banned_peers.remove(peer_id);
    }

    /// Search for new peers using the underlying discovery mechanism.
    fn find_peers(&mut self) {
        // pick a random NodeId to walk a random region of the DHT keyspace
        let random_node = NodeId::random();
        debug!(self.log, "Searching for peers");
        self.discovery.find_node(random_node);
    }
}
|
||||
|
||||
// Redirect all behaviour events to underlying discovery behaviour.
|
||||
impl<TSubstream> NetworkBehaviour for Discovery<TSubstream>
|
||||
where
|
||||
TSubstream: AsyncRead + AsyncWrite,
|
||||
{
|
||||
type ProtocolsHandler = <Discv5<TSubstream> as NetworkBehaviour>::ProtocolsHandler;
|
||||
type OutEvent = <Discv5<TSubstream> as NetworkBehaviour>::OutEvent;
|
||||
|
||||
fn new_handler(&mut self) -> Self::ProtocolsHandler {
|
||||
NetworkBehaviour::new_handler(&mut self.discovery)
|
||||
}
|
||||
|
||||
fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
|
||||
// Let discovery track possible known peers.
|
||||
self.discovery.addresses_of_peer(peer_id)
|
||||
}
|
||||
|
||||
fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) {
|
||||
self.connected_peers.insert(peer_id);
|
||||
// TODO: Drop peers if over max_peer limit
|
||||
|
||||
metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
|
||||
metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64);
|
||||
}
|
||||
|
||||
fn inject_disconnected(&mut self, peer_id: &PeerId, _endpoint: ConnectedPoint) {
|
||||
self.connected_peers.remove(peer_id);
|
||||
|
||||
metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT);
|
||||
metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64);
|
||||
}
|
||||
|
||||
fn inject_replaced(
|
||||
&mut self,
|
||||
_peer_id: PeerId,
|
||||
_closed: ConnectedPoint,
|
||||
_opened: ConnectedPoint,
|
||||
) {
|
||||
// discv5 doesn't implement
|
||||
}
|
||||
|
||||
fn inject_node_event(
|
||||
&mut self,
|
||||
_peer_id: PeerId,
|
||||
_event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
|
||||
) {
|
||||
// discv5 doesn't implement
|
||||
}
|
||||
|
||||
fn poll(
|
||||
&mut self,
|
||||
params: &mut impl PollParameters,
|
||||
) -> Async<
|
||||
NetworkBehaviourAction<
|
||||
<Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
|
||||
Self::OutEvent,
|
||||
>,
|
||||
> {
|
||||
// search for peers if it is time
|
||||
loop {
|
||||
match self.peer_discovery_delay.poll() {
|
||||
Ok(Async::Ready(_)) => {
|
||||
if self.connected_peers.len() < self.max_peers {
|
||||
self.find_peers();
|
||||
}
|
||||
// Set to maximum, and update to earlier, once we get our results back.
|
||||
self.peer_discovery_delay.reset(
|
||||
Instant::now() + Duration::from_secs(MAX_TIME_BETWEEN_PEER_SEARCHES),
|
||||
);
|
||||
}
|
||||
Ok(Async::NotReady) => break,
|
||||
Err(e) => {
|
||||
warn!(self.log, "Discovery peer search failed"; "error" => format!("{:?}", e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Poll discovery
|
||||
loop {
|
||||
match self.discovery.poll(params) {
|
||||
Async::Ready(NetworkBehaviourAction::GenerateEvent(event)) => {
|
||||
match event {
|
||||
Discv5Event::Discovered(_enr) => {
|
||||
// not concerned about FINDNODE results, rather the result of an entire
|
||||
// query.
|
||||
}
|
||||
Discv5Event::SocketUpdated(socket) => {
|
||||
info!(self.log, "Address updated"; "ip" => format!("{}",socket.ip()), "udp_port" => format!("{}", socket.port()));
|
||||
metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT);
|
||||
let mut address = Multiaddr::from(socket.ip());
|
||||
address.push(Protocol::Tcp(self.tcp_port));
|
||||
let enr = self.discovery.local_enr();
|
||||
save_enr_to_disc(Path::new(&self.enr_dir), enr, &self.log);
|
||||
|
||||
return Async::Ready(NetworkBehaviourAction::ReportObservedAddr {
|
||||
address,
|
||||
});
|
||||
}
|
||||
Discv5Event::FindNodeResult { closer_peers, .. } => {
|
||||
debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len());
|
||||
// update the time to the next query
|
||||
if self.past_discovery_delay < MAX_TIME_BETWEEN_PEER_SEARCHES {
|
||||
self.past_discovery_delay *= 2;
|
||||
}
|
||||
let delay = std::cmp::max(
|
||||
self.past_discovery_delay,
|
||||
MAX_TIME_BETWEEN_PEER_SEARCHES,
|
||||
);
|
||||
self.peer_discovery_delay
|
||||
.reset(Instant::now() + Duration::from_secs(delay));
|
||||
|
||||
if closer_peers.is_empty() {
|
||||
debug!(self.log, "Discovery random query found no peers");
|
||||
}
|
||||
for peer_id in closer_peers {
|
||||
// if we need more peers, attempt a connection
|
||||
if self.connected_peers.len() < self.max_peers
|
||||
&& self.connected_peers.get(&peer_id).is_none()
|
||||
&& !self.banned_peers.contains(&peer_id)
|
||||
{
|
||||
debug!(self.log, "Peer discovered"; "peer_id"=> format!("{:?}", peer_id));
|
||||
return Async::Ready(NetworkBehaviourAction::DialPeer {
|
||||
peer_id,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
// discv5 does not output any other NetworkBehaviourAction
|
||||
Async::Ready(_) => {}
|
||||
Async::NotReady => break,
|
||||
}
|
||||
}
|
||||
Async::NotReady
|
||||
}
|
||||
}
|
||||
|
||||
/// Loads an ENR from file if it exists and matches the current NodeId and sequence number. If none
/// exists, generates a new one.
///
/// If an ENR exists, with the same NodeId and IP address, we use the disk-generated one as its
/// ENR sequence will be equal or higher than a newly generated one.
fn load_enr(
    local_key: &Keypair,
    config: &NetworkConfig,
    log: &slog::Logger,
) -> Result<Enr, String> {
    // Build the local ENR.
    // Note: Discovery should update the ENR record's IP to the external IP as seen by the
    // majority of our peers.
    let mut local_enr = EnrBuilder::new("v4")
        .ip(config.discovery_address)
        .tcp(config.libp2p_port)
        .udp(config.discovery_port)
        .build(&local_key)
        .map_err(|e| format!("Could not build Local ENR: {:?}", e))?;

    let enr_f = config.network_dir.join(ENR_FILENAME);
    if let Ok(mut enr_file) = File::open(enr_f.clone()) {
        let mut enr_string = String::new();
        match enr_file.read_to_string(&mut enr_string) {
            // Unreadable file: fall through and use the freshly built ENR.
            Err(_) => debug!(log, "Could not read ENR from file"),
            Ok(_) => {
                match Enr::from_str(&enr_string) {
                    Ok(enr) => {
                        if enr.node_id() == local_enr.node_id() {
                            if enr.ip().map(Into::into) == Some(config.discovery_address)
                                && enr.tcp() == Some(config.libp2p_port)
                                && enr.udp() == Some(config.discovery_port)
                            {
                                debug!(log, "ENR loaded from file"; "file" => format!("{:?}", enr_f));
                                // the stored ENR has the same configuration, use it
                                return Ok(enr);
                            }

                            // same node id, different configuration - update the sequence number
                            // (peers treat the higher sequence number as the fresher record)
                            let new_seq_no = enr.seq().checked_add(1).ok_or_else(|| "ENR sequence number on file is too large. Remove it to generate a new NodeId")?;
                            local_enr.set_seq(new_seq_no, local_key).map_err(|e| {
                                format!("Could not update ENR sequence number: {:?}", e)
                            })?;
                            debug!(log, "ENR sequence number increased"; "seq" => new_seq_no);
                        }
                    }
                    Err(e) => {
                        // Corrupt/undecodable file: keep the newly generated ENR.
                        warn!(log, "ENR from file could not be decoded"; "error" => format!("{:?}", e));
                    }
                }
            }
        }
    }

    // Persist whichever ENR we ended up with (possibly with a bumped sequence number).
    save_enr_to_disc(&config.network_dir, &local_enr, log);

    Ok(local_enr)
}
|
||||
|
||||
fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) {
|
||||
let _ = std::fs::create_dir_all(dir);
|
||||
match File::create(dir.join(Path::new(ENR_FILENAME)))
|
||||
.and_then(|mut f| f.write_all(&enr.to_base64().as_bytes()))
|
||||
{
|
||||
Ok(_) => {
|
||||
debug!(log, "ENR written to disk");
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
log,
|
||||
"Could not write ENR to file"; "file" => format!("{:?}{:?}",dir, ENR_FILENAME), "error" => format!("{}", e)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
/// This crate contains the main link for lighthouse to rust-libp2p. It therefore re-exports
|
||||
/// all required libp2p functionality.
|
||||
///
|
||||
/// This crate builds and manages the libp2p services required by the beacon node.
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
pub mod behaviour;
|
||||
mod config;
|
||||
mod discovery;
|
||||
pub mod error;
|
||||
mod metrics;
|
||||
pub mod rpc;
|
||||
mod service;
|
||||
|
||||
pub use behaviour::PubsubMessage;
|
||||
pub use config::{
|
||||
Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX,
|
||||
TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX,
|
||||
};
|
||||
pub use libp2p::enr::Enr;
|
||||
pub use libp2p::gossipsub::{Topic, TopicHash};
|
||||
pub use libp2p::multiaddr;
|
||||
pub use libp2p::Multiaddr;
|
||||
pub use libp2p::{
|
||||
gossipsub::{GossipsubConfig, GossipsubConfigBuilder},
|
||||
PeerId, Swarm,
|
||||
};
|
||||
pub use rpc::RPCEvent;
|
||||
pub use service::Libp2pEvent;
|
||||
pub use service::Service;
|
||||
@@ -1,20 +0,0 @@
|
||||
pub use lighthouse_metrics::*;
|
||||
|
||||
// Prometheus metrics for the libp2p layer. Registration errors are carried in the
// `Result` and handled by the `lighthouse_metrics` helpers at use sites.
lazy_static! {
    pub static ref ADDRESS_UPDATE_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_address_update_total",
        "Count of libp2p socked updated events (when our view of our IP address has changed)"
    );
    pub static ref PEERS_CONNECTED: Result<IntGauge> = try_create_int_gauge(
        "libp2p_peer_connected_peers_total",
        "Count of libp2p peers currently connected"
    );
    pub static ref PEER_CONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_peer_connect_event_total",
        "Count of libp2p peer connect events (not the current number of connected peers)"
    );
    pub static ref PEER_DISCONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
        "libp2p_peer_disconnect_event_total",
        "Count of libp2p peer disconnect events"
    );
}
|
||||
@@ -1,154 +0,0 @@
|
||||
//! This handles the various supported encoding mechanism for the Eth 2.0 RPC.
|
||||
|
||||
use crate::rpc::{ErrorMessage, RPCErrorResponse, RPCRequest, RPCResponse};
|
||||
use libp2p::bytes::BufMut;
|
||||
use libp2p::bytes::BytesMut;
|
||||
use tokio::codec::{Decoder, Encoder};
|
||||
|
||||
/// A codec for outbound RPC streams that, beyond the usual `Encoder`/`Decoder` pair,
/// can also decode the error payload that follows a non-success response code.
pub trait OutboundCodec: Encoder + Decoder {
    /// The type produced when decoding an error response body.
    type ErrorType;

    /// Attempts to decode an error payload from `src`.
    ///
    /// By `Decoder` convention, `Ok(None)` indicates more bytes are required.
    fn decode_error(
        &mut self,
        src: &mut BytesMut,
    ) -> Result<Option<Self::ErrorType>, <Self as Decoder>::Error>;
}
|
||||
|
||||
/* Global Inbound Codec */
|
||||
// This deals with Decoding RPC Requests from other peers and encoding our responses
|
||||
|
||||
/// Inbound codec wrapper: decodes RPC requests from peers and encodes our responses,
/// delegating payload (de)serialisation to the wrapped encoding-specific codec.
pub struct BaseInboundCodec<TCodec>
where
    TCodec: Encoder + Decoder,
{
    /// Inner codec for handling various encodings
    inner: TCodec,
}

impl<TCodec> BaseInboundCodec<TCodec>
where
    TCodec: Encoder + Decoder,
{
    /// Wraps `codec` as the payload codec for an inbound stream.
    pub fn new(codec: TCodec) -> Self {
        BaseInboundCodec { inner: codec }
    }
}
|
||||
|
||||
/* Global Outbound Codec */
|
||||
// This deals with Decoding RPC Responses from other peers and encoding our requests
|
||||
pub struct BaseOutboundCodec<TOutboundCodec>
|
||||
where
|
||||
TOutboundCodec: OutboundCodec,
|
||||
{
|
||||
/// Inner codec for handling various encodings.
|
||||
inner: TOutboundCodec,
|
||||
/// Keeps track of the current response code for a chunk.
|
||||
current_response_code: Option<u8>,
|
||||
}
|
||||
|
||||
impl<TOutboundCodec> BaseOutboundCodec<TOutboundCodec>
|
||||
where
|
||||
TOutboundCodec: OutboundCodec,
|
||||
{
|
||||
pub fn new(codec: TOutboundCodec) -> Self {
|
||||
BaseOutboundCodec {
|
||||
inner: codec,
|
||||
current_response_code: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Implementation of the Encoding/Decoding for the global codecs */
|
||||
|
||||
/* Base Inbound Codec */
|
||||
|
||||
// This Encodes RPC Responses sent to external peers
|
||||
impl<TCodec> Encoder for BaseInboundCodec<TCodec>
|
||||
where
|
||||
TCodec: Decoder + Encoder<Item = RPCErrorResponse>,
|
||||
{
|
||||
type Item = RPCErrorResponse;
|
||||
type Error = <TCodec as Encoder>::Error;
|
||||
|
||||
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
dst.clear();
|
||||
dst.reserve(1);
|
||||
dst.put_u8(
|
||||
item.as_u8()
|
||||
.expect("Should never encode a stream termination"),
|
||||
);
|
||||
self.inner.encode(item, dst)
|
||||
}
|
||||
}
|
||||
|
||||
// This Decodes RPC Requests from external peers
|
||||
impl<TCodec> Decoder for BaseInboundCodec<TCodec>
|
||||
where
|
||||
TCodec: Encoder + Decoder<Item = RPCRequest>,
|
||||
{
|
||||
type Item = RPCRequest;
|
||||
type Error = <TCodec as Decoder>::Error;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
self.inner.decode(src)
|
||||
}
|
||||
}
|
||||
|
||||
/* Base Outbound Codec */
|
||||
|
||||
// This Encodes RPC Requests sent to external peers
|
||||
impl<TCodec> Encoder for BaseOutboundCodec<TCodec>
|
||||
where
|
||||
TCodec: OutboundCodec + Encoder<Item = RPCRequest>,
|
||||
{
|
||||
type Item = RPCRequest;
|
||||
type Error = <TCodec as Encoder>::Error;
|
||||
|
||||
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
self.inner.encode(item, dst)
|
||||
}
|
||||
}
|
||||
|
||||
// This decodes RPC Responses received from external peers
|
||||
impl<TCodec> Decoder for BaseOutboundCodec<TCodec>
|
||||
where
|
||||
TCodec: OutboundCodec<ErrorType = ErrorMessage> + Decoder<Item = RPCResponse>,
|
||||
{
|
||||
type Item = RPCErrorResponse;
|
||||
type Error = <TCodec as Decoder>::Error;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
// if we have only received the response code, wait for more bytes
|
||||
if src.len() <= 1 {
|
||||
return Ok(None);
|
||||
}
|
||||
// using the response code determine which kind of payload needs to be decoded.
|
||||
let response_code = self.current_response_code.unwrap_or_else(|| {
|
||||
let resp_code = src.split_to(1)[0];
|
||||
self.current_response_code = Some(resp_code);
|
||||
resp_code
|
||||
});
|
||||
|
||||
let inner_result = {
|
||||
if RPCErrorResponse::is_response(response_code) {
|
||||
// decode an actual response and mutates the buffer if enough bytes have been read
|
||||
// returning the result.
|
||||
self.inner
|
||||
.decode(src)
|
||||
.map(|r| r.map(RPCErrorResponse::Success))
|
||||
} else {
|
||||
// decode an error
|
||||
self.inner
|
||||
.decode_error(src)
|
||||
.map(|r| r.map(|resp| RPCErrorResponse::from_error(response_code, resp)))
|
||||
}
|
||||
};
|
||||
// if the inner decoder was capable of decoding a chunk, we need to reset the current
|
||||
// response code for the next chunk
|
||||
if let Ok(Some(_)) = inner_result {
|
||||
self.current_response_code = None;
|
||||
}
|
||||
// return the result
|
||||
inner_result
|
||||
}
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
pub(crate) mod base;
|
||||
pub(crate) mod ssz;
|
||||
|
||||
use self::base::{BaseInboundCodec, BaseOutboundCodec};
|
||||
use self::ssz::{SSZInboundCodec, SSZOutboundCodec};
|
||||
use crate::rpc::protocol::RPCError;
|
||||
use crate::rpc::{RPCErrorResponse, RPCRequest};
|
||||
use libp2p::bytes::BytesMut;
|
||||
use tokio::codec::{Decoder, Encoder};
|
||||
|
||||
// Known types of codecs
|
||||
pub enum InboundCodec {
|
||||
SSZ(BaseInboundCodec<SSZInboundCodec>),
|
||||
}
|
||||
|
||||
pub enum OutboundCodec {
|
||||
SSZ(BaseOutboundCodec<SSZOutboundCodec>),
|
||||
}
|
||||
|
||||
impl Encoder for InboundCodec {
|
||||
type Item = RPCErrorResponse;
|
||||
type Error = RPCError;
|
||||
|
||||
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
match self {
|
||||
InboundCodec::SSZ(codec) => codec.encode(item, dst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for InboundCodec {
|
||||
type Item = RPCRequest;
|
||||
type Error = RPCError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
match self {
|
||||
InboundCodec::SSZ(codec) => codec.decode(src),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Encoder for OutboundCodec {
|
||||
type Item = RPCRequest;
|
||||
type Error = RPCError;
|
||||
|
||||
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
match self {
|
||||
OutboundCodec::SSZ(codec) => codec.encode(item, dst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for OutboundCodec {
|
||||
type Item = RPCErrorResponse;
|
||||
type Error = RPCError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
match self {
|
||||
OutboundCodec::SSZ(codec) => codec.decode(src),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,229 +0,0 @@
|
||||
use crate::rpc::methods::*;
|
||||
use crate::rpc::{
|
||||
codec::base::OutboundCodec,
|
||||
protocol::{
|
||||
ProtocolId, RPCError, RPC_BLOCKS_BY_RANGE, RPC_BLOCKS_BY_ROOT, RPC_GOODBYE, RPC_STATUS,
|
||||
},
|
||||
};
|
||||
use crate::rpc::{ErrorMessage, RPCErrorResponse, RPCRequest, RPCResponse};
|
||||
use libp2p::bytes::{BufMut, Bytes, BytesMut};
|
||||
use ssz::{Decode, Encode};
|
||||
use tokio::codec::{Decoder, Encoder};
|
||||
use unsigned_varint::codec::UviBytes;
|
||||
|
||||
/* Inbound Codec */
|
||||
|
||||
pub struct SSZInboundCodec {
|
||||
inner: UviBytes,
|
||||
protocol: ProtocolId,
|
||||
}
|
||||
|
||||
impl SSZInboundCodec {
|
||||
pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self {
|
||||
let mut uvi_codec = UviBytes::default();
|
||||
uvi_codec.set_max_len(max_packet_size);
|
||||
|
||||
// this encoding only applies to ssz.
|
||||
debug_assert!(protocol.encoding.as_str() == "ssz");
|
||||
|
||||
SSZInboundCodec {
|
||||
inner: uvi_codec,
|
||||
protocol,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Encoder for inbound streams: Encodes RPC Responses sent to peers.
|
||||
impl Encoder for SSZInboundCodec {
|
||||
type Item = RPCErrorResponse;
|
||||
type Error = RPCError;
|
||||
|
||||
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
let bytes = match item {
|
||||
RPCErrorResponse::Success(resp) => {
|
||||
match resp {
|
||||
RPCResponse::Status(res) => res.as_ssz_bytes(),
|
||||
RPCResponse::BlocksByRange(res) => res, // already raw bytes
|
||||
RPCResponse::BlocksByRoot(res) => res, // already raw bytes
|
||||
}
|
||||
}
|
||||
RPCErrorResponse::InvalidRequest(err) => err.as_ssz_bytes(),
|
||||
RPCErrorResponse::ServerError(err) => err.as_ssz_bytes(),
|
||||
RPCErrorResponse::Unknown(err) => err.as_ssz_bytes(),
|
||||
RPCErrorResponse::StreamTermination(_) => {
|
||||
unreachable!("Code error - attempting to encode a stream termination")
|
||||
}
|
||||
};
|
||||
if !bytes.is_empty() {
|
||||
// length-prefix and return
|
||||
return self
|
||||
.inner
|
||||
.encode(Bytes::from(bytes), dst)
|
||||
.map_err(RPCError::from);
|
||||
} else {
|
||||
// payload is empty, add a 0-byte length prefix
|
||||
dst.reserve(1);
|
||||
dst.put_u8(0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Decoder for inbound streams: Decodes RPC requests from peers
|
||||
impl Decoder for SSZInboundCodec {
|
||||
type Item = RPCRequest;
|
||||
type Error = RPCError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
match self.inner.decode(src).map_err(RPCError::from) {
|
||||
Ok(Some(packet)) => match self.protocol.message_name.as_str() {
|
||||
RPC_STATUS => match self.protocol.version.as_str() {
|
||||
"1" => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes(
|
||||
&packet,
|
||||
)?))),
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
RPC_GOODBYE => match self.protocol.version.as_str() {
|
||||
"1" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(
|
||||
&packet,
|
||||
)?))),
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() {
|
||||
"1" => Ok(Some(RPCRequest::BlocksByRange(
|
||||
BlocksByRangeRequest::from_ssz_bytes(&packet)?,
|
||||
))),
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() {
|
||||
"1" => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest {
|
||||
block_roots: Vec::from_ssz_bytes(&packet)?,
|
||||
}))),
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
_ => unreachable!("Cannot negotiate an unknown protocol"),
|
||||
},
|
||||
Ok(None) => Ok(None),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Outbound Codec: Codec for initiating RPC requests */
|
||||
|
||||
pub struct SSZOutboundCodec {
|
||||
inner: UviBytes,
|
||||
protocol: ProtocolId,
|
||||
}
|
||||
|
||||
impl SSZOutboundCodec {
|
||||
pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self {
|
||||
let mut uvi_codec = UviBytes::default();
|
||||
uvi_codec.set_max_len(max_packet_size);
|
||||
|
||||
// this encoding only applies to ssz.
|
||||
debug_assert!(protocol.encoding.as_str() == "ssz");
|
||||
|
||||
SSZOutboundCodec {
|
||||
inner: uvi_codec,
|
||||
protocol,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Encoder for outbound streams: Encodes RPC Requests to peers
|
||||
impl Encoder for SSZOutboundCodec {
|
||||
type Item = RPCRequest;
|
||||
type Error = RPCError;
|
||||
|
||||
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
let bytes = match item {
|
||||
RPCRequest::Status(req) => req.as_ssz_bytes(),
|
||||
RPCRequest::Goodbye(req) => req.as_ssz_bytes(),
|
||||
RPCRequest::BlocksByRange(req) => req.as_ssz_bytes(),
|
||||
RPCRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(),
|
||||
};
|
||||
// length-prefix
|
||||
self.inner
|
||||
.encode(libp2p::bytes::Bytes::from(bytes), dst)
|
||||
.map_err(RPCError::from)
|
||||
}
|
||||
}
|
||||
|
||||
// Decoder for outbound streams: Decodes RPC responses from peers.
|
||||
//
|
||||
// The majority of the decoding has now been pushed upstream due to the changing specification.
|
||||
// We prefer to decode blocks and attestations with extra knowledge about the chain to perform
|
||||
// faster verification checks before decoding entire blocks/attestations.
|
||||
impl Decoder for SSZOutboundCodec {
|
||||
type Item = RPCResponse;
|
||||
type Error = RPCError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
if src.len() == 1 && src[0] == 0_u8 {
|
||||
// the object is empty. We return the empty object if this is the case
|
||||
// clear the buffer and return an empty object
|
||||
src.clear();
|
||||
match self.protocol.message_name.as_str() {
|
||||
RPC_STATUS => match self.protocol.version.as_str() {
|
||||
"1" => Err(RPCError::Custom(
|
||||
"Status stream terminated unexpectedly".into(),
|
||||
)), // cannot have an empty HELLO message. The stream has terminated unexpectedly
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
RPC_GOODBYE => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")),
|
||||
RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() {
|
||||
"1" => Ok(Some(RPCResponse::BlocksByRange(Vec::new()))),
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() {
|
||||
"1" => Ok(Some(RPCResponse::BlocksByRoot(Vec::new()))),
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
_ => unreachable!("Cannot negotiate an unknown protocol"),
|
||||
}
|
||||
} else {
|
||||
match self.inner.decode(src).map_err(RPCError::from) {
|
||||
Ok(Some(mut packet)) => {
|
||||
// take the bytes from the buffer
|
||||
let raw_bytes = packet.take();
|
||||
|
||||
match self.protocol.message_name.as_str() {
|
||||
RPC_STATUS => match self.protocol.version.as_str() {
|
||||
"1" => Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes(
|
||||
&raw_bytes,
|
||||
)?))),
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
RPC_GOODBYE => {
|
||||
Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response"))
|
||||
}
|
||||
RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() {
|
||||
"1" => Ok(Some(RPCResponse::BlocksByRange(raw_bytes.to_vec()))),
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() {
|
||||
"1" => Ok(Some(RPCResponse::BlocksByRoot(raw_bytes.to_vec()))),
|
||||
_ => unreachable!("Cannot negotiate an unknown version"),
|
||||
},
|
||||
_ => unreachable!("Cannot negotiate an unknown protocol"),
|
||||
}
|
||||
}
|
||||
Ok(None) => Ok(None), // waiting for more bytes
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl OutboundCodec for SSZOutboundCodec {
|
||||
type ErrorType = ErrorMessage;
|
||||
|
||||
fn decode_error(&mut self, src: &mut BytesMut) -> Result<Option<Self::ErrorType>, RPCError> {
|
||||
match self.inner.decode(src).map_err(RPCError::from) {
|
||||
Ok(Some(packet)) => Ok(Some(ErrorMessage::from_ssz_bytes(&packet)?)),
|
||||
Ok(None) => Ok(None),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,691 +0,0 @@
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![allow(clippy::cognitive_complexity)]
|
||||
|
||||
use super::methods::{RPCErrorResponse, RequestId};
|
||||
use super::protocol::{RPCError, RPCProtocol, RPCRequest};
|
||||
use super::RPCEvent;
|
||||
use crate::rpc::protocol::{InboundFramed, OutboundFramed};
|
||||
use core::marker::PhantomData;
|
||||
use fnv::FnvHashMap;
|
||||
use futures::prelude::*;
|
||||
use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeError};
|
||||
use libp2p::swarm::protocols_handler::{
|
||||
KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
|
||||
};
|
||||
use slog::{crit, debug, error, trace};
|
||||
use smallvec::SmallVec;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio::timer::{delay_queue, DelayQueue};
|
||||
|
||||
//TODO: Implement close() on the substream types to improve the poll code.
|
||||
//TODO: Implement check_timeout() on the substream types
|
||||
|
||||
/// The time (in seconds) before a substream that is awaiting a response from the user times out.
|
||||
pub const RESPONSE_TIMEOUT: u64 = 10;
|
||||
|
||||
/// The number of times to retry an outbound upgrade in the case of IO errors.
|
||||
const IO_ERROR_RETRIES: u8 = 3;
|
||||
|
||||
/// Inbound requests are given a sequential `RequestId` to keep track of.
|
||||
type InboundRequestId = RequestId;
|
||||
/// Outbound requests are associated with an id that is given by the application that sent the
|
||||
/// request.
|
||||
type OutboundRequestId = RequestId;
|
||||
|
||||
/// Implementation of `ProtocolsHandler` for the RPC protocol.
|
||||
pub struct RPCHandler<TSubstream>
|
||||
where
|
||||
TSubstream: AsyncRead + AsyncWrite,
|
||||
{
|
||||
/// The upgrade for inbound substreams.
|
||||
listen_protocol: SubstreamProtocol<RPCProtocol>,
|
||||
|
||||
/// If `Some`, something bad happened and we should shut down the handler with an error.
|
||||
pending_error: Option<(RequestId, ProtocolsHandlerUpgrErr<RPCError>)>,
|
||||
|
||||
/// Queue of events to produce in `poll()`.
|
||||
events_out: SmallVec<[RPCEvent; 4]>,
|
||||
|
||||
/// Queue of outbound substreams to open.
|
||||
dial_queue: SmallVec<[RPCEvent; 4]>,
|
||||
|
||||
/// Current number of concurrent outbound substreams being opened.
|
||||
dial_negotiated: u32,
|
||||
|
||||
/// Current inbound substreams awaiting processing.
|
||||
inbound_substreams:
|
||||
FnvHashMap<InboundRequestId, (InboundSubstreamState<TSubstream>, delay_queue::Key)>,
|
||||
|
||||
/// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout.
|
||||
inbound_substreams_delay: DelayQueue<InboundRequestId>,
|
||||
|
||||
/// Map of outbound substreams that need to be driven to completion. The `RequestId` is
|
||||
/// maintained by the application sending the request.
|
||||
outbound_substreams:
|
||||
FnvHashMap<OutboundRequestId, (OutboundSubstreamState<TSubstream>, delay_queue::Key)>,
|
||||
|
||||
/// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout.
|
||||
outbound_substreams_delay: DelayQueue<OutboundRequestId>,
|
||||
|
||||
/// Map of outbound items that are queued as the stream processes them.
|
||||
queued_outbound_items: FnvHashMap<RequestId, Vec<RPCErrorResponse>>,
|
||||
|
||||
/// Sequential Id for waiting substreams.
|
||||
current_substream_id: RequestId,
|
||||
|
||||
/// Maximum number of concurrent outbound substreams being opened. Value is never modified.
|
||||
max_dial_negotiated: u32,
|
||||
|
||||
/// Value to return from `connection_keep_alive`.
|
||||
keep_alive: KeepAlive,
|
||||
|
||||
/// After the given duration has elapsed, an inactive connection will shutdown.
|
||||
inactive_timeout: Duration,
|
||||
|
||||
/// Try to negotiate the outbound upgrade a few times if there is an IO error before reporting the request as failed.
|
||||
/// This keeps track of the number of attempts.
|
||||
outbound_io_error_retries: u8,
|
||||
|
||||
/// Logger for handling RPC streams
|
||||
log: slog::Logger,
|
||||
|
||||
/// Marker to pin the generic stream.
|
||||
_phantom: PhantomData<TSubstream>,
|
||||
}
|
||||
|
||||
/// State of an outbound substream. Either waiting for a response, or in the process of sending.
|
||||
pub enum InboundSubstreamState<TSubstream>
|
||||
where
|
||||
TSubstream: AsyncRead + AsyncWrite,
|
||||
{
|
||||
/// A response has been sent, pending writing and flush.
|
||||
ResponsePendingSend {
|
||||
/// The substream used to send the response
|
||||
substream: futures::sink::Send<InboundFramed<TSubstream>>,
|
||||
/// Whether a stream termination is requested. If true the stream will be closed after
|
||||
/// this send. Otherwise it will transition to an idle state until a stream termination is
|
||||
/// requested or a timeout is reached.
|
||||
closing: bool,
|
||||
},
|
||||
/// The response stream is idle and awaiting input from the application to send more chunked
|
||||
/// responses.
|
||||
ResponseIdle(InboundFramed<TSubstream>),
|
||||
/// The substream is attempting to shutdown.
|
||||
Closing(InboundFramed<TSubstream>),
|
||||
/// Temporary state during processing
|
||||
Poisoned,
|
||||
}
|
||||
|
||||
pub enum OutboundSubstreamState<TSubstream> {
|
||||
/// A request has been sent, and we are awaiting a response. This future is driven in the
|
||||
/// handler because GOODBYE requests can be handled and responses dropped instantly.
|
||||
RequestPendingResponse {
|
||||
/// The framed negotiated substream.
|
||||
substream: OutboundFramed<TSubstream>,
|
||||
/// Keeps track of the actual request sent.
|
||||
request: RPCRequest,
|
||||
},
|
||||
/// Closing an outbound substream>
|
||||
Closing(OutboundFramed<TSubstream>),
|
||||
/// Temporary state during processing
|
||||
Poisoned,
|
||||
}
|
||||
|
||||
impl<TSubstream> RPCHandler<TSubstream>
|
||||
where
|
||||
TSubstream: AsyncRead + AsyncWrite,
|
||||
{
|
||||
pub fn new(
|
||||
listen_protocol: SubstreamProtocol<RPCProtocol>,
|
||||
inactive_timeout: Duration,
|
||||
log: &slog::Logger,
|
||||
) -> Self {
|
||||
RPCHandler {
|
||||
listen_protocol,
|
||||
pending_error: None,
|
||||
events_out: SmallVec::new(),
|
||||
dial_queue: SmallVec::new(),
|
||||
dial_negotiated: 0,
|
||||
queued_outbound_items: FnvHashMap::default(),
|
||||
inbound_substreams: FnvHashMap::default(),
|
||||
outbound_substreams: FnvHashMap::default(),
|
||||
inbound_substreams_delay: DelayQueue::new(),
|
||||
outbound_substreams_delay: DelayQueue::new(),
|
||||
current_substream_id: 1,
|
||||
max_dial_negotiated: 8,
|
||||
keep_alive: KeepAlive::Yes,
|
||||
inactive_timeout,
|
||||
outbound_io_error_retries: 0,
|
||||
log: log.clone(),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of pending requests.
|
||||
pub fn pending_requests(&self) -> u32 {
|
||||
self.dial_negotiated + self.dial_queue.len() as u32
|
||||
}
|
||||
|
||||
/// Returns a reference to the listen protocol configuration.
|
||||
///
|
||||
/// > **Note**: If you modify the protocol, modifications will only applies to future inbound
|
||||
/// > substreams, not the ones already being negotiated.
|
||||
pub fn listen_protocol_ref(&self) -> &SubstreamProtocol<RPCProtocol> {
|
||||
&self.listen_protocol
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the listen protocol configuration.
|
||||
///
|
||||
/// > **Note**: If you modify the protocol, modifications will only applies to future inbound
|
||||
/// > substreams, not the ones already being negotiated.
|
||||
pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol<RPCProtocol> {
|
||||
&mut self.listen_protocol
|
||||
}
|
||||
|
||||
/// Opens an outbound substream with a request.
|
||||
pub fn send_request(&mut self, rpc_event: RPCEvent) {
|
||||
self.keep_alive = KeepAlive::Yes;
|
||||
|
||||
self.dial_queue.push(rpc_event);
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSubstream> ProtocolsHandler for RPCHandler<TSubstream>
|
||||
where
|
||||
TSubstream: AsyncRead + AsyncWrite,
|
||||
{
|
||||
type InEvent = RPCEvent;
|
||||
type OutEvent = RPCEvent;
|
||||
type Error = ProtocolsHandlerUpgrErr<RPCError>;
|
||||
type Substream = TSubstream;
|
||||
type InboundProtocol = RPCProtocol;
|
||||
type OutboundProtocol = RPCRequest;
|
||||
type OutboundOpenInfo = RPCEvent; // Keep track of the id and the request
|
||||
|
||||
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol> {
|
||||
self.listen_protocol.clone()
|
||||
}
|
||||
|
||||
fn inject_fully_negotiated_inbound(
|
||||
&mut self,
|
||||
out: <RPCProtocol as InboundUpgrade<TSubstream>>::Output,
|
||||
) {
|
||||
// update the keep alive timeout if there are no more remaining outbound streams
|
||||
if let KeepAlive::Until(_) = self.keep_alive {
|
||||
self.keep_alive = KeepAlive::Until(Instant::now() + self.inactive_timeout);
|
||||
}
|
||||
|
||||
let (req, substream) = out;
|
||||
// drop the stream and return a 0 id for goodbye "requests"
|
||||
if let r @ RPCRequest::Goodbye(_) = req {
|
||||
self.events_out.push(RPCEvent::Request(0, r));
|
||||
return;
|
||||
}
|
||||
|
||||
// New inbound request. Store the stream and tag the output.
|
||||
let delay_key = self.inbound_substreams_delay.insert(
|
||||
self.current_substream_id,
|
||||
Duration::from_secs(RESPONSE_TIMEOUT),
|
||||
);
|
||||
let awaiting_stream = InboundSubstreamState::ResponseIdle(substream);
|
||||
self.inbound_substreams
|
||||
.insert(self.current_substream_id, (awaiting_stream, delay_key));
|
||||
|
||||
self.events_out
|
||||
.push(RPCEvent::Request(self.current_substream_id, req));
|
||||
self.current_substream_id += 1;
|
||||
}
|
||||
|
||||
fn inject_fully_negotiated_outbound(
|
||||
&mut self,
|
||||
out: <RPCRequest as OutboundUpgrade<TSubstream>>::Output,
|
||||
rpc_event: Self::OutboundOpenInfo,
|
||||
) {
|
||||
self.dial_negotiated -= 1;
|
||||
|
||||
if self.dial_negotiated == 0
|
||||
&& self.dial_queue.is_empty()
|
||||
&& self.outbound_substreams.is_empty()
|
||||
{
|
||||
self.keep_alive = KeepAlive::Until(Instant::now() + self.inactive_timeout);
|
||||
} else {
|
||||
self.keep_alive = KeepAlive::Yes;
|
||||
}
|
||||
|
||||
// add the stream to substreams if we expect a response, otherwise drop the stream.
|
||||
match rpc_event {
|
||||
RPCEvent::Request(id, request) if request.expect_response() => {
|
||||
// new outbound request. Store the stream and tag the output.
|
||||
let delay_key = self
|
||||
.outbound_substreams_delay
|
||||
.insert(id, Duration::from_secs(RESPONSE_TIMEOUT));
|
||||
let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
|
||||
substream: out,
|
||||
request,
|
||||
};
|
||||
self.outbound_substreams
|
||||
.insert(id, (awaiting_stream, delay_key));
|
||||
}
|
||||
_ => { // a response is not expected, drop the stream for all other requests
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Note: If the substream has closed due to inactivity, or the substream is in the
|
||||
// wrong state a response will fail silently.
|
||||
fn inject_event(&mut self, rpc_event: Self::InEvent) {
|
||||
match rpc_event {
|
||||
RPCEvent::Request(_, _) => self.send_request(rpc_event),
|
||||
RPCEvent::Response(rpc_id, response) => {
|
||||
// check if the stream matching the response still exists
|
||||
// variables indicating if the response is an error response or a multi-part
|
||||
// response
|
||||
let res_is_error = response.is_error();
|
||||
let res_is_multiple = response.multiple_responses();
|
||||
|
||||
match self.inbound_substreams.get_mut(&rpc_id) {
|
||||
Some((substream_state, _)) => {
|
||||
match std::mem::replace(substream_state, InboundSubstreamState::Poisoned) {
|
||||
InboundSubstreamState::ResponseIdle(substream) => {
|
||||
// close the stream if there is no response
|
||||
if let RPCErrorResponse::StreamTermination(_) = response {
|
||||
trace!(self.log, "Stream termination sent. Ending the stream");
|
||||
*substream_state = InboundSubstreamState::Closing(substream);
|
||||
} else {
|
||||
// send the response
|
||||
// if it's a single rpc request or an error, close the stream after
|
||||
*substream_state = InboundSubstreamState::ResponsePendingSend {
|
||||
substream: substream.send(response),
|
||||
closing: !res_is_multiple | res_is_error, // close if an error or we are not expecting more responses
|
||||
};
|
||||
}
|
||||
}
|
||||
InboundSubstreamState::ResponsePendingSend { substream, closing }
|
||||
if res_is_multiple =>
|
||||
{
|
||||
// the stream is in use, add the request to a pending queue
|
||||
(*self
|
||||
.queued_outbound_items
|
||||
.entry(rpc_id)
|
||||
.or_insert_with(Vec::new))
|
||||
.push(response);
|
||||
|
||||
// return the state
|
||||
*substream_state = InboundSubstreamState::ResponsePendingSend {
|
||||
substream,
|
||||
closing,
|
||||
};
|
||||
}
|
||||
InboundSubstreamState::Closing(substream) => {
|
||||
*substream_state = InboundSubstreamState::Closing(substream);
|
||||
debug!(self.log, "Response not sent. Stream is closing"; "response" => format!("{}",response));
|
||||
}
|
||||
InboundSubstreamState::ResponsePendingSend { substream, .. } => {
|
||||
*substream_state = InboundSubstreamState::ResponsePendingSend {
|
||||
substream,
|
||||
closing: true,
|
||||
};
|
||||
error!(self.log, "Attempted sending multiple responses to a single response request");
|
||||
}
|
||||
InboundSubstreamState::Poisoned => {
|
||||
crit!(self.log, "Poisoned inbound substream");
|
||||
unreachable!("Coding error: Poisoned substream");
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
debug!(self.log, "Stream has expired. Response not sent"; "response" => format!("{}",response));
|
||||
}
|
||||
};
|
||||
}
|
||||
// We do not send errors as responses
|
||||
RPCEvent::Error(_, _) => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn inject_dial_upgrade_error(
|
||||
&mut self,
|
||||
request: Self::OutboundOpenInfo,
|
||||
error: ProtocolsHandlerUpgrErr<
|
||||
<Self::OutboundProtocol as OutboundUpgrade<Self::Substream>>::Error,
|
||||
>,
|
||||
) {
|
||||
if let ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(RPCError::IoError(_))) = error {
|
||||
self.outbound_io_error_retries += 1;
|
||||
if self.outbound_io_error_retries < IO_ERROR_RETRIES {
|
||||
self.send_request(request);
|
||||
return;
|
||||
}
|
||||
}
|
||||
self.outbound_io_error_retries = 0;
|
||||
// add the error
|
||||
let request_id = {
|
||||
if let RPCEvent::Request(id, _) = request {
|
||||
id
|
||||
} else {
|
||||
0
|
||||
}
|
||||
};
|
||||
if self.pending_error.is_none() {
|
||||
self.pending_error = Some((request_id, error));
|
||||
}
|
||||
}
|
||||
|
||||
fn connection_keep_alive(&self) -> KeepAlive {
|
||||
self.keep_alive
|
||||
}
|
||||
|
||||
fn poll(
|
||||
&mut self,
|
||||
) -> Poll<
|
||||
ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent>,
|
||||
Self::Error,
|
||||
> {
|
||||
if let Some((request_id, err)) = self.pending_error.take() {
|
||||
// Returning an error here will result in dropping the peer.
|
||||
match err {
|
||||
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(
|
||||
RPCError::InvalidProtocol(protocol_string),
|
||||
)) => {
|
||||
// Peer does not support the protocol.
|
||||
// TODO: We currently will not drop the peer, for maximal compatibility with
|
||||
// other clients testing their software. In the future, we will need to decide
|
||||
// which protocols are a bare minimum to support before kicking the peer.
|
||||
error!(self.log, "Peer doesn't support the RPC protocol"; "protocol" => protocol_string);
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Error(request_id, RPCError::InvalidProtocol(protocol_string)),
|
||||
)));
|
||||
}
|
||||
ProtocolsHandlerUpgrErr::Timeout | ProtocolsHandlerUpgrErr::Timer => {
|
||||
// negotiation timeout, mark the request as failed
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Error(
|
||||
request_id,
|
||||
RPCError::Custom("Protocol negotiation timeout".into()),
|
||||
),
|
||||
)));
|
||||
}
|
||||
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) => {
|
||||
// IO/Decode/Custom Error, report to the application
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Error(request_id, err),
|
||||
)));
|
||||
}
|
||||
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
|
||||
// Error during negotiation
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Error(request_id, RPCError::Custom(format!("{}", err))),
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// return any events that need to be reported
|
||||
if !self.events_out.is_empty() {
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
self.events_out.remove(0),
|
||||
)));
|
||||
} else {
|
||||
self.events_out.shrink_to_fit();
|
||||
}
|
||||
|
||||
// purge expired inbound substreams
|
||||
while let Async::Ready(Some(stream_id)) = self
|
||||
.inbound_substreams_delay
|
||||
.poll()
|
||||
.map_err(|_| ProtocolsHandlerUpgrErr::Timer)?
|
||||
{
|
||||
self.inbound_substreams.remove(stream_id.get_ref());
|
||||
}
|
||||
|
||||
// purge expired outbound substreams
|
||||
if let Async::Ready(Some(stream_id)) = self
|
||||
.outbound_substreams_delay
|
||||
.poll()
|
||||
.map_err(|_| ProtocolsHandlerUpgrErr::Timer)?
|
||||
{
|
||||
self.outbound_substreams.remove(stream_id.get_ref());
|
||||
// notify the user
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Error(
|
||||
stream_id.get_ref().clone(),
|
||||
RPCError::Custom("Stream timed out".into()),
|
||||
),
|
||||
)));
|
||||
}
|
||||
|
||||
// drive inbound streams that need to be processed
|
||||
for request_id in self.inbound_substreams.keys().copied().collect::<Vec<_>>() {
|
||||
// Drain all queued items until all messages have been processed for this stream
|
||||
// TODO Improve this code logic
|
||||
let mut new_items_to_send = true;
|
||||
while new_items_to_send {
|
||||
new_items_to_send = false;
|
||||
match self.inbound_substreams.entry(request_id) {
|
||||
Entry::Occupied(mut entry) => {
|
||||
match std::mem::replace(
|
||||
&mut entry.get_mut().0,
|
||||
InboundSubstreamState::Poisoned,
|
||||
) {
|
||||
InboundSubstreamState::ResponsePendingSend {
|
||||
mut substream,
|
||||
closing,
|
||||
} => {
|
||||
match substream.poll() {
|
||||
Ok(Async::Ready(raw_substream)) => {
|
||||
// completed the send
|
||||
|
||||
// close the stream if required
|
||||
if closing {
|
||||
entry.get_mut().0 =
|
||||
InboundSubstreamState::Closing(raw_substream)
|
||||
} else {
|
||||
// check for queued chunks and update the stream
|
||||
entry.get_mut().0 = apply_queued_responses(
|
||||
raw_substream,
|
||||
&mut self
|
||||
.queued_outbound_items
|
||||
.get_mut(&request_id),
|
||||
&mut new_items_to_send,
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(Async::NotReady) => {
|
||||
entry.get_mut().0 =
|
||||
InboundSubstreamState::ResponsePendingSend {
|
||||
substream,
|
||||
closing,
|
||||
};
|
||||
}
|
||||
Err(e) => {
|
||||
let delay_key = &entry.get().1;
|
||||
self.inbound_substreams_delay.remove(delay_key);
|
||||
entry.remove_entry();
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Error(0, e),
|
||||
)));
|
||||
}
|
||||
};
|
||||
}
|
||||
InboundSubstreamState::ResponseIdle(substream) => {
|
||||
entry.get_mut().0 = apply_queued_responses(
|
||||
substream,
|
||||
&mut self.queued_outbound_items.get_mut(&request_id),
|
||||
&mut new_items_to_send,
|
||||
);
|
||||
}
|
||||
InboundSubstreamState::Closing(mut substream) => {
|
||||
match substream.close() {
|
||||
Ok(Async::Ready(())) | Err(_) => {
|
||||
trace!(self.log, "Inbound stream dropped");
|
||||
let delay_key = &entry.get().1;
|
||||
self.queued_outbound_items.remove(&request_id);
|
||||
self.inbound_substreams_delay.remove(delay_key);
|
||||
entry.remove();
|
||||
} // drop the stream
|
||||
Ok(Async::NotReady) => {
|
||||
entry.get_mut().0 =
|
||||
InboundSubstreamState::Closing(substream);
|
||||
}
|
||||
}
|
||||
}
|
||||
InboundSubstreamState::Poisoned => {
|
||||
crit!(self.log, "Poisoned outbound substream");
|
||||
unreachable!("Coding Error: Inbound Substream is poisoned");
|
||||
}
|
||||
};
|
||||
}
|
||||
Entry::Vacant(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// drive outbound streams that need to be processed
|
||||
for request_id in self.outbound_substreams.keys().copied().collect::<Vec<_>>() {
|
||||
match self.outbound_substreams.entry(request_id) {
|
||||
Entry::Occupied(mut entry) => {
|
||||
match std::mem::replace(
|
||||
&mut entry.get_mut().0,
|
||||
OutboundSubstreamState::Poisoned,
|
||||
) {
|
||||
OutboundSubstreamState::RequestPendingResponse {
|
||||
mut substream,
|
||||
request,
|
||||
} => match substream.poll() {
|
||||
Ok(Async::Ready(Some(response))) => {
|
||||
if request.multiple_responses() {
|
||||
entry.get_mut().0 =
|
||||
OutboundSubstreamState::RequestPendingResponse {
|
||||
substream,
|
||||
request,
|
||||
};
|
||||
let delay_key = &entry.get().1;
|
||||
self.outbound_substreams_delay
|
||||
.reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT));
|
||||
} else {
|
||||
trace!(self.log, "Closing single stream request");
|
||||
// only expect a single response, close the stream
|
||||
entry.get_mut().0 = OutboundSubstreamState::Closing(substream);
|
||||
}
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Response(request_id, response),
|
||||
)));
|
||||
}
|
||||
Ok(Async::Ready(None)) => {
|
||||
// stream closed
|
||||
// if we expected multiple streams send a stream termination,
|
||||
// else report the stream terminating only.
|
||||
trace!(self.log, "RPC Response - stream closed by remote");
|
||||
// drop the stream
|
||||
let delay_key = &entry.get().1;
|
||||
self.outbound_substreams_delay.remove(delay_key);
|
||||
entry.remove_entry();
|
||||
// notify the application error
|
||||
if request.multiple_responses() {
|
||||
// return an end of stream result
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Response(
|
||||
request_id,
|
||||
RPCErrorResponse::StreamTermination(
|
||||
request.stream_termination(),
|
||||
),
|
||||
),
|
||||
)));
|
||||
} // else we return an error, stream should not have closed early.
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Error(
|
||||
request_id,
|
||||
RPCError::Custom(
|
||||
"Stream closed early. Empty response".into(),
|
||||
),
|
||||
),
|
||||
)));
|
||||
}
|
||||
Ok(Async::NotReady) => {
|
||||
entry.get_mut().0 = OutboundSubstreamState::RequestPendingResponse {
|
||||
substream,
|
||||
request,
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
// drop the stream
|
||||
let delay_key = &entry.get().1;
|
||||
self.outbound_substreams_delay.remove(delay_key);
|
||||
entry.remove_entry();
|
||||
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
|
||||
RPCEvent::Error(request_id, e),
|
||||
)));
|
||||
}
|
||||
},
|
||||
OutboundSubstreamState::Closing(mut substream) => match substream.close() {
|
||||
Ok(Async::Ready(())) | Err(_) => {
|
||||
trace!(self.log, "Outbound stream dropped");
|
||||
// drop the stream
|
||||
let delay_key = &entry.get().1;
|
||||
self.outbound_substreams_delay.remove(delay_key);
|
||||
entry.remove_entry();
|
||||
}
|
||||
Ok(Async::NotReady) => {
|
||||
entry.get_mut().0 = OutboundSubstreamState::Closing(substream);
|
||||
}
|
||||
},
|
||||
OutboundSubstreamState::Poisoned => {
|
||||
crit!(self.log, "Poisoned outbound substream");
|
||||
unreachable!("Coding Error: Outbound substream is poisoned")
|
||||
}
|
||||
}
|
||||
}
|
||||
Entry::Vacant(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
// establish outbound substreams
|
||||
if !self.dial_queue.is_empty() {
|
||||
if self.dial_negotiated < self.max_dial_negotiated {
|
||||
self.dial_negotiated += 1;
|
||||
let rpc_event = self.dial_queue.remove(0);
|
||||
if let RPCEvent::Request(id, req) = rpc_event {
|
||||
return Ok(Async::Ready(
|
||||
ProtocolsHandlerEvent::OutboundSubstreamRequest {
|
||||
protocol: SubstreamProtocol::new(req.clone()),
|
||||
info: RPCEvent::Request(id, req),
|
||||
},
|
||||
));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
self.dial_queue.shrink_to_fit();
|
||||
}
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
|
||||
// Check for new items to send to the peer and update the underlying stream
|
||||
fn apply_queued_responses<TSubstream: AsyncRead + AsyncWrite>(
|
||||
raw_substream: InboundFramed<TSubstream>,
|
||||
queued_outbound_items: &mut Option<&mut Vec<RPCErrorResponse>>,
|
||||
new_items_to_send: &mut bool,
|
||||
) -> InboundSubstreamState<TSubstream> {
|
||||
match queued_outbound_items {
|
||||
Some(ref mut queue) if !queue.is_empty() => {
|
||||
*new_items_to_send = true;
|
||||
// we have queued items
|
||||
match queue.remove(0) {
|
||||
RPCErrorResponse::StreamTermination(_) => {
|
||||
// close the stream if this is a stream termination
|
||||
InboundSubstreamState::Closing(raw_substream)
|
||||
}
|
||||
chunk => InboundSubstreamState::ResponsePendingSend {
|
||||
substream: raw_substream.send(chunk),
|
||||
closing: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// no items queued set to idle
|
||||
InboundSubstreamState::ResponseIdle(raw_substream)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,286 +0,0 @@
|
||||
//! Available RPC methods types and ids.
|
||||
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use types::{Epoch, Hash256, Slot};
|
||||
|
||||
/* Request/Response data structures for RPC methods */
|
||||
|
||||
/* Requests */
|
||||
|
||||
pub type RequestId = usize;
|
||||
|
||||
/// The STATUS request/response handshake message.
|
||||
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
|
||||
pub struct StatusMessage {
|
||||
/// The fork version of the chain we are broadcasting.
|
||||
pub fork_version: [u8; 4],
|
||||
|
||||
/// Latest finalized root.
|
||||
pub finalized_root: Hash256,
|
||||
|
||||
/// Latest finalized epoch.
|
||||
pub finalized_epoch: Epoch,
|
||||
|
||||
/// The latest block root.
|
||||
pub head_root: Hash256,
|
||||
|
||||
/// The slot associated with the latest block root.
|
||||
pub head_slot: Slot,
|
||||
}
|
||||
|
||||
/// The reason given for a `Goodbye` message.
|
||||
///
|
||||
/// Note: any unknown `u64::into(n)` will resolve to `Goodbye::Unknown` for any unknown `n`,
|
||||
/// however `GoodbyeReason::Unknown.into()` will go into `0_u64`. Therefore de-serializing then
|
||||
/// re-serializing may not return the same bytes.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum GoodbyeReason {
|
||||
/// This node has shutdown.
|
||||
ClientShutdown = 1,
|
||||
|
||||
/// Incompatible networks.
|
||||
IrrelevantNetwork = 2,
|
||||
|
||||
/// Error/fault in the RPC.
|
||||
Fault = 3,
|
||||
|
||||
/// Unknown reason.
|
||||
Unknown = 0,
|
||||
}
|
||||
|
||||
impl From<u64> for GoodbyeReason {
|
||||
fn from(id: u64) -> GoodbyeReason {
|
||||
match id {
|
||||
1 => GoodbyeReason::ClientShutdown,
|
||||
2 => GoodbyeReason::IrrelevantNetwork,
|
||||
3 => GoodbyeReason::Fault,
|
||||
_ => GoodbyeReason::Unknown,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<u64> for GoodbyeReason {
|
||||
fn into(self) -> u64 {
|
||||
self as u64
|
||||
}
|
||||
}
|
||||
|
||||
impl ssz::Encode for GoodbyeReason {
|
||||
fn is_ssz_fixed_len() -> bool {
|
||||
<u64 as ssz::Encode>::is_ssz_fixed_len()
|
||||
}
|
||||
|
||||
fn ssz_fixed_len() -> usize {
|
||||
<u64 as ssz::Encode>::ssz_fixed_len()
|
||||
}
|
||||
|
||||
fn ssz_bytes_len(&self) -> usize {
|
||||
0_u64.ssz_bytes_len()
|
||||
}
|
||||
|
||||
fn ssz_append(&self, buf: &mut Vec<u8>) {
|
||||
let conv: u64 = self.clone().into();
|
||||
conv.ssz_append(buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl ssz::Decode for GoodbyeReason {
|
||||
fn is_ssz_fixed_len() -> bool {
|
||||
<u64 as ssz::Decode>::is_ssz_fixed_len()
|
||||
}
|
||||
|
||||
fn ssz_fixed_len() -> usize {
|
||||
<u64 as ssz::Decode>::ssz_fixed_len()
|
||||
}
|
||||
|
||||
fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
|
||||
u64::from_ssz_bytes(bytes).and_then(|n| Ok(n.into()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Request a number of beacon block roots from a peer.
|
||||
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
|
||||
pub struct BlocksByRangeRequest {
|
||||
/// The hash tree root of a block on the requested chain.
|
||||
pub head_block_root: Hash256,
|
||||
|
||||
/// The starting slot to request blocks.
|
||||
pub start_slot: u64,
|
||||
|
||||
/// The number of blocks from the start slot.
|
||||
pub count: u64,
|
||||
|
||||
/// The step increment to receive blocks.
|
||||
///
|
||||
/// A value of 1 returns every block.
|
||||
/// A value of 2 returns every second block.
|
||||
/// A value of 3 returns every third block and so on.
|
||||
pub step: u64,
|
||||
}
|
||||
|
||||
/// Request a number of beacon block bodies from a peer.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct BlocksByRootRequest {
|
||||
/// The list of beacon block bodies being requested.
|
||||
pub block_roots: Vec<Hash256>,
|
||||
}
|
||||
|
||||
/* RPC Handling and Grouping */
|
||||
// Collection of enums and structs used by the Codecs to encode/decode RPC messages
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum RPCResponse {
|
||||
/// A HELLO message.
|
||||
Status(StatusMessage),
|
||||
|
||||
/// A response to a get BLOCKS_BY_RANGE request. A None response signifies the end of the
|
||||
/// batch.
|
||||
BlocksByRange(Vec<u8>),
|
||||
|
||||
/// A response to a get BLOCKS_BY_ROOT request.
|
||||
BlocksByRoot(Vec<u8>),
|
||||
}
|
||||
|
||||
/// Indicates which response is being terminated by a stream termination response.
|
||||
#[derive(Debug)]
|
||||
pub enum ResponseTermination {
|
||||
/// Blocks by range stream termination.
|
||||
BlocksByRange,
|
||||
|
||||
/// Blocks by root stream termination.
|
||||
BlocksByRoot,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum RPCErrorResponse {
|
||||
/// The response is a successful.
|
||||
Success(RPCResponse),
|
||||
|
||||
/// The response was invalid.
|
||||
InvalidRequest(ErrorMessage),
|
||||
|
||||
/// The response indicates a server error.
|
||||
ServerError(ErrorMessage),
|
||||
|
||||
/// There was an unknown response.
|
||||
Unknown(ErrorMessage),
|
||||
|
||||
/// Received a stream termination indicating which response is being terminated.
|
||||
StreamTermination(ResponseTermination),
|
||||
}
|
||||
|
||||
impl RPCErrorResponse {
|
||||
/// Used to encode the response in the codec.
|
||||
pub fn as_u8(&self) -> Option<u8> {
|
||||
match self {
|
||||
RPCErrorResponse::Success(_) => Some(0),
|
||||
RPCErrorResponse::InvalidRequest(_) => Some(1),
|
||||
RPCErrorResponse::ServerError(_) => Some(2),
|
||||
RPCErrorResponse::Unknown(_) => Some(255),
|
||||
RPCErrorResponse::StreamTermination(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Tells the codec whether to decode as an RPCResponse or an error.
|
||||
pub fn is_response(response_code: u8) -> bool {
|
||||
match response_code {
|
||||
0 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds an RPCErrorResponse from a response code and an ErrorMessage
|
||||
pub fn from_error(response_code: u8, err: ErrorMessage) -> Self {
|
||||
match response_code {
|
||||
1 => RPCErrorResponse::InvalidRequest(err),
|
||||
2 => RPCErrorResponse::ServerError(err),
|
||||
_ => RPCErrorResponse::Unknown(err),
|
||||
}
|
||||
}
|
||||
|
||||
/// Specifies which response allows for multiple chunks for the stream handler.
|
||||
pub fn multiple_responses(&self) -> bool {
|
||||
match self {
|
||||
RPCErrorResponse::Success(resp) => match resp {
|
||||
RPCResponse::Status(_) => false,
|
||||
RPCResponse::BlocksByRange(_) => true,
|
||||
RPCResponse::BlocksByRoot(_) => true,
|
||||
},
|
||||
RPCErrorResponse::InvalidRequest(_) => true,
|
||||
RPCErrorResponse::ServerError(_) => true,
|
||||
RPCErrorResponse::Unknown(_) => true,
|
||||
// Stream terminations are part of responses that have chunks
|
||||
RPCErrorResponse::StreamTermination(_) => true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if this response is an error. Used to terminate the stream after an error is
|
||||
/// sent.
|
||||
pub fn is_error(&self) -> bool {
|
||||
match self {
|
||||
RPCErrorResponse::Success(_) => false,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Encode, Decode, Debug)]
|
||||
pub struct ErrorMessage {
|
||||
/// The UTF-8 encoded Error message string.
|
||||
pub error_message: Vec<u8>,
|
||||
}
|
||||
|
||||
impl ErrorMessage {
|
||||
pub fn as_string(&self) -> String {
|
||||
String::from_utf8(self.error_message.clone()).unwrap_or_else(|_| "".into())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for StatusMessage {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Status Message: Fork Version: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}", self.fork_version, self.finalized_root, self.finalized_epoch, self.head_root, self.head_slot)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for RPCResponse {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
RPCResponse::Status(status) => write!(f, "{}", status),
|
||||
RPCResponse::BlocksByRange(_) => write!(f, "<BlocksByRange>"),
|
||||
RPCResponse::BlocksByRoot(_) => write!(f, "<BlocksByRoot>"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for RPCErrorResponse {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
RPCErrorResponse::Success(res) => write!(f, "{}", res),
|
||||
RPCErrorResponse::InvalidRequest(err) => write!(f, "Invalid Request: {:?}", err),
|
||||
RPCErrorResponse::ServerError(err) => write!(f, "Server Error: {:?}", err),
|
||||
RPCErrorResponse::Unknown(err) => write!(f, "Unknown Error: {:?}", err),
|
||||
RPCErrorResponse::StreamTermination(_) => write!(f, "Stream Termination"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for GoodbyeReason {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
GoodbyeReason::ClientShutdown => write!(f, "Client Shutdown"),
|
||||
GoodbyeReason::IrrelevantNetwork => write!(f, "Irrelevant Network"),
|
||||
GoodbyeReason::Fault => write!(f, "Fault"),
|
||||
GoodbyeReason::Unknown => write!(f, "Unknown Reason"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for BlocksByRangeRequest {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"Head Block Root: {}, Start Slot: {}, Count: {}, Step: {}",
|
||||
self.head_block_root, self.start_slot, self.count, self.step
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,164 +0,0 @@
|
||||
//! The Ethereum 2.0 Wire Protocol
|
||||
//!
|
||||
//! This protocol is a purpose built Ethereum 2.0 libp2p protocol. It's role is to facilitate
|
||||
//! direct peer-to-peer communication primarily for sending/receiving chain information for
|
||||
//! syncing.
|
||||
|
||||
use futures::prelude::*;
|
||||
use handler::RPCHandler;
|
||||
use libp2p::core::ConnectedPoint;
|
||||
use libp2p::swarm::{
|
||||
protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters,
|
||||
SubstreamProtocol,
|
||||
};
|
||||
use libp2p::{Multiaddr, PeerId};
|
||||
pub use methods::{
|
||||
ErrorMessage, RPCErrorResponse, RPCResponse, RequestId, ResponseTermination, StatusMessage,
|
||||
};
|
||||
pub use protocol::{RPCError, RPCProtocol, RPCRequest};
|
||||
use slog::o;
|
||||
use std::marker::PhantomData;
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
|
||||
pub(crate) mod codec;
|
||||
mod handler;
|
||||
pub mod methods;
|
||||
mod protocol;
|
||||
|
||||
/// The return type used in the behaviour and the resultant event from the protocols handler.
|
||||
#[derive(Debug)]
|
||||
pub enum RPCEvent {
|
||||
/// An inbound/outbound request for RPC protocol. The first parameter is a sequential
|
||||
/// id which tracks an awaiting substream for the response.
|
||||
Request(RequestId, RPCRequest),
|
||||
/// A response that is being sent or has been received from the RPC protocol. The first parameter returns
|
||||
/// that which was sent with the corresponding request, the second is a single chunk of a
|
||||
/// response.
|
||||
Response(RequestId, RPCErrorResponse),
|
||||
/// An Error occurred.
|
||||
Error(RequestId, RPCError),
|
||||
}
|
||||
|
||||
impl RPCEvent {
|
||||
pub fn id(&self) -> usize {
|
||||
match *self {
|
||||
RPCEvent::Request(id, _) => id,
|
||||
RPCEvent::Response(id, _) => id,
|
||||
RPCEvent::Error(id, _) => id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for RPCEvent {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
RPCEvent::Request(id, req) => write!(f, "RPC Request(id: {}, {})", id, req),
|
||||
RPCEvent::Response(id, res) => write!(f, "RPC Response(id: {}, {})", id, res),
|
||||
RPCEvent::Error(id, err) => write!(f, "RPC Request(id: {}, error: {:?})", id, err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
|
||||
/// logic.
|
||||
pub struct RPC<TSubstream> {
|
||||
/// Queue of events to processed.
|
||||
events: Vec<NetworkBehaviourAction<RPCEvent, RPCMessage>>,
|
||||
/// Pins the generic substream.
|
||||
marker: PhantomData<(TSubstream)>,
|
||||
/// Slog logger for RPC behaviour.
|
||||
log: slog::Logger,
|
||||
}
|
||||
|
||||
impl<TSubstream> RPC<TSubstream> {
|
||||
pub fn new(log: slog::Logger) -> Self {
|
||||
let log = log.new(o!("service" => "libp2p_rpc"));
|
||||
RPC {
|
||||
events: Vec::new(),
|
||||
marker: PhantomData,
|
||||
log,
|
||||
}
|
||||
}
|
||||
|
||||
/// Submits an RPC request.
|
||||
///
|
||||
/// The peer must be connected for this to succeed.
|
||||
pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) {
|
||||
self.events.push(NetworkBehaviourAction::SendEvent {
|
||||
peer_id,
|
||||
event: rpc_event,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSubstream> NetworkBehaviour for RPC<TSubstream>
|
||||
where
|
||||
TSubstream: AsyncRead + AsyncWrite,
|
||||
{
|
||||
type ProtocolsHandler = RPCHandler<TSubstream>;
|
||||
type OutEvent = RPCMessage;
|
||||
|
||||
fn new_handler(&mut self) -> Self::ProtocolsHandler {
|
||||
RPCHandler::new(
|
||||
SubstreamProtocol::new(RPCProtocol),
|
||||
Duration::from_secs(30),
|
||||
&self.log,
|
||||
)
|
||||
}
|
||||
|
||||
// handled by discovery
|
||||
fn addresses_of_peer(&mut self, _peer_id: &PeerId) -> Vec<Multiaddr> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) {
|
||||
// if initialised the connection, report this upwards to send the HELLO request
|
||||
if let ConnectedPoint::Dialer { .. } = connected_point {
|
||||
self.events.push(NetworkBehaviourAction::GenerateEvent(
|
||||
RPCMessage::PeerDialed(peer_id),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
fn inject_disconnected(&mut self, peer_id: &PeerId, _: ConnectedPoint) {
|
||||
// inform the rpc handler that the peer has disconnected
|
||||
self.events.push(NetworkBehaviourAction::GenerateEvent(
|
||||
RPCMessage::PeerDisconnected(peer_id.clone()),
|
||||
));
|
||||
}
|
||||
|
||||
fn inject_node_event(
|
||||
&mut self,
|
||||
source: PeerId,
|
||||
event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
|
||||
) {
|
||||
// send the event to the user
|
||||
self.events
|
||||
.push(NetworkBehaviourAction::GenerateEvent(RPCMessage::RPC(
|
||||
source, event,
|
||||
)));
|
||||
}
|
||||
|
||||
fn poll(
|
||||
&mut self,
|
||||
_: &mut impl PollParameters,
|
||||
) -> Async<
|
||||
NetworkBehaviourAction<
|
||||
<Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
|
||||
Self::OutEvent,
|
||||
>,
|
||||
> {
|
||||
if !self.events.is_empty() {
|
||||
return Async::Ready(self.events.remove(0));
|
||||
}
|
||||
Async::NotReady
|
||||
}
|
||||
}
|
||||
|
||||
/// Messages sent to the user from the RPC protocol.
|
||||
pub enum RPCMessage {
|
||||
RPC(PeerId, RPCEvent),
|
||||
PeerDialed(PeerId),
|
||||
PeerDisconnected(PeerId),
|
||||
}
|
||||
@@ -1,352 +0,0 @@
|
||||
#![allow(clippy::type_complexity)]
|
||||
|
||||
use super::methods::*;
|
||||
use crate::rpc::{
|
||||
codec::{
|
||||
base::{BaseInboundCodec, BaseOutboundCodec},
|
||||
ssz::{SSZInboundCodec, SSZOutboundCodec},
|
||||
InboundCodec, OutboundCodec,
|
||||
},
|
||||
methods::ResponseTermination,
|
||||
};
|
||||
use futures::{
|
||||
future::{self, FutureResult},
|
||||
sink, stream, Sink, Stream,
|
||||
};
|
||||
use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo};
|
||||
use std::io;
|
||||
use std::time::Duration;
|
||||
use tokio::codec::Framed;
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio::prelude::*;
|
||||
use tokio::timer::timeout;
|
||||
use tokio::util::FutureExt;
|
||||
use tokio_io_timeout::TimeoutStream;
|
||||
|
||||
/// The maximum bytes that can be sent across the RPC.
|
||||
const MAX_RPC_SIZE: usize = 4_194_304; // 4M
|
||||
/// The protocol prefix the RPC protocol id.
|
||||
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
|
||||
/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte).
|
||||
const TTFB_TIMEOUT: u64 = 5;
|
||||
/// The number of seconds to wait for the first bytes of a request once a protocol has been
|
||||
/// established before the stream is terminated.
|
||||
const REQUEST_TIMEOUT: u64 = 15;
|
||||
|
||||
/// Protocol names to be used.
|
||||
/// The Status protocol name.
|
||||
pub const RPC_STATUS: &str = "status";
|
||||
/// The Goodbye protocol name.
|
||||
pub const RPC_GOODBYE: &str = "goodbye";
|
||||
/// The `BlocksByRange` protocol name.
|
||||
pub const RPC_BLOCKS_BY_RANGE: &str = "beacon_blocks_by_range";
|
||||
/// The `BlocksByRoot` protocol name.
|
||||
pub const RPC_BLOCKS_BY_ROOT: &str = "beacon_blocks_by_root";
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RPCProtocol;
|
||||
|
||||
impl UpgradeInfo for RPCProtocol {
|
||||
type Info = ProtocolId;
|
||||
type InfoIter = Vec<Self::Info>;
|
||||
|
||||
fn protocol_info(&self) -> Self::InfoIter {
|
||||
vec![
|
||||
ProtocolId::new(RPC_STATUS, "1", "ssz"),
|
||||
ProtocolId::new(RPC_GOODBYE, "1", "ssz"),
|
||||
ProtocolId::new(RPC_BLOCKS_BY_RANGE, "1", "ssz"),
|
||||
ProtocolId::new(RPC_BLOCKS_BY_ROOT, "1", "ssz"),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
/// Tracks the types in a protocol id.
|
||||
#[derive(Clone)]
|
||||
pub struct ProtocolId {
|
||||
/// The rpc message type/name.
|
||||
pub message_name: String,
|
||||
|
||||
/// The version of the RPC.
|
||||
pub version: String,
|
||||
|
||||
/// The encoding of the RPC.
|
||||
pub encoding: String,
|
||||
|
||||
/// The protocol id that is formed from the above fields.
|
||||
protocol_id: String,
|
||||
}
|
||||
|
||||
/// An RPC protocol ID.
|
||||
impl ProtocolId {
|
||||
pub fn new(message_name: &str, version: &str, encoding: &str) -> Self {
|
||||
let protocol_id = format!(
|
||||
"{}/{}/{}/{}",
|
||||
PROTOCOL_PREFIX, message_name, version, encoding
|
||||
);
|
||||
|
||||
ProtocolId {
|
||||
message_name: message_name.into(),
|
||||
version: version.into(),
|
||||
encoding: encoding.into(),
|
||||
protocol_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProtocolName for ProtocolId {
|
||||
fn protocol_name(&self) -> &[u8] {
|
||||
self.protocol_id.as_bytes()
|
||||
}
|
||||
}
|
||||
|
||||
/* Inbound upgrade */
|
||||
|
||||
// The inbound protocol reads the request, decodes it and returns the stream to the protocol
|
||||
// handler to respond to once ready.
|
||||
|
||||
pub type InboundOutput<TSocket> = (RPCRequest, InboundFramed<TSocket>);
|
||||
pub type InboundFramed<TSocket> = Framed<TimeoutStream<upgrade::Negotiated<TSocket>>, InboundCodec>;
|
||||
type FnAndThen<TSocket> = fn(
|
||||
(Option<RPCRequest>, InboundFramed<TSocket>),
|
||||
) -> FutureResult<InboundOutput<TSocket>, RPCError>;
|
||||
type FnMapErr<TSocket> = fn(timeout::Error<(RPCError, InboundFramed<TSocket>)>) -> RPCError;
|
||||
|
||||
impl<TSocket> InboundUpgrade<TSocket> for RPCProtocol
|
||||
where
|
||||
TSocket: AsyncRead + AsyncWrite,
|
||||
{
|
||||
type Output = InboundOutput<TSocket>;
|
||||
type Error = RPCError;
|
||||
|
||||
type Future = future::AndThen<
|
||||
future::MapErr<
|
||||
timeout::Timeout<stream::StreamFuture<InboundFramed<TSocket>>>,
|
||||
FnMapErr<TSocket>,
|
||||
>,
|
||||
FutureResult<InboundOutput<TSocket>, RPCError>,
|
||||
FnAndThen<TSocket>,
|
||||
>;
|
||||
|
||||
fn upgrade_inbound(
|
||||
self,
|
||||
socket: upgrade::Negotiated<TSocket>,
|
||||
protocol: ProtocolId,
|
||||
) -> Self::Future {
|
||||
match protocol.encoding.as_str() {
|
||||
"ssz" | _ => {
|
||||
let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE));
|
||||
let codec = InboundCodec::SSZ(ssz_codec);
|
||||
let mut timed_socket = TimeoutStream::new(socket);
|
||||
timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT)));
|
||||
Framed::new(timed_socket, codec)
|
||||
.into_future()
|
||||
.timeout(Duration::from_secs(REQUEST_TIMEOUT))
|
||||
.map_err(RPCError::from as FnMapErr<TSocket>)
|
||||
.and_then({
|
||||
|(req, stream)| match req {
|
||||
Some(req) => futures::future::ok((req, stream)),
|
||||
None => futures::future::err(RPCError::Custom(
|
||||
"Stream terminated early".into(),
|
||||
)),
|
||||
}
|
||||
} as FnAndThen<TSocket>)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Outbound request */
|
||||
|
||||
// Combines all the RPC requests into a single enum to implement `UpgradeInfo` and
|
||||
// `OutboundUpgrade`
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum RPCRequest {
|
||||
Status(StatusMessage),
|
||||
Goodbye(GoodbyeReason),
|
||||
BlocksByRange(BlocksByRangeRequest),
|
||||
BlocksByRoot(BlocksByRootRequest),
|
||||
}
|
||||
|
||||
impl UpgradeInfo for RPCRequest {
|
||||
type Info = ProtocolId;
|
||||
type InfoIter = Vec<Self::Info>;
|
||||
|
||||
// add further protocols as we support more encodings/versions
|
||||
fn protocol_info(&self) -> Self::InfoIter {
|
||||
self.supported_protocols()
|
||||
}
|
||||
}
|
||||
|
||||
/// Implements the encoding per supported protocol for RPCRequest.
|
||||
impl RPCRequest {
|
||||
pub fn supported_protocols(&self) -> Vec<ProtocolId> {
|
||||
match self {
|
||||
// add more protocols when versions/encodings are supported
|
||||
RPCRequest::Status(_) => vec![ProtocolId::new(RPC_STATUS, "1", "ssz")],
|
||||
RPCRequest::Goodbye(_) => vec![ProtocolId::new(RPC_GOODBYE, "1", "ssz")],
|
||||
RPCRequest::BlocksByRange(_) => vec![ProtocolId::new(RPC_BLOCKS_BY_RANGE, "1", "ssz")],
|
||||
RPCRequest::BlocksByRoot(_) => vec![ProtocolId::new(RPC_BLOCKS_BY_ROOT, "1", "ssz")],
|
||||
}
|
||||
}
|
||||
|
||||
/* These functions are used in the handler for stream management */
|
||||
|
||||
/// This specifies whether a stream should remain open and await a response, given a request.
|
||||
/// A GOODBYE request has no response.
|
||||
pub fn expect_response(&self) -> bool {
|
||||
match self {
|
||||
RPCRequest::Status(_) => true,
|
||||
RPCRequest::Goodbye(_) => false,
|
||||
RPCRequest::BlocksByRange(_) => true,
|
||||
RPCRequest::BlocksByRoot(_) => true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns which methods expect multiple responses from the stream. If this is false and
|
||||
/// the stream terminates, an error is given.
|
||||
pub fn multiple_responses(&self) -> bool {
|
||||
match self {
|
||||
RPCRequest::Status(_) => false,
|
||||
RPCRequest::Goodbye(_) => false,
|
||||
RPCRequest::BlocksByRange(_) => true,
|
||||
RPCRequest::BlocksByRoot(_) => true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the `ResponseTermination` type associated with the request if a stream gets
|
||||
/// terminated.
|
||||
pub fn stream_termination(&self) -> ResponseTermination {
|
||||
match self {
|
||||
// this only gets called after `multiple_responses()` returns true. Therefore, only
|
||||
// variants that have `multiple_responses()` can have values.
|
||||
RPCRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
|
||||
RPCRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
|
||||
RPCRequest::Status(_) => unreachable!(),
|
||||
RPCRequest::Goodbye(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* RPC Response type - used for outbound upgrades */
|
||||
|
||||
/* Outbound upgrades */
|
||||
|
||||
pub type OutboundFramed<TSocket> = Framed<upgrade::Negotiated<TSocket>, OutboundCodec>;
|
||||
|
||||
impl<TSocket> OutboundUpgrade<TSocket> for RPCRequest
|
||||
where
|
||||
TSocket: AsyncRead + AsyncWrite,
|
||||
{
|
||||
type Output = OutboundFramed<TSocket>;
|
||||
type Error = RPCError;
|
||||
type Future = sink::Send<OutboundFramed<TSocket>>;
|
||||
fn upgrade_outbound(
|
||||
self,
|
||||
socket: upgrade::Negotiated<TSocket>,
|
||||
protocol: Self::Info,
|
||||
) -> Self::Future {
|
||||
match protocol.encoding.as_str() {
|
||||
"ssz" | _ => {
|
||||
let ssz_codec =
|
||||
BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, MAX_RPC_SIZE));
|
||||
let codec = OutboundCodec::SSZ(ssz_codec);
|
||||
Framed::new(socket, codec).send(self)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error in RPC Encoding/Decoding.
///
/// Lower-level failures are converted into this type via the `From` impls
/// in this module and surfaced through `Display`/`Error` for the handler.
#[derive(Debug)]
pub enum RPCError {
    /// Error when reading the packet from the socket.
    ReadError(upgrade::ReadOneError),
    /// Error when decoding the raw buffer from ssz.
    SSZDecodeError(ssz::DecodeError),
    /// Invalid Protocol ID.
    InvalidProtocol(&'static str),
    /// IO Error.
    IoError(io::Error),
    /// Waiting for a request/response timed out, or timer error'd.
    StreamTimeout,
    /// The peer returned a valid RPCErrorResponse but the response was an error.
    RPCErrorResponse,
    /// Custom message.
    Custom(String),
}
|
||||
|
||||
/// Wraps socket-read failures from the libp2p upgrade machinery.
impl From<upgrade::ReadOneError> for RPCError {
    #[inline]
    fn from(err: upgrade::ReadOneError) -> Self {
        RPCError::ReadError(err)
    }
}
|
||||
|
||||
/// Wraps SSZ deserialization failures.
impl From<ssz::DecodeError> for RPCError {
    #[inline]
    fn from(err: ssz::DecodeError) -> Self {
        RPCError::SSZDecodeError(err)
    }
}
|
||||
/// Maps tokio timer errors onto RPC errors.
impl<T> From<tokio::timer::timeout::Error<T>> for RPCError {
    fn from(err: tokio::timer::timeout::Error<T>) -> Self {
        // `is_elapsed` distinguishes a genuine deadline expiry from a
        // failure of the timer machinery itself.
        if err.is_elapsed() {
            RPCError::StreamTimeout
        } else {
            RPCError::Custom("Stream timer failed".into())
        }
    }
}
|
||||
|
||||
impl From<()> for RPCError {
|
||||
fn from(_err: ()) -> Self {
|
||||
RPCError::Custom("".into())
|
||||
}
|
||||
}
|
||||
|
||||
/// Wraps raw I/O failures.
impl From<io::Error> for RPCError {
    fn from(err: io::Error) -> Self {
        RPCError::IoError(err)
    }
}
|
||||
|
||||
// Error trait is required for `ProtocolsHandler`
|
||||
impl std::fmt::Display for RPCError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match *self {
|
||||
RPCError::ReadError(ref err) => write!(f, "Error while reading from socket: {}", err),
|
||||
RPCError::SSZDecodeError(ref err) => write!(f, "Error while decoding ssz: {:?}", err),
|
||||
RPCError::InvalidProtocol(ref err) => write!(f, "Invalid Protocol: {}", err),
|
||||
RPCError::IoError(ref err) => write!(f, "IO Error: {}", err),
|
||||
RPCError::RPCErrorResponse => write!(f, "RPC Response Error"),
|
||||
RPCError::StreamTimeout => write!(f, "Stream Timeout"),
|
||||
RPCError::Custom(ref err) => write!(f, "{}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for RPCError {
|
||||
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
|
||||
match *self {
|
||||
RPCError::ReadError(ref err) => Some(err),
|
||||
RPCError::SSZDecodeError(_) => None,
|
||||
RPCError::InvalidProtocol(_) => None,
|
||||
RPCError::IoError(ref err) => Some(err),
|
||||
RPCError::StreamTimeout => None,
|
||||
RPCError::RPCErrorResponse => None,
|
||||
RPCError::Custom(_) => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for RPCRequest {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
RPCRequest::Status(status) => write!(f, "Status Message: {}", status),
|
||||
RPCRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason),
|
||||
RPCRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
|
||||
RPCRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,416 +0,0 @@
|
||||
use crate::behaviour::{Behaviour, BehaviourEvent, PubsubMessage};
|
||||
use crate::config::*;
|
||||
use crate::error;
|
||||
use crate::multiaddr::Protocol;
|
||||
use crate::rpc::RPCEvent;
|
||||
use crate::NetworkConfig;
|
||||
use crate::{Topic, TopicHash};
|
||||
use futures::prelude::*;
|
||||
use futures::Stream;
|
||||
use libp2p::core::{
|
||||
identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, nodes::Substream,
|
||||
transport::boxed::Boxed, ConnectedPoint,
|
||||
};
|
||||
use libp2p::{core, secio, swarm::NetworkBehaviour, PeerId, Swarm, Transport};
|
||||
use slog::{crit, debug, error, info, trace, warn};
|
||||
use std::fs::File;
|
||||
use std::io::prelude::*;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use std::time::Duration;
|
||||
use tokio::timer::DelayQueue;
|
||||
|
||||
/// Boxed transport yielding the remote `PeerId` and a muxed connection.
type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>;
/// The Lighthouse behaviour, parameterised over the muxed substream type.
type Libp2pBehaviour = Behaviour<Substream<StreamMuxerBox>>;

/// File name (inside the network directory) that stores the node's private key.
const NETWORK_KEY_FILENAME: &str = "key";
/// The time in milliseconds to wait before banning a peer. This allows for any Goodbye messages to be
/// flushed and protocols to be negotiated.
const BAN_PEER_WAIT_TIMEOUT: u64 = 200;
|
||||
|
||||
/// The configuration and state of the libp2p components for the beacon node.
pub struct Service {
    /// The libp2p Swarm handler.
    //TODO: Make this private
    pub swarm: Swarm<Libp2pStream, Libp2pBehaviour>,

    /// This node's PeerId.
    pub local_peer_id: PeerId,

    /// A current list of peers to ban after a given timeout.
    peers_to_ban: DelayQueue<PeerId>,

    /// A list of timeouts after which peers become unbanned.
    peer_ban_timeout: DelayQueue<PeerId>,

    /// Indicates if the listening address has been verified and compared to the expected ENR.
    verified_listen_address: bool,

    /// The libp2p logger handle.
    pub log: slog::Logger,
}
|
||||
|
||||
impl Service {
|
||||
pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result<Self> {
|
||||
trace!(log, "Libp2p Service starting");
|
||||
|
||||
let local_keypair = if let Some(hex_bytes) = &config.secret_key_hex {
|
||||
keypair_from_hex(hex_bytes)?
|
||||
} else {
|
||||
load_private_key(&config, &log)
|
||||
};
|
||||
|
||||
// load the private key from CLI flag, disk or generate a new one
|
||||
let local_peer_id = PeerId::from(local_keypair.public());
|
||||
info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id));
|
||||
|
||||
let mut swarm = {
|
||||
// Set up the transport - tcp/ws with secio and mplex/yamux
|
||||
let transport = build_transport(local_keypair.clone());
|
||||
// Lighthouse network behaviour
|
||||
let behaviour = Behaviour::new(&local_keypair, &config, &log)?;
|
||||
Swarm::new(transport, behaviour, local_peer_id.clone())
|
||||
};
|
||||
|
||||
// listen on the specified address
|
||||
let listen_multiaddr = {
|
||||
let mut m = Multiaddr::from(config.listen_address);
|
||||
m.push(Protocol::Tcp(config.libp2p_port));
|
||||
m
|
||||
};
|
||||
|
||||
match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) {
|
||||
Ok(_) => {
|
||||
let mut log_address = listen_multiaddr;
|
||||
log_address.push(Protocol::P2p(local_peer_id.clone().into()));
|
||||
info!(log, "Listening established"; "address" => format!("{}", log_address));
|
||||
}
|
||||
Err(err) => {
|
||||
crit!(
|
||||
log,
|
||||
"Unable to listen on libp2p address";
|
||||
"error" => format!("{:?}", err),
|
||||
"listen_multiaddr" => format!("{}", listen_multiaddr),
|
||||
);
|
||||
return Err("Libp2p was unable to listen on the given listen address.".into());
|
||||
}
|
||||
};
|
||||
|
||||
// helper closure for dialing peers
|
||||
let mut dial_addr = |multiaddr: Multiaddr| {
|
||||
match Swarm::dial_addr(&mut swarm, multiaddr.clone()) {
|
||||
Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => format!("{}", multiaddr)),
|
||||
Err(err) => debug!(
|
||||
log,
|
||||
"Could not connect to peer"; "address" => format!("{}", multiaddr), "error" => format!("{:?}", err)
|
||||
),
|
||||
};
|
||||
};
|
||||
|
||||
// attempt to connect to user-input libp2p nodes
|
||||
for multiaddr in config.libp2p_nodes {
|
||||
dial_addr(multiaddr);
|
||||
}
|
||||
|
||||
// attempt to connect to any specified boot-nodes
|
||||
for bootnode_enr in config.boot_nodes {
|
||||
for multiaddr in bootnode_enr.multiaddr() {
|
||||
// ignore udp multiaddr if it exists
|
||||
let components = multiaddr.iter().collect::<Vec<_>>();
|
||||
if let Protocol::Udp(_) = components[1] {
|
||||
continue;
|
||||
}
|
||||
dial_addr(multiaddr);
|
||||
}
|
||||
}
|
||||
|
||||
// subscribe to default gossipsub topics
|
||||
let mut topics = vec![];
|
||||
|
||||
/* Here we subscribe to all the required gossipsub topics required for interop.
|
||||
* The topic builder adds the required prefix and postfix to the hardcoded topics that we
|
||||
* must subscribe to.
|
||||
*/
|
||||
let topic_builder = |topic| {
|
||||
Topic::new(format!(
|
||||
"/{}/{}/{}",
|
||||
TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX,
|
||||
))
|
||||
};
|
||||
topics.push(topic_builder(BEACON_BLOCK_TOPIC));
|
||||
topics.push(topic_builder(BEACON_ATTESTATION_TOPIC));
|
||||
topics.push(topic_builder(VOLUNTARY_EXIT_TOPIC));
|
||||
topics.push(topic_builder(PROPOSER_SLASHING_TOPIC));
|
||||
topics.push(topic_builder(ATTESTER_SLASHING_TOPIC));
|
||||
|
||||
// Add any topics specified by the user
|
||||
topics.append(&mut config.topics.iter().cloned().map(Topic::new).collect());
|
||||
|
||||
let mut subscribed_topics = vec![];
|
||||
for topic in topics {
|
||||
if swarm.subscribe(topic.clone()) {
|
||||
trace!(log, "Subscribed to topic"; "topic" => format!("{}", topic));
|
||||
subscribed_topics.push(topic);
|
||||
} else {
|
||||
warn!(log, "Could not subscribe to topic"; "topic" => format!("{}", topic));
|
||||
}
|
||||
}
|
||||
info!(log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::<Vec<String>>()));
|
||||
|
||||
Ok(Service {
|
||||
local_peer_id,
|
||||
swarm,
|
||||
peers_to_ban: DelayQueue::new(),
|
||||
peer_ban_timeout: DelayQueue::new(),
|
||||
verified_listen_address: false,
|
||||
log,
|
||||
})
|
||||
}
|
||||
|
||||
/// Adds a peer to be banned for a period of time, specified by a timeout.
|
||||
pub fn disconnect_and_ban_peer(&mut self, peer_id: PeerId, timeout: Duration) {
|
||||
error!(self.log, "Disconnecting and banning peer"; "peer_id" => format!("{:?}", peer_id), "timeout" => format!("{:?}", timeout));
|
||||
self.peers_to_ban.insert(
|
||||
peer_id.clone(),
|
||||
Duration::from_millis(BAN_PEER_WAIT_TIMEOUT),
|
||||
);
|
||||
self.peer_ban_timeout.insert(peer_id, timeout);
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for Service {
    type Item = Libp2pEvent;
    type Error = crate::error::Error;

    /// Polls the swarm and the two ban queues in order, translating
    /// behaviour events into `Libp2pEvent`s for the network service.
    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        // Drain swarm events first; the first translatable event is returned
        // immediately, leaving the rest for the next poll.
        loop {
            match self.swarm.poll() {
                Ok(Async::Ready(Some(event))) => match event {
                    BehaviourEvent::GossipMessage {
                        id,
                        source,
                        topics,
                        message,
                    } => {
                        trace!(self.log, "Gossipsub message received"; "service" => "Swarm");
                        return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage {
                            id,
                            source,
                            topics,
                            message,
                        })));
                    }
                    BehaviourEvent::RPC(peer_id, event) => {
                        return Ok(Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))));
                    }
                    BehaviourEvent::PeerDialed(peer_id) => {
                        return Ok(Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))));
                    }
                    BehaviourEvent::PeerDisconnected(peer_id) => {
                        return Ok(Async::Ready(Some(Libp2pEvent::PeerDisconnected(peer_id))));
                    }
                    BehaviourEvent::PeerSubscribed(peer_id, topic) => {
                        return Ok(Async::Ready(Some(Libp2pEvent::PeerSubscribed(
                            peer_id, topic,
                        ))));
                    }
                },
                Ok(Async::Ready(None)) => unreachable!("Swarm stream shouldn't end"),
                Ok(Async::NotReady) => break,
                _ => break,
            }
        }

        // check if peers need to be banned
        loop {
            match self.peers_to_ban.poll() {
                Ok(Async::Ready(Some(peer_id))) => {
                    let peer_id = peer_id.into_inner();
                    Swarm::ban_peer_id(&mut self.swarm, peer_id.clone());
                    // TODO: Correctly notify protocols of the disconnect
                    // TODO: Also remove peer from the DHT: https://github.com/sigp/lighthouse/issues/629
                    // A synthetic connected point is injected because the real
                    // one is not tracked here; the address is a placeholder.
                    let dummy_connected_point = ConnectedPoint::Dialer {
                        address: "/ip4/0.0.0.0"
                            .parse::<Multiaddr>()
                            .expect("valid multiaddr"),
                    };
                    self.swarm
                        .inject_disconnected(&peer_id, dummy_connected_point);
                    // inform the behaviour that the peer has been banned
                    self.swarm.peer_banned(peer_id);
                }
                Ok(Async::NotReady) | Ok(Async::Ready(None)) => break,
                Err(e) => {
                    warn!(self.log, "Peer banning queue failed"; "error" => format!("{:?}", e));
                }
            }
        }

        // un-ban peer if its timeout has expired
        loop {
            match self.peer_ban_timeout.poll() {
                Ok(Async::Ready(Some(peer_id))) => {
                    let peer_id = peer_id.into_inner();
                    debug!(self.log, "Peer has been unbanned"; "peer" => format!("{:?}", peer_id));
                    self.swarm.peer_unbanned(&peer_id);
                    Swarm::unban_peer_id(&mut self.swarm, peer_id);
                }
                Ok(Async::NotReady) | Ok(Async::Ready(None)) => break,
                Err(e) => {
                    warn!(self.log, "Peer banning timeout queue failed"; "error" => format!("{:?}", e));
                }
            }
        }

        // swarm is not ready
        // check to see if the address is different to the config. If so, update our ENR
        // (done once, on the first poll where a listener address is available)
        if !self.verified_listen_address {
            let multiaddr = Swarm::listeners(&self.swarm).next();
            if let Some(multiaddr) = multiaddr {
                self.verified_listen_address = true;
                if let Some(socket_addr) = multiaddr_to_socket_addr(multiaddr) {
                    self.swarm.update_local_enr_socket(socket_addr, true);
                }
            }
        }

        Ok(Async::NotReady)
    }
}
|
||||
|
||||
/// Converts a multiaddr to a `SocketAddr` if the multiaddr has the TCP/IP form. Libp2p currently
|
||||
/// only supports TCP, so the UDP case is currently ignored.
|
||||
fn multiaddr_to_socket_addr(multiaddr: &Multiaddr) -> Option<std::net::SocketAddr> {
|
||||
let protocols = multiaddr.iter().collect::<Vec<_>>();
|
||||
// assume the IP protocol
|
||||
match protocols[0] {
|
||||
Protocol::Ip4(address) => {
|
||||
if let Protocol::Tcp(port) = protocols[1] {
|
||||
Some(std::net::SocketAddr::new(address.into(), port))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
Protocol::Ip6(address) => {
|
||||
if let Protocol::Tcp(port) = protocols[1] {
|
||||
Some(std::net::SocketAddr::new(address.into(), port))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// The implementation supports TCP/IP, WebSockets over TCP/IP, secio as the encryption layer, and
|
||||
/// mplex or yamux as the multiplexing layer.
|
||||
fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox), Error> {
|
||||
// TODO: The Wire protocol currently doesn't specify encryption and this will need to be customised
|
||||
// in the future.
|
||||
let transport = libp2p::tcp::TcpConfig::new().nodelay(true);
|
||||
let transport = libp2p::dns::DnsConfig::new(transport);
|
||||
#[cfg(feature = "libp2p-websocket")]
|
||||
let transport = {
|
||||
let trans_clone = transport.clone();
|
||||
transport.or_transport(websocket::WsConfig::new(trans_clone))
|
||||
};
|
||||
transport
|
||||
.upgrade(core::upgrade::Version::V1)
|
||||
.authenticate(secio::SecioConfig::new(local_private_key))
|
||||
.multiplex(core::upgrade::SelectUpgrade::new(
|
||||
libp2p::yamux::Config::default(),
|
||||
libp2p::mplex::MplexConfig::new(),
|
||||
))
|
||||
.map(|(peer, muxer), _| (peer, core::muxing::StreamMuxerBox::new(muxer)))
|
||||
.timeout(Duration::from_secs(20))
|
||||
.timeout(Duration::from_secs(20))
|
||||
.map_err(|err| Error::new(ErrorKind::Other, err))
|
||||
.boxed()
|
||||
}
|
||||
|
||||
/// Events that can be obtained from polling the Libp2p Service.
pub enum Libp2pEvent {
    /// An RPC response request has been received on the swarm.
    RPC(PeerId, RPCEvent),
    /// Initiated the connection to a new peer.
    PeerDialed(PeerId),
    /// A peer has disconnected.
    PeerDisconnected(PeerId),
    /// Received pubsub message.
    PubsubMessage {
        /// Gossipsub message id (used for manual propagation).
        id: String,
        /// The peer the message arrived from (the propagation source).
        source: PeerId,
        /// Topics the message was published on.
        topics: Vec<TopicHash>,
        /// The decoded message payload.
        message: PubsubMessage,
    },
    /// Subscribed to peer for a topic hash.
    PeerSubscribed(PeerId, TopicHash),
}
|
||||
|
||||
fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
|
||||
let hex_bytes = if hex_bytes.starts_with("0x") {
|
||||
hex_bytes[2..].to_string()
|
||||
} else {
|
||||
hex_bytes.to_string()
|
||||
};
|
||||
|
||||
hex::decode(&hex_bytes)
|
||||
.map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into())
|
||||
.and_then(keypair_from_bytes)
|
||||
}
|
||||
|
||||
/// Interprets `bytes` as a raw secp256k1 secret key and wraps it in a
/// libp2p `Keypair`, or returns a descriptive error string.
fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
    // NOTE(review): `from_bytes` takes `&mut` — presumably so the library can
    // zero the input buffer; confirm against the libp2p identity API docs.
    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes)
        .map(|secret| {
            let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into();
            Keypair::Secp256k1(keypair)
        })
        .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into())
}
|
||||
|
||||
/// Loads a private key from disk. If this fails, a new key is
|
||||
/// generated and is then saved to disk.
|
||||
///
|
||||
/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5.
|
||||
fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
|
||||
// TODO: Currently using secp256k1 keypairs - currently required for discv5
|
||||
// check for key from disk
|
||||
let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME);
|
||||
if let Ok(mut network_key_file) = File::open(network_key_f.clone()) {
|
||||
let mut key_bytes: Vec<u8> = Vec::with_capacity(36);
|
||||
match network_key_file.read_to_end(&mut key_bytes) {
|
||||
Err(_) => debug!(log, "Could not read network key file"),
|
||||
Ok(_) => {
|
||||
// only accept secp256k1 keys for now
|
||||
if let Ok(secret_key) =
|
||||
libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes)
|
||||
{
|
||||
let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into();
|
||||
debug!(log, "Loaded network key from disk.");
|
||||
return Keypair::Secp256k1(kp);
|
||||
} else {
|
||||
debug!(log, "Network key file is not a valid secp256k1 key");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if a key could not be loaded from disk, generate a new one and save it
|
||||
let local_private_key = Keypair::generate_secp256k1();
|
||||
if let Keypair::Secp256k1(key) = local_private_key.clone() {
|
||||
let _ = std::fs::create_dir_all(&config.network_dir);
|
||||
match File::create(network_key_f.clone())
|
||||
.and_then(|mut f| f.write_all(&key.secret().to_bytes()))
|
||||
{
|
||||
Ok(_) => {
|
||||
debug!(log, "New network key generated and written to disk");
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
log,
|
||||
"Could not write node key to file: {:?}. error: {}", network_key_f, e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
local_private_key
|
||||
}
|
||||
@@ -1,116 +0,0 @@
|
||||
#![cfg(test)]
|
||||
use enr::Enr;
|
||||
use eth2_libp2p::Multiaddr;
|
||||
use eth2_libp2p::NetworkConfig;
|
||||
use eth2_libp2p::Service as LibP2PService;
|
||||
use slog::{debug, error, o, Drain};
|
||||
use std::time::Duration;
|
||||
|
||||
pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
|
||||
let decorator = slog_term::TermDecorator::new().build();
|
||||
let drain = slog_term::FullFormat::new(decorator).build().fuse();
|
||||
let drain = slog_async::Async::new(drain).build().fuse();
|
||||
|
||||
if enabled {
|
||||
slog::Logger::root(drain.filter_level(level).fuse(), o!())
|
||||
} else {
|
||||
slog::Logger::root(drain.filter(|_| false).fuse(), o!())
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds a `NetworkConfig` for a test node: `port` doubles as the tcp and
/// udp port and as the per-node network directory suffix.
pub fn build_config(
    port: u16,
    mut boot_nodes: Vec<Enr>,
    secret_key: Option<String>,
) -> NetworkConfig {
    let mut config = NetworkConfig::default();
    config.libp2p_port = port; // tcp port
    config.discovery_port = port; // udp port
    config.boot_nodes.append(&mut boot_nodes);
    config.secret_key_hex = secret_key;
    // Each node keeps its state in a port-named subdirectory.
    config.network_dir.push(port.to_string());
    // Reduce gossipsub heartbeat parameters (so tests settle quickly)
    config.gs_config.heartbeat_initial_delay = Duration::from_millis(500);
    config.gs_config.heartbeat_interval = Duration::from_millis(500);
    config
}
|
||||
|
||||
pub fn build_libp2p_instance(
|
||||
port: u16,
|
||||
boot_nodes: Vec<Enr>,
|
||||
secret_key: Option<String>,
|
||||
log: slog::Logger,
|
||||
) -> LibP2PService {
|
||||
let config = build_config(port, boot_nodes, secret_key);
|
||||
// launch libp2p service
|
||||
let libp2p_service = LibP2PService::new(config.clone(), log.clone()).unwrap();
|
||||
libp2p_service
|
||||
}
|
||||
|
||||
/// Returns a clone of the node's local ENR, read from its discovery behaviour.
#[allow(dead_code)]
pub fn get_enr(node: &LibP2PService) -> Enr {
    node.swarm.discovery().local_enr().clone()
}
|
||||
|
||||
// Returns `n` libp2p peers in fully connected topology.
|
||||
#[allow(dead_code)]
|
||||
pub fn build_full_mesh(log: slog::Logger, n: usize, start_port: Option<u16>) -> Vec<LibP2PService> {
|
||||
let base_port = start_port.unwrap_or(9000);
|
||||
let mut nodes: Vec<LibP2PService> = (base_port..base_port + n as u16)
|
||||
.map(|p| build_libp2p_instance(p, vec![], None, log.clone()))
|
||||
.collect();
|
||||
let multiaddrs: Vec<Multiaddr> = nodes
|
||||
.iter()
|
||||
.map(|x| get_enr(&x).multiaddr()[1].clone())
|
||||
.collect();
|
||||
|
||||
for i in 0..n {
|
||||
for j in i..n {
|
||||
if i != j {
|
||||
match libp2p::Swarm::dial_addr(&mut nodes[i].swarm, multiaddrs[j].clone()) {
|
||||
Ok(()) => debug!(log, "Connected"),
|
||||
Err(_) => error!(log, "Failed to connect"),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
nodes
|
||||
}
|
||||
|
||||
// Constructs a pair of nodes with separate loggers. The sender dials the receiver.
// This returns a (sender, receiver) pair.
#[allow(dead_code)]
pub fn build_node_pair(log: &slog::Logger, start_port: u16) -> (LibP2PService, LibP2PService) {
    let sender_log = log.new(o!("who" => "sender"));
    let receiver_log = log.new(o!("who" => "receiver"));

    // The pair occupies two consecutive ports.
    let mut sender = build_libp2p_instance(start_port, vec![], None, sender_log);
    let receiver = build_libp2p_instance(start_port + 1, vec![], None, receiver_log);

    // NOTE(review): index 1 of the ENR multiaddr list is assumed to be the
    // TCP address — confirm against `Enr::multiaddr`.
    let receiver_multiaddr = receiver.swarm.discovery().local_enr().clone().multiaddr()[1].clone();
    match libp2p::Swarm::dial_addr(&mut sender.swarm, receiver_multiaddr) {
        Ok(()) => debug!(log, "Sender dialed receiver"),
        Err(_) => error!(log, "Dialing failed"),
    };
    (sender, receiver)
}
|
||||
|
||||
// Returns `n` peers in a linear topology
|
||||
#[allow(dead_code)]
|
||||
pub fn build_linear(log: slog::Logger, n: usize, start_port: Option<u16>) -> Vec<LibP2PService> {
|
||||
let base_port = start_port.unwrap_or(9000);
|
||||
let mut nodes: Vec<LibP2PService> = (base_port..base_port + n as u16)
|
||||
.map(|p| build_libp2p_instance(p, vec![], None, log.clone()))
|
||||
.collect();
|
||||
let multiaddrs: Vec<Multiaddr> = nodes
|
||||
.iter()
|
||||
.map(|x| get_enr(&x).multiaddr()[1].clone())
|
||||
.collect();
|
||||
for i in 0..n - 1 {
|
||||
match libp2p::Swarm::dial_addr(&mut nodes[i].swarm, multiaddrs[i + 1].clone()) {
|
||||
Ok(()) => debug!(log, "Connected"),
|
||||
Err(_) => error!(log, "Failed to connect"),
|
||||
};
|
||||
}
|
||||
nodes
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
#![cfg(test)]
|
||||
use eth2_libp2p::*;
|
||||
use futures::prelude::*;
|
||||
use slog::{debug, Level};
|
||||
|
||||
mod common;
|
||||
|
||||
/* Gossipsub tests */
|
||||
// Note: The aim of these tests is not to test the robustness of the gossip network
|
||||
// but to check if the gossipsub implementation is behaving according to the specifications.
|
||||
|
||||
// Test if gossipsub message are forwarded by nodes with a simple linear topology.
|
||||
//
|
||||
// Topology used in test
|
||||
//
|
||||
// node1 <-> node2 <-> node3 ..... <-> node(n-1) <-> node(n)
|
||||
|
||||
#[test]
fn test_gossipsub_forward() {
    // set up the logging. The level and enabled or not
    let log = common::build_log(Level::Info, false);

    // Build a 20-node chain; a block message published at one end must be
    // forwarded hop-by-hop to every other node.
    let num_nodes = 20;
    let mut nodes = common::build_linear(log.clone(), num_nodes, Some(19000));
    let mut received_count = 0;
    let pubsub_message = PubsubMessage::Block(vec![0; 4]);
    let publishing_topic: String = "/eth2/beacon_block/ssz".into();
    let mut subscribed_count = 0;
    tokio::run(futures::future::poll_fn(move || -> Result<_, ()> {
        // Poll every node to exhaustion on each wake-up.
        for node in nodes.iter_mut() {
            loop {
                match node.poll().unwrap() {
                    Async::Ready(Some(Libp2pEvent::PubsubMessage {
                        topics,
                        message,
                        source,
                        id,
                    })) => {
                        assert_eq!(topics.len(), 1);
                        // Assert topic is the published topic
                        assert_eq!(
                            topics.first().unwrap(),
                            &TopicHash::from_raw(publishing_topic.clone())
                        );
                        // Assert message received is the correct one
                        assert_eq!(message, pubsub_message.clone());
                        received_count += 1;
                        // Since `propagate_message` is false, need to propagate manually
                        node.swarm.propagate_message(&source, id);
                        // Test should succeed if all nodes except the publisher receive the message
                        if received_count == num_nodes - 1 {
                            debug!(log.clone(), "Received message at {} nodes", num_nodes - 1);
                            return Ok(Async::Ready(()));
                        }
                    }
                    Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) => {
                        // Received topics is one of subscribed eth2 topics
                        assert!(topic.clone().into_string().starts_with("/eth2/"));
                        // Publish on beacon block topic
                        if topic == TopicHash::from_raw("/eth2/beacon_block/ssz") {
                            subscribed_count += 1;
                            // Every node except the corner nodes are connected to 2 nodes.
                            // Publish only once the whole chain has exchanged
                            // its subscriptions.
                            if subscribed_count == (num_nodes * 2) - 2 {
                                node.swarm.publish(
                                    &vec![Topic::new(topic.into_string())],
                                    pubsub_message.clone(),
                                );
                            }
                        }
                    }
                    _ => break,
                }
            }
        }
        Ok(Async::NotReady)
    }))
}
|
||||
|
||||
// Test publishing of a message with a full mesh for the topic
// Not very useful but this is the bare minimum functionality.
#[test]
fn test_gossipsub_full_mesh_publish() {
    // set up the logging. The level and enabled or not
    let log = common::build_log(Level::Info, false);

    // Fully-connected topology; the last node is split off as the publisher.
    let num_nodes = 20;
    let mut nodes = common::build_full_mesh(log, num_nodes, Some(11320));
    let mut publishing_node = nodes.pop().unwrap();
    let pubsub_message = PubsubMessage::Block(vec![0; 4]);
    let publishing_topic: String = "/eth2/beacon_block/ssz".into();
    let mut subscribed_count = 0;
    let mut received_count = 0;
    tokio::run(futures::future::poll_fn(move || -> Result<_, ()> {
        // Drain events on every subscriber node.
        for node in nodes.iter_mut() {
            loop {
                match node.poll().unwrap() {
                    Async::Ready(Some(Libp2pEvent::PubsubMessage {
                        topics, message, ..
                    })) => {
                        assert_eq!(topics.len(), 1);
                        // Assert topic is the published topic
                        assert_eq!(
                            topics.first().unwrap(),
                            &TopicHash::from_raw(publishing_topic.clone())
                        );
                        // Assert message received is the correct one
                        assert_eq!(message, pubsub_message.clone());
                        received_count += 1;
                        // Success once every non-publisher node has received it.
                        if received_count == num_nodes - 1 {
                            return Ok(Async::Ready(()));
                        }
                    }
                    _ => break,
                }
            }
        }
        // Drain events on the publisher; it publishes once every peer has
        // subscribed to the beacon block topic.
        loop {
            match publishing_node.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) => {
                    // Received topics is one of subscribed eth2 topics
                    assert!(topic.clone().into_string().starts_with("/eth2/"));
                    // Publish on beacon block topic
                    if topic == TopicHash::from_raw("/eth2/beacon_block/ssz") {
                        subscribed_count += 1;
                        if subscribed_count == num_nodes - 1 {
                            publishing_node.swarm.publish(
                                &vec![Topic::new(topic.into_string())],
                                pubsub_message.clone(),
                            );
                        }
                    }
                }
                _ => break,
            }
        }
        Ok(Async::NotReady)
    }))
}
|
||||
@@ -1,575 +0,0 @@
|
||||
#![cfg(test)]
|
||||
use eth2_libp2p::rpc::methods::*;
|
||||
use eth2_libp2p::rpc::*;
|
||||
use eth2_libp2p::{Libp2pEvent, RPCEvent};
|
||||
use slog::{warn, Level};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
use tokio::prelude::*;
|
||||
use types::{Epoch, Hash256, Slot};
|
||||
|
||||
mod common;
|
||||
|
||||
#[test]
// Tests the STATUS RPC message
fn test_status_rpc() {
    // set up the logging. The level and enabled logging or not
    let log_level = Level::Trace;
    let enable_logging = false;

    let log = common::build_log(log_level, enable_logging);

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log, 10500);

    // Dummy STATUS RPC message
    let rpc_request = RPCRequest::Status(StatusMessage {
        fork_version: [0; 4],
        finalized_root: Hash256::from_low_u64_be(0),
        finalized_epoch: Epoch::new(1),
        head_root: Hash256::from_low_u64_be(0),
        head_slot: Slot::new(1),
    });

    // Dummy STATUS RPC message
    let rpc_response = RPCResponse::Status(StatusMessage {
        fork_version: [0; 4],
        finalized_root: Hash256::from_low_u64_be(0),
        finalized_epoch: Epoch::new(1),
        head_root: Hash256::from_low_u64_be(0),
        head_slot: Slot::new(1),
    });

    // Clones moved into the sender future's closure.
    let sender_request = rpc_request.clone();
    let sender_log = log.clone();
    let sender_response = rpc_response.clone();

    // build the sender future: dials, sends the request with id 1, and
    // completes once the matching successful response arrives.
    let sender_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match sender.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => {
                    // Send a STATUS message
                    warn!(sender_log, "Sending RPC");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone()));
                }
                Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event {
                    // Should receive the RPC response
                    RPCEvent::Response(id, response @ RPCErrorResponse::Success(_)) => {
                        warn!(sender_log, "Sender Received");
                        assert_eq!(id, 1);

                        // Unwrap the success payload (the binding pattern
                        // above guarantees the Success variant).
                        let response = {
                            match response {
                                RPCErrorResponse::Success(r) => r,
                                _ => unreachable!(),
                            }
                        };
                        assert_eq!(response, sender_response.clone());

                        warn!(sender_log, "Sender Completed");
                        return Ok(Async::Ready(true));
                    }
                    _ => panic!("Received invalid RPC message"),
                },
                Async::Ready(Some(_)) => (),
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            };
        }
    });

    // build the receiver future: answers the request with the canned response.
    let receiver_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match receiver.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))) => match event {
                    // Should receive sent RPC request
                    RPCEvent::Request(id, request) => {
                        assert_eq!(id, 1);
                        assert_eq!(rpc_request.clone(), request);

                        // send the response
                        warn!(log, "Receiver Received");
                        receiver.swarm.send_rpc(
                            peer_id,
                            RPCEvent::Response(id, RPCErrorResponse::Success(rpc_response.clone())),
                        );
                    }
                    _ => panic!("Received invalid RPC message"),
                },
                Async::Ready(Some(_)) => (),
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            }
        }
    });

    // execute the futures and check the result: the shared flag records
    // whether the sender side completed before the 1s timeout.
    let test_result = Arc::new(Mutex::new(false));
    let error_result = test_result.clone();
    let thread_result = test_result.clone();
    tokio::run(
        sender_future
            .select(receiver_future)
            .timeout(Duration::from_millis(1000))
            .map_err(move |_| *error_result.lock().unwrap() = false)
            .map(move |result| {
                *thread_result.lock().unwrap() = result.0;
                ()
            }),
    );
    assert!(*test_result.lock().unwrap());
}
|
||||
|
||||
#[test]
// Tests a streamed BlocksByRange RPC Message
fn test_blocks_by_range_chunked_rpc() {
    // set up the logging. The level and enabled logging or not
    let log_level = Level::Trace;
    let enable_logging = false;

    // number of response chunks the receiver streams back before terminating
    let messages_to_send = 10;

    let log = common::build_log(log_level, enable_logging);

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log, 10505);

    // BlocksByRange Request
    let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest {
        head_block_root: Hash256::from_low_u64_be(0),
        start_slot: 0,
        count: messages_to_send,
        step: 0,
    });

    // BlocksByRange Response: every chunk sent (and therefore expected) is identical
    let rpc_response = RPCResponse::BlocksByRange(vec![13, 13, 13]);

    let sender_request = rpc_request.clone();
    let sender_log = log.clone();
    let sender_response = rpc_response.clone();

    // keep count of the number of messages received
    let messages_received = Arc::new(Mutex::new(0));
    // build the sender future
    let sender_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match sender.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => {
                    // Send a BlocksByRange request
                    warn!(sender_log, "Sender sending RPC request");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone()));
                }
                Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event {
                    // Should receive the RPC response
                    RPCEvent::Response(id, response) => {
                        warn!(sender_log, "Sender received a response");
                        // the response id must match the request id we sent (1)
                        assert_eq!(id, 1);
                        match response {
                            RPCErrorResponse::Success(res) => {
                                assert_eq!(res, sender_response.clone());
                                *messages_received.lock().unwrap() += 1;
                                warn!(sender_log, "Chunk received");
                            }
                            RPCErrorResponse::StreamTermination(
                                ResponseTermination::BlocksByRange,
                            ) => {
                                // should be exactly `messages_to_send` chunks before terminating
                                assert_eq!(*messages_received.lock().unwrap(), messages_to_send);
                                // end the test
                                return Ok(Async::Ready(true));
                            }
                            _ => panic!("Invalid RPC received"),
                        }
                    }
                    _ => panic!("Received invalid RPC message"),
                },
                // ignore unrelated libp2p events and keep polling
                Async::Ready(Some(_)) => {}
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            };
        }
    });

    // build the receiver future
    let receiver_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match receiver.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))) => match event {
                    // Should receive the sent RPC request
                    RPCEvent::Request(id, request) => {
                        assert_eq!(id, 1);
                        assert_eq!(rpc_request.clone(), request);

                        // send the response
                        warn!(log, "Receiver got request");

                        // stream `messages_to_send` identical chunks back
                        for _ in 1..=messages_to_send {
                            receiver.swarm.send_rpc(
                                peer_id.clone(),
                                RPCEvent::Response(
                                    id,
                                    RPCErrorResponse::Success(rpc_response.clone()),
                                ),
                            );
                        }
                        // send the stream termination
                        receiver.swarm.send_rpc(
                            peer_id,
                            RPCEvent::Response(
                                id,
                                RPCErrorResponse::StreamTermination(
                                    ResponseTermination::BlocksByRange,
                                ),
                            ),
                        );
                    }
                    _ => panic!("Received invalid RPC message"),
                },
                Async::Ready(Some(_)) => (),
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            }
        }
    });

    // execute the futures and check the result
    // `test_result` is set to true only when the sender observed the full stream
    let test_result = Arc::new(Mutex::new(false));
    let error_result = test_result.clone();
    let thread_result = test_result.clone();
    tokio::run(
        sender_future
            .select(receiver_future)
            // a timeout keeps a stalled exchange from hanging the test run
            .timeout(Duration::from_millis(1000))
            .map_err(move |_| *error_result.lock().unwrap() = false)
            .map(move |result| {
                *thread_result.lock().unwrap() = result.0;
                ()
            }),
    );
    assert!(*test_result.lock().unwrap());
}
|
||||
|
||||
#[test]
// Tests an empty response to a BlocksByRange RPC Message
fn test_blocks_by_range_single_empty_rpc() {
    // set up the logging. The level and enabled logging or not
    let log_level = Level::Trace;
    let enable_logging = false;

    let log = common::build_log(log_level, enable_logging);

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log, 10510);

    // BlocksByRange Request
    let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest {
        head_block_root: Hash256::from_low_u64_be(0),
        start_slot: 0,
        count: 10,
        step: 0,
    });

    // BlocksByRange Response: a single chunk with an empty payload
    let rpc_response = RPCResponse::BlocksByRange(vec![]);

    let sender_request = rpc_request.clone();
    let sender_log = log.clone();
    let sender_response = rpc_response.clone();

    // keep count of the number of messages received
    let messages_received = Arc::new(Mutex::new(0));
    // build the sender future
    let sender_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match sender.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => {
                    // Send a BlocksByRange request
                    warn!(sender_log, "Sender sending RPC request");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone()));
                }
                Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event {
                    // Should receive the RPC response
                    RPCEvent::Response(id, response) => {
                        warn!(sender_log, "Sender received a response");
                        // the response id must match the request id we sent (1)
                        assert_eq!(id, 1);
                        match response {
                            RPCErrorResponse::Success(res) => {
                                assert_eq!(res, sender_response.clone());
                                *messages_received.lock().unwrap() += 1;
                                warn!(sender_log, "Chunk received");
                            }
                            RPCErrorResponse::StreamTermination(
                                ResponseTermination::BlocksByRange,
                            ) => {
                                // should be exactly one (empty) chunk before terminating
                                assert_eq!(*messages_received.lock().unwrap(), 1);
                                // end the test
                                return Ok(Async::Ready(true));
                            }
                            _ => panic!("Invalid RPC received"),
                        }
                    }
                    m => panic!("Received invalid RPC message: {}", m),
                },
                // ignore unrelated libp2p events and keep polling
                Async::Ready(Some(_)) => {}
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            };
        }
    });

    // build the receiver future
    let receiver_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match receiver.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))) => match event {
                    // Should receive the sent RPC request
                    RPCEvent::Request(id, request) => {
                        assert_eq!(id, 1);
                        assert_eq!(rpc_request.clone(), request);

                        // send the response
                        warn!(log, "Receiver got request");

                        // single empty chunk ...
                        receiver.swarm.send_rpc(
                            peer_id.clone(),
                            RPCEvent::Response(id, RPCErrorResponse::Success(rpc_response.clone())),
                        );
                        // send the stream termination
                        receiver.swarm.send_rpc(
                            peer_id,
                            RPCEvent::Response(
                                id,
                                RPCErrorResponse::StreamTermination(
                                    ResponseTermination::BlocksByRange,
                                ),
                            ),
                        );
                    }
                    _ => panic!("Received invalid RPC message"),
                },
                Async::Ready(Some(_)) => (),
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            }
        }
    });

    // execute the futures and check the result
    let test_result = Arc::new(Mutex::new(false));
    let error_result = test_result.clone();
    let thread_result = test_result.clone();
    tokio::run(
        sender_future
            .select(receiver_future)
            // a timeout keeps a stalled exchange from hanging the test run
            .timeout(Duration::from_millis(1000))
            .map_err(move |_| *error_result.lock().unwrap() = false)
            .map(move |result| {
                *thread_result.lock().unwrap() = result.0;
                ()
            }),
    );
    assert!(*test_result.lock().unwrap());
}
|
||||
|
||||
#[test]
// Tests a streamed, chunked BlocksByRoot RPC Message
fn test_blocks_by_root_chunked_rpc() {
    // set up the logging. The level and enabled logging or not
    let log_level = Level::Trace;
    let enable_logging = false;

    // number of response chunks the receiver streams back before terminating
    let messages_to_send = 3;

    let log = common::build_log(log_level, enable_logging);

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log, 10515);

    // BlocksByRoot Request
    let rpc_request = RPCRequest::BlocksByRoot(BlocksByRootRequest {
        block_roots: vec![Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0)],
    });

    // BlocksByRoot Response: every chunk sent (and therefore expected) is identical
    let rpc_response = RPCResponse::BlocksByRoot(vec![13, 13, 13]);

    let sender_request = rpc_request.clone();
    let sender_log = log.clone();
    let sender_response = rpc_response.clone();

    // keep count of the number of messages received
    let messages_received = Arc::new(Mutex::new(0));
    // build the sender future
    let sender_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match sender.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => {
                    // Send a BlocksByRoot request
                    warn!(sender_log, "Sender sending RPC request");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone()));
                }
                Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event {
                    // Should receive the RPC response
                    RPCEvent::Response(id, response) => {
                        warn!(sender_log, "Sender received a response");
                        // the response id must match the request id we sent (1)
                        assert_eq!(id, 1);
                        match response {
                            RPCErrorResponse::Success(res) => {
                                assert_eq!(res, sender_response.clone());
                                *messages_received.lock().unwrap() += 1;
                                warn!(sender_log, "Chunk received");
                            }
                            RPCErrorResponse::StreamTermination(
                                ResponseTermination::BlocksByRoot,
                            ) => {
                                // should be exactly `messages_to_send` (3) chunks before terminating
                                assert_eq!(*messages_received.lock().unwrap(), messages_to_send);
                                // end the test
                                return Ok(Async::Ready(true));
                            }
                            m => panic!("Invalid RPC received: {}", m),
                        }
                    }
                    m => panic!("Received invalid RPC message: {}", m),
                },
                // ignore unrelated libp2p events and keep polling
                Async::Ready(Some(_)) => {}
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            };
        }
    });

    // build the receiver future
    let receiver_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match receiver.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))) => match event {
                    // Should receive the sent RPC request
                    RPCEvent::Request(id, request) => {
                        assert_eq!(id, 1);
                        assert_eq!(rpc_request.clone(), request);

                        // send the response
                        warn!(log, "Receiver got request");

                        // stream `messages_to_send` identical chunks back
                        for _ in 1..=messages_to_send {
                            receiver.swarm.send_rpc(
                                peer_id.clone(),
                                RPCEvent::Response(
                                    id,
                                    RPCErrorResponse::Success(rpc_response.clone()),
                                ),
                            );
                        }
                        // send the stream termination
                        receiver.swarm.send_rpc(
                            peer_id,
                            RPCEvent::Response(
                                id,
                                RPCErrorResponse::StreamTermination(
                                    ResponseTermination::BlocksByRoot,
                                ),
                            ),
                        );
                    }
                    _ => panic!("Received invalid RPC message"),
                },
                Async::Ready(Some(_)) => (),
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            }
        }
    });

    // execute the futures and check the result
    let test_result = Arc::new(Mutex::new(false));
    let error_result = test_result.clone();
    let thread_result = test_result.clone();
    tokio::run(
        sender_future
            .select(receiver_future)
            // a timeout keeps a stalled exchange from hanging the test run
            .timeout(Duration::from_millis(1000))
            .map_err(move |_| *error_result.lock().unwrap() = false)
            .map(move |result| {
                *thread_result.lock().unwrap() = result.0;
                ()
            }),
    );
    assert!(*test_result.lock().unwrap());
}
|
||||
|
||||
#[test]
// Tests a Goodbye RPC message
fn test_goodbye_rpc() {
    // set up the logging. The level and enabled logging or not
    let log_level = Level::Trace;
    let enable_logging = false;

    let log = common::build_log(log_level, enable_logging);

    // get sender/receiver
    let (mut sender, mut receiver) = common::build_node_pair(&log, 10520);

    // Goodbye Request
    let rpc_request = RPCRequest::Goodbye(GoodbyeReason::ClientShutdown);

    let sender_request = rpc_request.clone();
    let sender_log = log.clone();

    // build the sender future
    let sender_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match sender.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => {
                    // Send a Goodbye request with id 1, as every other test in this file does
                    warn!(sender_log, "Sender sending RPC request");
                    sender
                        .swarm
                        .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone()));
                }
                // ignore unrelated libp2p events and keep polling
                Async::Ready(Some(_)) => {}
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            };
        }
    });

    // build the receiver future
    let receiver_future = future::poll_fn(move || -> Poll<bool, ()> {
        loop {
            match receiver.poll().unwrap() {
                Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event {
                    // Should receive the sent RPC request
                    RPCEvent::Request(id, request) => {
                        // the id must match the one the sender used (1); the
                        // original asserted 0, which contradicts the send above
                        assert_eq!(id, 1);
                        assert_eq!(rpc_request.clone(), request);
                        // receives the goodbye. Nothing left to do
                        return Ok(Async::Ready(true));
                    }
                    _ => panic!("Received invalid RPC message"),
                },
                Async::Ready(Some(_)) => (),
                Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady),
            }
        }
    });

    // execute the futures and check the result
    let test_result = Arc::new(Mutex::new(false));
    let error_result = test_result.clone();
    let thread_result = test_result.clone();
    tokio::run(
        sender_future
            .select(receiver_future)
            // a timeout keeps a stalled exchange from hanging the test run
            .timeout(Duration::from_millis(1000))
            .map_err(move |_| *error_result.lock().unwrap() = false)
            .map(move |result| {
                *thread_result.lock().unwrap() = result.0;
                ()
            }),
    );
    assert!(*test_result.lock().unwrap());
}
|
||||
58
beacon_node/eth2_libp2p/Cargo.toml
Normal file
58
beacon_node/eth2_libp2p/Cargo.toml
Normal file
@@ -0,0 +1,58 @@
|
||||
[package]
|
||||
name = "eth2_libp2p"
|
||||
version = "0.2.0"
|
||||
authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
hex = "0.4.2"
|
||||
types = { path = "../../consensus/types" }
|
||||
hashset_delay = { path = "../../common/hashset_delay" }
|
||||
eth2_ssz_types = { path = "../../consensus/ssz_types" }
|
||||
serde = { version = "1.0.110", features = ["derive"] }
|
||||
serde_derive = "1.0.110"
|
||||
eth2_ssz = "0.1.2"
|
||||
eth2_ssz_derive = "0.1.0"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
lighthouse_version = { path = "../../common/lighthouse_version" }
|
||||
tokio = { version = "0.2.21", features = ["time", "macros"] }
|
||||
futures = "0.3.5"
|
||||
error-chain = "0.12.2"
|
||||
dirs = "2.0.2"
|
||||
fnv = "1.0.7"
|
||||
unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "latest-codecs", features = ["codec"] }
|
||||
lazy_static = "1.4.0"
|
||||
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
|
||||
smallvec = "1.4.1"
|
||||
lru = "0.5.1"
|
||||
parking_lot = "0.11.0"
|
||||
sha2 = "0.9.1"
|
||||
base64 = "0.12.1"
|
||||
snap = "1.0.0"
|
||||
void = "1.0.2"
|
||||
tokio-io-timeout = "0.4.0"
|
||||
tokio-util = { version = "0.3.1", features = ["codec", "compat"] }
|
||||
discv5 = { version = "0.1.0-alpha.10", features = ["libp2p"] }
|
||||
tiny-keccak = "2.0.2"
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
# TODO: Remove rand crate for mainnet
|
||||
rand = "0.7.3"
|
||||
regex = "1.3.9"
|
||||
|
||||
[dependencies.libp2p]
|
||||
#version = "0.23.0"
|
||||
git = "https://github.com/sigp/rust-libp2p"
|
||||
rev = "03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
|
||||
default-features = false
|
||||
features = ["websocket", "identify", "mplex", "noise", "gossipsub", "dns", "tcp-tokio"]
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "0.2.21", features = ["full"] }
|
||||
slog-stdlog = "4.0.0"
|
||||
slog-term = "2.5.0"
|
||||
slog-async = "2.5.0"
|
||||
tempdir = "0.3.7"
|
||||
exit-future = "0.2.0"
|
||||
|
||||
[features]
|
||||
libp2p-websocket = []
|
||||
372
beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs
Normal file
372
beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs
Normal file
@@ -0,0 +1,372 @@
|
||||
use crate::rpc::*;
|
||||
use libp2p::{
|
||||
core::either::{EitherError, EitherOutput},
|
||||
core::upgrade::{EitherUpgrade, InboundUpgrade, OutboundUpgrade, SelectUpgrade, UpgradeError},
|
||||
gossipsub::Gossipsub,
|
||||
identify::Identify,
|
||||
swarm::{
|
||||
protocols_handler::{
|
||||
KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
|
||||
},
|
||||
NegotiatedSubstream, NetworkBehaviour, ProtocolsHandler,
|
||||
},
|
||||
};
|
||||
use std::task::{Context, Poll};
|
||||
use types::EthSpec;
|
||||
|
||||
/* Auxiliary types for simplicity */
// Shorthand for each sub-behaviour's `ProtocolsHandler` type, so the
// delegating handler below can name them without repeating the projection.
type GossipHandler = <Gossipsub as NetworkBehaviour>::ProtocolsHandler;
type RPCHandler<TSpec> = <RPC<TSpec> as NetworkBehaviour>::ProtocolsHandler;
type IdentifyHandler = <Identify as NetworkBehaviour>::ProtocolsHandler;
|
||||
|
||||
/// Handler that combines Lighthouse's Behaviours' handlers in a delegating manner.
///
/// Owns one sub-handler per behaviour; all `ProtocolsHandler` calls are
/// forwarded to whichever sub-handler the event belongs to.
pub(super) struct DelegatingHandler<TSpec: EthSpec> {
    /// Handler for the Gossipsub protocol.
    gossip_handler: GossipHandler,
    /// Handler for the RPC protocol.
    rpc_handler: RPCHandler<TSpec>,
    /// Handler for the Identify protocol.
    identify_handler: IdentifyHandler,
}
|
||||
|
||||
impl<TSpec: EthSpec> DelegatingHandler<TSpec> {
    /// Builds the combined handler by asking each behaviour for a fresh
    /// sub-handler of its own.
    pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC<TSpec>, identify: &mut Identify) -> Self {
        DelegatingHandler {
            gossip_handler: gossipsub.new_handler(),
            rpc_handler: rpc.new_handler(),
            identify_handler: identify.new_handler(),
        }
    }

    /// Gives mutable access to the rpc handler.
    pub fn rpc_mut(&mut self) -> &mut RPCHandler<TSpec> {
        &mut self.rpc_handler
    }

    /// Gives access to the rpc handler.
    pub fn rpc(&self) -> &RPCHandler<TSpec> {
        &self.rpc_handler
    }

    /// Gives access to identify's handler.
    // Leading underscore: currently unused by callers, kept for symmetry.
    pub fn _identify(&self) -> &IdentifyHandler {
        &self.identify_handler
    }
}
|
||||
|
||||
// TODO: this can all be created with macros
|
||||
|
||||
/// Wrapper around the `ProtocolsHandler::InEvent` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
#[derive(Debug, Clone)]
pub enum DelegateIn<TSpec: EthSpec> {
    Gossipsub(<GossipHandler as ProtocolsHandler>::InEvent),
    RPC(<RPCHandler<TSpec> as ProtocolsHandler>::InEvent),
    Identify(<IdentifyHandler as ProtocolsHandler>::InEvent),
}
|
||||
|
||||
/// Wrapper around the `ProtocolsHandler::OutEvent` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
pub enum DelegateOut<TSpec: EthSpec> {
    Gossipsub(<GossipHandler as ProtocolsHandler>::OutEvent),
    RPC(<RPCHandler<TSpec> as ProtocolsHandler>::OutEvent),
    // Boxed, presumably to keep the enum small — TODO confirm identify's
    // OutEvent is the large variant.
    Identify(Box<<IdentifyHandler as ProtocolsHandler>::OutEvent>),
}
|
||||
|
||||
/// Wrapper around the `ProtocolsHandler::Error` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
#[derive(Debug)]
pub enum DelegateError<TSpec: EthSpec> {
    Gossipsub(<GossipHandler as ProtocolsHandler>::Error),
    RPC(<RPCHandler<TSpec> as ProtocolsHandler>::Error),
    Identify(<IdentifyHandler as ProtocolsHandler>::Error),
    /// The connection is gone; no sub-handler produced this error.
    Disconnected,
}

// `Display` + `Debug` are provided, so the marker impl suffices.
impl<TSpec: EthSpec> std::error::Error for DelegateError<TSpec> {}
|
||||
|
||||
impl<TSpec: EthSpec> std::fmt::Display for DelegateError<TSpec> {
|
||||
fn fmt(
|
||||
&self,
|
||||
formater: &mut std::fmt::Formatter<'_>,
|
||||
) -> std::result::Result<(), std::fmt::Error> {
|
||||
match self {
|
||||
DelegateError::Gossipsub(err) => err.fmt(formater),
|
||||
DelegateError::RPC(err) => err.fmt(formater),
|
||||
DelegateError::Identify(err) => err.fmt(formater),
|
||||
DelegateError::Disconnected => write!(formater, "Disconnected"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Inbound upgrade: gossipsub selected against (rpc selected against identify).
/// The nesting order here must match the dispatch in
/// `inject_fully_negotiated_inbound`.
pub type DelegateInProto<TSpec> = SelectUpgrade<
    <GossipHandler as ProtocolsHandler>::InboundProtocol,
    SelectUpgrade<
        <RPCHandler<TSpec> as ProtocolsHandler>::InboundProtocol,
        <IdentifyHandler as ProtocolsHandler>::InboundProtocol,
    >,
>;

/// Outbound upgrade: `A` = gossipsub, `B(A)` = rpc, `B(B)` = identify.
pub type DelegateOutProto<TSpec> = EitherUpgrade<
    <GossipHandler as ProtocolsHandler>::OutboundProtocol,
    EitherUpgrade<
        <RPCHandler<TSpec> as ProtocolsHandler>::OutboundProtocol,
        <IdentifyHandler as ProtocolsHandler>::OutboundProtocol,
    >,
>;

// TODO: prob make this an enum
/// Outbound open info, nested the same way as `DelegateOutProto` so protocol
/// and info can be matched pairwise.
pub type DelegateOutInfo<TSpec> = EitherOutput<
    <GossipHandler as ProtocolsHandler>::OutboundOpenInfo,
    EitherOutput<
        <RPCHandler<TSpec> as ProtocolsHandler>::OutboundOpenInfo,
        <IdentifyHandler as ProtocolsHandler>::OutboundOpenInfo,
    >,
>;
|
||||
|
||||
impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
    type InEvent = DelegateIn<TSpec>;
    type OutEvent = DelegateOut<TSpec>;
    type Error = DelegateError<TSpec>;
    type InboundProtocol = DelegateInProto<TSpec>;
    type OutboundProtocol = DelegateOutProto<TSpec>;
    type OutboundOpenInfo = DelegateOutInfo<TSpec>;
    type InboundOpenInfo = ();

    /// Combines the three sub-handlers' listen upgrades into one
    /// `SelectUpgrade`, keeping the largest of their timeouts so no
    /// sub-protocol is cut short.
    fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
        let gossip_proto = self.gossip_handler.listen_protocol();
        let rpc_proto = self.rpc_handler.listen_protocol();
        let identify_proto = self.identify_handler.listen_protocol();

        let timeout = *gossip_proto
            .timeout()
            .max(rpc_proto.timeout())
            .max(identify_proto.timeout());

        let select = SelectUpgrade::new(
            gossip_proto.into_upgrade().1,
            SelectUpgrade::new(rpc_proto.into_upgrade().1, identify_proto.into_upgrade().1),
        );

        SubstreamProtocol::new(select, ()).with_timeout(timeout)
    }

    /// Routes a negotiated inbound substream to the sub-handler whose
    /// upgrade won the select (First = gossipsub, Second/First = RPC,
    /// Second/Second = identify).
    fn inject_fully_negotiated_inbound(
        &mut self,
        out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
        _info: Self::InboundOpenInfo,
    ) {
        match out {
            // Gossipsub
            EitherOutput::First(out) => {
                self.gossip_handler.inject_fully_negotiated_inbound(out, ())
            }
            // RPC
            EitherOutput::Second(EitherOutput::First(out)) => {
                self.rpc_handler.inject_fully_negotiated_inbound(out, ())
            }
            // Identify
            EitherOutput::Second(EitherOutput::Second(out)) => self
                .identify_handler
                .inject_fully_negotiated_inbound(out, ()),
        }
    }

    /// Routes a negotiated outbound substream. `protocol` and `info` must
    /// carry the same `EitherOutput` nesting, since both originate from the
    /// same sub-handler's substream request in `poll`.
    fn inject_fully_negotiated_outbound(
        &mut self,
        protocol: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
        info: Self::OutboundOpenInfo,
    ) {
        match (protocol, info) {
            // Gossipsub
            (EitherOutput::First(protocol), EitherOutput::First(info)) => self
                .gossip_handler
                .inject_fully_negotiated_outbound(protocol, info),
            // RPC
            (
                EitherOutput::Second(EitherOutput::First(protocol)),
                EitherOutput::Second(EitherOutput::First(info)),
            ) => self
                .rpc_handler
                .inject_fully_negotiated_outbound(protocol, info),
            // Identify
            (
                EitherOutput::Second(EitherOutput::Second(protocol)),
                EitherOutput::Second(EitherOutput::Second(())),
            ) => self
                .identify_handler
                .inject_fully_negotiated_outbound(protocol, ()),
            // Reaching here means we got a protocol and info for different behaviours
            _ => unreachable!("output and protocol don't match"),
        }
    }

    /// Forwards a wrapped in-event to the sub-handler it targets.
    fn inject_event(&mut self, event: Self::InEvent) {
        match event {
            DelegateIn::Gossipsub(ev) => self.gossip_handler.inject_event(ev),
            DelegateIn::RPC(ev) => self.rpc_handler.inject_event(ev),
            DelegateIn::Identify(()) => self.identify_handler.inject_event(()),
        }
    }

    /// Forwards a dial upgrade error to the sub-handler identified by
    /// `info`, unwrapping one `EitherError` layer so the error matches that
    /// handler's own error type. `Select`/`Timer`/`Timeout` errors carry no
    /// nesting and are forwarded as-is.
    fn inject_dial_upgrade_error(
        &mut self,
        info: Self::OutboundOpenInfo,
        error: ProtocolsHandlerUpgrErr<
            <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
        >,
    ) {
        // TODO: find how to clean up
        match info {
            // Gossipsub
            EitherOutput::First(info) => match error {
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
                    self.gossip_handler.inject_dial_upgrade_error(
                        info,
                        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
                    )
                }
                ProtocolsHandlerUpgrErr::Timer => self
                    .gossip_handler
                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer),
                ProtocolsHandlerUpgrErr::Timeout => self
                    .gossip_handler
                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout),
                // gossipsub's apply error is the `A` side of the EitherError nesting
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))) => {
                    self.gossip_handler.inject_dial_upgrade_error(
                        info,
                        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
                    )
                }
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
                    unreachable!("info and error don't match")
                }
            },
            // RPC
            EitherOutput::Second(EitherOutput::First(info)) => match error {
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
                    self.rpc_handler.inject_dial_upgrade_error(
                        info,
                        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
                    )
                }
                ProtocolsHandlerUpgrErr::Timer => self
                    .rpc_handler
                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer),
                ProtocolsHandlerUpgrErr::Timeout => self
                    .rpc_handler
                    .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout),
                // rpc's apply error is the `B(A)` side of the EitherError nesting
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(
                    EitherError::A(err),
                ))) => self.rpc_handler.inject_dial_upgrade_error(
                    info,
                    ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
                ),
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
                    unreachable!("info and error don't match")
                }
            },
            // Identify
            EitherOutput::Second(EitherOutput::Second(())) => match error {
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
                    self.identify_handler.inject_dial_upgrade_error(
                        (),
                        ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
                    )
                }
                ProtocolsHandlerUpgrErr::Timer => self
                    .identify_handler
                    .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer),
                ProtocolsHandlerUpgrErr::Timeout => self
                    .identify_handler
                    .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout),
                // identify's apply error is the `B(B)` side of the EitherError nesting
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(
                    EitherError::B(err),
                ))) => self.identify_handler.inject_dial_upgrade_error(
                    (),
                    ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
                ),
                ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
                    unreachable!("info and error don't match")
                }
            },
        }
    }

    /// Keeps the connection alive as long as ANY sub-handler wants it alive
    /// (`KeepAlive` ordering makes `max` pick the most-alive value).
    fn connection_keep_alive(&self) -> KeepAlive {
        self.gossip_handler
            .connection_keep_alive()
            .max(self.rpc_handler.connection_keep_alive())
            .max(self.identify_handler.connection_keep_alive())
    }

    /// Polls the sub-handlers in a fixed order (gossipsub, rpc, identify),
    /// returning the first ready event wrapped in the matching delegate
    /// type. Substream requests get their upgrade/info re-nested to the
    /// combined types. Pending only when all three are pending.
    // NOTE(review): the fixed order gives gossipsub priority on every poll —
    // presumably acceptable since each wakeup re-polls all handlers.
    #[allow(clippy::type_complexity)]
    fn poll(
        &mut self,
        cx: &mut Context,
    ) -> Poll<
        ProtocolsHandlerEvent<
            Self::OutboundProtocol,
            Self::OutboundOpenInfo,
            Self::OutEvent,
            Self::Error,
        >,
    > {
        match self.gossip_handler.poll(cx) {
            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Gossipsub(event)));
            }
            Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Gossipsub(
                    event,
                )));
            }
            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                    protocol: protocol
                        .map_upgrade(EitherUpgrade::A)
                        .map_info(EitherOutput::First),
                });
            }
            Poll::Pending => (),
        };

        match self.rpc_handler.poll(cx) {
            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::RPC(event)));
            }
            Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::RPC(event)));
            }
            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                    protocol: protocol
                        .map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::A(u)))
                        .map_info(|info| EitherOutput::Second(EitherOutput::First(info))),
                });
            }
            Poll::Pending => (),
        };

        match self.identify_handler.poll(cx) {
            Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Identify(
                    Box::new(event),
                )));
            }
            Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
                return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event)));
            }
            Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
                return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                    protocol: protocol
                        .map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::B(u)))
                        .map_info(|_| EitherOutput::Second(EitherOutput::Second(()))),
                });
            }
            Poll::Pending => (),
        };

        Poll::Pending
    }
}
|
||||
140
beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs
Normal file
140
beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs
Normal file
@@ -0,0 +1,140 @@
|
||||
use crate::rpc::*;
|
||||
use delegate::DelegatingHandler;
|
||||
pub(super) use delegate::{
|
||||
DelegateError, DelegateIn, DelegateInProto, DelegateOut, DelegateOutInfo, DelegateOutProto,
|
||||
};
|
||||
use libp2p::{
|
||||
core::upgrade::{InboundUpgrade, OutboundUpgrade},
|
||||
gossipsub::Gossipsub,
|
||||
identify::Identify,
|
||||
swarm::protocols_handler::{
|
||||
KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
|
||||
},
|
||||
swarm::{NegotiatedSubstream, ProtocolsHandler},
|
||||
};
|
||||
use std::task::{Context, Poll};
|
||||
use types::EthSpec;
|
||||
|
||||
mod delegate;
|
||||
|
||||
/// Handler that combines Lighthouse's Behaviours' handlers in a delegating manner.
///
/// Incoming events are forwarded to the appropriate sub-handler (gossipsub, RPC or
/// identify) through the inner [`DelegatingHandler`]; outgoing events are wrapped so
/// the behaviour can tell which sub-handler produced them.
pub struct BehaviourHandler<TSpec: EthSpec> {
    /// Handler combining all sub behaviour's handlers (gossipsub, RPC and identify).
    delegate: DelegatingHandler<TSpec>,
    /// Flag indicating if the handler is shutting down. Set via
    /// `BehaviourHandlerIn::Shutdown`; once set, `poll` closes the connection as
    /// soon as the RPC sub-handler reports `KeepAlive::No`.
    shutting_down: bool,
}
|
||||
|
||||
impl<TSpec: EthSpec> BehaviourHandler<TSpec> {
|
||||
pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC<TSpec>, identify: &mut Identify) -> Self {
|
||||
BehaviourHandler {
|
||||
delegate: DelegatingHandler::new(gossipsub, rpc, identify),
|
||||
shutting_down: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Events the behaviour sends to this handler (its `ProtocolsHandler::InEvent`).
#[derive(Clone)]
pub enum BehaviourHandlerIn<TSpec: EthSpec> {
    /// Event to be passed on, unchanged, to the corresponding sub-handler.
    Delegate(DelegateIn<TSpec>),
    /// Start the shutdown process. The optional `(RequestId, RPCRequest)` is handed
    /// to the RPC sub-handler's `shutdown` as a last message before closing.
    Shutdown(Option<(RequestId, RPCRequest<TSpec>)>),
}
|
||||
|
||||
/// Events this handler emits to the behaviour (its `ProtocolsHandler::OutEvent`).
pub enum BehaviourHandlerOut<TSpec: EthSpec> {
    /// Event produced by one of the delegated sub-handlers. Boxed to keep the enum
    /// variant small.
    Delegate(Box<DelegateOut<TSpec>>),
    // TODO: replace custom with events to send
    /// Placeholder variant (see TODO above); not constructed in the code visible here.
    Custom,
}
|
||||
|
||||
impl<TSpec: EthSpec> ProtocolsHandler for BehaviourHandler<TSpec> {
|
||||
type InEvent = BehaviourHandlerIn<TSpec>;
|
||||
type OutEvent = BehaviourHandlerOut<TSpec>;
|
||||
type Error = DelegateError<TSpec>;
|
||||
type InboundProtocol = DelegateInProto<TSpec>;
|
||||
type OutboundProtocol = DelegateOutProto<TSpec>;
|
||||
type OutboundOpenInfo = DelegateOutInfo<TSpec>;
|
||||
type InboundOpenInfo = ();
|
||||
|
||||
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
|
||||
self.delegate.listen_protocol()
|
||||
}
|
||||
|
||||
fn inject_fully_negotiated_inbound(
|
||||
&mut self,
|
||||
out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
|
||||
_info: Self::InboundOpenInfo,
|
||||
) {
|
||||
self.delegate.inject_fully_negotiated_inbound(out, ())
|
||||
}
|
||||
|
||||
fn inject_fully_negotiated_outbound(
|
||||
&mut self,
|
||||
out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
|
||||
info: Self::OutboundOpenInfo,
|
||||
) {
|
||||
self.delegate.inject_fully_negotiated_outbound(out, info)
|
||||
}
|
||||
|
||||
fn inject_event(&mut self, event: Self::InEvent) {
|
||||
match event {
|
||||
BehaviourHandlerIn::Delegate(delegated_ev) => self.delegate.inject_event(delegated_ev),
|
||||
/* Events coming from the behaviour */
|
||||
BehaviourHandlerIn::Shutdown(last_message) => {
|
||||
self.shutting_down = true;
|
||||
self.delegate.rpc_mut().shutdown(last_message);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn inject_dial_upgrade_error(
|
||||
&mut self,
|
||||
info: Self::OutboundOpenInfo,
|
||||
err: ProtocolsHandlerUpgrErr<
|
||||
<Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
|
||||
>,
|
||||
) {
|
||||
self.delegate.inject_dial_upgrade_error(info, err)
|
||||
}
|
||||
|
||||
// We don't use the keep alive to disconnect. This is handled in the poll
|
||||
fn connection_keep_alive(&self) -> KeepAlive {
|
||||
KeepAlive::Yes
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn poll(
|
||||
&mut self,
|
||||
cx: &mut Context,
|
||||
) -> Poll<
|
||||
ProtocolsHandlerEvent<
|
||||
Self::OutboundProtocol,
|
||||
Self::OutboundOpenInfo,
|
||||
Self::OutEvent,
|
||||
Self::Error,
|
||||
>,
|
||||
> {
|
||||
// Disconnect if the sub-handlers are ready.
|
||||
// Currently we only respect the RPC handler.
|
||||
if self.shutting_down && KeepAlive::No == self.delegate.rpc().connection_keep_alive() {
|
||||
return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Disconnected));
|
||||
}
|
||||
|
||||
match self.delegate.poll(cx) {
|
||||
Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
|
||||
return Poll::Ready(ProtocolsHandlerEvent::Custom(
|
||||
BehaviourHandlerOut::Delegate(Box::new(event)),
|
||||
))
|
||||
}
|
||||
Poll::Ready(ProtocolsHandlerEvent::Close(err)) => {
|
||||
return Poll::Ready(ProtocolsHandlerEvent::Close(err))
|
||||
}
|
||||
Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
|
||||
return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol });
|
||||
}
|
||||
Poll::Pending => (),
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff. Show More
Reference in New Issue
Block a user