mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-03 08:41:43 +00:00
Compare commits
875 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
98f74041a0 | ||
|
|
28aceaa213 | ||
|
|
c2e9354126 | ||
|
|
f5d8fdbb4e | ||
|
|
925e9241d1 | ||
|
|
15b8811580 | ||
|
|
e4fa7d906f | ||
|
|
6d4af4c9ca | ||
|
|
c475499dfe | ||
|
|
65eaf01942 | ||
|
|
267d8babc8 | ||
|
|
527dfa4893 | ||
|
|
381d0ece3c | ||
|
|
3b4865c3ae | ||
|
|
4186d117af | ||
|
|
cbda0a2f0a | ||
|
|
1829250ee4 | ||
|
|
09d2187198 | ||
|
|
aea43b626b | ||
|
|
4bf1af4e85 | ||
|
|
e88b18be09 | ||
|
|
f3c1dde898 | ||
|
|
668115a4b8 | ||
|
|
e34524be75 | ||
|
|
b6493d5e24 | ||
|
|
a1b730c043 | ||
|
|
27e83b888c | ||
|
|
5e1f8a8480 | ||
|
|
c1df5d29cb | ||
|
|
696de58141 | ||
|
|
5a0b049049 | ||
|
|
b37d5db8df | ||
|
|
104e3104f9 | ||
|
|
56b2ec6b29 | ||
|
|
c8019caba6 | ||
|
|
3ebb8b0244 | ||
|
|
da4ca024f1 | ||
|
|
0a6a8ea3b0 | ||
|
|
2f8531dc60 | ||
|
|
886afd684a | ||
|
|
c3a793fd73 | ||
|
|
b5921e4248 | ||
|
|
1306b2db96 | ||
|
|
7e38d203ce | ||
|
|
5388183884 | ||
|
|
d172c0b9fc | ||
|
|
1fd883d79a | ||
|
|
36fc887a40 | ||
|
|
675c7b7e26 | ||
|
|
48b7c8685b | ||
|
|
615695776e | ||
|
|
0177b9286e | ||
|
|
fc37d51e10 | ||
|
|
a6da87066b | ||
|
|
286996b090 | ||
|
|
5d26050e97 | ||
|
|
736457b562 | ||
|
|
139b44342f | ||
|
|
bdd70d7aef | ||
|
|
9ff2160198 | ||
|
|
e961ff60b4 | ||
|
|
ee000d5219 | ||
|
|
99d2c33387 | ||
|
|
e05142b798 | ||
|
|
782abdcab5 | ||
|
|
e70daaa3b6 | ||
|
|
013a3cc3e0 | ||
|
|
85d73d5443 | ||
|
|
f2b1e096b2 | ||
|
|
9964f5afe5 | ||
|
|
150931950d | ||
|
|
5f628a71d4 | ||
|
|
69288f6164 | ||
|
|
b9b3ea70de | ||
|
|
ca29b580a2 | ||
|
|
f35a33716b | ||
|
|
799aedd631 | ||
|
|
f0f327af0c | ||
|
|
fc7a1a7dc7 | ||
|
|
a8ae9c8418 | ||
|
|
0116c8d464 | ||
|
|
d06f87486a | ||
|
|
79db2d4deb | ||
|
|
95b3183cb4 | ||
|
|
ef7351ddfe | ||
|
|
9ed92d6e78 | ||
|
|
a836e180f9 | ||
|
|
a26b8802da | ||
|
|
c11253a82f | ||
|
|
ceeab02e3a | ||
|
|
1c667ad3ca | ||
|
|
6f4102aab6 | ||
|
|
db95255aeb | ||
|
|
6883e1bfb6 | ||
|
|
e8887ffea0 | ||
|
|
2ce2ec9b62 | ||
|
|
f13e9c3d10 | ||
|
|
aaa5344eab | ||
|
|
61f60bdf03 | ||
|
|
b656007963 | ||
|
|
4848e53155 | ||
|
|
6976796162 | ||
|
|
02e2fd2fb8 | ||
|
|
65b1374b58 | ||
|
|
ccdc10c288 | ||
|
|
daa3da3758 | ||
|
|
20941bc0f7 | ||
|
|
668477872e | ||
|
|
f6b5b1a8be | ||
|
|
fac117667b | ||
|
|
0b54ff17f2 | ||
|
|
dfc8968201 | ||
|
|
a0c5701e36 | ||
|
|
81c667b58e | ||
|
|
60d917d9e9 | ||
|
|
3b61ac9cbf | ||
|
|
a290a3c537 | ||
|
|
56d596ee42 | ||
|
|
f721f8d0a0 | ||
|
|
9be3d4ecac | ||
|
|
10dac51c6f | ||
|
|
4dcb262c2d | ||
|
|
eee0260a68 | ||
|
|
52c69c4eee | ||
|
|
a43d5e161f | ||
|
|
b22ac95d7f | ||
|
|
e391b32858 | ||
|
|
62d11e886e | ||
|
|
2984f4b474 | ||
|
|
a7a7edb6cf | ||
|
|
b5f2764bae | ||
|
|
a80ccc3a33 | ||
|
|
f3c237cfa0 | ||
|
|
144978f8f8 | ||
|
|
94385fe17b | ||
|
|
ab86b42874 | ||
|
|
c2f2813385 | ||
|
|
1b56ebf85e | ||
|
|
82a81524e3 | ||
|
|
f6748537db | ||
|
|
5f0fef2d1e | ||
|
|
44a7b37ce3 | ||
|
|
afe59afacd | ||
|
|
47db682d7e | ||
|
|
cdfd1304a5 | ||
|
|
cbd2201164 | ||
|
|
eb35c64afd | ||
|
|
24966c059d | ||
|
|
de49c7ddaa | ||
|
|
cdbe603adf | ||
|
|
cee18ca842 | ||
|
|
86e0c56a38 | ||
|
|
6b4cc63b57 | ||
|
|
d8eec16c5e | ||
|
|
6dde12f311 | ||
|
|
aa1d57aa55 | ||
|
|
67a6f91df6 | ||
|
|
35350dff75 | ||
|
|
d6fda44620 | ||
|
|
7975ceff97 | ||
|
|
52e5083502 | ||
|
|
b162b067de | ||
|
|
a1033a9247 | ||
|
|
801f6f7425 | ||
|
|
471cf10392 | ||
|
|
5efb7aeab4 | ||
|
|
01031931d9 | ||
|
|
20ca7a56ed | ||
|
|
d8623cfc4f | ||
|
|
1563bce905 | ||
|
|
fe945bc84a | ||
|
|
aa534f8989 | ||
|
|
fd828199f5 | ||
|
|
18eee2dc82 | ||
|
|
10b263fed4 | ||
|
|
c10e8ce955 | ||
|
|
cce855f9ea | ||
|
|
5687c56d51 | ||
|
|
fe75a0a9a1 | ||
|
|
6625aa4afe | ||
|
|
413b0b5b2b | ||
|
|
9eedb6b888 | ||
|
|
2c07a72980 | ||
|
|
3fb8162dcc | ||
|
|
c61fbf71a4 | ||
|
|
0b319d4926 | ||
|
|
53562010ec | ||
|
|
31386277c3 | ||
|
|
e519af9012 | ||
|
|
e32c09bfda | ||
|
|
a43a2448b7 | ||
|
|
58b04acf28 | ||
|
|
3919046731 | ||
|
|
931daa40d7 | ||
|
|
d01fe02824 | ||
|
|
fbafe416d1 | ||
|
|
df02639b71 | ||
|
|
fadb8b2b2b | ||
|
|
2ed6775dcf | ||
|
|
a683e0296a | ||
|
|
7502970a7d | ||
|
|
c4ad0e3fb3 | ||
|
|
80627b428b | ||
|
|
4499adc7fd | ||
|
|
ffb04e1a9e | ||
|
|
e8a557fdd8 | ||
|
|
e2c0650d16 | ||
|
|
1790010260 | ||
|
|
2c4413454a | ||
|
|
88063398f6 | ||
|
|
2dc6163043 | ||
|
|
8edd9d45ab | ||
|
|
39c0d1219c | ||
|
|
3a51f829d5 | ||
|
|
bf1667a904 | ||
|
|
d4819bfd42 | ||
|
|
de34001e78 | ||
|
|
2f7b5f8cb0 | ||
|
|
99f7a7db58 | ||
|
|
aad397f00a | ||
|
|
efec60ee90 | ||
|
|
d2e3d4c6f1 | ||
|
|
df40700ddd | ||
|
|
06e310c4eb | ||
|
|
e75ce534f6 | ||
|
|
fff01b24dd | ||
|
|
180c90bf6d | ||
|
|
a7b675460d | ||
|
|
cfafe7ba3a | ||
|
|
5cde3fc4da | ||
|
|
9c5a8ab7f2 | ||
|
|
7c23e2142a | ||
|
|
05040e68ec | ||
|
|
ef49524ff8 | ||
|
|
18340d1fb6 | ||
|
|
e2d09bb8ac | ||
|
|
0a77d783a4 | ||
|
|
34d22b5920 | ||
|
|
a73d698e30 | ||
|
|
0aee7ec873 | ||
|
|
708557a473 | ||
|
|
229542cd6c | ||
|
|
7c7ba770de | ||
|
|
73ec29c267 | ||
|
|
58870fc6d3 | ||
|
|
02a646a27d | ||
|
|
7c88f582d9 | ||
|
|
ed1fc7cca6 | ||
|
|
28b79084cd | ||
|
|
21d1af435a | ||
|
|
ea78315749 | ||
|
|
29a8865d07 | ||
|
|
db4d72c4f1 | ||
|
|
4c510f8f6b | ||
|
|
70441aa554 | ||
|
|
7d13e57d9f | ||
|
|
c0122e1a52 | ||
|
|
e895074ba9 | ||
|
|
113ef74ef6 | ||
|
|
440badd973 | ||
|
|
924a1345b1 | ||
|
|
fe52322088 | ||
|
|
a844ce5ba9 | ||
|
|
00a7ef0036 | ||
|
|
be11437c27 | ||
|
|
9667dc2f03 | ||
|
|
280e4fe23d | ||
|
|
fb1df2c926 | ||
|
|
a73dcb7b6d | ||
|
|
64ad2af100 | ||
|
|
acdcea9663 | ||
|
|
56e0615df8 | ||
|
|
c5c7476518 | ||
|
|
58012f85e1 | ||
|
|
95b17137a8 | ||
|
|
4755d4b236 | ||
|
|
f9bba92db3 | ||
|
|
e4ed42a9d8 | ||
|
|
f7dd24c998 | ||
|
|
46cd67d519 | ||
|
|
8d2650fffd | ||
|
|
3b0f3247c6 | ||
|
|
1eefd6d413 | ||
|
|
ddbd4e6965 | ||
|
|
9c785a9b33 | ||
|
|
f4aa1d8aea | ||
|
|
d9910f96c5 | ||
|
|
ac274221c5 | ||
|
|
6f18f95893 | ||
|
|
50321c6671 | ||
|
|
5a3bcd2904 | ||
|
|
44fa54004c | ||
|
|
1031f79aca | ||
|
|
b4dd98b3c6 | ||
|
|
10945e0619 | ||
|
|
6b65b6f3bd | ||
|
|
bc14d1d73d | ||
|
|
99737c551a | ||
|
|
beab306e07 | ||
|
|
b0ac3464ca | ||
|
|
4405425726 | ||
|
|
9fb94fbebe | ||
|
|
3fdad38eba | ||
|
|
09545fe668 | ||
|
|
d3b4cbed53 | ||
|
|
aca49fc45e | ||
|
|
5b8436e33f | ||
|
|
bc0599246f | ||
|
|
02fd54bea7 | ||
|
|
90d5ab1566 | ||
|
|
f2a8c6229c | ||
|
|
12fe72bd37 | ||
|
|
c7379836a5 | ||
|
|
bc6a848ded | ||
|
|
c0a2f501d9 | ||
|
|
d17350c0fa | ||
|
|
4c4ebfbaa1 | ||
|
|
4af6fcfafd | ||
|
|
33ff51a096 | ||
|
|
54f92cc263 | ||
|
|
7b46c4bb7a | ||
|
|
ceda27371d | ||
|
|
ff85b05249 | ||
|
|
71ab16e404 | ||
|
|
17a2c778e3 | ||
|
|
350b6f19de | ||
|
|
fc8969302c | ||
|
|
e8c0d1f19b | ||
|
|
6a620a31da | ||
|
|
187425cdc1 | ||
|
|
c5786a8821 | ||
|
|
834ee98bc2 | ||
|
|
303deb9969 | ||
|
|
8efd9fc324 | ||
|
|
1d4f90e2eb | ||
|
|
923486f34c | ||
|
|
6e3ca48cb9 | ||
|
|
f5bdca09ff | ||
|
|
84e6d71950 | ||
|
|
74aa99c409 | ||
|
|
63923eaa29 | ||
|
|
17b6d7ce86 | ||
|
|
bcf8ba6318 | ||
|
|
9a8320beaa | ||
|
|
08fedbfcba | ||
|
|
6818a94171 | ||
|
|
381befbf82 | ||
|
|
059d9ec1b1 | ||
|
|
c62810b408 | ||
|
|
3c0d3227ab | ||
|
|
64226321b3 | ||
|
|
c1d2e35c9e | ||
|
|
f4bc9db16d | ||
|
|
6fb48b45fa | ||
|
|
4aa06c9555 | ||
|
|
b0f5c4c776 | ||
|
|
a3a7f39b0d | ||
|
|
8fa6e463ca | ||
|
|
fc4c611476 | ||
|
|
304fb05e44 | ||
|
|
9656ffee7c | ||
|
|
27aec1962c | ||
|
|
20fce117f3 | ||
|
|
2c691af95b | ||
|
|
a7b7134abb | ||
|
|
371c216ac3 | ||
|
|
b3c7e59a5b | ||
|
|
b4689e20c6 | ||
|
|
89361573d4 | ||
|
|
78e5c0c157 | ||
|
|
406e3921d9 | ||
|
|
73d002ef92 | ||
|
|
206486006c | ||
|
|
379664a648 | ||
|
|
9461ac2d50 | ||
|
|
2357a41868 | ||
|
|
6583ce325b | ||
|
|
b84ff9f793 | ||
|
|
3dc1eb5eb6 | ||
|
|
98ab00cc52 | ||
|
|
b1657a60e9 | ||
|
|
3261eff0bf | ||
|
|
a526145b4a | ||
|
|
dffe31c312 | ||
|
|
3b600acdc5 | ||
|
|
b383836418 | ||
|
|
4a6f2fac81 | ||
|
|
93100f221f | ||
|
|
502402c6b9 | ||
|
|
f6280aa663 | ||
|
|
90ea075c62 | ||
|
|
d34f922c1d | ||
|
|
bf4e02e2cc | ||
|
|
ba9c4c5eea | ||
|
|
4c7bb4984c | ||
|
|
320a683e72 | ||
|
|
0847986936 | ||
|
|
ec5cceba50 | ||
|
|
d12e746b50 | ||
|
|
456b313665 | ||
|
|
fdaeec631b | ||
|
|
55aada006f | ||
|
|
5d9a1bc558 | ||
|
|
ba55e140ae | ||
|
|
58e52f8f40 | ||
|
|
480b247828 | ||
|
|
cb47388ad7 | ||
|
|
bacc38c3da | ||
|
|
4cc613d644 | ||
|
|
2ccb358d87 | ||
|
|
0aa8509525 | ||
|
|
0754ba3be7 | ||
|
|
2c2c443718 | ||
|
|
c646d2f7a3 | ||
|
|
2992ca66cd | ||
|
|
125915e632 | ||
|
|
3a24ca5f14 | ||
|
|
3b901dc5ec | ||
|
|
c1203f5e52 | ||
|
|
0df7be1814 | ||
|
|
aaa14073ff | ||
|
|
f6f64cf0f5 | ||
|
|
e7e5878953 | ||
|
|
66590d043c | ||
|
|
52995ab5f5 | ||
|
|
95a362213d | ||
|
|
9eb1945136 | ||
|
|
3d239b85ac | ||
|
|
03cefd0065 | ||
|
|
39928d5c69 | ||
|
|
d3d8c22edf | ||
|
|
f9d60f5436 | ||
|
|
9a71a7e486 | ||
|
|
d18bba588b | ||
|
|
b34a79dc0b | ||
|
|
aae8bbd130 | ||
|
|
015ab7d0a7 | ||
|
|
6a69b20be1 | ||
|
|
87825b2bd2 | ||
|
|
0c52a2ac1d | ||
|
|
3919737978 | ||
|
|
770a2ca030 | ||
|
|
786e25ea08 | ||
|
|
babd153352 | ||
|
|
8faab89f09 | ||
|
|
e4eb0eb168 | ||
|
|
363f15f362 | ||
|
|
c370100719 | ||
|
|
b30ff6affc | ||
|
|
da8791abd7 | ||
|
|
1c507c588e | ||
|
|
5c285f652a | ||
|
|
cc6f7998fd | ||
|
|
ed9b245de0 | ||
|
|
baef1db40f | ||
|
|
0b2ccecbcf | ||
|
|
afd4786c59 | ||
|
|
2f077b11fe | ||
|
|
5bc93869c8 | ||
|
|
a764c3b247 | ||
|
|
399d073ab4 | ||
|
|
46920a84e8 | ||
|
|
4362ea4f98 | ||
|
|
8949ae7c4e | ||
|
|
8c6537e71d | ||
|
|
f8cc82f2b1 | ||
|
|
613382f304 | ||
|
|
1ab495738d | ||
|
|
3ffee365e2 | ||
|
|
f819ba5414 | ||
|
|
4a357c9947 | ||
|
|
88cc222204 | ||
|
|
3000f3e5da | ||
|
|
1a748699d9 | ||
|
|
ff1813e618 | ||
|
|
9ae92aa256 | ||
|
|
fee24539ac | ||
|
|
6e6e9104f5 | ||
|
|
8e5c20b6d1 | ||
|
|
e2ff9c66a1 | ||
|
|
e20f64b21a | ||
|
|
6f4da9a5d2 | ||
|
|
7c059117f4 | ||
|
|
194609d210 | ||
|
|
e383ef3e91 | ||
|
|
ff35fbb121 | ||
|
|
de193c95d3 | ||
|
|
420c2d28f8 | ||
|
|
1a22a096c6 | ||
|
|
e4b62139d7 | ||
|
|
2b2a358522 | ||
|
|
1eb0915301 | ||
|
|
b06559ae97 | ||
|
|
d9f940613f | ||
|
|
46cb6e204c | ||
|
|
805e152f66 | ||
|
|
8892114f52 | ||
|
|
51f7724c76 | ||
|
|
9cdfa94ba4 | ||
|
|
3d07934ca0 | ||
|
|
a8d040c821 | ||
|
|
908c8eadf3 | ||
|
|
7a71977987 | ||
|
|
e5b1a37110 | ||
|
|
28238d97b1 | ||
|
|
14df5d5c32 | ||
|
|
1d535659d6 | ||
|
|
423dea169c | ||
|
|
851a4dca3c | ||
|
|
7e4b190df0 | ||
|
|
c2eac8e5bd | ||
|
|
588b90157d | ||
|
|
939fa717fd | ||
|
|
0245ddd37b | ||
|
|
f183af20e3 | ||
|
|
32a60578fe | ||
|
|
43ac3f7209 | ||
|
|
78d17c3255 | ||
|
|
9ed65a64f8 | ||
|
|
c5f03f7d56 | ||
|
|
2931b05582 | ||
|
|
b5e81eb6b2 | ||
|
|
3381266998 | ||
|
|
166f617b19 | ||
|
|
e5bf2576f1 | ||
|
|
a62dc65ca4 | ||
|
|
f998eff7ce | ||
|
|
ca08fc7831 | ||
|
|
3fcc517993 | ||
|
|
da1c5fe69d | ||
|
|
80f47fcfff | ||
|
|
0c529b8d52 | ||
|
|
63eeb14a81 | ||
|
|
11c299cbf6 | ||
|
|
701843aaa0 | ||
|
|
c1e27f4c89 | ||
|
|
1abc70e815 | ||
|
|
dfb588e521 | ||
|
|
adbd49ddc6 | ||
|
|
aa45fa3ff7 | ||
|
|
82753f842d | ||
|
|
4f85371ce8 | ||
|
|
57489e620f | ||
|
|
f3200784b4 | ||
|
|
a28e8decbf | ||
|
|
9c915349d4 | ||
|
|
0f5f3b522e | ||
|
|
c1ec386d18 | ||
|
|
7933596c89 | ||
|
|
fdfb81a74a | ||
|
|
2b5c0df9e5 | ||
|
|
e06d040b5d | ||
|
|
2682f46025 | ||
|
|
686b605112 | ||
|
|
e1353088e0 | ||
|
|
482695142a | ||
|
|
d8cda2d86e | ||
|
|
d3f0a21436 | ||
|
|
b8bd80d2fb | ||
|
|
075eecdcb1 | ||
|
|
65dcdc361b | ||
|
|
c718e81eaf | ||
|
|
77f3539654 | ||
|
|
8fcd22992c | ||
|
|
3f036fd193 | ||
|
|
85e69249e6 | ||
|
|
f7183098ee | ||
|
|
cadcc9a76b | ||
|
|
9a37f356a9 | ||
|
|
a567f788bd | ||
|
|
5a3b94cbb4 | ||
|
|
38b15deccb | ||
|
|
1312844f29 | ||
|
|
0589a14afe | ||
|
|
3486d6a809 | ||
|
|
fc07cc3fdf | ||
|
|
26741944b1 | ||
|
|
50558e61f7 | ||
|
|
198c4a873d | ||
|
|
7b6a97e73c | ||
|
|
7020f5df40 | ||
|
|
3b4afc27bf | ||
|
|
c6baa0eed1 | ||
|
|
a96893744c | ||
|
|
11c4968ea0 | ||
|
|
b6eff50ffa | ||
|
|
61277e3a72 | ||
|
|
ead6be074e | ||
|
|
011cea93b3 | ||
|
|
20339ade01 | ||
|
|
84b3387d09 | ||
|
|
e504645767 | ||
|
|
a171fb8843 | ||
|
|
6f890c398e | ||
|
|
21617aa87f | ||
|
|
7d644103c6 | ||
|
|
5828ff1204 | ||
|
|
59b2247ab8 | ||
|
|
65b1cf2af1 | ||
|
|
d0cbf3111a | ||
|
|
426b3001e0 | ||
|
|
0b556c4405 | ||
|
|
48f73b21e6 | ||
|
|
3b405f10ea | ||
|
|
d727e55abe | ||
|
|
e47739047d | ||
|
|
37369c6a56 | ||
|
|
c5e97b9bf7 | ||
|
|
1a530e5a93 | ||
|
|
3db9072fee | ||
|
|
79fd9b32b9 | ||
|
|
3408de8151 | ||
|
|
bcc7f6b143 | ||
|
|
0c2c2cef93 | ||
|
|
7e4ee58729 | ||
|
|
103103e72e | ||
|
|
a60ab4eff2 | ||
|
|
ecff8807a5 | ||
|
|
5114aee5cf | ||
|
|
398919b5d4 | ||
|
|
280334b1b0 | ||
|
|
4d732a1f1d | ||
|
|
49c4630045 | ||
|
|
646c049df2 | ||
|
|
836eaf559b | ||
|
|
fe71f25c3a | ||
|
|
eb56140582 | ||
|
|
6a7d221f72 | ||
|
|
8a16548715 | ||
|
|
46a06069c6 | ||
|
|
c00e6c2c6f | ||
|
|
8772c02fa0 | ||
|
|
c7ac967d5a | ||
|
|
cb26c15eb6 | ||
|
|
fcb4893f72 | ||
|
|
11076912d9 | ||
|
|
7404f1ce54 | ||
|
|
f0c9339153 | ||
|
|
9ee71d6fec | ||
|
|
5ed4c1daca | ||
|
|
f8da151b0b | ||
|
|
556190ff46 | ||
|
|
b711cfe2bb | ||
|
|
2f9999752e | ||
|
|
b3fc48e887 | ||
|
|
b0e9e3dcef | ||
|
|
63fe5542e7 | ||
|
|
3574bad6cd | ||
|
|
78744cd07a | ||
|
|
492ce07ed3 | ||
|
|
e004b98eab | ||
|
|
e2ae5010a6 | ||
|
|
4c4dad9fb5 | ||
|
|
157e31027a | ||
|
|
7e7fad5734 | ||
|
|
0a0f4daf9d | ||
|
|
0cde4e285c | ||
|
|
2ff5828310 | ||
|
|
863ee7c9f2 | ||
|
|
7afbaa807e | ||
|
|
6c0c050fbb | ||
|
|
304793a6ab | ||
|
|
56f9394141 | ||
|
|
f64f8246db | ||
|
|
ae0f025375 | ||
|
|
9f45ac2f5e | ||
|
|
2bd5b9182f | ||
|
|
36bd4d87f0 | ||
|
|
ad846ad280 | ||
|
|
92c8eba8ca | ||
|
|
f157d61cc7 | ||
|
|
eba51f0973 | ||
|
|
7453f39d68 | ||
|
|
a3cc1a1e0f | ||
|
|
1644289a08 | ||
|
|
7870b81ade | ||
|
|
fa2daa7d6c | ||
|
|
55eee18ebb | ||
|
|
64c5899d25 | ||
|
|
2c7f362908 | ||
|
|
ae96dab5d2 | ||
|
|
c49dd94e20 | ||
|
|
acd49d988d | ||
|
|
66f0cf4430 | ||
|
|
4298efeb23 | ||
|
|
542f755ac5 | ||
|
|
b829257cca | ||
|
|
7f73dccebc | ||
|
|
a3704b971e | ||
|
|
a3552a4b70 | ||
|
|
8f86baa48d | ||
|
|
668513b67e | ||
|
|
e1eec7828b | ||
|
|
628891df1d | ||
|
|
fdb9744759 | ||
|
|
02d94a70b7 | ||
|
|
2acf75785c | ||
|
|
703c33bdc7 | ||
|
|
6ba997b88e | ||
|
|
d7b9d0dd9f | ||
|
|
97be2ca295 | ||
|
|
aadbab47cc | ||
|
|
a0634cc64f | ||
|
|
8248afa793 | ||
|
|
467de4c8d0 | ||
|
|
95c96ac567 | ||
|
|
e9d5bade36 | ||
|
|
83ae12a1b4 | ||
|
|
99a02fd2ab | ||
|
|
b185d7bbd8 | ||
|
|
0e4cc50262 | ||
|
|
db3e0578e9 | ||
|
|
b0833033b7 | ||
|
|
72cc5e35af | ||
|
|
414138f137 | ||
|
|
b69c63d486 | ||
|
|
a67fa5f4a4 | ||
|
|
a886afd3ca | ||
|
|
56ffe91f90 | ||
|
|
59adc5ba00 | ||
|
|
da44821e39 | ||
|
|
ee7c8a0b7e | ||
|
|
240181e840 | ||
|
|
bcb629564a | ||
|
|
113758a4f5 | ||
|
|
cf74e0baed | ||
|
|
a8c5af8874 | ||
|
|
6997776494 | ||
|
|
e7eb99cb5e | ||
|
|
fe07a3c21c | ||
|
|
47c921f326 | ||
|
|
b1c121b880 | ||
|
|
39bd511838 | ||
|
|
cee3e6483a | ||
|
|
6b68c628df | ||
|
|
86a18e72c4 | ||
|
|
e3c7b58657 | ||
|
|
d72c026d32 | ||
|
|
c4bd9c86e6 | ||
|
|
6af3bc9ce2 | ||
|
|
8fde9a4016 | ||
|
|
17c5da478e | ||
|
|
255cc25623 | ||
|
|
32338bcafa | ||
|
|
6ea3bc5e52 | ||
|
|
94b17ce02b | ||
|
|
2cc20101d4 | ||
|
|
b56dbc3ba0 | ||
|
|
1d278aaa83 | ||
|
|
9d2d6239cd | ||
|
|
22aedda1be | ||
|
|
cdec3cec18 | ||
|
|
8e20176337 | ||
|
|
dffc56ef1d | ||
|
|
996887376d | ||
|
|
fcf8419c90 | ||
|
|
a1a6b01acb | ||
|
|
c0e76d2c15 | ||
|
|
13cb642f39 | ||
|
|
ae28773965 | ||
|
|
6c1d7f55bf | ||
|
|
7bf14908dc | ||
|
|
1ef4f0ea12 | ||
|
|
f1180a8947 | ||
|
|
5688f21bbd | ||
|
|
258b28469e | ||
|
|
3412a3ec54 | ||
|
|
28b6d921c6 | ||
|
|
15638d1448 | ||
|
|
b8013b7b2c | ||
|
|
80e52a0263 | ||
|
|
80ecafaae4 | ||
|
|
b75df29501 | ||
|
|
1801dd1a34 | ||
|
|
62c8548ed0 | ||
|
|
a97ec318c4 | ||
|
|
7aceff4d13 | ||
|
|
4fca306397 | ||
|
|
d85d5a435e | ||
|
|
bd39cc8e26 | ||
|
|
14ff38539c | ||
|
|
5d17eb899f | ||
|
|
1db8daae0c | ||
|
|
7b97c4ad30 | ||
|
|
371e1c1d5d | ||
|
|
a17f74896a | ||
|
|
49ab414594 | ||
|
|
2074beccdc | ||
|
|
e5fc6bab48 | ||
|
|
c9596fcf0e | ||
|
|
c6abc56113 | ||
|
|
7f1b936905 | ||
|
|
810de2f8b7 | ||
|
|
0525876882 | ||
|
|
d79366c503 | ||
|
|
b19cf02d2d | ||
|
|
dfe507715d | ||
|
|
0821e6b39f | ||
|
|
9cf8f45192 | ||
|
|
00cdc4bb35 | ||
|
|
19be7abfd2 | ||
|
|
9833eca024 | ||
|
|
2a9a815f29 | ||
|
|
a6376b4585 | ||
|
|
74fa87aa98 | ||
|
|
211109bbc0 | ||
|
|
638daa87fe | ||
|
|
2627463366 | ||
|
|
9c9176c1d1 | ||
|
|
87181204d0 | ||
|
|
fb9d828e5e | ||
|
|
8301a984eb | ||
|
|
7d71d98dc1 | ||
|
|
c34e8efb12 | ||
|
|
adea7992f8 | ||
|
|
c18d37c202 | ||
|
|
b6340ec495 | ||
|
|
967700c1ff | ||
|
|
d9f4819fe0 | ||
|
|
30bb7aecfb | ||
|
|
4763f03dcc | ||
|
|
175471a64b | ||
|
|
dfd02d6179 | ||
|
|
3569506acd | ||
|
|
c895dc8971 | ||
|
|
2bc9115a94 | ||
|
|
3cfd70d7fd | ||
|
|
3f0a113c7f | ||
|
|
ebb25b5569 | ||
|
|
bbed42f30c | ||
|
|
fdc6e2aa8e | ||
|
|
8e7dd7b2b1 | ||
|
|
33b2a3d0e0 | ||
|
|
93b7c3b7ff | ||
|
|
2d0b214b57 | ||
|
|
d4f763bbae | ||
|
|
e1e5002d3c | ||
|
|
46dd530476 | ||
|
|
8311074d68 | ||
|
|
3bb30754d9 | ||
|
|
cc44a64d15 | ||
|
|
46dbf027af | ||
|
|
9a97a0b14f | ||
|
|
719a69aee0 | ||
|
|
a58aa6ee55 | ||
|
|
73cbfbdfd0 | ||
|
|
f85485884f | ||
|
|
61d5b592cb | ||
|
|
3c689a6837 | ||
|
|
afdc4fea1d | ||
|
|
850a2d5985 | ||
|
|
113b40f321 | ||
|
|
99acfb50f2 | ||
|
|
c75c06cf16 | ||
|
|
6aeb896480 | ||
|
|
f4a7311008 | ||
|
|
619ad106cf | ||
|
|
b0a3731fff | ||
|
|
e3d45eda1e | ||
|
|
05a8399769 | ||
|
|
e6f45524f9 | ||
|
|
8a1a4051cf | ||
|
|
61367efa64 | ||
|
|
70089f5231 | ||
|
|
b063df5bf9 | ||
|
|
b83fcd5e5c | ||
|
|
1a67d15701 | ||
|
|
ec84183e05 | ||
|
|
95b55d7170 | ||
|
|
134676fd6f | ||
|
|
cbfae87aa6 | ||
|
|
04e4389efe | ||
|
|
08a31c5a1a | ||
|
|
a1f9769040 | ||
|
|
1d5d3e3ea7 |
@@ -1,4 +1,4 @@
|
||||
tests/ef_tests/eth2.0-spec-tests
|
||||
testing/ef_tests/consensus-spec-tests
|
||||
target/
|
||||
*.data
|
||||
*.tar.gz
|
||||
|
||||
2
.github/ISSUE_TEMPLATE.md
vendored
2
.github/ISSUE_TEMPLATE.md
vendored
@@ -5,7 +5,7 @@ Please provide a brief description of the issue.
|
||||
## Version
|
||||
|
||||
Please provide your Lighthouse and Rust version. Are you building from
|
||||
`master`, which commit?
|
||||
`stable` or `unstable`, which commit?
|
||||
|
||||
## Present Behaviour
|
||||
|
||||
|
||||
3
.github/custom/config.toml
vendored
Normal file
3
.github/custom/config.toml
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
# Custom Cargo config to be used for the udeps CI job
|
||||
[http]
|
||||
multiplexing = false
|
||||
4
.github/workflows/book.yml
vendored
4
.github/workflows/book.yml
vendored
@@ -3,7 +3,7 @@ name: mdbook
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- unstable
|
||||
|
||||
jobs:
|
||||
build-and-upload-to-s3:
|
||||
@@ -14,7 +14,7 @@ jobs:
|
||||
- name: Setup mdBook
|
||||
uses: peaceiris/actions-mdbook@v1
|
||||
with:
|
||||
mdbook-version: '0.3.5'
|
||||
mdbook-version: 'latest'
|
||||
|
||||
- run: mdbook build
|
||||
working-directory: book
|
||||
|
||||
14
.github/workflows/cancel-previous-runs.yml
vendored
Normal file
14
.github/workflows/cancel-previous-runs.yml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
name: cancel previous runs
|
||||
on: [push]
|
||||
jobs:
|
||||
cancel:
|
||||
name: 'Cancel Previous Runs'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 3
|
||||
steps:
|
||||
# https://github.com/styfle/cancel-workflow-action/releases
|
||||
- uses: styfle/cancel-workflow-action@514c783324374c6940d1b92bfb962d0763d22de3 # 0.7.0
|
||||
with:
|
||||
# https://api.github.com/repos/sigp/lighthouse/actions/workflows
|
||||
workflow_id: 697364,2434944,4462424,308241,2883401,316
|
||||
access_token: ${{ github.token }}
|
||||
31
.github/workflows/docker-antithesis.yml
vendored
Normal file
31
.github/workflows/docker-antithesis.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: docker antithesis
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- unstable
|
||||
|
||||
env:
|
||||
ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }}
|
||||
ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }}
|
||||
ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }}
|
||||
REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }}
|
||||
IMAGE_NAME: lighthouse
|
||||
TAG: libvoidstar
|
||||
|
||||
jobs:
|
||||
build-docker:
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Update Rust
|
||||
run: rustup update stable
|
||||
- name: Dockerhub login
|
||||
run: |
|
||||
echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin
|
||||
- name: Build AMD64 dockerfile (with push)
|
||||
run: |
|
||||
docker build \
|
||||
--tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \
|
||||
--file ./testing/antithesis/Dockerfile.libvoidstar .
|
||||
docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG}
|
||||
139
.github/workflows/docker.yml
vendored
Normal file
139
.github/workflows/docker.yml
vendored
Normal file
@@ -0,0 +1,139 @@
|
||||
name: docker
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- unstable
|
||||
- stable
|
||||
tags:
|
||||
- v*
|
||||
|
||||
env:
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||
IMAGE_NAME: ${{ github.repository_owner}}/lighthouse
|
||||
LCLI_IMAGE_NAME: ${{ github.repository_owner }}/lcli
|
||||
|
||||
jobs:
|
||||
# Extract the VERSION which is either `latest` or `vX.Y.Z`, and the VERSION_SUFFIX
|
||||
# which is either empty or `-unstable`.
|
||||
#
|
||||
# It would be nice if the arch didn't get spliced into the version between `latest` and
|
||||
# `unstable`, but for now we keep the two parts of the version separate for backwards
|
||||
# compatibility.
|
||||
extract-version:
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- name: Extract version (if stable)
|
||||
if: github.event.ref == 'refs/heads/stable'
|
||||
run: |
|
||||
echo "VERSION=latest" >> $GITHUB_ENV
|
||||
echo "VERSION_SUFFIX=" >> $GITHUB_ENV
|
||||
- name: Extract version (if unstable)
|
||||
if: github.event.ref == 'refs/heads/unstable'
|
||||
run: |
|
||||
echo "VERSION=latest" >> $GITHUB_ENV
|
||||
echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
|
||||
- name: Extract version (if tagged release)
|
||||
if: startsWith(github.event.ref, 'refs/tags')
|
||||
run: |
|
||||
echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_ENV
|
||||
echo "VERSION_SUFFIX=" >> $GITHUB_ENV
|
||||
outputs:
|
||||
VERSION: ${{ env.VERSION }}
|
||||
VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
|
||||
build-docker-single-arch:
|
||||
name: build-docker-${{ matrix.binary }}
|
||||
runs-on: ubuntu-18.04
|
||||
strategy:
|
||||
matrix:
|
||||
binary: [aarch64,
|
||||
aarch64-portable,
|
||||
x86_64,
|
||||
x86_64-portable]
|
||||
needs: [extract-version]
|
||||
env:
|
||||
# We need to enable experimental docker features in order to use `docker buildx`
|
||||
DOCKER_CLI_EXPERIMENTAL: enabled
|
||||
VERSION: ${{ needs.extract-version.outputs.VERSION }}
|
||||
VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Update Rust
|
||||
run: rustup update stable
|
||||
- name: Dockerhub login
|
||||
run: |
|
||||
echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
|
||||
- name: Cross build Lighthouse binary
|
||||
run: |
|
||||
cargo install cross
|
||||
make build-${{ matrix.binary }}
|
||||
- name: Move cross-built binary into Docker scope (if ARM)
|
||||
if: startsWith(matrix.binary, 'aarch64')
|
||||
run: |
|
||||
mkdir ./bin;
|
||||
mv ./target/aarch64-unknown-linux-gnu/release/lighthouse ./bin;
|
||||
- name: Move cross-built binary into Docker scope (if x86_64)
|
||||
if: startsWith(matrix.binary, 'x86_64')
|
||||
run: |
|
||||
mkdir ./bin;
|
||||
mv ./target/x86_64-unknown-linux-gnu/release/lighthouse ./bin;
|
||||
- name: Map aarch64 to arm64 short arch
|
||||
if: startsWith(matrix.binary, 'aarch64')
|
||||
run: echo "SHORT_ARCH=arm64" >> $GITHUB_ENV
|
||||
- name: Map x86_64 to amd64 short arch
|
||||
if: startsWith(matrix.binary, 'x86_64')
|
||||
run: echo "SHORT_ARCH=amd64" >> $GITHUB_ENV;
|
||||
- name: Set modernity suffix
|
||||
if: endsWith(matrix.binary, '-portable') != true
|
||||
run: echo "MODERNITY_SUFFIX=-modern" >> $GITHUB_ENV;
|
||||
# Install dependencies for emulation. Have to create a new builder to pick up emulation support.
|
||||
- name: Build Dockerfile and push
|
||||
run: |
|
||||
docker run --privileged --rm tonistiigi/binfmt --install ${SHORT_ARCH}
|
||||
docker buildx create --use --name cross-builder
|
||||
docker buildx build \
|
||||
--platform=linux/${SHORT_ARCH} \
|
||||
--file ./Dockerfile.cross . \
|
||||
--tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX} \
|
||||
--push
|
||||
build-docker-multiarch:
|
||||
name: build-docker-multiarch${{ matrix.modernity }}
|
||||
runs-on: ubuntu-18.04
|
||||
needs: [build-docker-single-arch, extract-version]
|
||||
strategy:
|
||||
matrix:
|
||||
modernity: ["", "-modern"]
|
||||
env:
|
||||
# We need to enable experimental docker features in order to use `docker manifest`
|
||||
DOCKER_CLI_EXPERIMENTAL: enabled
|
||||
VERSION: ${{ needs.extract-version.outputs.VERSION }}
|
||||
VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
|
||||
steps:
|
||||
- name: Dockerhub login
|
||||
run: |
|
||||
echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
|
||||
- name: Create and push multiarch manifest
|
||||
run: |
|
||||
docker manifest create ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \
|
||||
--amend ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \
|
||||
--amend ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }};
|
||||
docker manifest push ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }}
|
||||
build-docker-lcli:
|
||||
runs-on: ubuntu-18.04
|
||||
needs: [extract-version]
|
||||
env:
|
||||
VERSION: ${{ needs.extract-version.outputs.VERSION }}
|
||||
VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Dockerhub login
|
||||
run: |
|
||||
echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
|
||||
- name: Build lcli dockerfile (with push)
|
||||
run: |
|
||||
docker build \
|
||||
--build-arg PORTABLE=true \
|
||||
--tag ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX} \
|
||||
--file ./lcli/Dockerfile .
|
||||
docker push ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}
|
||||
30
.github/workflows/linkcheck.yml
vendored
Normal file
30
.github/workflows/linkcheck.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: linkcheck
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- unstable
|
||||
pull_request:
|
||||
paths:
|
||||
- 'book/**'
|
||||
|
||||
jobs:
|
||||
linkcheck:
|
||||
name: Check broken links
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Create docker network
|
||||
run: docker network create book
|
||||
|
||||
- name: Run mdbook server
|
||||
run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:latest serve --hostname 0.0.0.0
|
||||
|
||||
- name: Print logs
|
||||
run: docker logs book
|
||||
|
||||
- name: Run linkcheck
|
||||
run: docker run --network book tennox/linkcheck:latest book:3000
|
||||
49
.github/workflows/local-testnet.yml
vendored
Normal file
49
.github/workflows/local-testnet.yml
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
# Test that local testnet starts successfully.
|
||||
name: local testnet
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- unstable
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
run-local-testnet:
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-18.04
|
||||
- macos-latest
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
|
||||
- name: Install ganache
|
||||
run: npm install ganache-cli@latest --global
|
||||
|
||||
# https://github.com/actions/cache/blob/main/examples.md#rust---cargo
|
||||
- uses: actions/cache@v2
|
||||
id: cache-cargo
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
target/
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: Install lighthouse
|
||||
run: make && make install-lcli
|
||||
|
||||
- name: Start local testnet
|
||||
run: ./start_local_testnet.sh
|
||||
working-directory: scripts/local_testnet
|
||||
|
||||
- name: Print logs
|
||||
run: ./print_logs.sh
|
||||
working-directory: scripts/local_testnet
|
||||
|
||||
- name: Stop local testnet
|
||||
run: ./stop_local_testnet.sh
|
||||
working-directory: scripts/local_testnet
|
||||
66
.github/workflows/publish-crate.yml
vendored
Normal file
66
.github/workflows/publish-crate.yml
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
name: Publish Crate
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- tree-hash-v*
|
||||
- tree-hash-derive-v*
|
||||
- eth2-ssz-v*
|
||||
- eth2-ssz-derive-v*
|
||||
- eth2-ssz-types-v*
|
||||
- eth2-serde-util-v*
|
||||
- eth2-hashing-v*
|
||||
|
||||
env:
|
||||
CARGO_API_TOKEN: ${{ secrets.CARGO_API_TOKEN }}
|
||||
|
||||
jobs:
|
||||
extract-tag:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Extract tag
|
||||
run: echo "::set-output name=TAG::$(echo ${GITHUB_REF#refs/tags/})"
|
||||
id: extract_tag
|
||||
outputs:
|
||||
TAG: ${{ steps.extract_tag.outputs.TAG }}
|
||||
|
||||
publish-crate:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [extract-tag]
|
||||
env:
|
||||
TAG: ${{ needs.extract-tag.outputs.TAG }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Update Rust
|
||||
run: rustup update stable
|
||||
- name: Cargo login
|
||||
run: |
|
||||
echo "${CARGO_API_TOKEN}" | cargo login
|
||||
- name: publish eth2 ssz derive
|
||||
if: startsWith(env.TAG, 'eth2-ssz-derive-v')
|
||||
run: |
|
||||
./scripts/ci/publish.sh consensus/ssz_derive eth2_ssz_derive "$TAG"
|
||||
- name: publish eth2 ssz
|
||||
if: startsWith(env.TAG, 'eth2-ssz-v')
|
||||
run: |
|
||||
./scripts/ci/publish.sh consensus/ssz eth2_ssz "$TAG"
|
||||
- name: publish eth2 hashing
|
||||
if: startsWith(env.TAG, 'eth2-hashing-v')
|
||||
run: |
|
||||
./scripts/ci/publish.sh crypto/eth2_hashing eth2_hashing "$TAG"
|
||||
- name: publish tree hash derive
|
||||
if: startsWith(env.TAG, 'tree-hash-derive-v')
|
||||
run: |
|
||||
./scripts/ci/publish.sh consensus/tree_hash_derive tree_hash_derive "$TAG"
|
||||
- name: publish tree hash
|
||||
if: startsWith(env.TAG, 'tree-hash-v')
|
||||
run: |
|
||||
./scripts/ci/publish.sh consensus/tree_hash tree_hash "$TAG"
|
||||
- name: publish ssz types
|
||||
if: startsWith(env.TAG, 'eth2-ssz-types-v')
|
||||
run: |
|
||||
./scripts/ci/publish.sh consensus/ssz_types eth2_ssz_types "$TAG"
|
||||
- name: publish serde util
|
||||
if: startsWith(env.TAG, 'eth2-serde-util-v')
|
||||
run: |
|
||||
./scripts/ci/publish.sh consensus/serde_utils eth2_serde_utils "$TAG"
|
||||
276
.github/workflows/release.yml
vendored
Normal file
276
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,276 @@
|
||||
name: Release Suite
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- v*
|
||||
|
||||
env:
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||
REPO_NAME: sigp/lighthouse
|
||||
IMAGE_NAME: sigp/lighthouse
|
||||
|
||||
jobs:
|
||||
extract-version:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Extract version
|
||||
run: echo "::set-output name=VERSION::$(echo ${GITHUB_REF#refs/tags/})"
|
||||
id: extract_version
|
||||
outputs:
|
||||
VERSION: ${{ steps.extract_version.outputs.VERSION }}
|
||||
build:
|
||||
name: Build Release
|
||||
strategy:
|
||||
matrix:
|
||||
arch: [aarch64-unknown-linux-gnu,
|
||||
aarch64-unknown-linux-gnu-portable,
|
||||
x86_64-unknown-linux-gnu,
|
||||
x86_64-unknown-linux-gnu-portable,
|
||||
x86_64-apple-darwin,
|
||||
x86_64-apple-darwin-portable,
|
||||
x86_64-windows,
|
||||
x86_64-windows-portable]
|
||||
include:
|
||||
- arch: aarch64-unknown-linux-gnu
|
||||
platform: ubuntu-latest
|
||||
- arch: aarch64-unknown-linux-gnu-portable
|
||||
platform: ubuntu-latest
|
||||
- arch: x86_64-unknown-linux-gnu
|
||||
platform: ubuntu-latest
|
||||
- arch: x86_64-unknown-linux-gnu-portable
|
||||
platform: ubuntu-latest
|
||||
- arch: x86_64-apple-darwin
|
||||
platform: macos-latest
|
||||
- arch: x86_64-apple-darwin-portable
|
||||
platform: macos-latest
|
||||
- arch: x86_64-windows
|
||||
platform: windows-2019
|
||||
- arch: x86_64-windows-portable
|
||||
platform: windows-2019
|
||||
|
||||
runs-on: ${{ matrix.platform }}
|
||||
needs: extract-version
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v2
|
||||
- name: Build toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
# ==============================
|
||||
# Windows dependencies
|
||||
# ==============================
|
||||
|
||||
- uses: KyleMayes/install-llvm-action@v1
|
||||
if: startsWith(matrix.arch, 'x86_64-windows')
|
||||
with:
|
||||
version: "13.0"
|
||||
directory: ${{ runner.temp }}/llvm
|
||||
- name: Set LIBCLANG_PATH
|
||||
if: startsWith(matrix.arch, 'x86_64-windows')
|
||||
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
|
||||
|
||||
# ==============================
|
||||
# Builds
|
||||
# ==============================
|
||||
|
||||
- name: Build Lighthouse for aarch64-unknown-linux-gnu-portable
|
||||
if: matrix.arch == 'aarch64-unknown-linux-gnu-portable'
|
||||
run: |
|
||||
cargo install cross
|
||||
make build-aarch64-portable
|
||||
|
||||
- name: Build Lighthouse for aarch64-unknown-linux-gnu
|
||||
if: matrix.arch == 'aarch64-unknown-linux-gnu'
|
||||
run: |
|
||||
cargo install cross
|
||||
make build-aarch64
|
||||
|
||||
- name: Build Lighthouse for x86_64-unknown-linux-gnu-portable
|
||||
if: matrix.arch == 'x86_64-unknown-linux-gnu-portable'
|
||||
run: |
|
||||
cargo install cross
|
||||
make build-x86_64-portable
|
||||
|
||||
- name: Build Lighthouse for x86_64-unknown-linux-gnu
|
||||
if: matrix.arch == 'x86_64-unknown-linux-gnu'
|
||||
run: |
|
||||
cargo install cross
|
||||
make build-x86_64
|
||||
|
||||
- name: Move cross-compiled binary
|
||||
if: startsWith(matrix.arch, 'aarch64')
|
||||
run: mv target/aarch64-unknown-linux-gnu/release/lighthouse ~/.cargo/bin/lighthouse
|
||||
|
||||
- name: Move cross-compiled binary
|
||||
if: startsWith(matrix.arch, 'x86_64-unknown-linux-gnu')
|
||||
run: mv target/x86_64-unknown-linux-gnu/release/lighthouse ~/.cargo/bin/lighthouse
|
||||
|
||||
- name: Build Lighthouse for x86_64-apple-darwin portable
|
||||
if: matrix.arch == 'x86_64-apple-darwin-portable'
|
||||
run: cargo install --path lighthouse --force --locked --features portable,gnosis
|
||||
|
||||
- name: Build Lighthouse for x86_64-apple-darwin modern
|
||||
if: matrix.arch == 'x86_64-apple-darwin'
|
||||
run: cargo install --path lighthouse --force --locked --features modern,gnosis
|
||||
|
||||
- name: Build Lighthouse for Windows portable
|
||||
if: matrix.arch == 'x86_64-windows-portable'
|
||||
run: cargo install --path lighthouse --force --locked --features portable,gnosis
|
||||
|
||||
- name: Build Lighthouse for Windows modern
|
||||
if: matrix.arch == 'x86_64-windows'
|
||||
run: cargo install --path lighthouse --force --locked --features modern,gnosis
|
||||
|
||||
- name: Configure GPG and create artifacts
|
||||
if: startsWith(matrix.arch, 'x86_64-windows') != true
|
||||
env:
|
||||
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
|
||||
GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
|
||||
run: |
|
||||
export GPG_TTY=$(tty)
|
||||
echo "$GPG_SIGNING_KEY" | gpg --batch --import
|
||||
mkdir artifacts
|
||||
mv ~/.cargo/bin/lighthouse ./artifacts
|
||||
cd artifacts
|
||||
tar -czf lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz lighthouse
|
||||
echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
|
||||
mv *tar.gz* ..
|
||||
|
||||
- name: Configure GPG and create artifacts Windows
|
||||
if: startsWith(matrix.arch, 'x86_64-windows')
|
||||
env:
|
||||
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
|
||||
GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
|
||||
run: |
|
||||
echo $env:GPG_SIGNING_KEY | gpg --batch --import
|
||||
mkdir artifacts
|
||||
move $env:USERPROFILE/.cargo/bin/lighthouse.exe ./artifacts
|
||||
cd artifacts
|
||||
tar -czf lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz lighthouse.exe
|
||||
gpg --passphrase "$env:GPG_PASSPHRASE" --batch --pinentry-mode loopback -ab lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
|
||||
move *tar.gz* ..
|
||||
|
||||
# =======================================================================
|
||||
# Upload artifacts
|
||||
# This is required to share artifacts between different jobs
|
||||
# =======================================================================
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
|
||||
path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
|
||||
|
||||
- name: Upload signature
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
|
||||
path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
|
||||
|
||||
draft-release:
|
||||
name: Draft Release
|
||||
needs: [build, extract-version]
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
VERSION: ${{ needs.extract-version.outputs.VERSION }}
|
||||
steps:
|
||||
# This is necessary for generating the changelog. It has to come before "Download Artifacts" or else it deletes the artifacts.
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
# ==============================
|
||||
# Download artifacts
|
||||
# ==============================
|
||||
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
|
||||
# ==============================
|
||||
# Create release draft
|
||||
# ==============================
|
||||
|
||||
- name: Generate Full Changelog
|
||||
id: changelog
|
||||
run: echo "::set-output name=CHANGELOG::$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})"
|
||||
|
||||
- name: Create Release Draft
|
||||
env:
|
||||
GITHUB_USER: sigp
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
# The formatting here is borrowed from OpenEthereum: https://github.com/openethereum/openethereum/blob/main/.github/workflows/build.yml
|
||||
run: |
|
||||
body=$(cat <<- "ENDBODY"
|
||||
<Rick and Morty character>
|
||||
|
||||
## Testing Checklist (DELETE ME)
|
||||
|
||||
- [ ] Run on synced Prater Sigma Prime nodes.
|
||||
- [ ] Run on synced Canary (mainnet) Sigma Prime nodes.
|
||||
- [ ] Resync a Prater node.
|
||||
- [ ] Resync a mainnet node.
|
||||
|
||||
## Release Checklist (DELETE ME)
|
||||
|
||||
- [ ] Merge `unstable` -> `stable`.
|
||||
- [ ] Ensure docker images are published (check `latest` and the version tag).
|
||||
- [ ] Prepare Discord post.
|
||||
- [ ] Prepare Twitter post.
|
||||
- [ ] Prepare mailing list email.
|
||||
|
||||
## Summary
|
||||
|
||||
Add a summary.
|
||||
|
||||
## Update Priority
|
||||
|
||||
This table provides priorities for which classes of users should update particular components.
|
||||
|
||||
|User Class |Beacon Node | Validator Client|
|
||||
--- | --- | ---
|
||||
|Staking Users| <TODO> | <TODO> |
|
||||
|Non-Staking Users| <TODO>|---|
|
||||
|
||||
*See [Update
|
||||
Priorities](https://lighthouse-book.sigmaprime.io/installation-priorities.html)
|
||||
more information about this table.*
|
||||
|
||||
## All Changes
|
||||
|
||||
${{ steps.changelog.outputs.CHANGELOG }}
|
||||
|
||||
## Binaries
|
||||
|
||||
[See pre-built binaries documentation.](https://lighthouse-book.sigmaprime.io/installation-binaries.html)
|
||||
|
||||
The binaries are signed with Sigma Prime's PGP key: `15E66D941F697E28F49381F426416DC3F30674B0`
|
||||
|
||||
| System | Architecture | Binary | PGP Signature |
|
||||
|:---:|:---:|:---:|:---|
|
||||
| <img src="https://simpleicons.org/icons/apple.svg" style="width: 32px;"/> | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz.asc) |
|
||||
| <img src="https://simpleicons.org/icons/apple.svg" style="width: 32px;"/> | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-apple-darwin-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-apple-darwin-portable.tar.gz.asc) |
|
||||
| <img src="https://simpleicons.org/icons/linux.svg" style="width: 32px;"/> | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz.asc) |
|
||||
| <img src="https://simpleicons.org/icons/linux.svg" style="width: 32px;"/> | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz.asc) |
|
||||
| <img src="https://simpleicons.org/icons/raspberrypi.svg" style="width: 32px;"/> | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz.asc) |
|
||||
| <img src="https://simpleicons.org/icons/raspberrypi.svg" style="width: 32px;"/> | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz.asc) |
|
||||
| <img src="https://simpleicons.org/icons/windows.svg" style="width: 32px;"/> | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz.asc) |
|
||||
| <img src="https://simpleicons.org/icons/windows.svg" style="width: 32px;"/> | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz.asc) |
|
||||
| | | | |
|
||||
| **System** | **Option** | - | **Resource** |
|
||||
| <img src="https://simpleicons.org/icons/docker.svg" style="width: 32px;"/> | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}) |
|
||||
ENDBODY
|
||||
)
|
||||
assets=()
|
||||
for asset in ./lighthouse-*.tar.gz*; do
|
||||
assets+=("-a" "$asset/$asset")
|
||||
done
|
||||
tag_name="${{ env.VERSION }}"
|
||||
echo "$body" | hub release create --draft "${assets[@]}" -F "-" "$tag_name"
|
||||
144
.github/workflows/test-suite.yml
vendored
144
.github/workflows/test-suite.yml
vendored
@@ -3,7 +3,7 @@ name: test-suite
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- stable
|
||||
- staging
|
||||
- trying
|
||||
- 'pr/*'
|
||||
@@ -11,7 +11,16 @@ on:
|
||||
env:
|
||||
# Deny warnings in CI
|
||||
RUSTFLAGS: "-D warnings"
|
||||
# The Nightly version used for cargo-udeps, might need updating from time to time.
|
||||
PINNED_NIGHTLY: nightly-2021-12-01
|
||||
jobs:
|
||||
target-branch-check:
|
||||
name: target-branch-check
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name == 'pull_request'
|
||||
steps:
|
||||
- name: Check that the pull request is not targeting the stable branch
|
||||
run: test ${{ github.base_ref }} != "stable"
|
||||
cargo-fmt:
|
||||
name: cargo-fmt
|
||||
runs-on: ubuntu-latest
|
||||
@@ -33,20 +42,46 @@ jobs:
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Run tests in release
|
||||
run: make test-release
|
||||
release-tests-and-install-macos:
|
||||
name: release-tests-and-install-macos
|
||||
runs-on: macos-latest
|
||||
release-tests-windows:
|
||||
name: release-tests-windows
|
||||
runs-on: windows-2019
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Install ganache-cli
|
||||
run: sudo npm install -g ganache-cli
|
||||
run: npm install -g ganache-cli
|
||||
- name: Install make
|
||||
run: choco install -y make
|
||||
- uses: KyleMayes/install-llvm-action@v1
|
||||
with:
|
||||
version: "13.0"
|
||||
directory: ${{ runner.temp }}/llvm
|
||||
- name: Set LIBCLANG_PATH
|
||||
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
|
||||
- name: Run tests in release
|
||||
run: make test-release
|
||||
- name: Install Lighthouse
|
||||
run: make
|
||||
beacon-chain-tests:
|
||||
name: beacon-chain-tests
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Run beacon_chain tests for all known forks
|
||||
run: make test-beacon-chain
|
||||
op-pool-tests:
|
||||
name: op-pool-tests
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Run operation_pool tests for all known forks
|
||||
run: make test-op-pool
|
||||
debug-tests-ubuntu:
|
||||
name: debug-tests-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
@@ -77,7 +112,7 @@ jobs:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Run eth2.0-spec-tests with blst, milagro and fake_crypto
|
||||
- name: Run consensus-spec-tests with blst, milagro and fake_crypto
|
||||
run: make test-ef
|
||||
dockerfile-ubuntu:
|
||||
name: dockerfile-ubuntu
|
||||
@@ -85,14 +120,20 @@ jobs:
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Build the root Dockerfile
|
||||
run: docker build .
|
||||
run: docker build --build-arg FEATURES=portable -t lighthouse:local .
|
||||
- name: Test the built image
|
||||
run: docker run -t lighthouse:local lighthouse --version
|
||||
eth1-simulator-ubuntu:
|
||||
name: eth1-simulator-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Install ganache-cli
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Run the beacon chain sim that starts from an eth1 contract
|
||||
@@ -103,32 +144,99 @@ jobs:
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Install ganache-cli
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Run the beacon chain sim without an eth1 connection
|
||||
run: cargo run --release --bin simulator no-eth1-sim
|
||||
syncing-simulator-ubuntu:
|
||||
name: syncing-simulator-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Install ganache-cli
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Run the syncing simulator
|
||||
run: cargo run --release --bin simulator syncing-sim
|
||||
doppelganger-protection-test:
|
||||
name: doppelganger-protection-test
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Install ganache-cli
|
||||
run: sudo npm install -g ganache-cli
|
||||
- name: Install lighthouse and lcli
|
||||
run: |
|
||||
make
|
||||
make install-lcli
|
||||
- name: Run the doppelganger protection success test script
|
||||
run: |
|
||||
cd scripts/tests
|
||||
./doppelganger_protection.sh success
|
||||
- name: Run the doppelganger protection failure test script
|
||||
run: |
|
||||
cd scripts/tests
|
||||
./doppelganger_protection.sh failure
|
||||
execution-engine-integration-ubuntu:
|
||||
name: execution-engine-integration-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.17'
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Run exec engine integration tests in release
|
||||
run: make test-exec-engine
|
||||
check-benchmarks:
|
||||
name: check-benchmarks
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Typecheck benchmark code without running it
|
||||
run: make check-benches
|
||||
check-consensus:
|
||||
name: check-consensus
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Typecheck consensus code in strict mode
|
||||
run: make check-consensus
|
||||
clippy:
|
||||
name: clippy
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Lint code for quality and style with Clippy
|
||||
run: make lint
|
||||
- name: Certify Cargo.lock freshness
|
||||
run: git diff --exit-code Cargo.lock
|
||||
arbitrary-check:
|
||||
name: arbitrary-check
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Validate state_processing feature arbitrary-fuzz
|
||||
run: make arbitrary-fuzz
|
||||
cargo-audit:
|
||||
@@ -137,18 +245,32 @@ jobs:
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
- name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database
|
||||
run: make audit
|
||||
cargo-vendor:
|
||||
name: cargo-vendor
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose
|
||||
run: CARGO_HOME=$(readlink -f $HOME) make vendor
|
||||
cargo-udeps:
|
||||
name: cargo-udeps
|
||||
runs-on: ubuntu-latest
|
||||
needs: cargo-fmt
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Install a nightly compiler with rustfmt, as a kind of quality control
|
||||
run: rustup toolchain install --component=rustfmt nightly
|
||||
- name: Install Rust (${{ env.PINNED_NIGHTLY }})
|
||||
run: rustup toolchain install $PINNED_NIGHTLY
|
||||
- name: Install cargo-udeps
|
||||
run: cargo install cargo-udeps --locked
|
||||
- name: Create Cargo config dir
|
||||
run: mkdir -p .cargo
|
||||
- name: Install custom Cargo config
|
||||
run: cp -f .github/custom/config.toml .cargo/config.toml
|
||||
- name: Run cargo udeps to identify unused crates in the dependency graph
|
||||
run: make udeps
|
||||
env:
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -6,3 +6,5 @@ target/
|
||||
flamegraph.svg
|
||||
perf.data*
|
||||
*.tar.gz
|
||||
/bin
|
||||
genesis.ssz
|
||||
|
||||
6624
Cargo.lock
generated
6624
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
42
Cargo.toml
42
Cargo.toml
@@ -6,12 +6,13 @@ members = [
|
||||
"beacon_node/beacon_chain",
|
||||
"beacon_node/client",
|
||||
"beacon_node/eth1",
|
||||
"beacon_node/eth2_libp2p",
|
||||
"beacon_node/lighthouse_network",
|
||||
"beacon_node/execution_layer",
|
||||
"beacon_node/http_api",
|
||||
"beacon_node/http_metrics",
|
||||
"beacon_node/network",
|
||||
"beacon_node/rest_api",
|
||||
"beacon_node/store",
|
||||
"beacon_node/timer",
|
||||
"beacon_node/websocket_server",
|
||||
|
||||
"boot_node",
|
||||
|
||||
@@ -20,19 +21,29 @@ members = [
|
||||
"common/compare_fields",
|
||||
"common/compare_fields_derive",
|
||||
"common/deposit_contract",
|
||||
"common/directory",
|
||||
"common/eth2",
|
||||
"common/eth2_config",
|
||||
"common/eth2_interop_keypairs",
|
||||
"common/eth2_testnet_config",
|
||||
"common/eth2_network_config",
|
||||
"common/eth2_wallet_manager",
|
||||
"common/hashset_delay",
|
||||
"common/lighthouse_metrics",
|
||||
"common/lighthouse_version",
|
||||
"common/lockfile",
|
||||
"common/logging",
|
||||
"common/remote_beacon_node",
|
||||
"common/rest_types",
|
||||
"common/lru_cache",
|
||||
"common/malloc_utils",
|
||||
"common/sensitive_url",
|
||||
"common/slot_clock",
|
||||
"common/task_executor",
|
||||
"common/target_check",
|
||||
"common/test_random_derive",
|
||||
"common/unused_port",
|
||||
"common/validator_dir",
|
||||
"common/warp_utils",
|
||||
"common/fallback",
|
||||
"common/monitoring_api",
|
||||
|
||||
"consensus/cached_tree_hash",
|
||||
"consensus/int_to_bytes",
|
||||
@@ -42,7 +53,7 @@ members = [
|
||||
"consensus/ssz",
|
||||
"consensus/ssz_derive",
|
||||
"consensus/ssz_types",
|
||||
"consensus/serde_hex",
|
||||
"consensus/serde_utils",
|
||||
"consensus/state_processing",
|
||||
"consensus/swap_or_not_shuffle",
|
||||
"consensus/tree_hash",
|
||||
@@ -59,11 +70,17 @@ members = [
|
||||
"lighthouse",
|
||||
"lighthouse/environment",
|
||||
|
||||
"testing/simulator",
|
||||
"slasher",
|
||||
"slasher/service",
|
||||
|
||||
"testing/ef_tests",
|
||||
"testing/eth1_test_rig",
|
||||
"testing/execution_engine_integration",
|
||||
"testing/node_test_rig",
|
||||
"testing/simulator",
|
||||
"testing/test-test_logger",
|
||||
"testing/state_transition_vectors",
|
||||
"testing/web3signer_tests",
|
||||
|
||||
"validator_client",
|
||||
"validator_client/slashing_protection",
|
||||
@@ -71,10 +88,9 @@ members = [
|
||||
|
||||
[patch]
|
||||
[patch.crates-io]
|
||||
tree_hash = { path = "consensus/tree_hash" }
|
||||
tree_hash_derive = { path = "consensus/tree_hash_derive" }
|
||||
fixed-hash = { git = "https://github.com/paritytech/parity-common", rev="df638ab0885293d21d656dc300d39236b69ce57d" }
|
||||
warp = { git = "https://github.com/macladson/warp", rev ="7e75acc" }
|
||||
eth2_ssz = { path = "consensus/ssz" }
|
||||
eth2_ssz_derive = { path = "consensus/ssz_derive" }
|
||||
eth2_ssz_types = { path = "consensus/ssz_types" }
|
||||
eth2_hashing = { path = "crypto/eth2_hashing" }
|
||||
leveldb-sys = { git = "https://github.com/michaelsproul/leveldb-sys", branch = "v2.0.6-cmake" }
|
||||
tree_hash = { path = "consensus/tree_hash" }
|
||||
eth2_serde_utils = { path = "consensus/serde_utils" }
|
||||
|
||||
15
Cross.toml
Normal file
15
Cross.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[build.env]
|
||||
passthrough = [
|
||||
"RUSTFLAGS",
|
||||
]
|
||||
|
||||
# These custom images are required to work around the lack of Clang in the default `cross` images.
|
||||
# We need Clang to run `bindgen` for MDBX, and the `BINDGEN_EXTRA_CLANG_ARGS` flags must also be set
|
||||
# while cross-compiling for ARM to prevent bindgen from attempting to include headers from the host.
|
||||
#
|
||||
# For more information see https://github.com/rust-embedded/cross/pull/608
|
||||
[target.x86_64-unknown-linux-gnu]
|
||||
image = "michaelsproul/cross-clang:x86_64-latest"
|
||||
|
||||
[target.aarch64-unknown-linux-gnu]
|
||||
image = "michaelsproul/cross-clang:aarch64-latest"
|
||||
14
Dockerfile
14
Dockerfile
@@ -1,16 +1,14 @@
|
||||
FROM rust:1.45.1 AS builder
|
||||
RUN apt-get update && apt-get install -y cmake
|
||||
FROM rust:1.58.1-bullseye AS builder
|
||||
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
|
||||
COPY . lighthouse
|
||||
ARG PORTABLE
|
||||
ENV PORTABLE $PORTABLE
|
||||
ARG FEATURES
|
||||
ENV FEATURES $FEATURES
|
||||
RUN cd lighthouse && make
|
||||
RUN cd lighthouse && make install-lcli
|
||||
|
||||
FROM debian:buster-slim
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
FROM ubuntu:latest
|
||||
RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \
|
||||
libssl-dev \
|
||||
ca-certificates \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse
|
||||
COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli
|
||||
|
||||
10
Dockerfile.cross
Normal file
10
Dockerfile.cross
Normal file
@@ -0,0 +1,10 @@
|
||||
# This image is meant to enable cross-architecture builds.
|
||||
# It assumes the lighthouse binary has already been
|
||||
# compiled for `$TARGETPLATFORM` and moved to `./bin`.
|
||||
FROM --platform=$TARGETPLATFORM ubuntu:latest
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libssl-dev \
|
||||
ca-certificates \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
COPY ./bin/lighthouse /usr/local/bin/lighthouse
|
||||
130
Makefile
130
Makefile
@@ -2,34 +2,86 @@
|
||||
|
||||
EF_TESTS = "testing/ef_tests"
|
||||
STATE_TRANSITION_VECTORS = "testing/state_transition_vectors"
|
||||
EXECUTION_ENGINE_INTEGRATION = "testing/execution_engine_integration"
|
||||
GIT_TAG := $(shell git describe --tags --candidates 1)
|
||||
BIN_DIR = "bin"
|
||||
|
||||
X86_64_TAG = "x86_64-unknown-linux-gnu"
|
||||
BUILD_PATH_X86_64 = "target/$(X86_64_TAG)/release"
|
||||
AARCH64_TAG = "aarch64-unknown-linux-gnu"
|
||||
BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release"
|
||||
|
||||
PINNED_NIGHTLY ?= nightly
|
||||
|
||||
# List of all hard forks. This list is used to set env variables for several tests so that
|
||||
# they run for different forks.
|
||||
FORKS=phase0 altair merge
|
||||
|
||||
# Builds the Lighthouse binary in release (optimized).
|
||||
#
|
||||
# Binaries will most likely be found in `./target/release`
|
||||
install:
|
||||
ifeq ($(PORTABLE), true)
|
||||
cargo install --path lighthouse --force --locked --features portable
|
||||
else
|
||||
cargo install --path lighthouse --force --locked
|
||||
endif
|
||||
cargo install --path lighthouse --force --locked --features "$(FEATURES)"
|
||||
|
||||
# Builds the lcli binary in release (optimized).
|
||||
install-lcli:
|
||||
ifeq ($(PORTABLE), true)
|
||||
cargo install --path lcli --force --locked --features portable
|
||||
else
|
||||
cargo install --path lcli --force --locked
|
||||
endif
|
||||
cargo install --path lcli --force --locked --features "$(FEATURES)"
|
||||
|
||||
# The following commands use `cross` to build a cross-compile.
|
||||
#
|
||||
# These commands require that:
|
||||
#
|
||||
# - `cross` is installed (`cargo install cross`).
|
||||
# - Docker is running.
|
||||
# - The current user is in the `docker` group.
|
||||
#
|
||||
# The resulting binaries will be created in the `target/` directory.
|
||||
#
|
||||
# The *-portable options compile the blst library *without* the use of some
|
||||
# optimized CPU functions that may not be available on some systems. This
|
||||
# results in a more portable binary with ~20% slower BLS verification.
|
||||
build-x86_64:
|
||||
cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features modern,gnosis
|
||||
build-x86_64-portable:
|
||||
cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features portable,gnosis
|
||||
build-aarch64:
|
||||
cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features gnosis
|
||||
build-aarch64-portable:
|
||||
cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features portable,gnosis
|
||||
|
||||
# Create a `.tar.gz` containing a binary for a specific target.
|
||||
define tarball_release_binary
|
||||
cp $(1)/lighthouse $(BIN_DIR)/lighthouse
|
||||
cd $(BIN_DIR) && \
|
||||
tar -czf lighthouse-$(GIT_TAG)-$(2)$(3).tar.gz lighthouse && \
|
||||
rm lighthouse
|
||||
endef
|
||||
|
||||
# Create a series of `.tar.gz` files in the BIN_DIR directory, each containing
|
||||
# a `lighthouse` binary for a different target.
|
||||
#
|
||||
# The current git tag will be used as the version in the output file names. You
|
||||
# will likely need to use `git tag` and create a semver tag (e.g., `v0.2.3`).
|
||||
build-release-tarballs:
|
||||
[ -d $(BIN_DIR) ] || mkdir -p $(BIN_DIR)
|
||||
$(MAKE) build-x86_64
|
||||
$(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"")
|
||||
$(MAKE) build-x86_64-portable
|
||||
$(call tarball_release_binary,$(BUILD_PATH_X86_64),$(X86_64_TAG),"-portable")
|
||||
$(MAKE) build-aarch64
|
||||
$(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"")
|
||||
$(MAKE) build-aarch64-portable
|
||||
$(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"-portable")
|
||||
|
||||
# Runs the full workspace tests in **release**, without downloading any additional
|
||||
# test vectors.
|
||||
test-release:
|
||||
cargo test --all --release --exclude ef_tests
|
||||
cargo test --workspace --release --exclude ef_tests --exclude beacon_chain
|
||||
|
||||
# Runs the full workspace tests in **debug**, without downloading any additional test
|
||||
# vectors.
|
||||
test-debug:
|
||||
cargo test --all --exclude ef_tests
|
||||
cargo test --workspace --exclude ef_tests --exclude beacon_chain
|
||||
|
||||
# Runs cargo-fmt (linter).
|
||||
cargo-fmt:
|
||||
@@ -37,13 +89,33 @@ cargo-fmt:
|
||||
|
||||
# Typechecks benchmark code
|
||||
check-benches:
|
||||
cargo check --all --benches
|
||||
cargo check --workspace --benches
|
||||
|
||||
# Typechecks consensus code *without* allowing deprecated legacy arithmetic or metrics.
|
||||
check-consensus:
|
||||
cargo check -p state_processing --no-default-features
|
||||
|
||||
# Runs only the ef-test vectors.
|
||||
run-ef-tests:
|
||||
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests"
|
||||
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,fake_crypto"
|
||||
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,milagro"
|
||||
rm -rf $(EF_TESTS)/.accessed_file_log.txt
|
||||
cargo test --release -p ef_tests --features "ef_tests"
|
||||
cargo test --release -p ef_tests --features "ef_tests,fake_crypto"
|
||||
cargo test --release -p ef_tests --features "ef_tests,milagro"
|
||||
./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests
|
||||
|
||||
# Run the tests in the `beacon_chain` crate for all known forks.
|
||||
test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS))
|
||||
|
||||
test-beacon-chain-%:
|
||||
env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain
|
||||
|
||||
# Run the tests in the `operation_pool` crate for all known forks.
|
||||
test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS))
|
||||
|
||||
test-op-pool-%:
|
||||
env FORK_NAME=$* cargo test --release \
|
||||
--features 'beacon_chain/fork_from_env'\
|
||||
-p operation_pool
|
||||
|
||||
# Runs only the tests/state_transition_vectors tests.
|
||||
run-state-transition-tests:
|
||||
@@ -52,17 +124,26 @@ run-state-transition-tests:
|
||||
# Downloads and runs the EF test vectors.
|
||||
test-ef: make-ef-tests run-ef-tests
|
||||
|
||||
# Runs tests checking interop between Lighthouse and execution clients.
|
||||
test-exec-engine:
|
||||
make -C $(EXECUTION_ENGINE_INTEGRATION) test
|
||||
|
||||
# Runs the full workspace tests in release, without downloading any additional
|
||||
# test vectors.
|
||||
test: test-release
|
||||
|
||||
# Runs the entire test suite, downloading test vectors if required.
|
||||
test-full: cargo-fmt test-release test-debug test-ef
|
||||
test-full: cargo-fmt test-release test-debug test-ef test-exec-engine
|
||||
|
||||
# Lints the code for bad style and potentially unsafe arithmetic using Clippy.
|
||||
# Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
|
||||
lint:
|
||||
cargo clippy --all -- -D warnings
|
||||
cargo clippy --workspace --tests -- \
|
||||
-D clippy::fn_to_numeric_cast_any \
|
||||
-D warnings \
|
||||
-A clippy::from-over-into \
|
||||
-A clippy::upper-case-acronyms \
|
||||
-A clippy::vec-init-then-push
|
||||
|
||||
# Runs the makefile in the `ef_tests` repo.
|
||||
#
|
||||
@@ -72,18 +153,23 @@ lint:
|
||||
make-ef-tests:
|
||||
make -C $(EF_TESTS)
|
||||
|
||||
# Verifies that state_processing feature arbitrary-fuzz will compile
|
||||
# Verifies that crates compile with fuzzing features enabled
|
||||
arbitrary-fuzz:
|
||||
cargo check --manifest-path=consensus/state_processing/Cargo.toml --features arbitrary-fuzz
|
||||
cargo check -p state_processing --features arbitrary-fuzz
|
||||
cargo check -p slashing_protection --features arbitrary-fuzz
|
||||
|
||||
# Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
|
||||
audit:
|
||||
cargo install --force cargo-audit
|
||||
cargo audit
|
||||
cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159
|
||||
|
||||
# Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
|
||||
vendor:
|
||||
cargo vendor
|
||||
|
||||
# Runs `cargo udeps` to check for unused dependencies
|
||||
udeps:
|
||||
cargo +nightly udeps --tests --all-targets --release
|
||||
cargo +$(PINNED_NIGHTLY) udeps --tests --all-targets --release
|
||||
|
||||
# Performs a `cargo` clean and cleans the `ef_tests` directory.
|
||||
clean:
|
||||
|
||||
87
README.md
87
README.md
@@ -1,87 +1,78 @@
|
||||
# Lighthouse: Ethereum 2.0
|
||||
# Lighthouse: Ethereum consensus client
|
||||
|
||||
An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prime.
|
||||
An open-source Ethereum consensus client, written in Rust and maintained by Sigma Prime.
|
||||
|
||||
[![Build Status]][Build Link] [![Book Status]][Book Link] [![RustDoc Status]][RustDoc Link] [![Chat Badge]][Chat Link]
|
||||
[![Build Status]][Build Link] [![Book Status]][Book Link] [![Chat Badge]][Chat Link]
|
||||
|
||||
[Build Status]: https://github.com/sigp/lighthouse/workflows/test-suite/badge.svg?branch=master
|
||||
[Build Status]: https://github.com/sigp/lighthouse/workflows/test-suite/badge.svg?branch=stable
|
||||
[Build Link]: https://github.com/sigp/lighthouse/actions
|
||||
[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da
|
||||
[Chat Link]: https://discord.gg/cyAszAh
|
||||
[Book Status]:https://img.shields.io/badge/user--docs-master-informational
|
||||
[Book Link]: http://lighthouse-book.sigmaprime.io/
|
||||
[RustDoc Status]:https://img.shields.io/badge/code--docs-master-orange
|
||||
[RustDoc Link]: http://lighthouse-docs.sigmaprime.io/
|
||||
[Book Status]:https://img.shields.io/badge/user--docs-unstable-informational
|
||||
[Book Link]: https://lighthouse-book.sigmaprime.io
|
||||
[stable]: https://github.com/sigp/lighthouse/tree/stable
|
||||
[unstable]: https://github.com/sigp/lighthouse/tree/unstable
|
||||
[blog]: https://lighthouse.sigmaprime.io
|
||||
|
||||
[Documentation](http://lighthouse-book.sigmaprime.io/)
|
||||
[Documentation](https://lighthouse-book.sigmaprime.io)
|
||||
|
||||

|
||||

|
||||
|
||||
## Overview
|
||||
|
||||
Lighthouse is:
|
||||
|
||||
- Ready for use on Ethereum consensus mainnet.
|
||||
- Fully open-source, licensed under Apache 2.0.
|
||||
- Security-focused. Fuzzing has begun and security reviews are underway.
|
||||
- Built in [Rust](https://www.rust-lang.org/), a modern language providing unique safety guarantees and
|
||||
- Security-focused. Fuzzing techniques have been continuously applied and several external security reviews have been performed.
|
||||
- Built in [Rust](https://www.rust-lang.org), a modern language providing unique safety guarantees and
|
||||
excellent performance (comparable to C++).
|
||||
- Funded by various organisations, including Sigma Prime, the
|
||||
Ethereum Foundation, ConsenSys and private individuals.
|
||||
- Actively involved in the specification and security analysis of the emerging
|
||||
Ethereum 2.0 specification.
|
||||
Ethereum Foundation, ConsenSys, the Decentralization Foundation and private individuals.
|
||||
- Actively involved in the specification and security analysis of the
|
||||
Ethereum proof-of-stake consensus specification.
|
||||
|
||||
Like all Ethereum 2.0 clients, Lighthouse is a work-in-progress.
|
||||
|
||||
## Development Status
|
||||
|
||||
Current development overview:
|
||||
|
||||
- Specification `v0.12.1` implemented, optimized and passing test vectors.
|
||||
- Rust-native libp2p with Gossipsub and Discv5.
|
||||
- RESTful JSON API via HTTP server.
|
||||
- Events via WebSocket.
|
||||
- Metrics via Prometheus.
|
||||
|
||||
### Roadmap
|
||||
|
||||
- ~~**April 2019**: Inital single-client testnets.~~
|
||||
- ~~**September 2019**: Inter-operability with other Ethereum 2.0 clients.~~
|
||||
- ~~**Q1 2020**: `lighthouse-0.1.0` release: All major phase 0 features implemented.~~
|
||||
- ~~**Q2 2020**: Public, multi-client testnet with user-facing functionality.~~
|
||||
- ~~**Q2 2020**: Third-party security review.~~
|
||||
- **Q3 2020**: Additional third-party security reviews.
|
||||
- **Q3 2020**: Long-lived, multi-client Beacon Chain testnet
|
||||
- **Q4 2020**: Production Beacon Chain (tentative).
|
||||
## Staking Deposit Contract
|
||||
|
||||
The Lighthouse team acknowledges
|
||||
[`0x00000000219ab540356cBB839Cbe05303d7705Fa`](https://etherscan.io/address/0x00000000219ab540356cbb839cbe05303d7705fa)
|
||||
as the canonical staking deposit contract address.
|
||||
|
||||
## Documentation
|
||||
|
||||
The [Lighthouse Book](http://lighthouse-book.sigmaprime.io/) contains information
|
||||
for testnet users and developers.
|
||||
The [Lighthouse Book](https://lighthouse-book.sigmaprime.io) contains information for users and
|
||||
developers.
|
||||
|
||||
Code documentation is generated via `cargo doc` and hosted at
|
||||
[lighthouse-docs.sigmaprime.io](http://lighthouse-docs.sigmaprime.io/).
|
||||
The Lighthouse team maintains a blog at [lighthouse.sigmaprime.io][blog] which contains periodical
|
||||
progress updates, roadmap insights and interesting findings.
|
||||
|
||||
If you'd like some background on Sigma Prime, please see the [Lighthouse Update
|
||||
\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or
|
||||
[sigmaprime.io](https://sigmaprime.io).
|
||||
## Branches
|
||||
|
||||
Lighthouse maintains two permanent branches:
|
||||
|
||||
- [`stable`][stable]: Always points to the latest stable release.
|
||||
- This is ideal for most users.
|
||||
- [`unstable`][unstable]: Used for development, contains the latest PRs.
|
||||
- Developers should base their PRs on this branch.
|
||||
|
||||
## Contributing
|
||||
|
||||
Lighthouse welcomes contributors.
|
||||
|
||||
If you are looking to contribute, please head to the
|
||||
[Contributing](http://lighthouse-book.sigmaprime.io/contributing.html) section
|
||||
[Contributing](https://lighthouse-book.sigmaprime.io/contributing.html) section
|
||||
of the Lighthouse book.
|
||||
|
||||
## Contact
|
||||
|
||||
The best place for discussion is the [Lighthouse Discord
|
||||
server](https://discord.gg/cyAszAh). Alternatively, you may use the
|
||||
[sigp/lighthouse gitter](https://gitter.im/sigp/lighthouse).
|
||||
server](https://discord.gg/cyAszAh).
|
||||
|
||||
Sign up to the [Lighthouse Development Updates](http://eepurl.com/dh9Lvb) mailing list for email
|
||||
notifications about releases, network status and other important information.
|
||||
|
||||
Encrypt sensitive messages using our [PGP
|
||||
key](https://keybase.io/sigp/pgp_keys.asc?fingerprint=dcf37e025d6c9d42ea795b119e7c6cf9988604be).
|
||||
key](https://keybase.io/sigp/pgp_keys.asc?fingerprint=15e66d941f697e28f49381f426416dc3f30674b0).
|
||||
|
||||
## Donations
|
||||
|
||||
|
||||
13
SECURITY.md
Normal file
13
SECURITY.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Please see [Releases](https://github.com/sigp/lighthouse/releases/). We recommend using the [most recently released version](https://github.com/sigp/lighthouse/releases/latest).
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Please send vulnerability reports to security@sigmaprime.io and encrypt sensitive messages using our [PGP
|
||||
key](https://keybase.io/sigp/pgp_keys.asc?fingerprint=15e66d941f697e28f49381f426416dc3f30674b0).
|
||||
|
||||
**Please do not file a public ticket** mentioning the vulnerability, as doing so could increase the likelihood of the vulnerability being used before a fix has been created, released and installed on the network.
|
||||
|
||||
@@ -1,33 +1,29 @@
|
||||
[package]
|
||||
name = "account_manager"
|
||||
version = "0.2.0"
|
||||
version = "0.3.5"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
bls = { path = "../crypto/bls" }
|
||||
clap = "2.33.0"
|
||||
slog = "2.5.2"
|
||||
slog-term = "2.5.0"
|
||||
slog-async = "2.5.0"
|
||||
clap = "2.33.3"
|
||||
types = { path = "../consensus/types" }
|
||||
state_processing = { path = "../consensus/state_processing" }
|
||||
dirs = "2.0.2"
|
||||
environment = { path = "../lighthouse/environment" }
|
||||
deposit_contract = { path = "../common/deposit_contract" }
|
||||
libc = "0.2.65"
|
||||
eth2_ssz = "0.1.2"
|
||||
eth2_ssz_derive = "0.1.0"
|
||||
hex = "0.4.2"
|
||||
rayon = "1.3.0"
|
||||
eth2_testnet_config = { path = "../common/eth2_testnet_config" }
|
||||
web3 = "0.11.0"
|
||||
futures = { version = "0.3.5", features = ["compat"] }
|
||||
eth2_network_config = { path = "../common/eth2_network_config" }
|
||||
clap_utils = { path = "../common/clap_utils" }
|
||||
directory = { path = "../common/directory" }
|
||||
eth2_wallet = { path = "../crypto/eth2_wallet" }
|
||||
eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
|
||||
rand = "0.7.2"
|
||||
validator_dir = { path = "../common/validator_dir" }
|
||||
tokio = { version = "0.2.21", features = ["full"] }
|
||||
tokio = { version = "1.14.0", features = ["full"] }
|
||||
eth2_keystore = { path = "../crypto/eth2_keystore" }
|
||||
account_utils = { path = "../common/account_utils" }
|
||||
slashing_protection = { path = "../validator_client/slashing_protection" }
|
||||
eth2 = {path = "../common/eth2"}
|
||||
safe_arith = {path = "../consensus/safe_arith"}
|
||||
slot_clock = { path = "../common/slot_clock" }
|
||||
filesystem = { path = "../common/filesystem" }
|
||||
sensitive_url = { path = "../common/sensitive_url" }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.1.0"
|
||||
|
||||
@@ -1,21 +1,67 @@
|
||||
use clap::ArgMatches;
|
||||
use std::fs::create_dir_all;
|
||||
use std::path::{Path, PathBuf};
|
||||
use account_utils::PlainText;
|
||||
use account_utils::{read_input_from_user, strip_off_newlines};
|
||||
use eth2_wallet::bip39::{Language, Mnemonic};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::str::from_utf8;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
pub fn ensure_dir_exists<P: AsRef<Path>>(path: P) -> Result<(), String> {
|
||||
let path = path.as_ref();
|
||||
pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:";
|
||||
pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:";
|
||||
|
||||
if !path.exists() {
|
||||
create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?;
|
||||
pub fn read_mnemonic_from_cli(
|
||||
mnemonic_path: Option<PathBuf>,
|
||||
stdin_inputs: bool,
|
||||
) -> Result<Mnemonic, String> {
|
||||
let mnemonic = match mnemonic_path {
|
||||
Some(path) => fs::read(&path)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", path, e))
|
||||
.and_then(|bytes| {
|
||||
let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into();
|
||||
let phrase = from_utf8(bytes_no_newlines.as_ref())
|
||||
.map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?;
|
||||
Mnemonic::from_phrase(phrase, Language::English).map_err(|e| {
|
||||
format!(
|
||||
"Unable to derive mnemonic from string {:?}: {:?}",
|
||||
phrase, e
|
||||
)
|
||||
})
|
||||
})?,
|
||||
None => loop {
|
||||
eprintln!();
|
||||
eprintln!("{}", MNEMONIC_PROMPT);
|
||||
|
||||
let mnemonic = read_input_from_user(stdin_inputs)?;
|
||||
|
||||
match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) {
|
||||
Ok(mnemonic_m) => {
|
||||
eprintln!("Valid mnemonic provided.");
|
||||
eprintln!();
|
||||
sleep(Duration::from_secs(1));
|
||||
break mnemonic_m;
|
||||
}
|
||||
Err(_) => {
|
||||
eprintln!("Invalid mnemonic");
|
||||
}
|
||||
}
|
||||
},
|
||||
};
|
||||
Ok(mnemonic)
|
||||
}
|
||||
|
||||
/// Reads in a wallet name from the user. If the `--wallet-name` flag is provided, use it. Otherwise
|
||||
/// read from an interactive prompt using tty unless the `--stdin-inputs` flag is provided.
|
||||
pub fn read_wallet_name_from_cli(
|
||||
wallet_name: Option<String>,
|
||||
stdin_inputs: bool,
|
||||
) -> Result<String, String> {
|
||||
match wallet_name {
|
||||
Some(name) => Ok(name),
|
||||
None => {
|
||||
eprintln!("{}", WALLET_NAME_PROMPT);
|
||||
|
||||
read_input_from_user(stdin_inputs)
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn base_wallet_dir(matches: &ArgMatches, arg: &'static str) -> Result<PathBuf, String> {
|
||||
clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
arg,
|
||||
PathBuf::new().join(".lighthouse").join("wallets"),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@ use types::EthSpec;
|
||||
pub const CMD: &str = "account_manager";
|
||||
pub const SECRETS_DIR_FLAG: &str = "secrets-dir";
|
||||
pub const VALIDATOR_DIR_FLAG: &str = "validator-dir";
|
||||
pub const BASE_DIR_FLAG: &str = "base-dir";
|
||||
pub const VALIDATOR_DIR_FLAG_ALIAS: &str = "validators-dir";
|
||||
pub const WALLETS_DIR_FLAG: &str = "wallets-dir";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
|
||||
@@ -1,9 +1,16 @@
|
||||
use crate::{common::ensure_dir_exists, SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG};
|
||||
use account_utils::{random_password, strip_off_newlines, validator_definitions};
|
||||
use crate::common::read_wallet_name_from_cli;
|
||||
use crate::wallet::create::STDIN_INPUTS_FLAG;
|
||||
use crate::{SECRETS_DIR_FLAG, WALLETS_DIR_FLAG};
|
||||
use account_utils::{
|
||||
random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText,
|
||||
};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use directory::{
|
||||
ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR,
|
||||
};
|
||||
use environment::Environment;
|
||||
use eth2_wallet::PlainText;
|
||||
use eth2_wallet_manager::WalletManager;
|
||||
use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME};
|
||||
use std::ffi::OsStr;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
@@ -11,13 +18,13 @@ use types::EthSpec;
|
||||
use validator_dir::Builder as ValidatorDirBuilder;
|
||||
|
||||
pub const CMD: &str = "create";
|
||||
pub const BASE_DIR_FLAG: &str = "base-dir";
|
||||
pub const WALLET_NAME_FLAG: &str = "wallet-name";
|
||||
pub const WALLET_PASSPHRASE_FLAG: &str = "wallet-passphrase";
|
||||
pub const WALLET_PASSWORD_FLAG: &str = "wallet-password";
|
||||
pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei";
|
||||
pub const STORE_WITHDRAW_FLAG: &str = "store-withdrawal-keystore";
|
||||
pub const COUNT_FLAG: &str = "count";
|
||||
pub const AT_MOST_FLAG: &str = "at-most";
|
||||
pub const WALLET_PASSWORD_PROMPT: &str = "Enter your wallet's password:";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
@@ -30,26 +37,22 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
.long(WALLET_NAME_FLAG)
|
||||
.value_name("WALLET_NAME")
|
||||
.help("Use the wallet identified by this name")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(WALLET_PASSPHRASE_FLAG)
|
||||
.long(WALLET_PASSPHRASE_FLAG)
|
||||
Arg::with_name(WALLET_PASSWORD_FLAG)
|
||||
.long(WALLET_PASSWORD_FLAG)
|
||||
.value_name("WALLET_PASSWORD_PATH")
|
||||
.help("A path to a file containing the password which will unlock the wallet.")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path where the validator directories will be created. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
)
|
||||
.takes_value(true),
|
||||
Arg::with_name(WALLETS_DIR_FLAG)
|
||||
.long(WALLETS_DIR_FLAG)
|
||||
.value_name(WALLETS_DIR_FLAG)
|
||||
.help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets")
|
||||
.takes_value(true)
|
||||
.conflicts_with("datadir"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SECRETS_DIR_FLAG)
|
||||
@@ -57,8 +60,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
.value_name("SECRETS_DIR")
|
||||
.help(
|
||||
"The path where the validator keystore passwords will be stored. \
|
||||
Defaults to ~/.lighthouse/secrets",
|
||||
Defaults to ~/.lighthouse/{network}/secrets",
|
||||
)
|
||||
.conflicts_with("datadir")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
@@ -99,36 +103,61 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
.conflicts_with("count")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STDIN_INPUTS_FLAG)
|
||||
.takes_value(false)
|
||||
.hidden(cfg!(windows))
|
||||
.long(STDIN_INPUTS_FLAG)
|
||||
.help("If present, read all user inputs from stdin instead of tty."),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run<T: EthSpec>(
|
||||
matches: &ArgMatches,
|
||||
mut env: Environment<T>,
|
||||
wallet_base_dir: PathBuf,
|
||||
validator_dir: PathBuf,
|
||||
) -> Result<(), String> {
|
||||
let spec = env.core_context().eth2_config.spec;
|
||||
|
||||
let name: String = clap_utils::parse_required(matches, WALLET_NAME_FLAG)?;
|
||||
let wallet_password_path: PathBuf =
|
||||
clap_utils::parse_required(matches, WALLET_PASSPHRASE_FLAG)?;
|
||||
let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
VALIDATOR_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("validators"),
|
||||
)?;
|
||||
let secrets_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
SECRETS_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("secrets"),
|
||||
)?;
|
||||
let name: Option<String> = clap_utils::parse_optional(matches, WALLET_NAME_FLAG)?;
|
||||
let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG);
|
||||
|
||||
let wallet_base_dir = if matches.value_of("datadir").is_some() {
|
||||
let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
|
||||
path.join(DEFAULT_WALLET_DIR)
|
||||
} else {
|
||||
parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)?
|
||||
};
|
||||
let secrets_dir = if matches.value_of("datadir").is_some() {
|
||||
let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
|
||||
path.join(DEFAULT_SECRET_DIR)
|
||||
} else {
|
||||
parse_path_or_default_with_flag(matches, SECRETS_DIR_FLAG, DEFAULT_SECRET_DIR)?
|
||||
};
|
||||
|
||||
let deposit_gwei = clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)?
|
||||
.unwrap_or_else(|| spec.max_effective_balance);
|
||||
.unwrap_or(spec.max_effective_balance);
|
||||
let count: Option<usize> = clap_utils::parse_optional(matches, COUNT_FLAG)?;
|
||||
let at_most: Option<usize> = clap_utils::parse_optional(matches, AT_MOST_FLAG)?;
|
||||
|
||||
// The command will always fail if the wallet dir does not exist.
|
||||
if !wallet_base_dir.exists() {
|
||||
return Err(format!(
|
||||
"No wallet directory at {:?}. Use the `lighthouse --network {} {} {} {}` command to create a wallet",
|
||||
wallet_base_dir,
|
||||
matches.value_of("network").unwrap_or("<NETWORK>"),
|
||||
crate::CMD,
|
||||
crate::wallet::CMD,
|
||||
crate::wallet::create::CMD
|
||||
));
|
||||
}
|
||||
|
||||
ensure_dir_exists(&validator_dir)?;
|
||||
ensure_dir_exists(&secrets_dir)?;
|
||||
|
||||
eprintln!("secrets-dir path {:?}", secrets_dir);
|
||||
eprintln!("wallets-dir path {:?}", wallet_base_dir);
|
||||
|
||||
let starting_validator_count = existing_validator_count(&validator_dir)?;
|
||||
|
||||
let n = match (count, at_most) {
|
||||
@@ -152,17 +181,37 @@ pub fn cli_run<T: EthSpec>(
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let wallet_password = fs::read(&wallet_password_path)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", wallet_password_path, e))
|
||||
.map(|bytes| PlainText::from(strip_off_newlines(bytes)))?;
|
||||
let wallet_password_path: Option<PathBuf> =
|
||||
clap_utils::parse_optional(matches, WALLET_PASSWORD_FLAG)?;
|
||||
|
||||
let wallet_name = read_wallet_name_from_cli(name, stdin_inputs)?;
|
||||
let wallet_password = read_wallet_password_from_cli(wallet_password_path, stdin_inputs)?;
|
||||
|
||||
let mgr = WalletManager::open(&wallet_base_dir)
|
||||
.map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;
|
||||
.map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?;
|
||||
|
||||
let mut wallet = mgr
|
||||
.wallet_by_name(&name)
|
||||
.wallet_by_name(&wallet_name)
|
||||
.map_err(|e| format!("Unable to open wallet: {:?}", e))?;
|
||||
|
||||
let slashing_protection_path = validator_dir.join(SLASHING_PROTECTION_FILENAME);
|
||||
let slashing_protection =
|
||||
SlashingDatabase::open_or_create(&slashing_protection_path).map_err(|e| {
|
||||
format!(
|
||||
"Unable to open or create slashing protection database at {}: {:?}",
|
||||
slashing_protection_path.display(),
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
// Create an empty transaction and drops it. Used to test if the database is locked.
|
||||
slashing_protection.test_transaction().map_err(|e| {
|
||||
format!(
|
||||
"Cannot create keys while the validator client is running: {:?}",
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
for i in 0..n {
|
||||
let voting_password = random_password();
|
||||
let withdrawal_password = random_password();
|
||||
@@ -175,9 +224,25 @@ pub fn cli_run<T: EthSpec>(
|
||||
)
|
||||
.map_err(|e| format!("Unable to create validator keys: {:?}", e))?;
|
||||
|
||||
let voting_pubkey = keystores.voting.pubkey().to_string();
|
||||
let voting_pubkey = keystores.voting.public_key().ok_or_else(|| {
|
||||
format!(
|
||||
"Keystore public key is invalid: {}",
|
||||
keystores.voting.pubkey()
|
||||
)
|
||||
})?;
|
||||
|
||||
ValidatorDirBuilder::new(validator_dir.clone(), secrets_dir.clone())
|
||||
slashing_protection
|
||||
.register_validator(voting_pubkey.compress())
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Error registering validator {}: {:?}",
|
||||
voting_pubkey.as_hex_string(),
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
ValidatorDirBuilder::new(validator_dir.clone())
|
||||
.password_dir(secrets_dir.clone())
|
||||
.voting_keystore(keystores.voting, voting_password.as_bytes())
|
||||
.withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
|
||||
.create_eth1_tx_data(deposit_gwei, &spec)
|
||||
@@ -185,7 +250,7 @@ pub fn cli_run<T: EthSpec>(
|
||||
.build()
|
||||
.map_err(|e| format!("Unable to build validator directory: {:?}", e))?;
|
||||
|
||||
println!("{}/{}\t0x{}", i + 1, n, voting_pubkey);
|
||||
println!("{}/{}\t{}", i + 1, n, voting_pubkey.as_hex_string());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -193,15 +258,40 @@ pub fn cli_run<T: EthSpec>(
|
||||
|
||||
/// Returns the number of validators that exist in the given `validator_dir`.
|
||||
///
|
||||
/// This function just assumes all files and directories, excluding the validator definitions YAML,
|
||||
/// are validator directories, making it likely to return a higher number than accurate
|
||||
/// but never a lower one.
|
||||
/// This function just assumes all files and directories, excluding the validator definitions YAML
|
||||
/// and slashing protection database are validator directories, making it likely to return a higher
|
||||
/// number than accurate but never a lower one.
|
||||
fn existing_validator_count<P: AsRef<Path>>(validator_dir: P) -> Result<usize, String> {
|
||||
fs::read_dir(validator_dir.as_ref())
|
||||
.map(|iter| {
|
||||
iter.filter_map(|e| e.ok())
|
||||
.filter(|e| e.file_name() != OsStr::new(validator_definitions::CONFIG_FILENAME))
|
||||
.filter(|e| {
|
||||
e.file_name() != OsStr::new(validator_definitions::CONFIG_FILENAME)
|
||||
&& e.file_name()
|
||||
!= OsStr::new(slashing_protection::SLASHING_PROTECTION_FILENAME)
|
||||
})
|
||||
.count()
|
||||
})
|
||||
.map_err(|e| format!("Unable to read {:?}: {}", validator_dir.as_ref(), e))
|
||||
}
|
||||
|
||||
/// Used when a user is accessing an existing wallet. Read in a wallet password from a file if the password file
|
||||
/// path is provided. Otherwise, read from an interactive prompt using tty unless the `--stdin-inputs`
|
||||
/// flag is provided.
|
||||
pub fn read_wallet_password_from_cli(
|
||||
password_file_path: Option<PathBuf>,
|
||||
stdin_inputs: bool,
|
||||
) -> Result<PlainText, String> {
|
||||
match password_file_path {
|
||||
Some(path) => fs::read(&path)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", path, e))
|
||||
.map(|bytes| strip_off_newlines(bytes).into()),
|
||||
None => {
|
||||
eprintln!();
|
||||
eprintln!("{}", WALLET_PASSWORD_PROMPT);
|
||||
let password =
|
||||
PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec());
|
||||
Ok(password)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,405 +0,0 @@
|
||||
use crate::VALIDATOR_DIR_FLAG;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use deposit_contract::DEPOSIT_GAS;
|
||||
use environment::Environment;
|
||||
use futures::{
|
||||
compat::Future01CompatExt,
|
||||
stream::{FuturesUnordered, StreamExt},
|
||||
};
|
||||
use slog::{info, Logger};
|
||||
use state_processing::per_block_processing::verify_deposit_signature;
|
||||
use std::path::PathBuf;
|
||||
use tokio::time::{delay_until, Duration, Instant};
|
||||
use types::EthSpec;
|
||||
use validator_dir::{Eth1DepositData, Manager as ValidatorManager, ValidatorDir};
|
||||
use web3::{
|
||||
transports::Http,
|
||||
transports::Ipc,
|
||||
types::{Address, SyncInfo, SyncState, TransactionRequest, U256},
|
||||
Transport, Web3,
|
||||
};
|
||||
|
||||
pub const CMD: &str = "deposit";
|
||||
pub const VALIDATOR_FLAG: &str = "validator";
|
||||
pub const ETH1_IPC_FLAG: &str = "eth1-ipc";
|
||||
pub const ETH1_HTTP_FLAG: &str = "eth1-http";
|
||||
pub const FROM_ADDRESS_FLAG: &str = "from-address";
|
||||
pub const CONFIRMATION_COUNT_FLAG: &str = "confirmation-count";
|
||||
pub const CONFIRMATION_BATCH_SIZE_FLAG: &str = "confirmation-batch-size";
|
||||
|
||||
const GWEI: u64 = 1_000_000_000;
|
||||
|
||||
const SYNCING_STATE_RETRY_DELAY: Duration = Duration::from_secs(2);
|
||||
|
||||
const CONFIRMATIONS_POLL_TIME: Duration = Duration::from_secs(2);
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new("deposit")
|
||||
.about(
|
||||
"Submits a deposit to an Eth1 validator registration contract via an IPC endpoint \
|
||||
of an Eth1 client (e.g., Geth, OpenEthereum, etc.). The validators must already \
|
||||
have been created and exist on the file-system. The process will exit immediately \
|
||||
with an error if any error occurs. After each deposit is submitted to the Eth1 \
|
||||
node, a file will be saved in the validator directory with the transaction hash. \
|
||||
If confirmations are set to non-zero then the application will wait for confirmations \
|
||||
before saving the transaction hash and moving onto the next batch of deposits. \
|
||||
The deposit contract address will be determined by the --testnet-dir flag on the \
|
||||
primary Lighthouse binary.",
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path to the validator client data directory. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_FLAG)
|
||||
.long(VALIDATOR_FLAG)
|
||||
.value_name("VALIDATOR_NAME")
|
||||
.help(
|
||||
"The name of the directory in --data-dir for which to deposit. \
|
||||
Set to 'all' to deposit all validators in the --data-dir.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(ETH1_IPC_FLAG)
|
||||
.long(ETH1_IPC_FLAG)
|
||||
.value_name("ETH1_IPC_PATH")
|
||||
.help("Path to an Eth1 JSON-RPC IPC endpoint")
|
||||
.takes_value(true)
|
||||
.required(false),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(ETH1_HTTP_FLAG)
|
||||
.long(ETH1_HTTP_FLAG)
|
||||
.value_name("ETH1_HTTP_URL")
|
||||
.help("URL to an Eth1 JSON-RPC endpoint")
|
||||
.takes_value(true)
|
||||
.required(false),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(FROM_ADDRESS_FLAG)
|
||||
.long(FROM_ADDRESS_FLAG)
|
||||
.value_name("FROM_ETH1_ADDRESS")
|
||||
.help(
|
||||
"The address that will submit the eth1 deposit. \
|
||||
Must be unlocked on the node at --eth1-ipc.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(CONFIRMATION_COUNT_FLAG)
|
||||
.long(CONFIRMATION_COUNT_FLAG)
|
||||
.value_name("CONFIRMATION_COUNT")
|
||||
.help(
|
||||
"The number of Eth1 block confirmations required \
|
||||
before a transaction is considered complete. Set to \
|
||||
0 for no confirmations.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.default_value("1"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(CONFIRMATION_BATCH_SIZE_FLAG)
|
||||
.long(CONFIRMATION_BATCH_SIZE_FLAG)
|
||||
.value_name("BATCH_SIZE")
|
||||
.help(
|
||||
"Perform BATCH_SIZE deposits and wait for confirmations \
|
||||
in parallel. Useful for achieving faster bulk deposits.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.default_value("10"),
|
||||
)
|
||||
}
|
||||
|
||||
/// Submits one eth1 deposit transaction per `(validator_dir, deposit_data)` pair.
///
/// Deposits are sent in batches of `confirmation_batch_size`; all transactions in a
/// batch are awaited concurrently and each must reach `confirmation_count` block
/// confirmations before the next batch starts. The tx hash of every successful
/// deposit is persisted into its validator directory so a later run can skip it.
///
/// Blocks the environment's runtime until all batches complete; returns the first
/// error encountered (verification failure, send failure, or disk-write failure).
#[allow(clippy::too_many_arguments)]
fn send_deposit_transactions<T1, T2: 'static>(
    mut env: Environment<T1>,
    log: Logger,
    mut eth1_deposit_datas: Vec<(ValidatorDir, Eth1DepositData)>,
    from_address: Address,
    deposit_contract: Address,
    transport: T2,
    confirmation_count: usize,
    confirmation_batch_size: usize,
) -> Result<(), String>
where
    T1: EthSpec,
    T2: Transport + std::marker::Send,
    <T2 as web3::Transport>::Out: std::marker::Send,
{
    let web3 = Web3::new(transport);
    let spec = env.eth2_config.spec.clone();

    let deposits_fut = async {
        // Refuse to submit anything until the eth1 node reports that it is synced.
        poll_until_synced(web3.clone(), log.clone()).await?;

        // Each chunk is submitted concurrently; the next chunk only starts once every
        // transaction in this one has confirmed (or failed).
        for chunk in eth1_deposit_datas.chunks_mut(confirmation_batch_size) {
            let futures = FuturesUnordered::default();

            for (ref mut validator_dir, eth1_deposit_data) in chunk.iter_mut() {
                // Sanity-check the deposit signature before spending any eth; a bad
                // signature usually means the wrong testnet config was loaded.
                verify_deposit_signature(&eth1_deposit_data.deposit_data, &spec).map_err(|e| {
                    format!(
                        "Deposit for {:?} fails verification, \
                         are you using the correct testnet configuration?\nError: {:?}",
                        eth1_deposit_data.deposit_data.pubkey, e
                    )
                })?;

                // Clone handles so the `async move` block owns its own copies.
                let web3 = web3.clone();
                let log = log.clone();
                futures.push(async move {
                    let tx_hash = web3
                        .send_transaction_with_confirmation(
                            TransactionRequest {
                                from: from_address,
                                to: Some(deposit_contract),
                                gas: Some(DEPOSIT_GAS.into()),
                                gas_price: None,
                                value: Some(from_gwei(eth1_deposit_data.deposit_data.amount)),
                                data: Some(eth1_deposit_data.rlp.clone().into()),
                                nonce: None,
                                condition: None,
                            },
                            CONFIRMATIONS_POLL_TIME,
                            confirmation_count,
                        )
                        .compat()
                        .await
                        .map_err(|e| format!("Failed to send transaction: {:?}", e))?;

                    info!(
                        log,
                        "Submitted deposit";
                        "tx_hash" => format!("{:?}", tx_hash),
                    );

                    // Record the tx hash on disk so this validator is filtered out of
                    // any subsequent deposit run.
                    validator_dir
                        .save_eth1_deposit_tx_hash(&format!("{:?}", tx_hash))
                        .map_err(|e| {
                            format!("Failed to save tx hash {:?} to disk: {:?}", tx_hash, e)
                        })?;

                    Ok::<(), String>(())
                });
            }

            // Drive the whole batch to completion; fail fast on the first error.
            futures
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .collect::<Result<_, _>>()?;
        }

        Ok::<(), String>(())
    };

    env.runtime().block_on(deposits_fut)?;

    Ok(())
}
|
||||
|
||||
/// Entry point for the deposit subcommand.
///
/// Resolves CLI flags, loads the requested validator directories (one named
/// validator or `all`), gathers their eth1 deposit data (skipping any that already
/// have a saved tx hash), and submits the deposits over either an IPC or HTTP eth1
/// endpoint — exactly one of `--eth1-ipc` / `--eth1-http` must be supplied.
pub fn cli_run<T: EthSpec>(
    matches: &ArgMatches<'_>,
    mut env: Environment<T>,
) -> Result<(), String> {
    let log = env.core_context().log().clone();

    // Defaults to ~/.lighthouse/validators when the flag is absent.
    let data_dir = clap_utils::parse_path_with_default_in_home_dir(
        matches,
        VALIDATOR_DIR_FLAG,
        PathBuf::new().join(".lighthouse").join("validators"),
    )?;
    let validator: String = clap_utils::parse_required(matches, VALIDATOR_FLAG)?;
    let eth1_ipc_path: Option<PathBuf> = clap_utils::parse_optional(matches, ETH1_IPC_FLAG)?;
    let eth1_http_url: Option<String> = clap_utils::parse_optional(matches, ETH1_HTTP_FLAG)?;
    let from_address: Address = clap_utils::parse_required(matches, FROM_ADDRESS_FLAG)?;
    let confirmation_count: usize = clap_utils::parse_required(matches, CONFIRMATION_COUNT_FLAG)?;
    let confirmation_batch_size: usize =
        clap_utils::parse_required(matches, CONFIRMATION_BATCH_SIZE_FLAG)?;

    let manager = ValidatorManager::open(&data_dir)
        .map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?;

    // "all" deposits every validator in the data dir; any other value is treated as
    // the name of a single validator directory.
    let validators = match validator.as_ref() {
        "all" => manager
            .open_all_validators()
            .map_err(|e| format!("Unable to read all validators: {:?}", e)),
        name => {
            let path = manager
                .directory_names()
                .map_err(|e| {
                    format!(
                        "Unable to read --{} directory names: {:?}",
                        VALIDATOR_DIR_FLAG, e
                    )
                })?
                .get(name)
                .ok_or_else(|| format!("Unknown validator: {}", name))?
                .clone();

            manager
                .open_validator(&path)
                .map_err(|e| format!("Unable to open {}: {:?}", name, e))
                .map(|v| vec![v])
        }
    }?;

    // Collect deposit data, skipping validators that already deposited (a saved tx
    // hash exists). A validator with no deposit data file at all is an error.
    let eth1_deposit_datas = validators
        .into_iter()
        .filter(|v| !v.eth1_deposit_tx_hash_exists())
        .map(|v| match v.eth1_deposit_data() {
            Ok(Some(data)) => Ok((v, data)),
            Ok(None) => Err(format!(
                "Validator is missing deposit data file: {:?}",
                v.dir()
            )),
            Err(e) => Err(format!(
                "Unable to read deposit data for {:?}: {:?}",
                v.dir(),
                e
            )),
        })
        .collect::<Result<Vec<_>, _>>()?;

    let total_gwei: u64 = eth1_deposit_datas
        .iter()
        .map(|(_, d)| d.deposit_data.amount)
        .sum();

    if eth1_deposit_datas.is_empty() {
        info!(log, "No validators to deposit");

        return Ok(());
    }

    info!(
        log,
        "Starting deposits";
        "deposit_count" => eth1_deposit_datas.len(),
        // Presumably GWEI = 10^9 so this logs whole ETH — confirm against the
        // constant's definition, which is outside this view.
        "total_eth" => total_gwei / GWEI,
    );

    let deposit_contract = env
        .testnet
        .as_ref()
        .ok_or_else(|| "Unable to run account manager without a testnet dir".to_string())?
        .deposit_contract_address()
        .map_err(|e| format!("Unable to parse deposit contract address: {}", e))?;

    // A zero deposit address almost certainly means a misconfigured testnet; sending
    // there would burn the deposits.
    if deposit_contract == Address::zero() {
        return Err("Refusing to deposit to the zero address. Check testnet configuration.".into());
    }

    // Exactly one transport must be chosen; both or neither is a usage error.
    match (eth1_ipc_path, eth1_http_url) {
        (Some(_), Some(_)) => Err(format!(
            "error: Cannot supply both --{} and --{}",
            ETH1_IPC_FLAG, ETH1_HTTP_FLAG
        )),
        (None, None) => Err(format!(
            "error: Must supply one of --{} or --{}",
            ETH1_IPC_FLAG, ETH1_HTTP_FLAG
        )),
        (Some(ipc_path), None) => {
            let (_event_loop_handle, ipc_transport) = Ipc::new(ipc_path)
                .map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?;
            send_deposit_transactions(
                env,
                log,
                eth1_deposit_datas,
                from_address,
                deposit_contract,
                ipc_transport,
                confirmation_count,
                confirmation_batch_size,
            )
        }
        (None, Some(http_url)) => {
            let (_event_loop_handle, http_transport) = Http::new(http_url.as_str())
                .map_err(|e| format!("Unable to connect to eth1 http RPC: {:?}", e))?;
            send_deposit_transactions(
                env,
                log,
                eth1_deposit_datas,
                from_address,
                deposit_contract,
                http_transport,
                confirmation_count,
                confirmation_batch_size,
            )
        }
    }
}
|
||||
|
||||
/// Converts gwei to wei.
|
||||
fn from_gwei(gwei: u64) -> U256 {
|
||||
U256::from(gwei) * U256::exp10(9)
|
||||
}
|
||||
|
||||
/// Run a poll on the `eth_syncing` endpoint, blocking until the node is synced.
///
/// A node that reports `NotSyncing` but whose head block number is still 0 is
/// treated as not yet synced (a freshly-started node answers `NotSyncing` before it
/// has imported anything), so we keep polling until the head advances past genesis.
async fn poll_until_synced<T>(web3: Web3<T>, log: Logger) -> Result<(), String>
where
    T: Transport + Send + 'static,
    <T as Transport>::Out: Send,
{
    loop {
        let sync_state = web3
            .clone()
            .eth()
            .syncing()
            .compat()
            .await
            .map_err(|e| format!("Unable to read syncing state from eth1 node: {:?}", e))?;

        match sync_state {
            SyncState::Syncing(SyncInfo {
                current_block,
                highest_block,
                ..
            }) => {
                info!(
                    log,
                    "Waiting for eth1 node to sync";
                    "est_highest_block" => format!("{}", highest_block),
                    "current_block" => format!("{}", current_block),
                );

                // Back off before polling again.
                delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await;
            }
            SyncState::NotSyncing => {
                let block_number = web3
                    .clone()
                    .eth()
                    .block_number()
                    .compat()
                    .await
                    .map_err(|e| format!("Unable to read block number from eth1 node: {:?}", e))?;

                if block_number > 0.into() {
                    info!(
                        log,
                        "Eth1 node is synced";
                        "head_block" => format!("{}", block_number),
                    );
                    break;
                } else {
                    // Head is still at block 0: not really synced yet, keep waiting.
                    delay_until(Instant::now() + SYNCING_STATE_RETRY_DELAY).await;
                    info!(
                        log,
                        "Waiting for eth1 node to sync";
                        "current_block" => 0,
                    );
                }
            }
        }
    }

    Ok(())
}
|
||||
430
account_manager/src/validator/exit.rs
Normal file
430
account_manager/src/validator/exit.rs
Normal file
@@ -0,0 +1,430 @@
|
||||
use crate::wallet::create::STDIN_INPUTS_FLAG;
|
||||
use bls::{Keypair, PublicKey};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use environment::Environment;
|
||||
use eth2::{
|
||||
types::{GenesisData, StateId, ValidatorData, ValidatorId, ValidatorStatus},
|
||||
BeaconNodeHttpClient, Timeouts,
|
||||
};
|
||||
use eth2_keystore::Keystore;
|
||||
use eth2_network_config::Eth2NetworkConfig;
|
||||
use safe_arith::SafeArith;
|
||||
use sensitive_url::SensitiveUrl;
|
||||
use slot_clock::{SlotClock, SystemTimeSlotClock};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit};
|
||||
|
||||
pub const CMD: &str = "exit";
|
||||
pub const KEYSTORE_FLAG: &str = "keystore";
|
||||
pub const PASSWORD_FILE_FLAG: &str = "password-file";
|
||||
pub const BEACON_SERVER_FLAG: &str = "beacon-node";
|
||||
pub const NO_WAIT: &str = "no-wait";
|
||||
pub const NO_CONFIRMATION: &str = "no-confirmation";
|
||||
pub const PASSWORD_PROMPT: &str = "Enter the keystore password";
|
||||
|
||||
pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/";
|
||||
pub const CONFIRMATION_PHRASE: &str = "Exit my validator";
|
||||
pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/voluntary-exit.html";
|
||||
pub const PROMPT: &str = "WARNING: WITHDRAWING STAKED ETH IS NOT CURRENTLY POSSIBLE";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new("exit")
|
||||
.about("Submits a VoluntaryExit to the beacon chain for a given validator keystore.")
|
||||
.arg(
|
||||
Arg::with_name(KEYSTORE_FLAG)
|
||||
.long(KEYSTORE_FLAG)
|
||||
.value_name("KEYSTORE_PATH")
|
||||
.help("The path to the EIP-2335 voting keystore for the validator")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(PASSWORD_FILE_FLAG)
|
||||
.long(PASSWORD_FILE_FLAG)
|
||||
.value_name("PASSWORD_FILE_PATH")
|
||||
.help("The path to the password file which unlocks the validator voting keystore")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(BEACON_SERVER_FLAG)
|
||||
.long(BEACON_SERVER_FLAG)
|
||||
.value_name("NETWORK_ADDRESS")
|
||||
.help("Address to a beacon node HTTP API")
|
||||
.default_value(DEFAULT_BEACON_NODE)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(NO_WAIT)
|
||||
.long(NO_WAIT)
|
||||
.help("Exits after publishing the voluntary exit without waiting for confirmation that the exit was included in the beacon chain")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(NO_CONFIRMATION)
|
||||
.long(NO_CONFIRMATION)
|
||||
.help("Exits without prompting for confirmation that you understand the implications of a voluntary exit. This should be used with caution")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STDIN_INPUTS_FLAG)
|
||||
.takes_value(false)
|
||||
.hidden(cfg!(windows))
|
||||
.long(STDIN_INPUTS_FLAG)
|
||||
.help("If present, read all user inputs from stdin instead of tty."),
|
||||
)
|
||||
}
|
||||
|
||||
/// Entry point for the `exit` subcommand: parses flags, builds a beacon-node HTTP
/// client, and blocks on `publish_voluntary_exit` until the exit is published (and,
/// unless `--no-wait` is set, observed on chain).
pub fn cli_run<E: EthSpec>(matches: &ArgMatches, env: Environment<E>) -> Result<(), String> {
    let keystore_path: PathBuf = clap_utils::parse_required(matches, KEYSTORE_FLAG)?;
    let password_file_path: Option<PathBuf> =
        clap_utils::parse_optional(matches, PASSWORD_FILE_FLAG)?;

    // On Windows we always read from stdin (no reliable tty prompt support).
    let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG);
    let no_wait = matches.is_present(NO_WAIT);
    let no_confirmation = matches.is_present(NO_CONFIRMATION);

    let spec = env.eth2_config().spec.clone();
    let server_url: String = clap_utils::parse_required(matches, BEACON_SERVER_FLAG)?;
    let client = BeaconNodeHttpClient::new(
        SensitiveUrl::parse(&server_url)
            .map_err(|e| format!("Failed to parse beacon http server: {:?}", e))?,
        // One slot duration is used as the timeout for every HTTP request.
        Timeouts::set_all(Duration::from_secs(env.eth2_config.spec.seconds_per_slot)),
    );

    let eth2_network_config = env
        .eth2_network_config
        .clone()
        .expect("network should have a valid config");

    env.runtime().block_on(publish_voluntary_exit::<E>(
        &keystore_path,
        password_file_path.as_ref(),
        &client,
        &spec,
        stdin_inputs,
        &eth2_network_config,
        no_wait,
        no_confirmation,
    ))?;

    Ok(())
}
|
||||
|
||||
/// Loads the voting keypair from the keystore, constructs and signs a
/// `VoluntaryExit` for it, and publishes the exit to the beacon node.
///
/// Before publishing it verifies the beacon node is on the same network as the
/// supplied network config, checks the node is synced, and (unless
/// `no_confirmation`) requires the user to type the confirmation phrase. After
/// publishing it polls the validator's status once per slot until the exit is
/// visible on chain, unless `no_wait` is set.
#[allow(clippy::too_many_arguments)]
async fn publish_voluntary_exit<E: EthSpec>(
    keystore_path: &Path,
    password_file_path: Option<&PathBuf>,
    client: &BeaconNodeHttpClient,
    spec: &ChainSpec,
    stdin_inputs: bool,
    eth2_network_config: &Eth2NetworkConfig,
    no_wait: bool,
    no_confirmation: bool,
) -> Result<(), String> {
    let genesis_data = get_geneisis_data(client).await?;
    let testnet_genesis_root = eth2_network_config
        .beacon_state::<E>()
        .as_ref()
        .expect("network should have valid genesis state")
        .genesis_validators_root();

    // Verify that the beacon node and validator being exited are on the same network.
    if genesis_data.genesis_validators_root != testnet_genesis_root {
        return Err(
            "Invalid genesis state. Please ensure that your beacon node is on the same network \
             as the validator you are publishing an exit for"
                .to_string(),
        );
    }

    // Return immediately if beacon node is not synced
    if is_syncing(client).await? {
        return Err("Beacon node is still syncing".to_string());
    }

    let keypair = load_voting_keypair(keystore_path, password_file_path, stdin_inputs)?;

    let epoch = get_current_epoch::<E>(genesis_data.genesis_time, spec)
        .ok_or("Failed to get current epoch. Please check your system time")?;
    // Also checks the validator is active and past its exit-eligibility epoch.
    let validator_index = get_validator_index_for_exit(client, &keypair.pk, epoch, spec).await?;

    let fork = get_beacon_state_fork(client).await?;
    let voluntary_exit = VoluntaryExit {
        epoch,
        validator_index,
    };

    eprintln!(
        "Publishing a voluntary exit for validator: {} \n",
        keypair.pk
    );
    if !no_confirmation {
        eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n");
        eprintln!("{}\n", PROMPT);
        eprintln!(
            "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.",
            WEBSITE_URL
        );
        eprintln!("Enter the exit phrase from the above URL to confirm the voluntary exit: ");
    }

    // With --no-confirmation the phrase is supplied implicitly so the branch below
    // always publishes.
    let confirmation = if !no_confirmation {
        account_utils::read_input_from_user(stdin_inputs)?
    } else {
        CONFIRMATION_PHRASE.to_string()
    };

    if confirmation == CONFIRMATION_PHRASE {
        // Sign and publish the voluntary exit to network
        let signed_voluntary_exit = voluntary_exit.sign(
            &keypair.sk,
            &fork,
            genesis_data.genesis_validators_root,
            spec,
        );
        client
            .post_beacon_pool_voluntary_exits(&signed_voluntary_exit)
            .await
            .map_err(|e| format!("Failed to publish voluntary exit: {}", e))?;
        tokio::time::sleep(std::time::Duration::from_secs(1)).await; // Provides nicer UX.
        eprintln!(
            "Successfully validated and published voluntary exit for validator {}",
            keypair.pk
        );
    } else {
        eprintln!(
            "Did not publish voluntary exit for validator {}. Please check that you entered the correct exit phrase.",
            keypair.pk
        );
        return Ok(());
    }

    if no_wait {
        return Ok(());
    }

    loop {
        // Sleep for a slot duration and then check if voluntary exit was processed
        // by checking the validator status.
        sleep(Duration::from_secs(spec.seconds_per_slot)).await;

        let validator_data = get_validator_data(client, &keypair.pk).await?;
        match validator_data.status {
            ValidatorStatus::ActiveExiting => {
                let exit_epoch = validator_data.validator.exit_epoch;
                let withdrawal_epoch = validator_data.validator.withdrawable_epoch;
                let current_epoch = get_current_epoch::<E>(genesis_data.genesis_time, spec)
                    .ok_or("Failed to get current epoch. Please check your system time")?;
                eprintln!("Voluntary exit has been accepted into the beacon chain, but not yet finalized. \
                    Finalization may take several minutes or longer. Before finalization there is a low \
                    probability that the exit may be reverted.");
                eprintln!(
                    "Current epoch: {}, Exit epoch: {}, Withdrawable epoch: {}",
                    current_epoch, exit_epoch, withdrawal_epoch
                );
                eprintln!("Please keep your validator running till exit epoch");
                eprintln!(
                    "Exit epoch in approximately {} secs",
                    // epochs remaining × seconds per slot × slots per epoch
                    (exit_epoch - current_epoch) * spec.seconds_per_slot * E::slots_per_epoch()
                );
                break;
            }
            ValidatorStatus::ExitedSlashed | ValidatorStatus::ExitedUnslashed => {
                eprintln!(
                    "Validator has exited on epoch: {}",
                    validator_data.validator.exit_epoch
                );
                break;
            }
            _ => eprintln!("Waiting for voluntary exit to be accepted into the beacon chain..."),
        }
    }

    Ok(())
}
|
||||
|
||||
/// Get the validator index for the given validator public key by querying the beacon node endpoint.
///
/// Returns an error if the beacon endpoint returns an error or the validator is not
/// eligible for an exit: it must be `ActiveOngoing` and `epoch` must be at least
/// `activation_epoch + shard_committee_period` (the protocol's minimum time a
/// validator must serve before exiting).
async fn get_validator_index_for_exit(
    client: &BeaconNodeHttpClient,
    validator_pubkey: &PublicKey,
    epoch: Epoch,
    spec: &ChainSpec,
) -> Result<u64, String> {
    let validator_data = get_validator_data(client, validator_pubkey).await?;

    match validator_data.status {
        ValidatorStatus::ActiveOngoing => {
            // safe_add guards against overflow of activation_epoch + period.
            let eligible_epoch = validator_data
                .validator
                .activation_epoch
                .safe_add(spec.shard_committee_period)
                .map_err(|e| format!("Failed to calculate eligible epoch, validator activation epoch too high: {:?}", e))?;

            if epoch >= eligible_epoch {
                Ok(validator_data.index)
            } else {
                Err(format!(
                    "Validator {:?} is not eligible for exit. It will become eligible on epoch {}",
                    validator_pubkey, eligible_epoch
                ))
            }
        }
        // Any non-active status (pending, already exiting, exited, ...) cannot exit.
        status => Err(format!(
            "Validator {:?} is not eligible for voluntary exit. Validator status: {:?}",
            validator_pubkey, status
        )),
    }
}
|
||||
|
||||
/// Returns the validator data by querying the beacon node client.
|
||||
async fn get_validator_data(
|
||||
client: &BeaconNodeHttpClient,
|
||||
validator_pubkey: &PublicKey,
|
||||
) -> Result<ValidatorData, String> {
|
||||
Ok(client
|
||||
.get_beacon_states_validator_id(
|
||||
StateId::Head,
|
||||
&ValidatorId::PublicKey(validator_pubkey.into()),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to get validator details: {:?}", e))?
|
||||
.ok_or_else(|| {
|
||||
format!(
|
||||
"Validator {} is not present in the beacon state. \
|
||||
Please ensure that your beacon node is synced and the validator has been deposited.",
|
||||
validator_pubkey
|
||||
)
|
||||
})?
|
||||
.data)
|
||||
}
|
||||
|
||||
/// Get genesis data by querying the beacon node client.
|
||||
async fn get_geneisis_data(client: &BeaconNodeHttpClient) -> Result<GenesisData, String> {
|
||||
Ok(client
|
||||
.get_beacon_genesis()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to get beacon genesis: {}", e))?
|
||||
.data)
|
||||
}
|
||||
|
||||
/// Gets syncing status from beacon node client and returns true if syncing and false otherwise.
|
||||
async fn is_syncing(client: &BeaconNodeHttpClient) -> Result<bool, String> {
|
||||
Ok(client
|
||||
.get_node_syncing()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to get sync status: {:?}", e))?
|
||||
.data
|
||||
.is_syncing)
|
||||
}
|
||||
|
||||
/// Get fork object for the current state by querying the beacon node client.
|
||||
async fn get_beacon_state_fork(client: &BeaconNodeHttpClient) -> Result<Fork, String> {
|
||||
Ok(client
|
||||
.get_beacon_states_fork(StateId::Head)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to get get fork: {:?}", e))?
|
||||
.ok_or("Failed to get fork, state not found")?
|
||||
.data)
|
||||
}
|
||||
|
||||
/// Calculates the current epoch from the genesis time and current time.
|
||||
fn get_current_epoch<E: EthSpec>(genesis_time: u64, spec: &ChainSpec) -> Option<Epoch> {
|
||||
let slot_clock = SystemTimeSlotClock::new(
|
||||
spec.genesis_slot,
|
||||
Duration::from_secs(genesis_time),
|
||||
Duration::from_secs(spec.seconds_per_slot),
|
||||
);
|
||||
slot_clock.now().map(|s| s.epoch(E::slots_per_epoch()))
|
||||
}
|
||||
|
||||
/// Load the voting keypair by loading and decrypting the keystore.
///
/// If `password_file_path` is `Some`, the keystore is unlocked with the password in
/// that file; otherwise the user is prompted for a password (read from stdin when
/// `stdin_inputs` is set, from the tty otherwise).
fn load_voting_keypair(
    voting_keystore_path: &Path,
    password_file_path: Option<&PathBuf>,
    stdin_inputs: bool,
) -> Result<Keypair, String> {
    let keystore = Keystore::from_json_file(&voting_keystore_path).map_err(|e| {
        format!(
            "Unable to read keystore JSON {:?}: {:?}",
            voting_keystore_path, e
        )
    })?;

    // Get password from password file.
    if let Some(password_file) = password_file_path {
        validator_dir::unlock_keypair_from_password_path(voting_keystore_path, password_file)
            .map_err(|e| format!("Error while decrypting keypair: {:?}", e))
    } else {
        // Prompt password from user.
        eprintln!();
        eprintln!(
            "{} for validator in {:?}: ",
            PASSWORD_PROMPT, voting_keystore_path
        );
        let password = account_utils::read_password_from_user(stdin_inputs)?;
        match keystore.decrypt_keypair(password.as_ref()) {
            Ok(keypair) => {
                eprintln!("Password is correct.");
                eprintln!();
                std::thread::sleep(std::time::Duration::from_secs(1)); // Provides nicer UX.
                Ok(keypair)
            }
            // Distinguish a wrong password from other decryption failures so the
            // user gets an actionable message.
            Err(eth2_keystore::Error::InvalidPassword) => Err("Invalid password".to_string()),
            Err(e) => Err(format!("Error while decrypting keypair: {:?}", e)),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
// Only compiled for release-mode test runs — presumably because keystore
// decryption (scrypt) is prohibitively slow without optimizations; confirm.
#[cfg(not(debug_assertions))]
mod tests {
    use super::*;
    use eth2_keystore::KeystoreBuilder;
    use std::fs::File;
    use std::io::Write;
    use tempfile::{tempdir, TempDir};

    // Shared fixtures for the keystore round-trip tests.
    const PASSWORD: &str = "cats";
    const KEYSTORE_NAME: &str = "keystore-m_12381_3600_0_0_0-1595406747.json";
    const PASSWORD_FILE: &str = "password.pass";

    // Builds a random keystore in `dir` (optionally with a password file alongside)
    // and returns its public key for later comparison.
    fn create_and_save_keystore(dir: &TempDir, save_password: bool) -> PublicKey {
        let keypair = Keypair::random();
        let keystore = KeystoreBuilder::new(&keypair, PASSWORD.as_bytes(), "".into())
            .unwrap()
            .build()
            .unwrap();

        // Create a keystore.
        File::create(dir.path().join(KEYSTORE_NAME))
            .map(|mut file| keystore.to_json_writer(&mut file).unwrap())
            .unwrap();
        if save_password {
            File::create(dir.path().join(PASSWORD_FILE))
                .map(|mut file| file.write_all(PASSWORD.as_bytes()).unwrap())
                .unwrap();
        }
        keystore.public_key().unwrap()
    }

    // Unlocking via a password file must recover the original public key.
    #[test]
    fn test_load_keypair_password_file() {
        let dir = tempdir().unwrap();
        let expected_pk = create_and_save_keystore(&dir, true);

        let kp = load_voting_keypair(
            &dir.path().join(KEYSTORE_NAME),
            Some(&dir.path().join(PASSWORD_FILE)),
            false,
        )
        .unwrap();

        assert_eq!(expected_pk, kp.pk.into());
    }
}
|
||||
@@ -1,4 +1,5 @@
|
||||
use crate::{common::ensure_dir_exists, VALIDATOR_DIR_FLAG};
|
||||
use crate::wallet::create::{PASSWORD_FLAG, STDIN_INPUTS_FLAG};
|
||||
use account_utils::validator_definitions::SigningDefinition;
|
||||
use account_utils::{
|
||||
eth2_keystore::Keystore,
|
||||
read_password_from_user,
|
||||
@@ -6,8 +7,10 @@ use account_utils::{
|
||||
recursively_find_voting_keystores, ValidatorDefinition, ValidatorDefinitions,
|
||||
CONFIG_FILENAME,
|
||||
},
|
||||
ZeroizeString,
|
||||
};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::thread::sleep;
|
||||
@@ -16,7 +19,7 @@ use std::time::Duration;
|
||||
pub const CMD: &str = "import";
|
||||
pub const KEYSTORE_FLAG: &str = "keystore";
|
||||
pub const DIR_FLAG: &str = "directory";
|
||||
pub const STDIN_PASSWORD_FLAG: &str = "stdin-passwords";
|
||||
pub const REUSE_PASSWORD_FLAG: &str = "reuse-password";
|
||||
|
||||
pub const PASSWORD_PROMPT: &str = "Enter the keystore password, or press enter to omit it:";
|
||||
pub const KEYSTORE_REUSE_WARNING: &str = "DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH \
|
||||
@@ -54,37 +57,61 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
Arg::with_name(STDIN_INPUTS_FLAG)
|
||||
.takes_value(false)
|
||||
.hidden(cfg!(windows))
|
||||
.long(STDIN_INPUTS_FLAG)
|
||||
.help("If present, read all user inputs from stdin instead of tty."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(REUSE_PASSWORD_FLAG)
|
||||
.long(REUSE_PASSWORD_FLAG)
|
||||
.help("If present, the same password will be used for all imported keystores."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(PASSWORD_FLAG)
|
||||
.long(PASSWORD_FLAG)
|
||||
.value_name("KEYSTORE_PASSWORD_PATH")
|
||||
.requires(REUSE_PASSWORD_FLAG)
|
||||
.help(
|
||||
"The path where the validator directories will be created. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
"The path to the file containing the password which will unlock all \
|
||||
keystores being imported. This flag must be used with `--reuse-password`. \
|
||||
The password will be copied to the `validator_definitions.yml` file, so after \
|
||||
import we strongly recommend you delete the file at KEYSTORE_PASSWORD_PATH.",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STDIN_PASSWORD_FLAG)
|
||||
.long(STDIN_PASSWORD_FLAG)
|
||||
.help("If present, read passwords from stdin instead of tty."),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> {
|
||||
let keystore: Option<PathBuf> = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?;
|
||||
let keystores_dir: Option<PathBuf> = clap_utils::parse_optional(matches, DIR_FLAG)?;
|
||||
let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
VALIDATOR_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("validators"),
|
||||
)?;
|
||||
let stdin_password = matches.is_present(STDIN_PASSWORD_FLAG);
|
||||
|
||||
ensure_dir_exists(&validator_dir)?;
|
||||
let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG);
|
||||
let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG);
|
||||
let keystore_password_path: Option<PathBuf> =
|
||||
clap_utils::parse_optional(matches, PASSWORD_FLAG)?;
|
||||
|
||||
let mut defs = ValidatorDefinitions::open_or_create(&validator_dir)
|
||||
.map_err(|e| format!("Unable to open {}: {:?}", CONFIG_FILENAME, e))?;
|
||||
|
||||
let slashing_protection_path = validator_dir.join(SLASHING_PROTECTION_FILENAME);
|
||||
let slashing_protection =
|
||||
SlashingDatabase::open_or_create(&slashing_protection_path).map_err(|e| {
|
||||
format!(
|
||||
"Unable to open or create slashing protection database at {}: {:?}",
|
||||
slashing_protection_path.display(),
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
// Create an empty transaction and drop it. Used to test if the database is locked.
|
||||
slashing_protection.test_transaction().map_err(|e| {
|
||||
format!(
|
||||
"Cannot import keys while the validator client is running: {:?}",
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
// Collect the paths for the keystores that should be imported.
|
||||
let keystore_paths = match (keystore, keystores_dir) {
|
||||
(Some(keystore), None) => vec![keystore],
|
||||
@@ -115,20 +142,24 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
//
|
||||
// - Obtain the keystore password, if the user desires.
|
||||
// - Copy the keystore into the `validator_dir`.
|
||||
// - Register the voting key with the slashing protection database.
|
||||
// - Add the keystore to the validator definitions file.
|
||||
//
|
||||
// Skip keystores that already exist, but exit early if any operation fails.
|
||||
// Reuses the same password for all keystores if the `REUSE_PASSWORD_FLAG` flag is set.
|
||||
let mut num_imported_keystores = 0;
|
||||
let mut previous_password: Option<ZeroizeString> = None;
|
||||
|
||||
for src_keystore in &keystore_paths {
|
||||
let keystore = Keystore::from_json_file(src_keystore)
|
||||
.map_err(|e| format!("Unable to read keystore JSON {:?}: {:?}", src_keystore, e))?;
|
||||
|
||||
eprintln!("");
|
||||
eprintln!();
|
||||
eprintln!("Keystore found at {:?}:", src_keystore);
|
||||
eprintln!("");
|
||||
eprintln!();
|
||||
eprintln!(" - Public key: 0x{}", keystore.pubkey());
|
||||
eprintln!(" - UUID: {}", keystore.uuid());
|
||||
eprintln!("");
|
||||
eprintln!();
|
||||
eprintln!(
|
||||
"If you enter the password it will be stored as plain-text in {} so that it is not \
|
||||
required each time the validator client starts.",
|
||||
@@ -136,22 +167,39 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
);
|
||||
|
||||
let password_opt = loop {
|
||||
eprintln!("");
|
||||
if let Some(password) = previous_password.clone() {
|
||||
eprintln!("Reuse previous password.");
|
||||
break Some(password);
|
||||
}
|
||||
eprintln!();
|
||||
eprintln!("{}", PASSWORD_PROMPT);
|
||||
|
||||
let password = read_password_from_user(stdin_password)?;
|
||||
|
||||
if password.as_ref().is_empty() {
|
||||
eprintln!("Continuing without password.");
|
||||
sleep(Duration::from_secs(1)); // Provides nicer UX.
|
||||
break None;
|
||||
}
|
||||
let password = match keystore_password_path.as_ref() {
|
||||
Some(path) => {
|
||||
let password_from_file: ZeroizeString = fs::read_to_string(&path)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", path, e))?
|
||||
.into();
|
||||
password_from_file.without_newlines()
|
||||
}
|
||||
None => {
|
||||
let password_from_user = read_password_from_user(stdin_inputs)?;
|
||||
if password_from_user.as_ref().is_empty() {
|
||||
eprintln!("Continuing without password.");
|
||||
sleep(Duration::from_secs(1)); // Provides nicer UX.
|
||||
break None;
|
||||
}
|
||||
password_from_user
|
||||
}
|
||||
};
|
||||
|
||||
match keystore.decrypt_keypair(password.as_ref()) {
|
||||
Ok(_) => {
|
||||
eprintln!("Password is correct.");
|
||||
eprintln!("");
|
||||
eprintln!();
|
||||
sleep(Duration::from_secs(1)); // Provides nicer UX.
|
||||
if reuse_password {
|
||||
previous_password = Some(password.clone());
|
||||
}
|
||||
break Some(password);
|
||||
}
|
||||
Err(eth2_keystore::Error::InvalidPassword) => {
|
||||
@@ -161,10 +209,35 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
}
|
||||
};
|
||||
|
||||
let voting_pubkey = keystore
|
||||
.public_key()
|
||||
.ok_or_else(|| format!("Keystore public key is invalid: {}", keystore.pubkey()))?;
|
||||
|
||||
// The keystore is placed in a directory that matches the name of the public key. This
|
||||
// provides some loose protection against adding the same keystore twice.
|
||||
let dest_dir = validator_dir.join(format!("0x{}", keystore.pubkey()));
|
||||
if dest_dir.exists() {
|
||||
// Check if we should update password for existing validator in case if it was provided via reimport: #2854
|
||||
let old_validator_def_opt = defs
|
||||
.as_mut_slice()
|
||||
.iter_mut()
|
||||
.find(|def| def.voting_public_key == voting_pubkey);
|
||||
if let Some(ValidatorDefinition {
|
||||
signing_definition:
|
||||
SigningDefinition::LocalKeystore {
|
||||
voting_keystore_password: ref mut old_passwd,
|
||||
..
|
||||
},
|
||||
..
|
||||
}) = old_validator_def_opt
|
||||
{
|
||||
if old_passwd.is_none() && password_opt.is_some() {
|
||||
*old_passwd = password_opt;
|
||||
defs.save(&validator_dir)
|
||||
.map_err(|e| format!("Unable to save {}: {:?}", CONFIG_FILENAME, e))?;
|
||||
eprintln!("Password updated for public key {}", voting_pubkey);
|
||||
}
|
||||
}
|
||||
eprintln!(
|
||||
"Skipping import of keystore for existing public key: {:?}",
|
||||
src_keystore
|
||||
@@ -186,12 +259,29 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
fs::copy(&src_keystore, &dest_keystore)
|
||||
.map_err(|e| format!("Unable to copy keystore: {:?}", e))?;
|
||||
|
||||
// Register with slashing protection.
|
||||
slashing_protection
|
||||
.register_validator(voting_pubkey.compress())
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Error registering validator {}: {:?}",
|
||||
voting_pubkey.as_hex_string(),
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
eprintln!("Successfully imported keystore.");
|
||||
num_imported_keystores += 1;
|
||||
|
||||
let validator_def =
|
||||
ValidatorDefinition::new_keystore_with_password(&dest_keystore, password_opt)
|
||||
.map_err(|e| format!("Unable to create new validator definition: {:?}", e))?;
|
||||
let graffiti = None;
|
||||
let suggested_fee_recipient = None;
|
||||
let validator_def = ValidatorDefinition::new_keystore_with_password(
|
||||
&dest_keystore,
|
||||
password_opt,
|
||||
graffiti,
|
||||
suggested_fee_recipient,
|
||||
)
|
||||
.map_err(|e| format!("Unable to create new validator definition: {:?}", e))?;
|
||||
|
||||
defs.push(validator_def);
|
||||
|
||||
@@ -201,13 +291,13 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
eprintln!("Successfully updated {}.", CONFIG_FILENAME);
|
||||
}
|
||||
|
||||
eprintln!("");
|
||||
eprintln!();
|
||||
eprintln!(
|
||||
"Successfully imported {} validators ({} skipped).",
|
||||
num_imported_keystores,
|
||||
keystore_paths.len() - num_imported_keystores
|
||||
);
|
||||
eprintln!("");
|
||||
eprintln!();
|
||||
eprintln!("WARNING: {}", KEYSTORE_REUSE_WARNING);
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -1,40 +1,27 @@
|
||||
use crate::VALIDATOR_DIR_FLAG;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use account_utils::validator_definitions::ValidatorDefinitions;
|
||||
use clap::App;
|
||||
use std::path::PathBuf;
|
||||
use validator_dir::Manager as ValidatorManager;
|
||||
|
||||
pub const CMD: &str = "list";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.arg(
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path to search for validator directories. \
|
||||
Defaults to ~/.lighthouse/validators",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.about("Lists the names of all validators.")
|
||||
App::new(CMD).about("Lists the public keys of all validators.")
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches<'_>) -> Result<(), String> {
|
||||
let data_dir = clap_utils::parse_path_with_default_in_home_dir(
|
||||
matches,
|
||||
VALIDATOR_DIR_FLAG,
|
||||
PathBuf::new().join(".lighthouse").join("validators"),
|
||||
)?;
|
||||
pub fn cli_run(validator_dir: PathBuf) -> Result<(), String> {
|
||||
let validator_definitions = ValidatorDefinitions::open(&validator_dir).map_err(|e| {
|
||||
format!(
|
||||
"No validator definitions found in {:?}: {:?}",
|
||||
validator_dir, e
|
||||
)
|
||||
})?;
|
||||
|
||||
let mgr = ValidatorManager::open(&data_dir)
|
||||
.map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?;
|
||||
|
||||
for (name, _path) in mgr
|
||||
.directory_names()
|
||||
.map_err(|e| format!("Unable to list wallets: {:?}", e))?
|
||||
{
|
||||
println!("{}", name)
|
||||
for def in validator_definitions.as_slice() {
|
||||
println!(
|
||||
"{} ({})",
|
||||
def.voting_public_key,
|
||||
if def.enabled { "enabled" } else { "disabled" }
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -1,11 +1,16 @@
|
||||
pub mod create;
|
||||
pub mod deposit;
|
||||
pub mod exit;
|
||||
pub mod import;
|
||||
pub mod list;
|
||||
pub mod modify;
|
||||
pub mod recover;
|
||||
pub mod slashing_protection;
|
||||
|
||||
use crate::common::base_wallet_dir;
|
||||
use crate::{VALIDATOR_DIR_FLAG, VALIDATOR_DIR_FLAG_ALIAS};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR};
|
||||
use environment::Environment;
|
||||
use std::path::PathBuf;
|
||||
use types::EthSpec;
|
||||
|
||||
pub const CMD: &str = "validator";
|
||||
@@ -14,26 +19,45 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about("Provides commands for managing Eth2 validators.")
|
||||
.arg(
|
||||
Arg::with_name("base-dir")
|
||||
.long("base-dir")
|
||||
.value_name("BASE_DIRECTORY")
|
||||
.help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets")
|
||||
.takes_value(true),
|
||||
Arg::with_name(VALIDATOR_DIR_FLAG)
|
||||
.long(VALIDATOR_DIR_FLAG)
|
||||
.alias(VALIDATOR_DIR_FLAG_ALIAS)
|
||||
.value_name("VALIDATOR_DIRECTORY")
|
||||
.help(
|
||||
"The path to search for validator directories. \
|
||||
Defaults to ~/.lighthouse/{network}/validators",
|
||||
)
|
||||
.takes_value(true)
|
||||
.conflicts_with("datadir"),
|
||||
)
|
||||
.subcommand(create::cli_app())
|
||||
.subcommand(deposit::cli_app())
|
||||
.subcommand(modify::cli_app())
|
||||
.subcommand(import::cli_app())
|
||||
.subcommand(list::cli_app())
|
||||
.subcommand(recover::cli_app())
|
||||
.subcommand(slashing_protection::cli_app())
|
||||
.subcommand(exit::cli_app())
|
||||
}
|
||||
|
||||
pub fn cli_run<T: EthSpec>(matches: &ArgMatches, env: Environment<T>) -> Result<(), String> {
|
||||
let base_wallet_dir = base_wallet_dir(matches, "base-dir")?;
|
||||
let validator_base_dir = if matches.value_of("datadir").is_some() {
|
||||
let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
|
||||
path.join(DEFAULT_VALIDATOR_DIR)
|
||||
} else {
|
||||
parse_path_or_default_with_flag(matches, VALIDATOR_DIR_FLAG, DEFAULT_VALIDATOR_DIR)?
|
||||
};
|
||||
eprintln!("validator-dir path: {:?}", validator_base_dir);
|
||||
|
||||
match matches.subcommand() {
|
||||
(create::CMD, Some(matches)) => create::cli_run::<T>(matches, env, base_wallet_dir),
|
||||
(deposit::CMD, Some(matches)) => deposit::cli_run::<T>(matches, env),
|
||||
(import::CMD, Some(matches)) => import::cli_run(matches),
|
||||
(list::CMD, Some(matches)) => list::cli_run(matches),
|
||||
(create::CMD, Some(matches)) => create::cli_run::<T>(matches, env, validator_base_dir),
|
||||
(modify::CMD, Some(matches)) => modify::cli_run(matches, validator_base_dir),
|
||||
(import::CMD, Some(matches)) => import::cli_run(matches, validator_base_dir),
|
||||
(list::CMD, Some(_)) => list::cli_run(validator_base_dir),
|
||||
(recover::CMD, Some(matches)) => recover::cli_run(matches, validator_base_dir),
|
||||
(slashing_protection::CMD, Some(matches)) => {
|
||||
slashing_protection::cli_run(matches, env, validator_base_dir)
|
||||
}
|
||||
(exit::CMD, Some(matches)) => exit::cli_run(matches, env),
|
||||
(unknown, _) => Err(format!(
|
||||
"{} does not have a {} command. See --help",
|
||||
CMD, unknown
|
||||
|
||||
100
account_manager/src/validator/modify.rs
Normal file
100
account_manager/src/validator/modify.rs
Normal file
@@ -0,0 +1,100 @@
|
||||
use account_utils::validator_definitions::ValidatorDefinitions;
|
||||
use bls::PublicKey;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use std::{collections::HashSet, path::PathBuf};
|
||||
|
||||
pub const CMD: &str = "modify";
|
||||
pub const ENABLE: &str = "enable";
|
||||
pub const DISABLE: &str = "disable";
|
||||
|
||||
pub const PUBKEY_FLAG: &str = "pubkey";
|
||||
pub const ALL: &str = "all";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about("Modify validator status in validator_definitions.yml.")
|
||||
.subcommand(
|
||||
App::new(ENABLE)
|
||||
.about("Enable validator(s) in validator_definitions.yml.")
|
||||
.arg(
|
||||
Arg::with_name(PUBKEY_FLAG)
|
||||
.long(PUBKEY_FLAG)
|
||||
.value_name("PUBKEY")
|
||||
.help("Validator pubkey to enable")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(ALL)
|
||||
.long(ALL)
|
||||
.help("Enable all validators in the validator directory")
|
||||
.takes_value(false)
|
||||
.conflicts_with(PUBKEY_FLAG),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
App::new(DISABLE)
|
||||
.about("Disable validator(s) in validator_definitions.yml.")
|
||||
.arg(
|
||||
Arg::with_name(PUBKEY_FLAG)
|
||||
.long(PUBKEY_FLAG)
|
||||
.value_name("PUBKEY")
|
||||
.help("Validator pubkey to disable")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(ALL)
|
||||
.long(ALL)
|
||||
.help("Disable all validators in the validator directory")
|
||||
.takes_value(false)
|
||||
.conflicts_with(PUBKEY_FLAG),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> {
|
||||
// `true` implies we are setting `validator_definition.enabled = true` and
|
||||
// vice versa.
|
||||
let (enabled, sub_matches) = match matches.subcommand() {
|
||||
(ENABLE, Some(sub_matches)) => (true, sub_matches),
|
||||
(DISABLE, Some(sub_matches)) => (false, sub_matches),
|
||||
(unknown, _) => {
|
||||
return Err(format!(
|
||||
"{} does not have a {} command. See --help",
|
||||
CMD, unknown
|
||||
))
|
||||
}
|
||||
};
|
||||
let mut defs = ValidatorDefinitions::open(&validator_dir).map_err(|e| {
|
||||
format!(
|
||||
"No validator definitions found in {:?}: {:?}",
|
||||
validator_dir, e
|
||||
)
|
||||
})?;
|
||||
let pubkeys_to_modify = if sub_matches.is_present(ALL) {
|
||||
defs.as_slice()
|
||||
.iter()
|
||||
.map(|def| def.voting_public_key.clone())
|
||||
.collect::<HashSet<_>>()
|
||||
} else {
|
||||
let public_key: PublicKey = clap_utils::parse_required(sub_matches, PUBKEY_FLAG)?;
|
||||
std::iter::once(public_key).collect::<HashSet<PublicKey>>()
|
||||
};
|
||||
|
||||
// Modify required entries from validator_definitions.
|
||||
for def in defs.as_mut_slice() {
|
||||
if pubkeys_to_modify.contains(&def.voting_public_key) {
|
||||
def.enabled = enabled;
|
||||
eprintln!(
|
||||
"Validator {} {}",
|
||||
def.voting_public_key,
|
||||
if enabled { "enabled" } else { "disabled" }
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
defs.save(&validator_dir)
|
||||
.map_err(|e| format!("Unable to modify validator definitions: {:?}", e))?;
|
||||
|
||||
eprintln!("\nSuccessfully modified validator_definitions.yml");
|
||||
Ok(())
|
||||
}
|
||||
149
account_manager/src/validator/recover.rs
Normal file
149
account_manager/src/validator/recover.rs
Normal file
@@ -0,0 +1,149 @@
|
||||
use super::create::STORE_WITHDRAW_FLAG;
|
||||
use crate::common::read_mnemonic_from_cli;
|
||||
use crate::validator::create::COUNT_FLAG;
|
||||
use crate::wallet::create::STDIN_INPUTS_FLAG;
|
||||
use crate::SECRETS_DIR_FLAG;
|
||||
use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder};
|
||||
use account_utils::random_password;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use directory::ensure_dir_exists;
|
||||
use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR};
|
||||
use eth2_wallet::bip39::Seed;
|
||||
use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType, ValidatorKeystores};
|
||||
use std::path::PathBuf;
|
||||
use validator_dir::Builder as ValidatorDirBuilder;
|
||||
pub const CMD: &str = "recover";
|
||||
pub const FIRST_INDEX_FLAG: &str = "first-index";
|
||||
pub const MNEMONIC_FLAG: &str = "mnemonic-path";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about(
|
||||
"Recovers validator private keys given a BIP-39 mnemonic phrase. \
|
||||
If you did not specify a `--first-index` or count `--count`, by default this will \
|
||||
only recover the keys associated with the validator at index 0 for an HD wallet \
|
||||
in accordance with the EIP-2333 spec.")
|
||||
.arg(
|
||||
Arg::with_name(FIRST_INDEX_FLAG)
|
||||
.long(FIRST_INDEX_FLAG)
|
||||
.value_name("FIRST_INDEX")
|
||||
.help("The first of consecutive key indexes you wish to recover.")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("0"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(COUNT_FLAG)
|
||||
.long(COUNT_FLAG)
|
||||
.value_name("COUNT")
|
||||
.help("The number of validator keys you wish to recover. Counted consecutively from the provided `--first_index`.")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("1"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(MNEMONIC_FLAG)
|
||||
.long(MNEMONIC_FLAG)
|
||||
.value_name("MNEMONIC_PATH")
|
||||
.help(
|
||||
"If present, the mnemonic will be read in from this file.",
|
||||
)
|
||||
.takes_value(true)
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SECRETS_DIR_FLAG)
|
||||
.long(SECRETS_DIR_FLAG)
|
||||
.value_name("SECRETS_DIR")
|
||||
.help(
|
||||
"The path where the validator keystore passwords will be stored. \
|
||||
Defaults to ~/.lighthouse/{network}/secrets",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STORE_WITHDRAW_FLAG)
|
||||
.long(STORE_WITHDRAW_FLAG)
|
||||
.help(
|
||||
"If present, the withdrawal keystore will be stored alongside the voting \
|
||||
keypair. It is generally recommended to *not* store the withdrawal key and \
|
||||
instead generate them from the wallet seed when required.",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STDIN_INPUTS_FLAG)
|
||||
.takes_value(false)
|
||||
.hidden(cfg!(windows))
|
||||
.long(STDIN_INPUTS_FLAG)
|
||||
.help("If present, read all user inputs from stdin instead of tty."),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> {
|
||||
let secrets_dir = if matches.value_of("datadir").is_some() {
|
||||
let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
|
||||
path.join(DEFAULT_SECRET_DIR)
|
||||
} else {
|
||||
parse_path_or_default_with_flag(matches, SECRETS_DIR_FLAG, DEFAULT_SECRET_DIR)?
|
||||
};
|
||||
let first_index: u32 = clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?;
|
||||
let count: u32 = clap_utils::parse_required(matches, COUNT_FLAG)?;
|
||||
let mnemonic_path: Option<PathBuf> = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?;
|
||||
let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG);
|
||||
|
||||
eprintln!("secrets-dir path: {:?}", secrets_dir);
|
||||
|
||||
ensure_dir_exists(&validator_dir)?;
|
||||
ensure_dir_exists(&secrets_dir)?;
|
||||
|
||||
eprintln!();
|
||||
eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING.");
|
||||
eprintln!();
|
||||
|
||||
let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?;
|
||||
|
||||
let seed = Seed::new(&mnemonic, "");
|
||||
|
||||
for index in first_index..first_index + count {
|
||||
let voting_password = random_password();
|
||||
let withdrawal_password = random_password();
|
||||
|
||||
let derive = |key_type: KeyType, password: &[u8]| -> Result<Keystore, String> {
|
||||
let (secret, path) =
|
||||
recover_validator_secret_from_mnemonic(seed.as_bytes(), index, key_type)
|
||||
.map_err(|e| format!("Unable to recover validator keys: {:?}", e))?;
|
||||
|
||||
let keypair = keypair_from_secret(secret.as_bytes())
|
||||
.map_err(|e| format!("Unable build keystore: {:?}", e))?;
|
||||
|
||||
KeystoreBuilder::new(&keypair, password, format!("{}", path))
|
||||
.map_err(|e| format!("Unable build keystore: {:?}", e))?
|
||||
.build()
|
||||
.map_err(|e| format!("Unable build keystore: {:?}", e))
|
||||
};
|
||||
|
||||
let keystores = ValidatorKeystores {
|
||||
voting: derive(KeyType::Voting, voting_password.as_bytes())?,
|
||||
withdrawal: derive(KeyType::Withdrawal, withdrawal_password.as_bytes())?,
|
||||
};
|
||||
|
||||
let voting_pubkey = keystores.voting.pubkey().to_string();
|
||||
|
||||
ValidatorDirBuilder::new(validator_dir.clone())
|
||||
.password_dir(secrets_dir.clone())
|
||||
.voting_keystore(keystores.voting, voting_password.as_bytes())
|
||||
.withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
|
||||
.store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG))
|
||||
.build()
|
||||
.map_err(|e| format!("Unable to build validator directory: {:?}", e))?;
|
||||
|
||||
println!(
|
||||
"{}/{}\tIndex: {}\t0x{}",
|
||||
index - first_index,
|
||||
count - first_index,
|
||||
index,
|
||||
voting_pubkey
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
272
account_manager/src/validator/slashing_protection.rs
Normal file
272
account_manager/src/validator/slashing_protection.rs
Normal file
@@ -0,0 +1,272 @@
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use environment::Environment;
|
||||
use slashing_protection::{
|
||||
interchange::Interchange, InterchangeError, InterchangeImportOutcome, SlashingDatabase,
|
||||
SLASHING_PROTECTION_FILENAME,
|
||||
};
|
||||
use std::fs::File;
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use types::{BeaconState, Epoch, EthSpec, PublicKeyBytes, Slot};
|
||||
|
||||
pub const CMD: &str = "slashing-protection";
|
||||
pub const IMPORT_CMD: &str = "import";
|
||||
pub const EXPORT_CMD: &str = "export";
|
||||
|
||||
pub const IMPORT_FILE_ARG: &str = "IMPORT-FILE";
|
||||
pub const EXPORT_FILE_ARG: &str = "EXPORT-FILE";
|
||||
|
||||
pub const MINIFY_FLAG: &str = "minify";
|
||||
pub const PUBKEYS_FLAG: &str = "pubkeys";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about("Import or export slashing protection data to or from another client")
|
||||
.subcommand(
|
||||
App::new(IMPORT_CMD)
|
||||
.about("Import an interchange file")
|
||||
.arg(
|
||||
Arg::with_name(IMPORT_FILE_ARG)
|
||||
.takes_value(true)
|
||||
.value_name("FILE")
|
||||
.help("The slashing protection interchange file to import (.json)"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(MINIFY_FLAG)
|
||||
.long(MINIFY_FLAG)
|
||||
.takes_value(true)
|
||||
.possible_values(&["false", "true"])
|
||||
.help(
|
||||
"Deprecated: Lighthouse no longer requires minification on import \
|
||||
because it always minifies",
|
||||
),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
App::new(EXPORT_CMD)
|
||||
.about("Export an interchange file")
|
||||
.arg(
|
||||
Arg::with_name(EXPORT_FILE_ARG)
|
||||
.takes_value(true)
|
||||
.value_name("FILE")
|
||||
.help("The filename to export the interchange file to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(PUBKEYS_FLAG)
|
||||
.long(PUBKEYS_FLAG)
|
||||
.takes_value(true)
|
||||
.value_name("PUBKEYS")
|
||||
.help(
|
||||
"List of public keys to export history for. Keys should be 0x-prefixed, \
|
||||
comma-separated. All known keys will be exported if omitted",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(MINIFY_FLAG)
|
||||
.long(MINIFY_FLAG)
|
||||
.takes_value(true)
|
||||
.default_value("false")
|
||||
.possible_values(&["false", "true"])
|
||||
.help(
|
||||
"Minify the output file. This will make it smaller and faster to \
|
||||
import, but not faster to generate.",
|
||||
),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run<T: EthSpec>(
|
||||
matches: &ArgMatches<'_>,
|
||||
env: Environment<T>,
|
||||
validator_base_dir: PathBuf,
|
||||
) -> Result<(), String> {
|
||||
let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME);
|
||||
|
||||
let eth2_network_config = env
|
||||
.eth2_network_config
|
||||
.ok_or("Unable to get testnet configuration from the environment")?;
|
||||
|
||||
let genesis_validators_root = eth2_network_config
|
||||
.beacon_state::<T>()
|
||||
.map(|state: BeaconState<T>| state.genesis_validators_root())
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Unable to get genesis state, has genesis occurred? Detail: {:?}",
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
match matches.subcommand() {
|
||||
(IMPORT_CMD, Some(matches)) => {
|
||||
let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?;
|
||||
let minify: Option<bool> = clap_utils::parse_optional(matches, MINIFY_FLAG)?;
|
||||
let import_file = File::open(&import_filename).map_err(|e| {
|
||||
format!(
|
||||
"Unable to open import file at {}: {:?}",
|
||||
import_filename.display(),
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
eprint!("Loading JSON file into memory & deserializing");
|
||||
let mut interchange = Interchange::from_json_reader(&import_file)
|
||||
.map_err(|e| format!("Error parsing file for import: {:?}", e))?;
|
||||
eprintln!(" [done].");
|
||||
|
||||
if let Some(minify) = minify {
|
||||
eprintln!(
|
||||
"WARNING: --minify flag is deprecated and will be removed in a future release"
|
||||
);
|
||||
if minify {
|
||||
eprint!("Minifying input file for faster loading");
|
||||
interchange = interchange
|
||||
.minify()
|
||||
.map_err(|e| format!("Minification failed: {:?}", e))?;
|
||||
eprintln!(" [done].");
|
||||
}
|
||||
}
|
||||
|
||||
let slashing_protection_database =
|
||||
SlashingDatabase::open_or_create(&slashing_protection_db_path).map_err(|e| {
|
||||
format!(
|
||||
"Unable to open database at {}: {:?}",
|
||||
slashing_protection_db_path.display(),
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
let display_slot = |slot: Option<Slot>| {
|
||||
slot.map_or("none".to_string(), |slot| format!("slot {}", slot.as_u64()))
|
||||
};
|
||||
let display_epoch = |epoch: Option<Epoch>| {
|
||||
epoch.map_or("?".to_string(), |epoch| format!("epoch {}", epoch.as_u64()))
|
||||
};
|
||||
let display_attestation = |source, target| match (source, target) {
|
||||
(None, None) => "none".to_string(),
|
||||
(source, target) => {
|
||||
format!("{} => {}", display_epoch(source), display_epoch(target))
|
||||
}
|
||||
};
|
||||
|
||||
match slashing_protection_database
|
||||
.import_interchange_info(interchange, genesis_validators_root)
|
||||
{
|
||||
Ok(outcomes) => {
|
||||
eprintln!("All records imported successfully:");
|
||||
for outcome in &outcomes {
|
||||
match outcome {
|
||||
InterchangeImportOutcome::Success { pubkey, summary } => {
|
||||
eprintln!("- {:?}", pubkey);
|
||||
eprintln!(
|
||||
" - latest block: {}",
|
||||
display_slot(summary.max_block_slot)
|
||||
);
|
||||
eprintln!(
|
||||
" - latest attestation: {}",
|
||||
display_attestation(
|
||||
summary.max_attestation_source,
|
||||
summary.max_attestation_target
|
||||
)
|
||||
);
|
||||
}
|
||||
InterchangeImportOutcome::Failure { pubkey, error } => {
|
||||
panic!(
|
||||
"import should be atomic, but key {:?} was imported despite error: {:?}",
|
||||
pubkey, error
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(InterchangeError::AtomicBatchAborted(outcomes)) => {
|
||||
eprintln!("ERROR: import aborted due to one or more errors");
|
||||
for outcome in &outcomes {
|
||||
if let InterchangeImportOutcome::Failure { pubkey, error } = outcome {
|
||||
eprintln!("- {:?}", pubkey);
|
||||
eprintln!(" - error: {:?}", error);
|
||||
}
|
||||
}
|
||||
return Err("ERROR: import aborted due to errors, see above.\n\
|
||||
No data has been imported and the slashing protection \
|
||||
database is in the same state it was in before the import.\n\
|
||||
Due to the failed import it is NOT SAFE to start validating\n\
|
||||
with any newly imported validator keys, as your database lacks\n\
|
||||
slashing protection data for them."
|
||||
.to_string());
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(format!(
|
||||
"Fatal error during import: {:?}\n\
|
||||
IT IS NOT SAFE TO START VALIDATING",
|
||||
e
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
eprintln!("Import completed successfully.");
|
||||
eprintln!(
|
||||
"Please double-check that the latest blocks and attestations above \
|
||||
match your expectations."
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
(EXPORT_CMD, Some(matches)) => {
|
||||
let export_filename: PathBuf = clap_utils::parse_required(matches, EXPORT_FILE_ARG)?;
|
||||
let minify: bool = clap_utils::parse_required(matches, MINIFY_FLAG)?;
|
||||
|
||||
let selected_pubkeys = if let Some(pubkeys) =
|
||||
clap_utils::parse_optional::<String>(matches, PUBKEYS_FLAG)?
|
||||
{
|
||||
let pubkeys = pubkeys
|
||||
.split(',')
|
||||
.map(PublicKeyBytes::from_str)
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| format!("Invalid --{} value: {:?}", PUBKEYS_FLAG, e))?;
|
||||
Some(pubkeys)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if !slashing_protection_db_path.exists() {
|
||||
return Err(format!(
|
||||
"No slashing protection database exists at: {}",
|
||||
slashing_protection_db_path.display()
|
||||
));
|
||||
}
|
||||
|
||||
let slashing_protection_database = SlashingDatabase::open(&slashing_protection_db_path)
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Unable to open database at {}: {:?}",
|
||||
slashing_protection_db_path.display(),
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
let mut interchange = slashing_protection_database
|
||||
.export_interchange_info(genesis_validators_root, selected_pubkeys.as_deref())
|
||||
.map_err(|e| format!("Error during export: {:?}", e))?;
|
||||
|
||||
if minify {
|
||||
eprintln!("Minifying output file");
|
||||
interchange = interchange
|
||||
.minify()
|
||||
.map_err(|e| format!("Unable to minify output: {:?}", e))?;
|
||||
}
|
||||
|
||||
let output_file = File::create(export_filename)
|
||||
.map_err(|e| format!("Error creating output file: {:?}", e))?;
|
||||
|
||||
interchange
|
||||
.write_to(&output_file)
|
||||
.map_err(|e| format!("Error writing output file: {:?}", e))?;
|
||||
|
||||
eprintln!("Export completed successfully");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
("", _) => Err("No subcommand provided, see --help for options".to_string()),
|
||||
(command, _) => Err(format!("No such subcommand `{}`", command)),
|
||||
}
|
||||
}
|
||||
@@ -1,23 +1,37 @@
|
||||
use crate::BASE_DIR_FLAG;
|
||||
use account_utils::{random_password, strip_off_newlines};
|
||||
use crate::common::read_wallet_name_from_cli;
|
||||
use crate::WALLETS_DIR_FLAG;
|
||||
use account_utils::{
|
||||
is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines,
|
||||
};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use eth2_wallet::{
|
||||
bip39::{Language, Mnemonic, MnemonicType},
|
||||
PlainText,
|
||||
};
|
||||
use eth2_wallet_manager::{WalletManager, WalletType};
|
||||
use eth2_wallet_manager::{LockedWallet, WalletManager, WalletType};
|
||||
use filesystem::create_with_600_perms;
|
||||
use std::ffi::OsStr;
|
||||
use std::fs::{self, File};
|
||||
use std::io::prelude::*;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
pub const CMD: &str = "create";
|
||||
pub const HD_TYPE: &str = "hd";
|
||||
pub const NAME_FLAG: &str = "name";
|
||||
pub const PASSPHRASE_FLAG: &str = "passphrase-file";
|
||||
pub const PASSWORD_FLAG: &str = "password-file";
|
||||
pub const TYPE_FLAG: &str = "type";
|
||||
pub const MNEMONIC_FLAG: &str = "mnemonic-output-path";
|
||||
pub const STDIN_INPUTS_FLAG: &str = "stdin-inputs";
|
||||
pub const MNEMONIC_LENGTH_FLAG: &str = "mnemonic-length";
|
||||
pub const MNEMONIC_TYPES: &[MnemonicType] = &[
|
||||
MnemonicType::Words12,
|
||||
MnemonicType::Words15,
|
||||
MnemonicType::Words18,
|
||||
MnemonicType::Words21,
|
||||
MnemonicType::Words24,
|
||||
];
|
||||
pub const NEW_WALLET_PASSWORD_PROMPT: &str =
|
||||
"Enter a password for your new wallet that is at least 12 characters long:";
|
||||
pub const RETYPE_PASSWORD_PROMPT: &str = "Please re-enter your wallet's new password:";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
@@ -30,12 +44,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
"The wallet will be created with this name. It is not allowed to \
|
||||
create two wallets with the same name for the same --base-dir.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(PASSPHRASE_FLAG)
|
||||
.long(PASSPHRASE_FLAG)
|
||||
Arg::with_name(PASSWORD_FLAG)
|
||||
.long(PASSWORD_FLAG)
|
||||
.value_name("WALLET_PASSWORD_PATH")
|
||||
.help(
|
||||
"A path to a file containing the password which will unlock the wallet. \
|
||||
@@ -43,8 +56,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
saved at that path. To avoid confusion, if the file does not already \
|
||||
exist it must include a '.pass' suffix.",
|
||||
)
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(TYPE_FLAG)
|
||||
@@ -67,56 +79,49 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
)
|
||||
.takes_value(true)
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STDIN_INPUTS_FLAG)
|
||||
.takes_value(false)
|
||||
.hidden(cfg!(windows))
|
||||
.long(STDIN_INPUTS_FLAG)
|
||||
.help("If present, read all user inputs from stdin instead of tty."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(MNEMONIC_LENGTH_FLAG)
|
||||
.long(MNEMONIC_LENGTH_FLAG)
|
||||
.value_name("MNEMONIC_LENGTH")
|
||||
.help("The number of words to use for the mnemonic phrase.")
|
||||
.takes_value(true)
|
||||
.validator(|len| {
|
||||
match len.parse::<usize>().ok().and_then(|words| MnemonicType::for_word_count(words).ok()) {
|
||||
Some(_) => Ok(()),
|
||||
None => Err(format!("Mnemonic length must be one of {}", MNEMONIC_TYPES.iter().map(|t| t.word_count().to_string()).collect::<Vec<_>>().join(", "))),
|
||||
}
|
||||
})
|
||||
.default_value("24"),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> {
|
||||
let name: String = clap_utils::parse_required(matches, NAME_FLAG)?;
|
||||
let wallet_password_path: PathBuf = clap_utils::parse_required(matches, PASSPHRASE_FLAG)?;
|
||||
pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> {
|
||||
let mnemonic_output_path: Option<PathBuf> = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?;
|
||||
let type_field: String = clap_utils::parse_required(matches, TYPE_FLAG)?;
|
||||
|
||||
let wallet_type = match type_field.as_ref() {
|
||||
HD_TYPE => WalletType::Hd,
|
||||
unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)),
|
||||
};
|
||||
|
||||
let mgr = WalletManager::open(&base_dir)
|
||||
.map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;
|
||||
|
||||
// Create a new random mnemonic.
|
||||
//
|
||||
// The `tiny-bip39` crate uses `thread_rng()` for this entropy.
|
||||
let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English);
|
||||
let mnemonic_length = clap_utils::parse_required(matches, MNEMONIC_LENGTH_FLAG)?;
|
||||
let mnemonic = Mnemonic::new(
|
||||
MnemonicType::for_word_count(mnemonic_length).expect("Mnemonic length already validated"),
|
||||
Language::English,
|
||||
);
|
||||
|
||||
// Create a random password if the file does not exist.
|
||||
if !wallet_password_path.exists() {
|
||||
// To prevent users from accidentally supplying their password to the PASSPHRASE_FLAG and
|
||||
// create a file with that name, we require that the password has a .pass suffix.
|
||||
if wallet_password_path.extension() != Some(&OsStr::new("pass")) {
|
||||
return Err(format!(
|
||||
"Only creates a password file if that file ends in .pass: {:?}",
|
||||
wallet_password_path
|
||||
));
|
||||
}
|
||||
|
||||
create_with_600_perms(&wallet_password_path, random_password().as_bytes())
|
||||
.map_err(|e| format!("Unable to write to {:?}: {:?}", wallet_password_path, e))?;
|
||||
}
|
||||
|
||||
let wallet_password = fs::read(&wallet_password_path)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", wallet_password_path, e))
|
||||
.map(|bytes| PlainText::from(strip_off_newlines(bytes)))?;
|
||||
|
||||
let wallet = mgr
|
||||
.create_wallet(name, wallet_type, &mnemonic, wallet_password.as_bytes())
|
||||
.map_err(|e| format!("Unable to create wallet: {:?}", e))?;
|
||||
let wallet = create_wallet_from_mnemonic(matches, wallet_base_dir.as_path(), &mnemonic)?;
|
||||
|
||||
if let Some(path) = mnemonic_output_path {
|
||||
create_with_600_perms(&path, mnemonic.phrase().as_bytes())
|
||||
.map_err(|e| format!("Unable to write mnemonic to {:?}: {:?}", path, e))?;
|
||||
}
|
||||
|
||||
println!("Your wallet's 12-word BIP-39 mnemonic is:");
|
||||
println!("Your wallet's {}-word BIP-39 mnemonic is:", mnemonic_length);
|
||||
println!();
|
||||
println!("\t{}", mnemonic.phrase());
|
||||
println!();
|
||||
@@ -140,25 +145,94 @@ pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Creates a file with `600 (-rw-------)` permissions.
|
||||
pub fn create_with_600_perms<P: AsRef<Path>>(path: P, bytes: &[u8]) -> Result<(), String> {
|
||||
let path = path.as_ref();
|
||||
pub fn create_wallet_from_mnemonic(
|
||||
matches: &ArgMatches,
|
||||
wallet_base_dir: &Path,
|
||||
mnemonic: &Mnemonic,
|
||||
) -> Result<LockedWallet, String> {
|
||||
let name: Option<String> = clap_utils::parse_optional(matches, NAME_FLAG)?;
|
||||
let wallet_password_path: Option<PathBuf> = clap_utils::parse_optional(matches, PASSWORD_FLAG)?;
|
||||
let type_field: String = clap_utils::parse_required(matches, TYPE_FLAG)?;
|
||||
let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG);
|
||||
let wallet_type = match type_field.as_ref() {
|
||||
HD_TYPE => WalletType::Hd,
|
||||
unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)),
|
||||
};
|
||||
|
||||
let mut file =
|
||||
File::create(&path).map_err(|e| format!("Unable to create {:?}: {}", path, e))?;
|
||||
let mgr = WalletManager::open(&wallet_base_dir)
|
||||
.map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?;
|
||||
|
||||
let mut perm = file
|
||||
.metadata()
|
||||
.map_err(|e| format!("Unable to get {:?} metadata: {}", path, e))?
|
||||
.permissions();
|
||||
let wallet_password: PlainText = match wallet_password_path {
|
||||
Some(path) => {
|
||||
// Create a random password if the file does not exist.
|
||||
if !path.exists() {
|
||||
// To prevent users from accidentally supplying their password to the PASSWORD_FLAG and
|
||||
// create a file with that name, we require that the password has a .pass suffix.
|
||||
if path.extension() != Some(OsStr::new("pass")) {
|
||||
return Err(format!(
|
||||
"Only creates a password file if that file ends in .pass: {:?}",
|
||||
path
|
||||
));
|
||||
}
|
||||
|
||||
perm.set_mode(0o600);
|
||||
create_with_600_perms(&path, random_password().as_bytes())
|
||||
.map_err(|e| format!("Unable to write to {:?}: {:?}", path, e))?;
|
||||
}
|
||||
read_new_wallet_password_from_cli(Some(path), stdin_inputs)?
|
||||
}
|
||||
None => read_new_wallet_password_from_cli(None, stdin_inputs)?,
|
||||
};
|
||||
|
||||
file.set_permissions(perm)
|
||||
.map_err(|e| format!("Unable to set {:?} permissions: {}", path, e))?;
|
||||
let wallet_name = read_wallet_name_from_cli(name, stdin_inputs)?;
|
||||
|
||||
file.write_all(bytes)
|
||||
.map_err(|e| format!("Unable to write to {:?}: {}", path, e))?;
|
||||
|
||||
Ok(())
|
||||
let wallet = mgr
|
||||
.create_wallet(
|
||||
wallet_name,
|
||||
wallet_type,
|
||||
mnemonic,
|
||||
wallet_password.as_bytes(),
|
||||
)
|
||||
.map_err(|e| format!("Unable to create wallet: {:?}", e))?;
|
||||
Ok(wallet)
|
||||
}
|
||||
|
||||
/// Used when a user is creating a new wallet. Read in a wallet password from a file if the password file
|
||||
/// path is provided. Otherwise, read from an interactive prompt using tty unless the `--stdin-inputs`
|
||||
/// flag is provided. This verifies the password complexity and verifies the password is correctly re-entered.
|
||||
pub fn read_new_wallet_password_from_cli(
|
||||
password_file_path: Option<PathBuf>,
|
||||
stdin_inputs: bool,
|
||||
) -> Result<PlainText, String> {
|
||||
match password_file_path {
|
||||
Some(path) => {
|
||||
let password: PlainText = fs::read(&path)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", path, e))
|
||||
.map(|bytes| strip_off_newlines(bytes).into())?;
|
||||
|
||||
// Ensure the password meets the minimum requirements.
|
||||
is_password_sufficiently_complex(password.as_bytes())?;
|
||||
Ok(password)
|
||||
}
|
||||
None => loop {
|
||||
eprintln!();
|
||||
eprintln!("{}", NEW_WALLET_PASSWORD_PROMPT);
|
||||
let password =
|
||||
PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec());
|
||||
|
||||
// Ensure the password meets the minimum requirements.
|
||||
match is_password_sufficiently_complex(password.as_bytes()) {
|
||||
Ok(_) => {
|
||||
eprintln!("{}", RETYPE_PASSWORD_PROMPT);
|
||||
let retyped_password =
|
||||
PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec());
|
||||
if retyped_password == password {
|
||||
break Ok(password);
|
||||
} else {
|
||||
eprintln!("Passwords do not match.");
|
||||
}
|
||||
}
|
||||
Err(message) => eprintln!("{}", message),
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use crate::BASE_DIR_FLAG;
|
||||
use crate::WALLETS_DIR_FLAG;
|
||||
use clap::App;
|
||||
use eth2_wallet_manager::WalletManager;
|
||||
use std::path::PathBuf;
|
||||
@@ -9,9 +9,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD).about("Lists the names of all wallets.")
|
||||
}
|
||||
|
||||
pub fn cli_run(base_dir: PathBuf) -> Result<(), String> {
|
||||
let mgr = WalletManager::open(&base_dir)
|
||||
.map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;
|
||||
pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> {
|
||||
let mgr = WalletManager::open(&wallet_base_dir)
|
||||
.map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?;
|
||||
|
||||
for (name, _uuid) in mgr
|
||||
.wallets()
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
pub mod create;
|
||||
pub mod list;
|
||||
pub mod recover;
|
||||
|
||||
use crate::{
|
||||
common::{base_wallet_dir, ensure_dir_exists},
|
||||
BASE_DIR_FLAG,
|
||||
};
|
||||
use crate::WALLETS_DIR_FLAG;
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use directory::{ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_WALLET_DIR};
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub const CMD: &str = "wallet";
|
||||
|
||||
@@ -13,23 +13,33 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about("Manage wallets, from which validator keys can be derived.")
|
||||
.arg(
|
||||
Arg::with_name(BASE_DIR_FLAG)
|
||||
.long(BASE_DIR_FLAG)
|
||||
.value_name("BASE_DIRECTORY")
|
||||
.help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets")
|
||||
.takes_value(true),
|
||||
Arg::with_name(WALLETS_DIR_FLAG)
|
||||
.long(WALLETS_DIR_FLAG)
|
||||
.value_name("WALLETS_DIRECTORY")
|
||||
.help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets")
|
||||
.takes_value(true)
|
||||
.conflicts_with("datadir"),
|
||||
)
|
||||
.subcommand(create::cli_app())
|
||||
.subcommand(list::cli_app())
|
||||
.subcommand(recover::cli_app())
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
|
||||
let base_dir = base_wallet_dir(matches, BASE_DIR_FLAG)?;
|
||||
ensure_dir_exists(&base_dir)?;
|
||||
let wallet_base_dir = if matches.value_of("datadir").is_some() {
|
||||
let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
|
||||
path.join(DEFAULT_WALLET_DIR)
|
||||
} else {
|
||||
parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)?
|
||||
};
|
||||
ensure_dir_exists(&wallet_base_dir)?;
|
||||
|
||||
eprintln!("wallet-dir path: {:?}", wallet_base_dir);
|
||||
|
||||
match matches.subcommand() {
|
||||
(create::CMD, Some(matches)) => create::cli_run(matches, base_dir),
|
||||
(list::CMD, Some(_)) => list::cli_run(base_dir),
|
||||
(create::CMD, Some(matches)) => create::cli_run(matches, wallet_base_dir),
|
||||
(list::CMD, Some(_)) => list::cli_run(wallet_base_dir),
|
||||
(recover::CMD, Some(matches)) => recover::cli_run(matches, wallet_base_dir),
|
||||
(unknown, _) => Err(format!(
|
||||
"{} does not have a {} command. See --help",
|
||||
CMD, unknown
|
||||
|
||||
86
account_manager/src/wallet/recover.rs
Normal file
86
account_manager/src/wallet/recover.rs
Normal file
@@ -0,0 +1,86 @@
|
||||
use crate::common::read_mnemonic_from_cli;
|
||||
use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG};
|
||||
use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub const CMD: &str = "recover";
|
||||
pub const MNEMONIC_FLAG: &str = "mnemonic-path";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
.about("Recovers an EIP-2386 wallet from a given a BIP-39 mnemonic phrase.")
|
||||
.arg(
|
||||
Arg::with_name(NAME_FLAG)
|
||||
.long(NAME_FLAG)
|
||||
.value_name("WALLET_NAME")
|
||||
.help(
|
||||
"The wallet will be created with this name. It is not allowed to \
|
||||
create two wallets with the same name for the same --base-dir.",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(PASSWORD_FLAG)
|
||||
.long(PASSWORD_FLAG)
|
||||
.value_name("PASSWORD_FILE_PATH")
|
||||
.help(
|
||||
"This will be the new password for your recovered wallet. \
|
||||
A path to a file containing the password which will unlock the wallet. \
|
||||
If the file does not exist, a random password will be generated and \
|
||||
saved at that path. To avoid confusion, if the file does not already \
|
||||
exist it must include a '.pass' suffix.",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(MNEMONIC_FLAG)
|
||||
.long(MNEMONIC_FLAG)
|
||||
.value_name("MNEMONIC_PATH")
|
||||
.help("If present, the mnemonic will be read in from this file.")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(TYPE_FLAG)
|
||||
.long(TYPE_FLAG)
|
||||
.value_name("WALLET_TYPE")
|
||||
.help(
|
||||
"The type of wallet to create. Only HD (hierarchical-deterministic) \
|
||||
wallets are supported presently..",
|
||||
)
|
||||
.takes_value(true)
|
||||
.possible_values(&[HD_TYPE])
|
||||
.default_value(HD_TYPE),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(STDIN_INPUTS_FLAG)
|
||||
.takes_value(false)
|
||||
.hidden(cfg!(windows))
|
||||
.long(STDIN_INPUTS_FLAG)
|
||||
.help("If present, read all user inputs from stdin instead of tty."),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> {
|
||||
let mnemonic_path: Option<PathBuf> = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?;
|
||||
let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG);
|
||||
|
||||
eprintln!();
|
||||
eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING.");
|
||||
eprintln!();
|
||||
|
||||
let mnemonic = read_mnemonic_from_cli(mnemonic_path, stdin_inputs)?;
|
||||
|
||||
let wallet = create_wallet_from_mnemonic(matches, wallet_base_dir.as_path(), &mnemonic)
|
||||
.map_err(|e| format!("Unable to create wallet: {:?}", e))?;
|
||||
|
||||
println!("Your wallet has been successfully recovered.");
|
||||
println!();
|
||||
println!("Your wallet's UUID is:");
|
||||
println!();
|
||||
println!("\t{}", wallet.wallet().uuid());
|
||||
println!();
|
||||
println!("You do not need to backup your UUID or keep it secret.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
[package]
|
||||
name = "beacon_node"
|
||||
version = "0.2.0"
|
||||
version = "2.1.4"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
name = "beacon_node"
|
||||
@@ -20,23 +20,24 @@ beacon_chain = { path = "beacon_chain" }
|
||||
types = { path = "../consensus/types" }
|
||||
store = { path = "./store" }
|
||||
client = { path = "client" }
|
||||
clap = "2.33.0"
|
||||
rand = "0.7.3"
|
||||
clap = "2.33.3"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
|
||||
slog-term = "2.5.0"
|
||||
slog-async = "2.5.0"
|
||||
ctrlc = { version = "3.1.4", features = ["termination"] }
|
||||
tokio = { version = "0.2.21", features = ["time"] }
|
||||
exit-future = "0.2.0"
|
||||
dirs = "2.0.2"
|
||||
logging = { path = "../common/logging" }
|
||||
futures = "0.3.5"
|
||||
dirs = "3.0.1"
|
||||
directory = {path = "../common/directory"}
|
||||
futures = "0.3.7"
|
||||
environment = { path = "../lighthouse/environment" }
|
||||
task_executor = { path = "../common/task_executor" }
|
||||
genesis = { path = "genesis" }
|
||||
eth2_testnet_config = { path = "../common/eth2_testnet_config" }
|
||||
eth2_libp2p = { path = "./eth2_libp2p" }
|
||||
eth2_ssz = "0.1.2"
|
||||
serde = "1.0.110"
|
||||
eth2_network_config = { path = "../common/eth2_network_config" }
|
||||
execution_layer = { path = "execution_layer" }
|
||||
lighthouse_network = { path = "./lighthouse_network" }
|
||||
serde = "1.0.116"
|
||||
clap_utils = { path = "../common/clap_utils" }
|
||||
hyper = "0.13.5"
|
||||
hyper = "0.14.4"
|
||||
lighthouse_version = { path = "../common/lighthouse_version" }
|
||||
hex = "0.4.2"
|
||||
slasher = { path = "../slasher" }
|
||||
monitoring_api = { path = "../common/monitoring_api" }
|
||||
sensitive_url = { path = "../common/sensitive_url" }
|
||||
http_api = { path = "http_api" }
|
||||
unused_port = { path = "../common/unused_port" }
|
||||
|
||||
@@ -2,57 +2,65 @@
|
||||
name = "beacon_chain"
|
||||
version = "0.2.0"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
autotests = false # using a single test binary compiles faster
|
||||
|
||||
[features]
|
||||
default = ["participation_metrics"]
|
||||
write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing.
|
||||
participation_metrics = [] # Exposes validator participation metrics to Prometheus.
|
||||
fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable
|
||||
|
||||
[dev-dependencies]
|
||||
int_to_bytes = { path = "../../consensus/int_to_bytes" }
|
||||
maplit = "1.0.2"
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
serde_json = "1.0.58"
|
||||
|
||||
[dependencies]
|
||||
eth2_config = { path = "../../common/eth2_config" }
|
||||
merkle_proof = { path = "../../consensus/merkle_proof" }
|
||||
store = { path = "../store" }
|
||||
parking_lot = "0.11.0"
|
||||
lazy_static = "1.4.0"
|
||||
smallvec = "1.4.1"
|
||||
smallvec = "1.6.1"
|
||||
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
|
||||
log = "0.4.8"
|
||||
operation_pool = { path = "../operation_pool" }
|
||||
rayon = "1.3.0"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.110"
|
||||
serde_yaml = "0.8.11"
|
||||
serde_json = "1.0.52"
|
||||
rayon = "1.4.1"
|
||||
serde = "1.0.116"
|
||||
serde_derive = "1.0.116"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
slog-term = "2.6.0"
|
||||
sloggers = "1.0.0"
|
||||
sloggers = { version = "2.1.1", features = ["json"] }
|
||||
slot_clock = { path = "../../common/slot_clock" }
|
||||
eth2_hashing = "0.1.0"
|
||||
eth2_ssz = "0.1.2"
|
||||
eth2_ssz_types = { path = "../../consensus/ssz_types" }
|
||||
eth2_ssz_derive = "0.1.0"
|
||||
eth2_hashing = "0.2.0"
|
||||
eth2_ssz = "0.4.1"
|
||||
eth2_ssz_types = "0.2.2"
|
||||
eth2_ssz_derive = "0.3.0"
|
||||
state_processing = { path = "../../consensus/state_processing" }
|
||||
tree_hash = "0.1.0"
|
||||
tree_hash = "0.4.1"
|
||||
types = { path = "../../consensus/types" }
|
||||
tokio = "0.2.21"
|
||||
tokio = "1.14.0"
|
||||
eth1 = { path = "../eth1" }
|
||||
websocket_server = { path = "../websocket_server" }
|
||||
futures = "0.3.5"
|
||||
futures = "0.3.7"
|
||||
genesis = { path = "../genesis" }
|
||||
integer-sqrt = "0.1.3"
|
||||
int_to_bytes = { path = "../../consensus/int_to_bytes" }
|
||||
rand = "0.7.3"
|
||||
proto_array = { path = "../../consensus/proto_array" }
|
||||
lru = "0.5.1"
|
||||
lru = "0.7.1"
|
||||
tempfile = "3.1.0"
|
||||
bitvec = "0.17.4"
|
||||
bitvec = "0.19.3"
|
||||
bls = { path = "../../crypto/bls" }
|
||||
safe_arith = { path = "../../consensus/safe_arith" }
|
||||
fork_choice = { path = "../../consensus/fork_choice" }
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
bus = "2.2.3"
|
||||
task_executor = { path = "../../common/task_executor" }
|
||||
derivative = "2.1.1"
|
||||
itertools = "0.9.0"
|
||||
itertools = "0.10.0"
|
||||
slasher = { path = "../../slasher" }
|
||||
eth2 = { path = "../../common/eth2" }
|
||||
strum = { version = "0.21.0", features = ["derive"] }
|
||||
logging = { path = "../../common/logging" }
|
||||
execution_layer = { path = "../execution_layer" }
|
||||
sensitive_url = { path = "../../common/sensitive_url" }
|
||||
superstruct = "0.4.0"
|
||||
|
||||
[[test]]
|
||||
name = "beacon_chain_tests"
|
||||
path = "tests/main.rs"
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
222
beacon_node/beacon_chain/src/attestation_verification/batch.rs
Normal file
222
beacon_node/beacon_chain/src/attestation_verification/batch.rs
Normal file
@@ -0,0 +1,222 @@
|
||||
//! These two `batch_...` functions provide verification of batches of attestations. They provide
|
||||
//! significant CPU-time savings by performing batch verification of BLS signatures.
|
||||
//!
|
||||
//! In each function, attestations are "indexed" (i.e., the `IndexedAttestation` is computed), to
|
||||
//! determine if they should progress to signature verification. Then, all attestations which were
|
||||
//! successfully indexed have their signatures verified in a batch. If that signature batch fails
|
||||
//! then all attestation signatures are verified independently.
|
||||
//!
|
||||
//! The outcome of each function is a `Vec<Result>` with a one-to-one mapping to the attestations
|
||||
//! supplied as input. Each result provides the exact success or failure result of the corresponding
|
||||
//! attestation, with no loss of fidelity when compared to individual verification.
|
||||
use super::{
|
||||
CheckAttestationSignature, Error, IndexedAggregatedAttestation, IndexedUnaggregatedAttestation,
|
||||
VerifiedAggregatedAttestation, VerifiedUnaggregatedAttestation,
|
||||
};
|
||||
use crate::{
|
||||
beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, BeaconChain, BeaconChainError,
|
||||
BeaconChainTypes,
|
||||
};
|
||||
use bls::verify_signature_sets;
|
||||
use state_processing::signature_sets::{
|
||||
indexed_attestation_signature_set_from_pubkeys, signed_aggregate_selection_proof_signature_set,
|
||||
signed_aggregate_signature_set,
|
||||
};
|
||||
use std::borrow::Cow;
|
||||
use types::*;
|
||||
|
||||
/// Verify aggregated attestations using batch BLS signature verification.
|
||||
///
|
||||
/// See module-level docs for more info.
|
||||
pub fn batch_verify_aggregated_attestations<'a, T, I>(
|
||||
aggregates: I,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Vec<Result<VerifiedAggregatedAttestation<'a, T>, Error>>, Error>
|
||||
where
|
||||
T: BeaconChainTypes,
|
||||
I: Iterator<Item = &'a SignedAggregateAndProof<T::EthSpec>> + ExactSizeIterator,
|
||||
{
|
||||
let mut num_indexed = 0;
|
||||
let mut num_failed = 0;
|
||||
|
||||
// Perform indexing of all attestations, collecting the results.
|
||||
let indexing_results = aggregates
|
||||
.map(|aggregate| {
|
||||
let result = IndexedAggregatedAttestation::verify(aggregate, chain);
|
||||
if result.is_ok() {
|
||||
num_indexed += 1;
|
||||
} else {
|
||||
num_failed += 1;
|
||||
}
|
||||
result
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// May be set to `No` if batch verification succeeds.
|
||||
let mut check_signatures = CheckAttestationSignature::Yes;
|
||||
|
||||
// Perform batch BLS verification, if any attestation signatures are worth checking.
|
||||
if num_indexed > 0 {
|
||||
let signature_setup_timer =
|
||||
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_SETUP_TIMES);
|
||||
|
||||
let pubkey_cache = chain
|
||||
.validator_pubkey_cache
|
||||
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
|
||||
|
||||
let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?;
|
||||
|
||||
let mut signature_sets = Vec::with_capacity(num_indexed * 3);
|
||||
|
||||
// Iterate, flattening to get only the `Ok` values.
|
||||
for indexed in indexing_results.iter().flatten() {
|
||||
let signed_aggregate = &indexed.signed_aggregate;
|
||||
let indexed_attestation = &indexed.indexed_attestation;
|
||||
|
||||
signature_sets.push(
|
||||
signed_aggregate_selection_proof_signature_set(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
signed_aggregate,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::SignatureSetError)?,
|
||||
);
|
||||
signature_sets.push(
|
||||
signed_aggregate_signature_set(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
signed_aggregate,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::SignatureSetError)?,
|
||||
);
|
||||
signature_sets.push(
|
||||
indexed_attestation_signature_set_from_pubkeys(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
&indexed_attestation.signature,
|
||||
indexed_attestation,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::SignatureSetError)?,
|
||||
);
|
||||
}
|
||||
|
||||
metrics::stop_timer(signature_setup_timer);
|
||||
|
||||
let _signature_verification_timer =
|
||||
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_TIMES);
|
||||
|
||||
if verify_signature_sets(signature_sets.iter()) {
|
||||
// Since all the signatures verified in a batch, there's no reason for them to be
|
||||
// checked again later.
|
||||
check_signatures = CheckAttestationSignature::No
|
||||
}
|
||||
}
|
||||
|
||||
// Complete the attestation verification, potentially verifying all signatures independently.
|
||||
let final_results = indexing_results
|
||||
.into_iter()
|
||||
.map(|result| match result {
|
||||
Ok(indexed) => {
|
||||
VerifiedAggregatedAttestation::from_indexed(indexed, chain, check_signatures)
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(final_results)
|
||||
}
|
||||
|
||||
/// Verify unaggregated attestations using batch BLS signature verification.
|
||||
///
|
||||
/// See module-level docs for more info.
|
||||
pub fn batch_verify_unaggregated_attestations<'a, T, I>(
|
||||
attestations: I,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Vec<Result<VerifiedUnaggregatedAttestation<'a, T>, Error>>, Error>
|
||||
where
|
||||
T: BeaconChainTypes,
|
||||
I: Iterator<Item = (&'a Attestation<T::EthSpec>, Option<SubnetId>)> + ExactSizeIterator,
|
||||
{
|
||||
let mut num_partially_verified = 0;
|
||||
let mut num_failed = 0;
|
||||
|
||||
// Perform partial verification of all attestations, collecting the results.
|
||||
let partial_results = attestations
|
||||
.map(|(attn, subnet_opt)| {
|
||||
let result = IndexedUnaggregatedAttestation::verify(attn, subnet_opt, chain);
|
||||
if result.is_ok() {
|
||||
num_partially_verified += 1;
|
||||
} else {
|
||||
num_failed += 1;
|
||||
}
|
||||
result
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// May be set to `No` if batch verification succeeds.
|
||||
let mut check_signatures = CheckAttestationSignature::Yes;
|
||||
|
||||
// Perform batch BLS verification, if any attestation signatures are worth checking.
|
||||
if num_partially_verified > 0 {
|
||||
let signature_setup_timer = metrics::start_timer(
|
||||
&metrics::ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES,
|
||||
);
|
||||
|
||||
let pubkey_cache = chain
|
||||
.validator_pubkey_cache
|
||||
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
|
||||
|
||||
let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?;
|
||||
|
||||
let mut signature_sets = Vec::with_capacity(num_partially_verified);
|
||||
|
||||
// Iterate, flattening to get only the `Ok` values.
|
||||
for partially_verified in partial_results.iter().flatten() {
|
||||
let indexed_attestation = &partially_verified.indexed_attestation;
|
||||
|
||||
let signature_set = indexed_attestation_signature_set_from_pubkeys(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
&indexed_attestation.signature,
|
||||
indexed_attestation,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::SignatureSetError)?;
|
||||
|
||||
signature_sets.push(signature_set);
|
||||
}
|
||||
|
||||
metrics::stop_timer(signature_setup_timer);
|
||||
|
||||
let _signature_verification_timer =
|
||||
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_TIMES);
|
||||
|
||||
if verify_signature_sets(signature_sets.iter()) {
|
||||
// Since all the signatures verified in a batch, there's no reason for them to be
|
||||
// checked again later.
|
||||
check_signatures = CheckAttestationSignature::No
|
||||
}
|
||||
}
|
||||
|
||||
// Complete the attestation verification, potentially verifying all signatures independently.
|
||||
let final_results = partial_results
|
||||
.into_iter()
|
||||
.map(|result| match result {
|
||||
Ok(partial) => {
|
||||
VerifiedUnaggregatedAttestation::from_indexed(partial, chain, check_signatures)
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(final_results)
|
||||
}
|
||||
384
beacon_node/beacon_chain/src/attester_cache.rs
Normal file
384
beacon_node/beacon_chain/src/attester_cache.rs
Normal file
@@ -0,0 +1,384 @@
|
||||
//! This module provides the `AttesterCache`, a cache designed for reducing state-reads when
|
||||
//! validators produce `AttestationData`.
|
||||
//!
|
||||
//! This cache is required *as well as* the `ShufflingCache` since the `ShufflingCache` does not
|
||||
//! provide any information about the `state.current_justified_checkpoint`. It is not trivial to add
|
||||
//! the justified checkpoint to the `ShufflingCache` since that cache is keyed by shuffling decision
|
||||
//! root, which is not suitable for the justified checkpoint. Whilst we can know the shuffling for
|
||||
//! epoch `n` during `n - 1`, we *cannot* know the justified checkpoint. Instead, we *must* perform
|
||||
//! `per_epoch_processing` to transform the state from epoch `n - 1` to epoch `n` so that rewards
|
||||
//! and penalties can be computed and the `state.current_justified_checkpoint` can be updated.
|
||||
|
||||
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use parking_lot::RwLock;
|
||||
use state_processing::state_advance::{partial_state_advance, Error as StateAdvanceError};
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Range;
|
||||
use types::{
|
||||
beacon_state::{
|
||||
compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count,
|
||||
},
|
||||
BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, RelativeEpoch,
|
||||
Slot,
|
||||
};
|
||||
|
||||
type JustifiedCheckpoint = Checkpoint;
|
||||
type CommitteeLength = usize;
|
||||
type CommitteeIndex = u64;
|
||||
type CacheHashMap = HashMap<AttesterCacheKey, AttesterCacheValue>;
|
||||
|
||||
/// The maximum number of `AttesterCacheValues` to be kept in memory.
|
||||
///
|
||||
/// Each `AttesterCacheValues` is very small (~16 bytes) and the cache will generally be kept small
|
||||
/// by pruning on finality.
|
||||
///
|
||||
/// The value provided here is much larger than will be used during ideal network conditions,
|
||||
/// however we make it large since the values are so small.
|
||||
const MAX_CACHE_LEN: usize = 1_024;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
BeaconState(BeaconStateError),
|
||||
// Boxed to avoid an infinite-size recursion issue.
|
||||
BeaconChain(Box<BeaconChainError>),
|
||||
MissingBeaconState(Hash256),
|
||||
FailedToTransitionState(StateAdvanceError),
|
||||
CannotAttestToFutureState {
|
||||
state_slot: Slot,
|
||||
request_slot: Slot,
|
||||
},
|
||||
/// Indicates a cache inconsistency.
|
||||
WrongEpoch {
|
||||
request_epoch: Epoch,
|
||||
epoch: Epoch,
|
||||
},
|
||||
InvalidCommitteeIndex {
|
||||
committee_index: u64,
|
||||
},
|
||||
/// Indicates an inconsistency with the beacon state committees.
|
||||
InverseRange {
|
||||
range: Range<usize>,
|
||||
},
|
||||
}
|
||||
|
||||
impl From<BeaconStateError> for Error {
|
||||
fn from(e: BeaconStateError) -> Self {
|
||||
Error::BeaconState(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BeaconChainError> for Error {
|
||||
fn from(e: BeaconChainError) -> Self {
|
||||
Error::BeaconChain(Box::new(e))
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores the minimal amount of data required to compute the committee length for any committee at any
|
||||
/// slot in a given `epoch`.
|
||||
pub struct CommitteeLengths {
|
||||
/// The `epoch` to which the lengths pertain.
|
||||
epoch: Epoch,
|
||||
/// The length of the shuffling in `self.epoch`.
|
||||
active_validator_indices_len: usize,
|
||||
}
|
||||
|
||||
impl CommitteeLengths {
|
||||
/// Instantiate `Self` using `state.current_epoch()`.
|
||||
pub fn new<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSpec) -> Result<Self, Error> {
|
||||
let active_validator_indices_len = if let Ok(committee_cache) =
|
||||
state.committee_cache(RelativeEpoch::Current)
|
||||
{
|
||||
committee_cache.active_validator_indices().len()
|
||||
} else {
|
||||
// Building the cache like this avoids taking a mutable reference to `BeaconState`.
|
||||
let committee_cache = state.initialize_committee_cache(state.current_epoch(), spec)?;
|
||||
committee_cache.active_validator_indices().len()
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
epoch: state.current_epoch(),
|
||||
active_validator_indices_len,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the count of committees per each slot of `self.epoch`.
|
||||
pub fn get_committee_count_per_slot<T: EthSpec>(
|
||||
&self,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<usize, Error> {
|
||||
T::get_committee_count_per_slot(self.active_validator_indices_len, spec).map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Get the length of the committee at the given `slot` and `committee_index`.
|
||||
pub fn get_committee_length<T: EthSpec>(
|
||||
&self,
|
||||
slot: Slot,
|
||||
committee_index: CommitteeIndex,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<CommitteeLength, Error> {
|
||||
let slots_per_epoch = T::slots_per_epoch();
|
||||
let request_epoch = slot.epoch(slots_per_epoch);
|
||||
|
||||
// Sanity check.
|
||||
if request_epoch != self.epoch {
|
||||
return Err(Error::WrongEpoch {
|
||||
request_epoch,
|
||||
epoch: self.epoch,
|
||||
});
|
||||
}
|
||||
|
||||
let slots_per_epoch = slots_per_epoch as usize;
|
||||
let committees_per_slot = self.get_committee_count_per_slot::<T>(spec)?;
|
||||
let index_in_epoch = compute_committee_index_in_epoch(
|
||||
slot,
|
||||
slots_per_epoch,
|
||||
committees_per_slot,
|
||||
committee_index as usize,
|
||||
);
|
||||
let range = compute_committee_range_in_epoch(
|
||||
epoch_committee_count(committees_per_slot, slots_per_epoch),
|
||||
index_in_epoch,
|
||||
self.active_validator_indices_len,
|
||||
)
|
||||
.ok_or(Error::InvalidCommitteeIndex { committee_index })?;
|
||||
|
||||
range
|
||||
.end
|
||||
.checked_sub(range.start)
|
||||
.ok_or(Error::InverseRange { range })
|
||||
}
|
||||
}
|
||||
|
||||
/// Provides the following information for some epoch:
|
||||
///
|
||||
/// - The `state.current_justified_checkpoint` value.
|
||||
/// - The committee lengths for all indices and slots.
|
||||
///
|
||||
/// These values are used during attestation production.
|
||||
/// Provides the following information for some epoch:
///
/// - The `state.current_justified_checkpoint` value.
/// - The committee lengths for all indices and slots.
///
/// These values are used during attestation production.
pub struct AttesterCacheValue {
    // The justified checkpoint observed in the epoch this value was built from.
    current_justified_checkpoint: Checkpoint,
    // Minimal data required to compute any committee length in that epoch.
    committee_lengths: CommitteeLengths,
}
|
||||
|
||||
impl AttesterCacheValue {
|
||||
/// Instantiate `Self` using `state.current_epoch()`.
|
||||
pub fn new<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSpec) -> Result<Self, Error> {
|
||||
let current_justified_checkpoint = state.current_justified_checkpoint();
|
||||
let committee_lengths = CommitteeLengths::new(state, spec)?;
|
||||
Ok(Self {
|
||||
current_justified_checkpoint,
|
||||
committee_lengths,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the justified checkpoint and committee length for some `slot` and `committee_index`.
|
||||
fn get<T: EthSpec>(
|
||||
&self,
|
||||
slot: Slot,
|
||||
committee_index: CommitteeIndex,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(JustifiedCheckpoint, CommitteeLength), Error> {
|
||||
self.committee_lengths
|
||||
.get_committee_length::<T>(slot, committee_index, spec)
|
||||
.map(|committee_length| (self.current_justified_checkpoint, committee_length))
|
||||
}
|
||||
}
|
||||
|
||||
/// The `AttesterCacheKey` is fundamentally the same thing as the proposer shuffling decision root,
/// however here we use it as an identity for both of the following values:
///
/// 1. The `state.current_justified_checkpoint`.
/// 2. The attester shuffling.
///
/// This struct relies upon the premise that the `state.current_justified_checkpoint` in epoch `n`
/// is determined by the root of the latest block in epoch `n - 1`. Notably, this is identical to
/// how the proposer shuffling is keyed in `BeaconProposerCache`.
///
/// It is also safe, but not maximally efficient, to key the attester shuffling with the same
/// strategy. For better shuffling keying strategies, see the `ShufflingCache`.
#[derive(Eq, PartialEq, Hash, Clone, Copy)]
pub struct AttesterCacheKey {
    /// The epoch from which the justified checkpoint should be observed.
    ///
    /// Attestations which use `self.epoch` as `target.epoch` should use this key.
    epoch: Epoch,
    /// The root of the block at the last slot of `self.epoch - 1`.
    decision_root: Hash256,
}
|
||||
|
||||
impl AttesterCacheKey {
    /// Instantiate `Self` to key `state.current_epoch()`.
    ///
    /// The `latest_block_root` should be the latest block that has been applied to `state`. This
    /// parameter is required since the state does not store the block root for any block with the
    /// same slot as `state.slot()`.
    ///
    /// ## Errors
    ///
    /// May error if `epoch` is out of the range of `state.block_roots`.
    pub fn new<T: EthSpec>(
        epoch: Epoch,
        state: &BeaconState<T>,
        latest_block_root: Hash256,
    ) -> Result<Self, Error> {
        let slots_per_epoch = T::slots_per_epoch();
        // The decision slot is the last slot of the epoch prior to `epoch` (saturating at 0 for
        // the genesis epoch).
        let decision_slot = epoch.start_slot(slots_per_epoch).saturating_sub(1_u64);

        let decision_root = if decision_slot.epoch(slots_per_epoch) == epoch {
            // This scenario is only possible during the genesis epoch. In this scenario, all-zeros
            // is used as an alias to the genesis block.
            Hash256::zero()
        } else if epoch > state.current_epoch() {
            // If the requested epoch is higher than the current epoch, the latest block will always
            // be the decision root.
            latest_block_root
        } else {
            *state.get_block_root(decision_slot)?
        };

        Ok(Self {
            epoch,
            decision_root,
        })
    }
}
|
||||
|
||||
/// Provides a cache for the justified checkpoint and committee length when producing an
/// attestation.
///
/// See the module-level documentation for more information.
#[derive(Default)]
pub struct AttesterCache {
    // Keyed by `AttesterCacheKey`; guarded by a `RwLock` so `get` can take a cheap read-lock
    // while priming operations take the write-lock.
    cache: RwLock<CacheHashMap>,
}
|
||||
|
||||
impl AttesterCache {
    /// Get the justified checkpoint and committee length for the `slot` and `committee_index` in
    /// the state identified by the cache `key`.
    ///
    /// Returns `Ok(None)` when the `key` is not present in the cache. Only takes a read-lock.
    pub fn get<T: EthSpec>(
        &self,
        key: &AttesterCacheKey,
        slot: Slot,
        committee_index: CommitteeIndex,
        spec: &ChainSpec,
    ) -> Result<Option<(JustifiedCheckpoint, CommitteeLength)>, Error> {
        self.cache
            .read()
            .get(key)
            .map(|cache_item| cache_item.get::<T>(slot, committee_index, spec))
            .transpose()
    }

    /// Cache the `state.current_epoch()` values if they are not already present in the state.
    pub fn maybe_cache_state<T: EthSpec>(
        &self,
        state: &BeaconState<T>,
        latest_block_root: Hash256,
        spec: &ChainSpec,
    ) -> Result<(), Error> {
        let key = AttesterCacheKey::new(state.current_epoch(), state, latest_block_root)?;
        let mut cache = self.cache.write();
        // Only compute and insert a value when the key is absent; existing entries are kept.
        if !cache.contains_key(&key) {
            let cache_item = AttesterCacheValue::new(state, spec)?;
            Self::insert_respecting_max_len(&mut cache, key, cache_item);
        }
        Ok(())
    }

    /// Read the state identified by `state_root` from the database, advance it to the required
    /// slot, use it to prime the cache and return the values for the provided `slot` and
    /// `committee_index`.
    ///
    /// ## Notes
    ///
    /// This function takes a write-lock on the internal cache. Prefer attempting a `Self::get` call
    /// before running this function as `Self::get` only takes a read-lock and is therefore less
    /// likely to create contention.
    pub fn load_and_cache_state<T: BeaconChainTypes>(
        &self,
        state_root: Hash256,
        key: AttesterCacheKey,
        slot: Slot,
        committee_index: CommitteeIndex,
        chain: &BeaconChain<T>,
    ) -> Result<(JustifiedCheckpoint, CommitteeLength), Error> {
        let spec = &chain.spec;
        let slots_per_epoch = T::EthSpec::slots_per_epoch();
        let epoch = slot.epoch(slots_per_epoch);

        // Take a write-lock on the cache before starting the state read.
        //
        // Whilst holding the write-lock during the state read will create contention, it prevents
        // the scenario where multiple requests from separate threads cause duplicate state reads.
        let mut cache = self.cache.write();

        // Try the cache to see if someone has already primed it between the time the function was
        // called and when the cache write-lock was obtained. This avoids performing duplicate state
        // reads.
        if let Some(value) = cache
            .get(&key)
            .map(|cache_item| cache_item.get::<T::EthSpec>(slot, committee_index, spec))
            .transpose()?
        {
            return Ok(value);
        }

        let mut state: BeaconState<T::EthSpec> = chain
            .get_state(&state_root, None)?
            .ok_or(Error::MissingBeaconState(state_root))?;

        if state.slot() > slot {
            // This indicates an internal inconsistency.
            return Err(Error::CannotAttestToFutureState {
                state_slot: state.slot(),
                request_slot: slot,
            });
        } else if state.current_epoch() < epoch {
            // Only perform a "partial" state advance since we do not require the state roots to be
            // accurate.
            partial_state_advance(
                &mut state,
                Some(state_root),
                epoch.start_slot(slots_per_epoch),
                spec,
            )
            .map_err(Error::FailedToTransitionState)?;
            state.build_committee_cache(RelativeEpoch::Current, spec)?;
        }

        let cache_item = AttesterCacheValue::new(&state, spec)?;
        let value = cache_item.get::<T::EthSpec>(slot, committee_index, spec)?;
        Self::insert_respecting_max_len(&mut cache, key, cache_item);
        Ok(value)
    }

    /// Insert a value to `cache`, ensuring it does not exceed the maximum length.
    ///
    /// If the cache is already full, the item with the lowest epoch will be removed.
    fn insert_respecting_max_len(
        cache: &mut CacheHashMap,
        key: AttesterCacheKey,
        value: AttesterCacheValue,
    ) {
        // Evict the lowest-epoch entry until there is room for the new item. In practice the
        // loop runs at most once per insert since the cache only grows one item at a time.
        while cache.len() >= MAX_CACHE_LEN {
            if let Some(oldest) = cache
                .iter()
                .map(|(key, _)| *key)
                .min_by_key(|key| key.epoch)
            {
                cache.remove(&oldest);
            } else {
                break;
            }
        }

        cache.insert(key, value);
    }

    /// Remove all entries where the `key.epoch` is lower than the given `epoch`.
    ///
    /// Generally, the provided `epoch` should be the finalized epoch.
    pub fn prune_below(&self, epoch: Epoch) {
        self.cache.write().retain(|target, _| target.epoch >= epoch);
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,18 +1,19 @@
|
||||
//! Defines the `BeaconForkChoiceStore` which provides the persistent storage for the `ForkChoice`
|
||||
//! struct.
|
||||
//!
|
||||
//! Additionally, the private `BalancesCache` struct is defined; a cache designed to avoid database
|
||||
//! Additionally, the `BalancesCache` struct is defined; a cache designed to avoid database
|
||||
//! reads when fork choice requires the validator balances of the justified state.
|
||||
|
||||
use crate::{metrics, BeaconSnapshot};
|
||||
use derivative::Derivative;
|
||||
use fork_choice::ForkChoiceStore;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use store::{Error as StoreError, HotColdDB, ItemStore};
|
||||
use superstruct::superstruct;
|
||||
use types::{
|
||||
BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, SignedBeaconBlock,
|
||||
Slot,
|
||||
BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, Slot,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -45,7 +46,7 @@ const MAX_BALANCE_CACHE_SIZE: usize = 4;
|
||||
/// zero.
|
||||
pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
|
||||
state
|
||||
.validators
|
||||
.validators()
|
||||
.iter()
|
||||
.map(|validator| {
|
||||
if validator.is_active_at(state.current_epoch()) {
|
||||
@@ -57,24 +58,34 @@ pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// An item that is stored in the `BalancesCache`.
|
||||
#[derive(PartialEq, Clone, Debug, Encode, Decode)]
|
||||
struct CacheItem {
|
||||
/// The block root at which `self.balances` are valid.
|
||||
block_root: Hash256,
|
||||
/// The effective balances from a `BeaconState` validator registry.
|
||||
balances: Vec<u64>,
|
||||
#[superstruct(
|
||||
variants(V1, V8),
|
||||
variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)),
|
||||
no_enum
|
||||
)]
|
||||
pub(crate) struct CacheItem {
|
||||
pub(crate) block_root: Hash256,
|
||||
#[superstruct(only(V8))]
|
||||
pub(crate) epoch: Epoch,
|
||||
pub(crate) balances: Vec<u64>,
|
||||
}
|
||||
|
||||
/// Provides a cache to avoid reading `BeaconState` from disk when updating the current justified
|
||||
/// checkpoint.
|
||||
///
|
||||
/// It is effectively a mapping of `epoch_boundary_block_root -> state.balances`.
|
||||
#[derive(PartialEq, Clone, Default, Debug, Encode, Decode)]
|
||||
struct BalancesCache {
|
||||
items: Vec<CacheItem>,
|
||||
pub(crate) type CacheItem = CacheItemV8;
|
||||
|
||||
#[superstruct(
|
||||
variants(V1, V8),
|
||||
variant_attributes(derive(PartialEq, Clone, Default, Debug, Encode, Decode)),
|
||||
no_enum
|
||||
)]
|
||||
pub struct BalancesCache {
|
||||
#[superstruct(only(V1))]
|
||||
pub(crate) items: Vec<CacheItemV1>,
|
||||
#[superstruct(only(V8))]
|
||||
pub(crate) items: Vec<CacheItemV8>,
|
||||
}
|
||||
|
||||
pub type BalancesCache = BalancesCacheV8;
|
||||
|
||||
impl BalancesCache {
|
||||
/// Inspect the given `state` and determine the root of the block at the first slot of
|
||||
/// `state.current_epoch`. If there is not already some entry for the given block root, then
|
||||
@@ -84,14 +95,9 @@ impl BalancesCache {
|
||||
block_root: Hash256,
|
||||
state: &BeaconState<E>,
|
||||
) -> Result<(), Error> {
|
||||
// We are only interested in balances from states that are at the start of an epoch,
|
||||
// because this is where the `current_justified_checkpoint.root` will point.
|
||||
if !Self::is_first_block_in_epoch(block_root, state)? {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch());
|
||||
let epoch_boundary_root = if epoch_boundary_slot == state.slot {
|
||||
let epoch = state.current_epoch();
|
||||
let epoch_boundary_slot = epoch.start_slot(E::slots_per_epoch());
|
||||
let epoch_boundary_root = if epoch_boundary_slot == state.slot() {
|
||||
block_root
|
||||
} else {
|
||||
// This call remains sensible as long as `state.block_roots` is larger than a single
|
||||
@@ -99,9 +105,14 @@ impl BalancesCache {
|
||||
*state.get_block_root(epoch_boundary_slot)?
|
||||
};
|
||||
|
||||
if self.position(epoch_boundary_root).is_none() {
|
||||
// Check if there already exists a cache entry for the epoch boundary block of the current
|
||||
// epoch. We rely on the invariant that effective balances do not change for the duration
|
||||
// of a single epoch, so even if the block on the epoch boundary itself is skipped we can
|
||||
// still update its cache entry from any subsequent state in that epoch.
|
||||
if self.position(epoch_boundary_root, epoch).is_none() {
|
||||
let item = CacheItem {
|
||||
block_root: epoch_boundary_root,
|
||||
epoch,
|
||||
balances: get_effective_balances(state),
|
||||
};
|
||||
|
||||
@@ -115,50 +126,27 @@ impl BalancesCache {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns `true` if the given `block_root` is the first/only block to have been processed in
|
||||
/// the epoch of the given `state`.
|
||||
///
|
||||
/// We can determine if it is the first block by looking back through `state.block_roots` to
|
||||
/// see if there is a block in the current epoch with a different root.
|
||||
fn is_first_block_in_epoch<E: EthSpec>(
|
||||
block_root: Hash256,
|
||||
state: &BeaconState<E>,
|
||||
) -> Result<bool, Error> {
|
||||
let mut prior_block_found = false;
|
||||
|
||||
for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) {
|
||||
if slot < state.slot {
|
||||
if *state.get_block_root(slot)? != block_root {
|
||||
prior_block_found = true;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(!prior_block_found)
|
||||
}
|
||||
|
||||
fn position(&self, block_root: Hash256) -> Option<usize> {
|
||||
fn position(&self, block_root: Hash256, epoch: Epoch) -> Option<usize> {
|
||||
self.items
|
||||
.iter()
|
||||
.position(|item| item.block_root == block_root)
|
||||
.position(|item| item.block_root == block_root && item.epoch == epoch)
|
||||
}
|
||||
|
||||
/// Get the balances for the given `block_root`, if any.
|
||||
///
|
||||
/// If some balances are found, they are removed from the cache.
|
||||
pub fn get(&mut self, block_root: Hash256) -> Option<Vec<u64>> {
|
||||
let i = self.position(block_root)?;
|
||||
Some(self.items.remove(i).balances)
|
||||
/// If some balances are found, they are cloned from the cache.
|
||||
pub fn get(&mut self, block_root: Hash256, epoch: Epoch) -> Option<Vec<u64>> {
|
||||
let i = self.position(block_root, epoch)?;
|
||||
Some(self.items[i].balances.clone())
|
||||
}
|
||||
}
|
||||
|
||||
/// Implements `fork_choice::ForkChoiceStore` in order to provide a persistent backing to the
|
||||
/// `fork_choice::ForkChoice` struct.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Derivative)]
|
||||
#[derivative(PartialEq(bound = "E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>"))]
|
||||
pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
|
||||
#[derivative(PartialEq = "ignore")]
|
||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
balances_cache: BalancesCache,
|
||||
time: Slot,
|
||||
@@ -166,26 +154,10 @@ pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<
|
||||
justified_checkpoint: Checkpoint,
|
||||
justified_balances: Vec<u64>,
|
||||
best_justified_checkpoint: Checkpoint,
|
||||
proposer_boost_root: Hash256,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<E, Hot, Cold> PartialEq for BeaconForkChoiceStore<E, Hot, Cold>
|
||||
where
|
||||
E: EthSpec,
|
||||
Hot: ItemStore<E>,
|
||||
Cold: ItemStore<E>,
|
||||
{
|
||||
/// This implementation ignores the `store` and `slot_clock`.
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.balances_cache == other.balances_cache
|
||||
&& self.time == other.time
|
||||
&& self.finalized_checkpoint == other.finalized_checkpoint
|
||||
&& self.justified_checkpoint == other.justified_checkpoint
|
||||
&& self.justified_balances == other.justified_balances
|
||||
&& self.best_justified_checkpoint == other.best_justified_checkpoint
|
||||
}
|
||||
}
|
||||
|
||||
impl<E, Hot, Cold> BeaconForkChoiceStore<E, Hot, Cold>
|
||||
where
|
||||
E: EthSpec,
|
||||
@@ -208,9 +180,9 @@ where
|
||||
anchor: &BeaconSnapshot<E>,
|
||||
) -> Self {
|
||||
let anchor_state = &anchor.beacon_state;
|
||||
let mut anchor_block_header = anchor_state.latest_block_header.clone();
|
||||
let mut anchor_block_header = anchor_state.latest_block_header().clone();
|
||||
if anchor_block_header.state_root == Hash256::zero() {
|
||||
anchor_block_header.state_root = anchor.beacon_state_root;
|
||||
anchor_block_header.state_root = anchor.beacon_state_root();
|
||||
}
|
||||
let anchor_root = anchor_block_header.canonical_root();
|
||||
let anchor_epoch = anchor_state.current_epoch();
|
||||
@@ -223,11 +195,12 @@ where
|
||||
Self {
|
||||
store,
|
||||
balances_cache: <_>::default(),
|
||||
time: anchor_state.slot,
|
||||
time: anchor_state.slot(),
|
||||
justified_checkpoint,
|
||||
justified_balances: anchor_state.balances.clone().into(),
|
||||
justified_balances: anchor_state.balances().clone().into(),
|
||||
finalized_checkpoint,
|
||||
best_justified_checkpoint: justified_checkpoint,
|
||||
proposer_boost_root: Hash256::zero(),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
@@ -242,6 +215,7 @@ where
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
justified_balances: self.justified_balances.clone(),
|
||||
best_justified_checkpoint: self.best_justified_checkpoint,
|
||||
proposer_boost_root: self.proposer_boost_root,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -258,6 +232,7 @@ where
|
||||
justified_checkpoint: persisted.justified_checkpoint,
|
||||
justified_balances: persisted.justified_balances,
|
||||
best_justified_checkpoint: persisted.best_justified_checkpoint,
|
||||
proposer_boost_root: persisted.proposer_boost_root,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
@@ -304,6 +279,10 @@ where
|
||||
&self.finalized_checkpoint
|
||||
}
|
||||
|
||||
fn proposer_boost_root(&self) -> Hash256 {
|
||||
self.proposer_boost_root
|
||||
}
|
||||
|
||||
fn set_finalized_checkpoint(&mut self, checkpoint: Checkpoint) {
|
||||
self.finalized_checkpoint = checkpoint
|
||||
}
|
||||
@@ -311,25 +290,29 @@ where
|
||||
fn set_justified_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<(), Error> {
|
||||
self.justified_checkpoint = checkpoint;
|
||||
|
||||
if let Some(balances) = self.balances_cache.get(self.justified_checkpoint.root) {
|
||||
if let Some(balances) = self.balances_cache.get(
|
||||
self.justified_checkpoint.root,
|
||||
self.justified_checkpoint.epoch,
|
||||
) {
|
||||
metrics::inc_counter(&metrics::BALANCES_CACHE_HITS);
|
||||
self.justified_balances = balances;
|
||||
} else {
|
||||
metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);
|
||||
let justified_block = self
|
||||
.store
|
||||
.get_item::<SignedBeaconBlock<E>>(&self.justified_checkpoint.root)
|
||||
.get_block(&self.justified_checkpoint.root)
|
||||
.map_err(Error::FailedToReadBlock)?
|
||||
.ok_or_else(|| Error::MissingBlock(self.justified_checkpoint.root))?
|
||||
.message;
|
||||
.ok_or(Error::MissingBlock(self.justified_checkpoint.root))?
|
||||
.deconstruct()
|
||||
.0;
|
||||
|
||||
self.justified_balances = self
|
||||
let state = self
|
||||
.store
|
||||
.get_state(&justified_block.state_root, Some(justified_block.slot))
|
||||
.get_state(&justified_block.state_root(), Some(justified_block.slot()))
|
||||
.map_err(Error::FailedToReadState)?
|
||||
.ok_or_else(|| Error::MissingState(justified_block.state_root))?
|
||||
.balances
|
||||
.into();
|
||||
.ok_or_else(|| Error::MissingState(justified_block.state_root()))?;
|
||||
|
||||
self.justified_balances = get_effective_balances(&state);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -338,15 +321,30 @@ where
|
||||
fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint) {
|
||||
self.best_justified_checkpoint = checkpoint
|
||||
}
|
||||
|
||||
fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256) {
|
||||
self.proposer_boost_root = proposer_boost_root;
|
||||
}
|
||||
}
|
||||
|
||||
/// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
|
||||
#[derive(Encode, Decode)]
|
||||
#[superstruct(
|
||||
variants(V1, V7, V8),
|
||||
variant_attributes(derive(Encode, Decode)),
|
||||
no_enum
|
||||
)]
|
||||
pub struct PersistedForkChoiceStore {
|
||||
balances_cache: BalancesCache,
|
||||
time: Slot,
|
||||
finalized_checkpoint: Checkpoint,
|
||||
justified_checkpoint: Checkpoint,
|
||||
justified_balances: Vec<u64>,
|
||||
best_justified_checkpoint: Checkpoint,
|
||||
#[superstruct(only(V1, V7))]
|
||||
pub balances_cache: BalancesCacheV1,
|
||||
#[superstruct(only(V8))]
|
||||
pub balances_cache: BalancesCacheV8,
|
||||
pub time: Slot,
|
||||
pub finalized_checkpoint: Checkpoint,
|
||||
pub justified_checkpoint: Checkpoint,
|
||||
pub justified_balances: Vec<u64>,
|
||||
pub best_justified_checkpoint: Checkpoint,
|
||||
#[superstruct(only(V7, V8))]
|
||||
pub proposer_boost_root: Hash256,
|
||||
}
|
||||
|
||||
pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV8;
|
||||
|
||||
185
beacon_node/beacon_chain/src/beacon_proposer_cache.rs
Normal file
185
beacon_node/beacon_chain/src/beacon_proposer_cache.rs
Normal file
@@ -0,0 +1,185 @@
|
||||
//! The `BeaconProposer` cache stores the proposer indices for some epoch.
|
||||
//!
|
||||
//! This cache is keyed by `(epoch, block_root)` where `block_root` is the block root at
|
||||
//! `end_slot(epoch - 1)`. We make the assertion that the proposer shuffling is identical for all
|
||||
//! blocks in `epoch` which share the common ancestor of `block_root`.
|
||||
//!
|
||||
//! The cache is a fairly unintelligent LRU cache that is not pruned after finality. This makes it
|
||||
//! very simple to reason about, but it might store values that are useless due to finalization. The
|
||||
//! values it stores are very small, so this should not be an issue.
|
||||
|
||||
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use lru::LruCache;
|
||||
use smallvec::SmallVec;
|
||||
use state_processing::state_advance::partial_state_advance;
|
||||
use std::cmp::Ordering;
|
||||
use types::{
|
||||
BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned,
|
||||
};
|
||||
|
||||
/// The number of sets of proposer indices that should be cached.
const CACHE_SIZE: usize = 16;

/// This value is fairly unimportant, it's used to avoid heap allocations. The result of it being
/// incorrect is non-substantial from a consensus perspective (and probably also from a
/// performance perspective).
const TYPICAL_SLOTS_PER_EPOCH: usize = 32;
|
||||
|
||||
/// For some given slot, this contains the proposer index (`index`) and the `fork` that should be
/// used to verify their signature.
pub struct Proposer {
    /// The validator index of the proposer.
    pub index: usize,
    /// The fork to use when verifying the proposer's signature.
    pub fork: Fork,
}
|
||||
|
||||
/// The list of proposers for some given `epoch`, alongside the `fork` that should be used to verify
/// their signatures.
pub struct EpochBlockProposers {
    /// The epoch to which the proposers pertain.
    epoch: Epoch,
    /// The fork that should be used to verify proposer signatures.
    fork: Fork,
    /// A list of length `T::EthSpec::slots_per_epoch()`, representing the proposers for each slot
    /// in that epoch.
    ///
    /// E.g., if `self.epoch == 1`, then `self.proposers[0]` contains the proposer for slot `32`.
    proposers: SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>,
}
|
||||
|
||||
/// A cache to store the proposers for some epoch.
///
/// See the module-level documentation for more information.
pub struct BeaconProposerCache {
    // LRU-evicted map keyed by `(epoch, shuffling_decision_block_root)`.
    cache: LruCache<(Epoch, Hash256), EpochBlockProposers>,
}
|
||||
|
||||
impl Default for BeaconProposerCache {
    /// Create an empty cache with a fixed capacity of `CACHE_SIZE` entries.
    fn default() -> Self {
        Self {
            cache: LruCache::new(CACHE_SIZE),
        }
    }
}
|
||||
|
||||
impl BeaconProposerCache {
|
||||
/// If it is cached, returns the proposer for the block at `slot` where the block has the
|
||||
/// ancestor block root of `shuffling_decision_block` at `end_slot(slot.epoch() - 1)`.
|
||||
pub fn get_slot<T: EthSpec>(
|
||||
&mut self,
|
||||
shuffling_decision_block: Hash256,
|
||||
slot: Slot,
|
||||
) -> Option<Proposer> {
|
||||
let epoch = slot.epoch(T::slots_per_epoch());
|
||||
let key = (epoch, shuffling_decision_block);
|
||||
if let Some(cache) = self.cache.get(&key) {
|
||||
// This `if` statement is likely unnecessary, but it feels like good practice.
|
||||
if epoch == cache.epoch {
|
||||
cache
|
||||
.proposers
|
||||
.get(slot.as_usize() % T::SlotsPerEpoch::to_usize())
|
||||
.map(|&index| Proposer {
|
||||
index,
|
||||
fork: cache.fork,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// As per `Self::get_slot`, but returns all proposers in all slots for the given `epoch`.
|
||||
///
|
||||
/// The nth slot in the returned `SmallVec` will be equal to the nth slot in the given `epoch`.
|
||||
/// E.g., if `epoch == 1` then `smallvec[0]` refers to slot 32 (assuming `SLOTS_PER_EPOCH ==
|
||||
/// 32`).
|
||||
pub fn get_epoch<T: EthSpec>(
|
||||
&mut self,
|
||||
shuffling_decision_block: Hash256,
|
||||
epoch: Epoch,
|
||||
) -> Option<&SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>> {
|
||||
let key = (epoch, shuffling_decision_block);
|
||||
self.cache.get(&key).map(|cache| &cache.proposers)
|
||||
}
|
||||
|
||||
/// Insert the proposers into the cache.
|
||||
///
|
||||
/// See `Self::get` for a description of `shuffling_decision_block`.
|
||||
///
|
||||
/// The `fork` value must be valid to verify proposer signatures in `epoch`.
|
||||
pub fn insert(
|
||||
&mut self,
|
||||
epoch: Epoch,
|
||||
shuffling_decision_block: Hash256,
|
||||
proposers: Vec<usize>,
|
||||
fork: Fork,
|
||||
) -> Result<(), BeaconStateError> {
|
||||
let key = (epoch, shuffling_decision_block);
|
||||
if !self.cache.contains(&key) {
|
||||
self.cache.put(
|
||||
key,
|
||||
EpochBlockProposers {
|
||||
epoch,
|
||||
fork,
|
||||
proposers: proposers.into(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the proposer duties using the head state without cache.
///
/// Returns the proposer indices for `current_epoch`, the shuffling decision (dependent) root and
/// the `Fork` of the (possibly advanced) head state.
pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>(
    current_epoch: Epoch,
    chain: &BeaconChain<T>,
) -> Result<(Vec<usize>, Hash256, Fork), BeaconChainError> {
    // Take a copy of the head of the chain.
    let head = chain.head()?;
    let mut state = head.beacon_state;
    let head_state_root = head.beacon_block.state_root();

    // Advance the state into the requested epoch.
    ensure_state_is_in_epoch(&mut state, head_state_root, current_epoch, &chain.spec)?;

    let indices = state
        .get_beacon_proposer_indices(&chain.spec)
        .map_err(BeaconChainError::from)?;

    let dependent_root = state
        // The only block which decides its own shuffling is the genesis block.
        .proposer_shuffling_decision_root(chain.genesis_block_root)
        .map_err(BeaconChainError::from)?;

    Ok((indices, dependent_root, state.fork()))
}
|
||||
|
||||
/// If required, advance `state` to `target_epoch`.
|
||||
///
|
||||
/// ## Details
|
||||
///
|
||||
/// - Returns an error if `state.current_epoch() > target_epoch`.
|
||||
/// - No-op if `state.current_epoch() == target_epoch`.
|
||||
/// - It must be the case that `state.canonical_root() == state_root`, but this function will not
|
||||
/// check that.
|
||||
pub fn ensure_state_is_in_epoch<E: EthSpec>(
|
||||
state: &mut BeaconState<E>,
|
||||
state_root: Hash256,
|
||||
target_epoch: Epoch,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), BeaconChainError> {
|
||||
match state.current_epoch().cmp(&target_epoch) {
|
||||
// Protects against an inconsistent slot clock.
|
||||
Ordering::Greater => Err(BeaconStateError::SlotOutOfBounds.into()),
|
||||
// The state needs to be advanced.
|
||||
Ordering::Less => {
|
||||
let target_slot = target_epoch.start_slot(E::slots_per_epoch());
|
||||
partial_state_advance(state, Some(state_root), target_slot, spec)
|
||||
.map_err(BeaconChainError::from)
|
||||
}
|
||||
// The state is suitable, nothing to do.
|
||||
Ordering::Equal => Ok(()),
|
||||
}
|
||||
}
|
||||
@@ -1,15 +1,13 @@
|
||||
use serde_derive::Serialize;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use types::{BeaconState, EthSpec, Hash256, SignedBeaconBlock};
|
||||
use types::{beacon_state::CloneConfig, BeaconState, EthSpec, Hash256, SignedBeaconBlock};
|
||||
|
||||
/// Represents some block and its associated state. Generally, this will be used for tracking the
|
||||
/// head, justified head and finalized head.
|
||||
#[derive(Clone, Serialize, PartialEq, Debug, Encode, Decode)]
|
||||
#[derive(Clone, Serialize, PartialEq, Debug)]
|
||||
pub struct BeaconSnapshot<E: EthSpec> {
|
||||
pub beacon_block: SignedBeaconBlock<E>,
|
||||
pub beacon_block_root: Hash256,
|
||||
pub beacon_state: BeaconState<E>,
|
||||
pub beacon_state_root: Hash256,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> BeaconSnapshot<E> {
|
||||
@@ -18,36 +16,40 @@ impl<E: EthSpec> BeaconSnapshot<E> {
|
||||
beacon_block: SignedBeaconBlock<E>,
|
||||
beacon_block_root: Hash256,
|
||||
beacon_state: BeaconState<E>,
|
||||
beacon_state_root: Hash256,
|
||||
) -> Self {
|
||||
Self {
|
||||
beacon_block,
|
||||
beacon_block_root,
|
||||
beacon_state,
|
||||
beacon_state_root,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the state root from `self.beacon_block`.
|
||||
///
|
||||
/// ## Caution
|
||||
///
|
||||
/// It is not strictly enforced that `root(self.beacon_state) == self.beacon_state_root()`.
|
||||
pub fn beacon_state_root(&self) -> Hash256 {
|
||||
self.beacon_block.message().state_root()
|
||||
}
|
||||
|
||||
/// Update all fields of the checkpoint.
|
||||
pub fn update(
|
||||
&mut self,
|
||||
beacon_block: SignedBeaconBlock<E>,
|
||||
beacon_block_root: Hash256,
|
||||
beacon_state: BeaconState<E>,
|
||||
beacon_state_root: Hash256,
|
||||
) {
|
||||
self.beacon_block = beacon_block;
|
||||
self.beacon_block_root = beacon_block_root;
|
||||
self.beacon_state = beacon_state;
|
||||
self.beacon_state_root = beacon_state_root;
|
||||
}
|
||||
|
||||
pub fn clone_with_only_committee_caches(&self) -> Self {
|
||||
pub fn clone_with(&self, clone_config: CloneConfig) -> Self {
|
||||
Self {
|
||||
beacon_block: self.beacon_block.clone(),
|
||||
beacon_block_root: self.beacon_block_root,
|
||||
beacon_state: self.beacon_state.clone_with_only_committee_caches(),
|
||||
beacon_state_root: self.beacon_state_root,
|
||||
beacon_state: self.beacon_state.clone_with(clone_config),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
97
beacon_node/beacon_chain/src/block_reward.rs
Normal file
97
beacon_node/beacon_chain/src/block_reward.rs
Normal file
@@ -0,0 +1,97 @@
|
||||
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta};
|
||||
use operation_pool::{AttMaxCover, MaxCover};
|
||||
use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards;
|
||||
use types::{BeaconBlockRef, BeaconState, EthSpec, Hash256, RelativeEpoch};
|
||||
|
||||
impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
pub fn compute_block_reward(
|
||||
&self,
|
||||
block: BeaconBlockRef<'_, T::EthSpec>,
|
||||
block_root: Hash256,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
) -> Result<BlockReward, BeaconChainError> {
|
||||
if block.slot() != state.slot() {
|
||||
return Err(BeaconChainError::BlockRewardSlotError);
|
||||
}
|
||||
|
||||
let active_indices = state.get_cached_active_validator_indices(RelativeEpoch::Current)?;
|
||||
let total_active_balance = state.get_total_balance(active_indices, &self.spec)?;
|
||||
let mut per_attestation_rewards = block
|
||||
.body()
|
||||
.attestations()
|
||||
.iter()
|
||||
.map(|att| {
|
||||
AttMaxCover::new(att, state, total_active_balance, &self.spec)
|
||||
.ok_or(BeaconChainError::BlockRewardAttestationError)
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// Update the attestation rewards for each previous attestation included.
|
||||
// This is O(n^2) in the number of attestations n.
|
||||
for i in 0..per_attestation_rewards.len() {
|
||||
let (updated, to_update) = per_attestation_rewards.split_at_mut(i + 1);
|
||||
let latest_att = &updated[i];
|
||||
|
||||
for att in to_update {
|
||||
att.update_covering_set(latest_att.object(), latest_att.covering_set());
|
||||
}
|
||||
}
|
||||
|
||||
let mut prev_epoch_total = 0;
|
||||
let mut curr_epoch_total = 0;
|
||||
|
||||
for cover in &per_attestation_rewards {
|
||||
for &reward in cover.fresh_validators_rewards.values() {
|
||||
if cover.att.data.slot.epoch(T::EthSpec::slots_per_epoch()) == state.current_epoch()
|
||||
{
|
||||
curr_epoch_total += reward;
|
||||
} else {
|
||||
prev_epoch_total += reward;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let attestation_total = prev_epoch_total + curr_epoch_total;
|
||||
|
||||
// Drop the covers.
|
||||
let per_attestation_rewards = per_attestation_rewards
|
||||
.into_iter()
|
||||
.map(|cover| cover.fresh_validators_rewards)
|
||||
.collect();
|
||||
|
||||
let attestation_rewards = AttestationRewards {
|
||||
total: attestation_total,
|
||||
prev_epoch_total,
|
||||
curr_epoch_total,
|
||||
per_attestation_rewards,
|
||||
};
|
||||
|
||||
// Sync committee rewards.
|
||||
let sync_committee_rewards = if let Ok(sync_aggregate) = block.body().sync_aggregate() {
|
||||
let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec)
|
||||
.map_err(|_| BeaconChainError::BlockRewardSyncError)?;
|
||||
sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit
|
||||
} else {
|
||||
0
|
||||
};
|
||||
|
||||
// Total, metadata
|
||||
let total = attestation_total + sync_committee_rewards;
|
||||
|
||||
let meta = BlockRewardMeta {
|
||||
slot: block.slot(),
|
||||
parent_slot: state.latest_block_header().slot,
|
||||
proposer_index: block.proposer_index(),
|
||||
graffiti: block.body().graffiti().as_utf8_lossy(),
|
||||
};
|
||||
|
||||
Ok(BlockReward {
|
||||
total,
|
||||
block_root,
|
||||
meta,
|
||||
attestation_rewards,
|
||||
sync_committee_rewards,
|
||||
})
|
||||
}
|
||||
}
|
||||
143
beacon_node/beacon_chain/src/block_times_cache.rs
Normal file
143
beacon_node/beacon_chain/src/block_times_cache.rs
Normal file
@@ -0,0 +1,143 @@
|
||||
//! This module provides the `BlockTimesCache' which contains information regarding block timings.
|
||||
//!
|
||||
//! This provides `BeaconChain` and associated functions with access to the timestamps of when a
|
||||
//! certain block was observed, imported and set as head.
|
||||
//! This allows for better traceability and allows us to determine the root cause for why a block
|
||||
//! was set as head late.
|
||||
//! This allows us to distingush between the following scenarios:
|
||||
//! - The block was observed late.
|
||||
//! - We were too slow to import it.
|
||||
//! - We were too slow to set it as head.
|
||||
|
||||
use eth2::types::{Hash256, Slot};
|
||||
use std::collections::HashMap;
|
||||
use std::time::Duration;
|
||||
|
||||
type BlockRoot = Hash256;
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub struct Timestamps {
|
||||
pub observed: Option<Duration>,
|
||||
pub imported: Option<Duration>,
|
||||
pub set_as_head: Option<Duration>,
|
||||
}
|
||||
|
||||
// Helps arrange delay data so it is more relevant to metrics.
|
||||
#[derive(Default)]
|
||||
pub struct BlockDelays {
|
||||
pub observed: Option<Duration>,
|
||||
pub imported: Option<Duration>,
|
||||
pub set_as_head: Option<Duration>,
|
||||
}
|
||||
|
||||
impl BlockDelays {
|
||||
fn new(times: Timestamps, slot_start_time: Duration) -> BlockDelays {
|
||||
let observed = times
|
||||
.observed
|
||||
.and_then(|observed_time| observed_time.checked_sub(slot_start_time));
|
||||
let imported = times
|
||||
.imported
|
||||
.and_then(|imported_time| imported_time.checked_sub(times.observed?));
|
||||
let set_as_head = times
|
||||
.set_as_head
|
||||
.and_then(|set_as_head_time| set_as_head_time.checked_sub(times.imported?));
|
||||
BlockDelays {
|
||||
observed,
|
||||
imported,
|
||||
set_as_head,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the block was received via gossip, we can record the client type of the peer which sent us
|
||||
// the block.
|
||||
#[derive(Clone, Default)]
|
||||
pub struct BlockPeerInfo {
|
||||
pub id: Option<String>,
|
||||
pub client: Option<String>,
|
||||
}
|
||||
|
||||
pub struct BlockTimesCacheValue {
|
||||
pub slot: Slot,
|
||||
pub timestamps: Timestamps,
|
||||
pub peer_info: BlockPeerInfo,
|
||||
}
|
||||
|
||||
impl BlockTimesCacheValue {
|
||||
fn new(slot: Slot) -> Self {
|
||||
BlockTimesCacheValue {
|
||||
slot,
|
||||
timestamps: Default::default(),
|
||||
peer_info: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct BlockTimesCache {
|
||||
pub cache: HashMap<BlockRoot, BlockTimesCacheValue>,
|
||||
}
|
||||
|
||||
/// Helper methods to read from and write to the cache.
|
||||
impl BlockTimesCache {
|
||||
pub fn set_time_observed(
|
||||
&mut self,
|
||||
block_root: BlockRoot,
|
||||
slot: Slot,
|
||||
timestamp: Duration,
|
||||
peer_id: Option<String>,
|
||||
peer_client: Option<String>,
|
||||
) {
|
||||
let block_times = self
|
||||
.cache
|
||||
.entry(block_root)
|
||||
.or_insert_with(|| BlockTimesCacheValue::new(slot));
|
||||
block_times.timestamps.observed = Some(timestamp);
|
||||
block_times.peer_info = BlockPeerInfo {
|
||||
id: peer_id,
|
||||
client: peer_client,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
|
||||
let block_times = self
|
||||
.cache
|
||||
.entry(block_root)
|
||||
.or_insert_with(|| BlockTimesCacheValue::new(slot));
|
||||
block_times.timestamps.imported = Some(timestamp);
|
||||
}
|
||||
|
||||
pub fn set_time_set_as_head(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
|
||||
let block_times = self
|
||||
.cache
|
||||
.entry(block_root)
|
||||
.or_insert_with(|| BlockTimesCacheValue::new(slot));
|
||||
block_times.timestamps.set_as_head = Some(timestamp);
|
||||
}
|
||||
|
||||
pub fn get_block_delays(
|
||||
&self,
|
||||
block_root: BlockRoot,
|
||||
slot_start_time: Duration,
|
||||
) -> BlockDelays {
|
||||
if let Some(block_times) = self.cache.get(&block_root) {
|
||||
BlockDelays::new(block_times.timestamps.clone(), slot_start_time)
|
||||
} else {
|
||||
BlockDelays::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_peer_info(&self, block_root: BlockRoot) -> BlockPeerInfo {
|
||||
if let Some(block_info) = self.cache.get(&block_root) {
|
||||
block_info.peer_info.clone()
|
||||
} else {
|
||||
BlockPeerInfo::default()
|
||||
}
|
||||
}
|
||||
|
||||
// Prune the cache to only store the most recent 2 epochs.
|
||||
pub fn prune(&mut self, current_slot: Slot) {
|
||||
self.cache
|
||||
.retain(|_, cache| cache.slot > current_slot.saturating_sub(64_u64));
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
33
beacon_node/beacon_chain/src/chain_config.rs
Normal file
33
beacon_node/beacon_chain/src/chain_config.rs
Normal file
@@ -0,0 +1,33 @@
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use types::Checkpoint;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]
|
||||
pub struct ChainConfig {
|
||||
/// Maximum number of slots to skip when importing a consensus message (e.g., block,
|
||||
/// attestation, etc).
|
||||
///
|
||||
/// If `None`, there is no limit.
|
||||
pub import_max_skip_slots: Option<u64>,
|
||||
/// A user-input `Checkpoint` that must exist in the beacon chain's sync path.
|
||||
///
|
||||
/// If `None`, there is no weak subjectivity verification.
|
||||
pub weak_subjectivity_checkpoint: Option<Checkpoint>,
|
||||
/// Determine whether to reconstruct historic states, usually after a checkpoint sync.
|
||||
pub reconstruct_historic_states: bool,
|
||||
/// Whether timeouts on `TimeoutRwLock`s are enabled or not.
|
||||
pub enable_lock_timeouts: bool,
|
||||
/// The max size of a message that can be sent over the network.
|
||||
pub max_network_size: usize,
|
||||
}
|
||||
|
||||
impl Default for ChainConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
import_max_skip_slots: None,
|
||||
weak_subjectivity_checkpoint: None,
|
||||
reconstruct_historic_states: false,
|
||||
enable_lock_timeouts: true,
|
||||
max_network_size: 10 * 1_048_576, // 10M
|
||||
}
|
||||
}
|
||||
}
|
||||
161
beacon_node/beacon_chain/src/early_attester_cache.rs
Normal file
161
beacon_node/beacon_chain/src/early_attester_cache.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
use crate::{
|
||||
attester_cache::{CommitteeLengths, Error},
|
||||
metrics,
|
||||
};
|
||||
use parking_lot::RwLock;
|
||||
use proto_array::Block as ProtoBlock;
|
||||
use types::*;
|
||||
|
||||
pub struct CacheItem<E: EthSpec> {
|
||||
/*
|
||||
* Values used to create attestations.
|
||||
*/
|
||||
epoch: Epoch,
|
||||
committee_lengths: CommitteeLengths,
|
||||
beacon_block_root: Hash256,
|
||||
source: Checkpoint,
|
||||
target: Checkpoint,
|
||||
/*
|
||||
* Values used to make the block available.
|
||||
*/
|
||||
block: SignedBeaconBlock<E>,
|
||||
proto_block: ProtoBlock,
|
||||
}
|
||||
|
||||
/// Provides a single-item cache which allows for attesting to blocks before those blocks have
|
||||
/// reached the database.
|
||||
///
|
||||
/// This cache stores enough information to allow Lighthouse to:
|
||||
///
|
||||
/// - Produce an attestation without using `chain.canonical_head`.
|
||||
/// - Verify that a block root exists (i.e., will be imported in the future) during attestation
|
||||
/// verification.
|
||||
/// - Provide a block which can be sent to peers via RPC.
|
||||
#[derive(Default)]
|
||||
pub struct EarlyAttesterCache<E: EthSpec> {
|
||||
item: RwLock<Option<CacheItem<E>>>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> EarlyAttesterCache<E> {
|
||||
/// Removes the cached item, meaning that all future calls to `Self::try_attest` will return
|
||||
/// `None` until a new cache item is added.
|
||||
pub fn clear(&self) {
|
||||
*self.item.write() = None
|
||||
}
|
||||
|
||||
/// Updates the cache item, so that `Self::try_attest` with return `Some` when given suitable
|
||||
/// parameters.
|
||||
pub fn add_head_block(
|
||||
&self,
|
||||
beacon_block_root: Hash256,
|
||||
block: SignedBeaconBlock<E>,
|
||||
proto_block: ProtoBlock,
|
||||
state: &BeaconState<E>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), Error> {
|
||||
let epoch = state.current_epoch();
|
||||
let committee_lengths = CommitteeLengths::new(state, spec)?;
|
||||
let source = state.current_justified_checkpoint();
|
||||
let target_slot = epoch.start_slot(E::slots_per_epoch());
|
||||
let target = Checkpoint {
|
||||
epoch,
|
||||
root: if state.slot() <= target_slot {
|
||||
beacon_block_root
|
||||
} else {
|
||||
*state.get_block_root(target_slot)?
|
||||
},
|
||||
};
|
||||
|
||||
let item = CacheItem {
|
||||
epoch,
|
||||
committee_lengths,
|
||||
beacon_block_root,
|
||||
source,
|
||||
target,
|
||||
block,
|
||||
proto_block,
|
||||
};
|
||||
|
||||
*self.item.write() = Some(item);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Will return `Some(attestation)` if all the following conditions are met:
|
||||
///
|
||||
/// - There is a cache `item` present.
|
||||
/// - If `request_slot` is in the same epoch as `item.epoch`.
|
||||
/// - If `request_index` does not exceed `item.comittee_count`.
|
||||
pub fn try_attest(
|
||||
&self,
|
||||
request_slot: Slot,
|
||||
request_index: CommitteeIndex,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<Option<Attestation<E>>, Error> {
|
||||
let lock = self.item.read();
|
||||
let item = if let Some(item) = lock.as_ref() {
|
||||
item
|
||||
} else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let request_epoch = request_slot.epoch(E::slots_per_epoch());
|
||||
if request_epoch != item.epoch {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let committee_count = item
|
||||
.committee_lengths
|
||||
.get_committee_count_per_slot::<E>(spec)?;
|
||||
if request_index >= committee_count as u64 {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let committee_len =
|
||||
item.committee_lengths
|
||||
.get_committee_length::<E>(request_slot, request_index, spec)?;
|
||||
|
||||
let attestation = Attestation {
|
||||
aggregation_bits: BitList::with_capacity(committee_len)
|
||||
.map_err(BeaconStateError::from)?,
|
||||
data: AttestationData {
|
||||
slot: request_slot,
|
||||
index: request_index,
|
||||
beacon_block_root: item.beacon_block_root,
|
||||
source: item.source,
|
||||
target: item.target,
|
||||
},
|
||||
signature: AggregateSignature::empty(),
|
||||
};
|
||||
|
||||
metrics::inc_counter(&metrics::BEACON_EARLY_ATTESTER_CACHE_HITS);
|
||||
|
||||
Ok(Some(attestation))
|
||||
}
|
||||
|
||||
/// Returns `true` if `block_root` matches the cached item.
|
||||
pub fn contains_block(&self, block_root: Hash256) -> bool {
|
||||
self.item
|
||||
.read()
|
||||
.as_ref()
|
||||
.map_or(false, |item| item.beacon_block_root == block_root)
|
||||
}
|
||||
|
||||
/// Returns the block, if `block_root` matches the cached item.
|
||||
pub fn get_block(&self, block_root: Hash256) -> Option<SignedBeaconBlock<E>> {
|
||||
self.item
|
||||
.read()
|
||||
.as_ref()
|
||||
.filter(|item| item.beacon_block_root == block_root)
|
||||
.map(|item| item.block.clone())
|
||||
}
|
||||
|
||||
/// Returns the proto-array block, if `block_root` matches the cached item.
|
||||
pub fn get_proto_block(&self, block_root: Hash256) -> Option<ProtoBlock> {
|
||||
self.item
|
||||
.read()
|
||||
.as_ref()
|
||||
.filter(|item| item.beacon_block_root == block_root)
|
||||
.map(|item| item.proto_block.clone())
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,15 @@
|
||||
use crate::attester_cache::Error as AttesterCacheError;
|
||||
use crate::beacon_chain::ForkChoiceError;
|
||||
use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError;
|
||||
use crate::eth1_chain::Error as Eth1ChainError;
|
||||
use crate::historical_blocks::HistoricalBlockError;
|
||||
use crate::migrate::PruningError;
|
||||
use crate::naive_aggregation_pool::Error as NaiveAggregationError;
|
||||
use crate::observed_attestations::Error as ObservedAttestationsError;
|
||||
use crate::observed_aggregates::Error as ObservedAttestationsError;
|
||||
use crate::observed_attesters::Error as ObservedAttestersError;
|
||||
use crate::observed_block_producers::Error as ObservedBlockProducersError;
|
||||
use execution_layer::PayloadStatus;
|
||||
use futures::channel::mpsc::TrySendError;
|
||||
use operation_pool::OpPoolError;
|
||||
use safe_arith::ArithError;
|
||||
use ssz_types::Error as SszTypesError;
|
||||
@@ -11,12 +17,14 @@ use state_processing::{
|
||||
block_signature_verifier::Error as BlockSignatureVerifierError,
|
||||
per_block_processing::errors::{
|
||||
AttestationValidationError, AttesterSlashingValidationError, ExitValidationError,
|
||||
ProposerSlashingValidationError,
|
||||
ProposerSlashingValidationError, SyncCommitteeMessageValidationError,
|
||||
},
|
||||
signature_sets::Error as SignatureSetError,
|
||||
BlockProcessingError, SlotProcessingError,
|
||||
state_advance::Error as StateAdvanceError,
|
||||
BlockProcessingError, BlockReplayError, SlotProcessingError,
|
||||
};
|
||||
use std::time::Duration;
|
||||
use task_executor::ShutdownReason;
|
||||
use types::*;
|
||||
|
||||
macro_rules! easy_from_to {
|
||||
@@ -33,6 +41,7 @@ macro_rules! easy_from_to {
|
||||
pub enum BeaconChainError {
|
||||
InsufficientValidators,
|
||||
UnableToReadSlot,
|
||||
UnableToComputeTimeAtSlot,
|
||||
RevertedFinalizedEpoch {
|
||||
previous_epoch: Epoch,
|
||||
new_epoch: Epoch,
|
||||
@@ -44,15 +53,18 @@ pub enum BeaconChainError {
|
||||
DBInconsistent(String),
|
||||
DBError(store::Error),
|
||||
ForkChoiceError(ForkChoiceError),
|
||||
ForkChoiceStoreError(ForkChoiceStoreError),
|
||||
MissingBeaconBlock(Hash256),
|
||||
MissingBeaconState(Hash256),
|
||||
SlotProcessingError(SlotProcessingError),
|
||||
StateAdvanceError(StateAdvanceError),
|
||||
UnableToAdvanceState(String),
|
||||
NoStateForAttestation {
|
||||
beacon_block_root: Hash256,
|
||||
},
|
||||
CannotAttestToFutureState,
|
||||
AttestationValidationError(AttestationValidationError),
|
||||
SyncCommitteeMessageValidationError(SyncCommitteeMessageValidationError),
|
||||
ExitValidationError(ExitValidationError),
|
||||
ProposerSlashingValidationError(ProposerSlashingValidationError),
|
||||
AttesterSlashingValidationError(AttesterSlashingValidationError),
|
||||
@@ -61,29 +73,100 @@ pub enum BeaconChainError {
|
||||
requested_slot: Slot,
|
||||
max_task_runtime: Duration,
|
||||
},
|
||||
MissingFinalizedStateRoot(Slot),
|
||||
/// Returned when an internal check fails, indicating corrupt data.
|
||||
InvariantViolated(String),
|
||||
SszTypesError(SszTypesError),
|
||||
NoProposerForSlot(Slot),
|
||||
CanonicalHeadLockTimeout,
|
||||
AttestationCacheLockTimeout,
|
||||
ValidatorPubkeyCacheLockTimeout,
|
||||
SnapshotCacheLockTimeout,
|
||||
IncorrectStateForAttestation(RelativeEpochError),
|
||||
InvalidValidatorPubkeyBytes(bls::Error),
|
||||
ValidatorPubkeyCacheIncomplete(usize),
|
||||
SignatureSetError(SignatureSetError),
|
||||
BlockSignatureVerifierError(state_processing::block_signature_verifier::Error),
|
||||
BlockReplayError(BlockReplayError),
|
||||
DuplicateValidatorPublicKey,
|
||||
ValidatorPubkeyCacheFileError(String),
|
||||
ValidatorIndexUnknown(usize),
|
||||
ValidatorPubkeyUnknown(PublicKeyBytes),
|
||||
OpPoolError(OpPoolError),
|
||||
NaiveAggregationError(NaiveAggregationError),
|
||||
ObservedAttestationsError(ObservedAttestationsError),
|
||||
ObservedAttestersError(ObservedAttestersError),
|
||||
ObservedBlockProducersError(ObservedBlockProducersError),
|
||||
AttesterCacheError(AttesterCacheError),
|
||||
PruningError(PruningError),
|
||||
ArithError(ArithError),
|
||||
InvalidShufflingId {
|
||||
shuffling_epoch: Epoch,
|
||||
head_block_epoch: Epoch,
|
||||
},
|
||||
WeakSubjectivtyVerificationFailure,
|
||||
WeakSubjectivtyShutdownError(TrySendError<ShutdownReason>),
|
||||
AttestingToFinalizedSlot {
|
||||
finalized_slot: Slot,
|
||||
request_slot: Slot,
|
||||
},
|
||||
AttestingToAncientSlot {
|
||||
lowest_permissible_slot: Slot,
|
||||
request_slot: Slot,
|
||||
},
|
||||
BadPreState {
|
||||
parent_root: Hash256,
|
||||
parent_slot: Slot,
|
||||
block_root: Hash256,
|
||||
block_slot: Slot,
|
||||
state_slot: Slot,
|
||||
},
|
||||
HistoricalBlockError(HistoricalBlockError),
|
||||
InvalidStateForShuffling {
|
||||
state_epoch: Epoch,
|
||||
shuffling_epoch: Epoch,
|
||||
},
|
||||
SyncDutiesError(BeaconStateError),
|
||||
InconsistentForwardsIter {
|
||||
request_slot: Slot,
|
||||
slot: Slot,
|
||||
},
|
||||
InvalidReorgSlotIter {
|
||||
old_slot: Slot,
|
||||
new_slot: Slot,
|
||||
},
|
||||
AltairForkDisabled,
|
||||
ExecutionLayerMissing,
|
||||
ExecutionForkChoiceUpdateFailed(execution_layer::Error),
|
||||
PrepareProposerBlockingFailed(execution_layer::Error),
|
||||
ExecutionForkChoiceUpdateInvalid {
|
||||
status: PayloadStatus,
|
||||
},
|
||||
BlockRewardSlotError,
|
||||
BlockRewardAttestationError,
|
||||
BlockRewardSyncError,
|
||||
HeadMissingFromForkChoice(Hash256),
|
||||
FinalizedBlockMissingFromForkChoice(Hash256),
|
||||
InvalidFinalizedPayload {
|
||||
finalized_root: Hash256,
|
||||
execution_block_hash: ExecutionBlockHash,
|
||||
},
|
||||
InvalidFinalizedPayloadShutdownError(TrySendError<ShutdownReason>),
|
||||
JustifiedPayloadInvalid {
|
||||
justified_root: Hash256,
|
||||
execution_block_hash: Option<ExecutionBlockHash>,
|
||||
},
|
||||
ForkchoiceUpdate(execution_layer::Error),
|
||||
FinalizedCheckpointMismatch {
|
||||
head_state: Checkpoint,
|
||||
fork_choice: Hash256,
|
||||
},
|
||||
InvalidSlot(Slot),
|
||||
}
|
||||
|
||||
easy_from_to!(SlotProcessingError, BeaconChainError);
|
||||
easy_from_to!(AttestationValidationError, BeaconChainError);
|
||||
easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError);
|
||||
easy_from_to!(ExitValidationError, BeaconChainError);
|
||||
easy_from_to!(ProposerSlashingValidationError, BeaconChainError);
|
||||
easy_from_to!(AttesterSlashingValidationError, BeaconChainError);
|
||||
@@ -93,11 +176,18 @@ easy_from_to!(NaiveAggregationError, BeaconChainError);
|
||||
easy_from_to!(ObservedAttestationsError, BeaconChainError);
|
||||
easy_from_to!(ObservedAttestersError, BeaconChainError);
|
||||
easy_from_to!(ObservedBlockProducersError, BeaconChainError);
|
||||
easy_from_to!(AttesterCacheError, BeaconChainError);
|
||||
easy_from_to!(BlockSignatureVerifierError, BeaconChainError);
|
||||
easy_from_to!(PruningError, BeaconChainError);
|
||||
easy_from_to!(ArithError, BeaconChainError);
|
||||
easy_from_to!(ForkChoiceStoreError, BeaconChainError);
|
||||
easy_from_to!(HistoricalBlockError, BeaconChainError);
|
||||
easy_from_to!(StateAdvanceError, BeaconChainError);
|
||||
easy_from_to!(BlockReplayError, BeaconChainError);
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum BlockProductionError {
|
||||
UnableToGetHeadInfo(BeaconChainError),
|
||||
UnableToGetBlockRootFromState,
|
||||
UnableToReadSlot,
|
||||
UnableToProduceAtSlot(Slot),
|
||||
@@ -105,13 +195,26 @@ pub enum BlockProductionError {
|
||||
BlockProcessingError(BlockProcessingError),
|
||||
Eth1ChainError(Eth1ChainError),
|
||||
BeaconStateError(BeaconStateError),
|
||||
StateAdvanceError(StateAdvanceError),
|
||||
OpPoolError(OpPoolError),
|
||||
/// The `BeaconChain` was explicitly configured _without_ a connection to eth1, therefore it
|
||||
/// cannot produce blocks.
|
||||
NoEth1ChainConnection,
|
||||
StateSlotTooHigh {
|
||||
produce_at_slot: Slot,
|
||||
state_slot: Slot,
|
||||
},
|
||||
ExecutionLayerMissing,
|
||||
BlockingFailed(execution_layer::Error),
|
||||
TerminalPoWBlockLookupFailed(execution_layer::Error),
|
||||
GetPayloadFailed(execution_layer::Error),
|
||||
FailedToReadFinalizedBlock(store::Error),
|
||||
MissingFinalizedBlock(Hash256),
|
||||
BlockTooLarge(usize),
|
||||
}
|
||||
|
||||
easy_from_to!(BlockProcessingError, BlockProductionError);
|
||||
easy_from_to!(BeaconStateError, BlockProductionError);
|
||||
easy_from_to!(SlotProcessingError, BlockProductionError);
|
||||
easy_from_to!(Eth1ChainError, BlockProductionError);
|
||||
easy_from_to!(StateAdvanceError, BlockProductionError);
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use crate::metrics;
|
||||
use environment::TaskExecutor;
|
||||
use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService};
|
||||
use eth2::lighthouse::Eth1SyncStatusData;
|
||||
use eth2_hashing::hash;
|
||||
use int_to_bytes::int_to_bytes32;
|
||||
use slog::{debug, error, trace, Logger};
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
@@ -10,7 +11,9 @@ use std::cmp::Ordering;
|
||||
use std::collections::HashMap;
|
||||
use std::iter::DoubleEndedIterator;
|
||||
use std::marker::PhantomData;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use store::{DBColumn, Error as StoreError, StoreItem};
|
||||
use task_executor::TaskExecutor;
|
||||
use types::{
|
||||
BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned,
|
||||
DEPOSIT_TREE_DEPTH,
|
||||
@@ -19,6 +22,11 @@ use types::{
|
||||
type BlockNumber = u64;
|
||||
type Eth1DataVoteCount = HashMap<(Eth1Data, BlockNumber), u64>;
|
||||
|
||||
/// We will declare ourself synced with the Eth1 chain, even if we are this many blocks behind.
|
||||
///
|
||||
/// This number (8) was chosen somewhat arbitrarily.
|
||||
const ETH1_SYNC_TOLERANCE: u64 = 8;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
/// Unable to return an Eth1Data for the given epoch.
|
||||
@@ -53,6 +61,113 @@ impl From<safe_arith::ArithError> for Error {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an `Eth1SyncStatusData` given some parameters:
|
||||
///
|
||||
/// - `latest_cached_block`: The latest eth1 block in our cache, if any.
|
||||
/// - `head_block`: The block at the very head of our eth1 node (ignoring follow distance, etc).
|
||||
/// - `genesis_time`: beacon chain genesis time.
|
||||
/// - `current_slot`: current beacon chain slot.
|
||||
/// - `spec`: current beacon chain specification.
|
||||
fn get_sync_status<T: EthSpec>(
|
||||
latest_cached_block: Option<&Eth1Block>,
|
||||
head_block: Option<&Eth1Block>,
|
||||
genesis_time: u64,
|
||||
current_slot: Option<Slot>,
|
||||
spec: &ChainSpec,
|
||||
) -> Option<Eth1SyncStatusData> {
|
||||
let eth1_follow_distance_seconds = spec
|
||||
.seconds_per_eth1_block
|
||||
.saturating_mul(spec.eth1_follow_distance);
|
||||
|
||||
// The voting target timestamp needs to be special-cased when we're before
|
||||
// genesis (as defined by `current_slot == None`).
|
||||
//
|
||||
// For the sake of this status, when prior to genesis we want to invent some voting periods
|
||||
// that are *before* genesis, so that we can indicate to users that we're actually adequately
|
||||
// cached for where they are in time.
|
||||
let voting_target_timestamp = if let Some(current_slot) = current_slot {
|
||||
let period = T::SlotsPerEth1VotingPeriod::to_u64();
|
||||
let voting_period_start_slot = (current_slot / period) * period;
|
||||
|
||||
let period_start = slot_start_seconds::<T>(
|
||||
genesis_time,
|
||||
spec.seconds_per_slot,
|
||||
voting_period_start_slot,
|
||||
);
|
||||
|
||||
period_start.saturating_sub(eth1_follow_distance_seconds)
|
||||
} else {
|
||||
// The number of seconds in an eth1 voting period.
|
||||
let voting_period_duration =
|
||||
T::slots_per_eth1_voting_period() as u64 * spec.seconds_per_slot;
|
||||
|
||||
let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs();
|
||||
|
||||
// The number of seconds between now and genesis.
|
||||
let seconds_till_genesis = genesis_time.saturating_sub(now);
|
||||
|
||||
// Determine how many voting periods are contained in distance between
|
||||
// now and genesis, rounding up.
|
||||
let voting_periods_past =
|
||||
(seconds_till_genesis + voting_period_duration - 1) / voting_period_duration;
|
||||
|
||||
// Return the start time of the current voting period*.
|
||||
//
|
||||
// *: This voting period doesn't *actually* exist, we're just using it to
|
||||
// give useful logs prior to genesis.
|
||||
genesis_time
|
||||
.saturating_sub(voting_periods_past * voting_period_duration)
|
||||
.saturating_sub(eth1_follow_distance_seconds)
|
||||
};
|
||||
|
||||
let latest_cached_block_number = latest_cached_block.map(|b| b.number);
|
||||
let latest_cached_block_timestamp = latest_cached_block.map(|b| b.timestamp);
|
||||
let head_block_number = head_block.map(|b| b.number);
|
||||
let head_block_timestamp = head_block.map(|b| b.timestamp);
|
||||
|
||||
let eth1_node_sync_status_percentage = if let Some(head_block) = head_block {
|
||||
let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs();
|
||||
let head_age = now.saturating_sub(head_block.timestamp);
|
||||
|
||||
if head_age < ETH1_SYNC_TOLERANCE * spec.seconds_per_eth1_block {
|
||||
// Always indicate we are fully synced if it's within the sync threshold.
|
||||
100.0
|
||||
} else {
|
||||
let blocks_behind = head_age
|
||||
.checked_div(spec.seconds_per_eth1_block)
|
||||
.unwrap_or(0);
|
||||
|
||||
let part = f64::from(head_block.number as u32);
|
||||
let whole = f64::from(head_block.number.saturating_add(blocks_behind) as u32);
|
||||
|
||||
if whole > 0.0 {
|
||||
(part / whole) * 100.0
|
||||
} else {
|
||||
// Avoids a divide-by-zero.
|
||||
0.0
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Always return 0% synced if the head block of the eth1 chain is unknown.
|
||||
0.0
|
||||
};
|
||||
|
||||
// Lighthouse is "cached and ready" when it has cached enough blocks to cover the start of the
|
||||
// current voting period.
|
||||
let lighthouse_is_cached_and_ready =
|
||||
latest_cached_block_timestamp.map_or(false, |t| t >= voting_target_timestamp);
|
||||
|
||||
Some(Eth1SyncStatusData {
|
||||
head_block_number,
|
||||
head_block_timestamp,
|
||||
latest_cached_block_number,
|
||||
latest_cached_block_timestamp,
|
||||
voting_target_timestamp,
|
||||
eth1_node_sync_status_percentage,
|
||||
lighthouse_is_cached_and_ready,
|
||||
})
|
||||
}
|
||||
|
||||
#[derive(Encode, Decode, Clone)]
|
||||
pub struct SszEth1 {
|
||||
use_dummy_backend: bool,
|
||||
@@ -106,6 +221,11 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if the "dummy" backend is being used.
|
||||
pub fn is_dummy_backend(&self) -> bool {
|
||||
self.use_dummy_backend
|
||||
}
|
||||
|
||||
/// Returns the `Eth1Data` that should be included in a block being produced for the given
|
||||
/// `state`.
|
||||
pub fn eth1_data_for_block_production(
|
||||
@@ -143,6 +263,22 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a status indicating how synced our caches are with the eth1 chain.
|
||||
pub fn sync_status(
|
||||
&self,
|
||||
genesis_time: u64,
|
||||
current_slot: Option<Slot>,
|
||||
spec: &ChainSpec,
|
||||
) -> Option<Eth1SyncStatusData> {
|
||||
get_sync_status::<E>(
|
||||
self.backend.latest_cached_block().as_ref(),
|
||||
self.backend.head_block().as_ref(),
|
||||
genesis_time,
|
||||
current_slot,
|
||||
spec,
|
||||
)
|
||||
}
|
||||
|
||||
/// Instantiate `Eth1Chain` from a persisted `SszEth1`.
|
||||
///
|
||||
/// The `Eth1Chain` will have the same caches as the persisted `SszEth1`.
|
||||
@@ -195,6 +331,14 @@ pub trait Eth1ChainBackend<T: EthSpec>: Sized + Send + Sync {
|
||||
spec: &ChainSpec,
|
||||
) -> Result<Vec<Deposit>, Error>;
|
||||
|
||||
/// Returns the latest block stored in the cache. Used to obtain an idea of how up-to-date the
|
||||
/// beacon node eth1 cache is.
|
||||
fn latest_cached_block(&self) -> Option<Eth1Block>;
|
||||
|
||||
/// Returns the block at the head of the chain (ignoring follow distance, etc). Used to obtain
|
||||
/// an idea of how up-to-date the remote eth1 node is.
|
||||
fn head_block(&self) -> Option<Eth1Block>;
|
||||
|
||||
/// Encode the `Eth1ChainBackend` instance to bytes.
|
||||
fn as_bytes(&self) -> Vec<u8>;
|
||||
|
||||
@@ -226,7 +370,7 @@ impl<T: EthSpec> Eth1ChainBackend<T> for DummyEth1ChainBackend<T> {
|
||||
|
||||
Ok(Eth1Data {
|
||||
deposit_root: Hash256::from_slice(&deposit_root),
|
||||
deposit_count: state.eth1_deposit_index,
|
||||
deposit_count: state.eth1_deposit_index(),
|
||||
block_hash: Hash256::from_slice(&block_hash),
|
||||
})
|
||||
}
|
||||
@@ -241,6 +385,14 @@ impl<T: EthSpec> Eth1ChainBackend<T> for DummyEth1ChainBackend<T> {
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
fn latest_cached_block(&self) -> Option<Eth1Block> {
|
||||
None
|
||||
}
|
||||
|
||||
fn head_block(&self) -> Option<Eth1Block> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Return empty Vec<u8> for dummy backend.
|
||||
fn as_bytes(&self) -> Vec<u8> {
|
||||
Vec::new()
|
||||
@@ -305,10 +457,10 @@ impl<T: EthSpec> CachingEth1Backend<T> {
|
||||
impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
|
||||
fn eth1_data(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Result<Eth1Data, Error> {
|
||||
let period = T::SlotsPerEth1VotingPeriod::to_u64();
|
||||
let voting_period_start_slot = (state.slot / period) * period;
|
||||
let voting_period_start_slot = (state.slot() / period) * period;
|
||||
let voting_period_start_seconds = slot_start_seconds::<T>(
|
||||
state.genesis_time,
|
||||
spec.milliseconds_per_slot,
|
||||
state.genesis_time(),
|
||||
spec.seconds_per_slot,
|
||||
voting_period_start_slot,
|
||||
);
|
||||
|
||||
@@ -333,24 +485,25 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
|
||||
// If no votes exist, choose `state.eth1_data` as default vote.
|
||||
votes_to_consider
|
||||
.iter()
|
||||
.max_by(|(_, x), (_, y)| x.cmp(y))
|
||||
.max_by_key(|(_, block_number)| *block_number)
|
||||
.map(|vote| {
|
||||
let vote = vote.0.clone();
|
||||
debug!(
|
||||
self.log,
|
||||
"No valid eth1_data votes";
|
||||
"outcome" => "Casting vote corresponding to last candidate eth1 block",
|
||||
"vote" => ?vote
|
||||
);
|
||||
vote
|
||||
})
|
||||
.unwrap_or_else(|| {
|
||||
let vote = state.eth1_data.clone();
|
||||
let vote = state.eth1_data().clone();
|
||||
error!(
|
||||
self.log,
|
||||
"No valid eth1_data votes, `votes_to_consider` empty";
|
||||
"lowest_block_number" => self.core.lowest_block_number(),
|
||||
"earliest_block_timestamp" => self.core.earliest_block_timestamp(),
|
||||
"genesis_time" => state.genesis_time,
|
||||
"genesis_time" => state.genesis_time(),
|
||||
"outcome" => "casting `state.eth1_data` as eth1 vote"
|
||||
);
|
||||
metrics::inc_counter(&metrics::DEFAULT_ETH1_VOTES);
|
||||
@@ -375,11 +528,11 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
|
||||
eth1_data_vote: &Eth1Data,
|
||||
_spec: &ChainSpec,
|
||||
) -> Result<Vec<Deposit>, Error> {
|
||||
let deposit_index = state.eth1_deposit_index;
|
||||
let deposit_index = state.eth1_deposit_index();
|
||||
let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote)? {
|
||||
new_eth1_data.deposit_count
|
||||
} else {
|
||||
state.eth1_data.deposit_count
|
||||
state.eth1_data().deposit_count
|
||||
};
|
||||
|
||||
match deposit_index.cmp(&deposit_count) {
|
||||
@@ -400,6 +553,14 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
|
||||
}
|
||||
}
|
||||
|
||||
fn latest_cached_block(&self) -> Option<Eth1Block> {
|
||||
self.core.latest_cached_block()
|
||||
}
|
||||
|
||||
fn head_block(&self) -> Option<Eth1Block> {
|
||||
self.core.head_block()
|
||||
}
|
||||
|
||||
/// Return encoded byte representation of the block and deposit caches.
|
||||
fn as_bytes(&self) -> Vec<u8> {
|
||||
self.core.as_bytes()
|
||||
@@ -454,14 +615,12 @@ fn collect_valid_votes<T: EthSpec>(
|
||||
) -> Eth1DataVoteCount {
|
||||
let mut valid_votes = HashMap::new();
|
||||
state
|
||||
.eth1_data_votes
|
||||
.eth1_data_votes()
|
||||
.iter()
|
||||
.filter_map(|vote| {
|
||||
if let Some(block_num) = votes_to_consider.get(vote) {
|
||||
Some((vote.clone(), *block_num))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
votes_to_consider
|
||||
.get(vote)
|
||||
.map(|block_num| (vote.clone(), *block_num))
|
||||
})
|
||||
.for_each(|(eth1_data, block_number)| {
|
||||
valid_votes
|
||||
@@ -480,20 +639,13 @@ fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option<Eth1Data> {
|
||||
.map(|((eth1_data, _), _)| eth1_data.clone())
|
||||
}
|
||||
|
||||
/// Returns `int` as little-endian bytes with a length of 32.
|
||||
fn int_to_bytes32(int: u64) -> Vec<u8> {
|
||||
let mut vec = int.to_le_bytes().to_vec();
|
||||
vec.resize(32, 0);
|
||||
vec
|
||||
}
|
||||
|
||||
/// Returns the unix-epoch seconds at the start of the given `slot`.
|
||||
fn slot_start_seconds<T: EthSpec>(
|
||||
genesis_unix_seconds: u64,
|
||||
milliseconds_per_slot: u64,
|
||||
seconds_per_slot: u64,
|
||||
slot: Slot,
|
||||
) -> u64 {
|
||||
genesis_unix_seconds + slot.as_u64() * milliseconds_per_slot / 1_000
|
||||
genesis_unix_seconds + slot.as_u64() * seconds_per_slot
|
||||
}
|
||||
|
||||
/// Returns a boolean denoting if a given `Eth1Block` is a candidate for `Eth1Data` calculation
|
||||
@@ -513,8 +665,7 @@ fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) ->
|
||||
mod test {
|
||||
use super::*;
|
||||
use environment::null_logger;
|
||||
use std::iter::FromIterator;
|
||||
use types::{test_utils::DepositTestTask, MinimalEthSpec};
|
||||
use types::{DepositData, MinimalEthSpec, Signature};
|
||||
|
||||
type E = MinimalEthSpec;
|
||||
|
||||
@@ -528,10 +679,10 @@ mod test {
|
||||
|
||||
fn get_voting_period_start_seconds(state: &BeaconState<E>, spec: &ChainSpec) -> u64 {
|
||||
let period = <E as EthSpec>::SlotsPerEth1VotingPeriod::to_u64();
|
||||
let voting_period_start_slot = (state.slot / period) * period;
|
||||
let voting_period_start_slot = (state.slot() / period) * period;
|
||||
slot_start_seconds::<E>(
|
||||
state.genesis_time,
|
||||
spec.milliseconds_per_slot,
|
||||
state.genesis_time(),
|
||||
spec.seconds_per_slot,
|
||||
voting_period_start_slot,
|
||||
)
|
||||
}
|
||||
@@ -541,21 +692,21 @@ mod test {
|
||||
let zero_sec = 0;
|
||||
assert_eq!(slot_start_seconds::<E>(100, zero_sec, Slot::new(2)), 100);
|
||||
|
||||
let half_sec = 500;
|
||||
assert_eq!(slot_start_seconds::<E>(100, half_sec, Slot::new(0)), 100);
|
||||
assert_eq!(slot_start_seconds::<E>(100, half_sec, Slot::new(1)), 100);
|
||||
assert_eq!(slot_start_seconds::<E>(100, half_sec, Slot::new(2)), 101);
|
||||
assert_eq!(slot_start_seconds::<E>(100, half_sec, Slot::new(3)), 101);
|
||||
|
||||
let one_sec = 1_000;
|
||||
let one_sec = 1;
|
||||
assert_eq!(slot_start_seconds::<E>(100, one_sec, Slot::new(0)), 100);
|
||||
assert_eq!(slot_start_seconds::<E>(100, one_sec, Slot::new(1)), 101);
|
||||
assert_eq!(slot_start_seconds::<E>(100, one_sec, Slot::new(2)), 102);
|
||||
|
||||
let three_sec = 3_000;
|
||||
let three_sec = 3;
|
||||
assert_eq!(slot_start_seconds::<E>(100, three_sec, Slot::new(0)), 100);
|
||||
assert_eq!(slot_start_seconds::<E>(100, three_sec, Slot::new(1)), 103);
|
||||
assert_eq!(slot_start_seconds::<E>(100, three_sec, Slot::new(2)), 106);
|
||||
|
||||
let five_sec = 5;
|
||||
assert_eq!(slot_start_seconds::<E>(100, five_sec, Slot::new(0)), 100);
|
||||
assert_eq!(slot_start_seconds::<E>(100, five_sec, Slot::new(1)), 105);
|
||||
assert_eq!(slot_start_seconds::<E>(100, five_sec, Slot::new(2)), 110);
|
||||
assert_eq!(slot_start_seconds::<E>(100, five_sec, Slot::new(3)), 115);
|
||||
}
|
||||
|
||||
fn get_eth1_block(timestamp: u64, number: u64) -> Eth1Block {
|
||||
@@ -571,10 +722,7 @@ mod test {
|
||||
mod eth1_chain_json_backend {
|
||||
use super::*;
|
||||
use eth1::DepositLog;
|
||||
use types::{
|
||||
test_utils::{generate_deterministic_keypair, TestingDepositBuilder},
|
||||
EthSpec, MainnetEthSpec,
|
||||
};
|
||||
use types::{test_utils::generate_deterministic_keypair, EthSpec, MainnetEthSpec};
|
||||
|
||||
fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E>, E> {
|
||||
let eth1_config = Eth1Config {
|
||||
@@ -591,13 +739,17 @@ mod test {
|
||||
|
||||
fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog {
|
||||
let keypair = generate_deterministic_keypair(i as usize);
|
||||
let mut builder =
|
||||
TestingDepositBuilder::new(keypair.pk.clone(), spec.max_effective_balance);
|
||||
builder.sign(DepositTestTask::Valid, &keypair, spec);
|
||||
let deposit_data = builder.build().data;
|
||||
let mut deposit = DepositData {
|
||||
pubkey: keypair.pk.into(),
|
||||
withdrawal_credentials: Hash256::zero(),
|
||||
amount: spec.max_effective_balance,
|
||||
signature: Signature::empty().into(),
|
||||
};
|
||||
|
||||
deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec());
|
||||
|
||||
DepositLog {
|
||||
deposit_data,
|
||||
deposit_data: deposit,
|
||||
block_number: i,
|
||||
index: i,
|
||||
signature_is_valid: true,
|
||||
@@ -610,14 +762,14 @@ mod test {
|
||||
|
||||
let eth1_chain = get_eth1_chain();
|
||||
|
||||
assert_eq!(
|
||||
eth1_chain.use_dummy_backend, false,
|
||||
assert!(
|
||||
!eth1_chain.use_dummy_backend,
|
||||
"test should not use dummy backend"
|
||||
);
|
||||
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
|
||||
state.eth1_deposit_index = 0;
|
||||
state.eth1_data.deposit_count = 0;
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
|
||||
*state.eth1_deposit_index_mut() = 0;
|
||||
state.eth1_data_mut().deposit_count = 0;
|
||||
|
||||
assert!(
|
||||
eth1_chain
|
||||
@@ -626,7 +778,7 @@ mod test {
|
||||
"should succeed if cache is empty but no deposits are required"
|
||||
);
|
||||
|
||||
state.eth1_data.deposit_count = 1;
|
||||
state.eth1_data_mut().deposit_count = 1;
|
||||
|
||||
assert!(
|
||||
eth1_chain
|
||||
@@ -643,8 +795,8 @@ mod test {
|
||||
let eth1_chain = get_eth1_chain();
|
||||
let max_deposits = <E as EthSpec>::MaxDeposits::to_u64();
|
||||
|
||||
assert_eq!(
|
||||
eth1_chain.use_dummy_backend, false,
|
||||
assert!(
|
||||
!eth1_chain.use_dummy_backend,
|
||||
"test should not use dummy backend"
|
||||
);
|
||||
|
||||
@@ -658,7 +810,7 @@ mod test {
|
||||
.write()
|
||||
.cache
|
||||
.insert_log(log.clone())
|
||||
.expect("should insert log")
|
||||
.expect("should insert log");
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -668,9 +820,9 @@ mod test {
|
||||
"cache should store all logs"
|
||||
);
|
||||
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
|
||||
state.eth1_deposit_index = 0;
|
||||
state.eth1_data.deposit_count = 0;
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
|
||||
*state.eth1_deposit_index_mut() = 0;
|
||||
state.eth1_data_mut().deposit_count = 0;
|
||||
|
||||
assert!(
|
||||
eth1_chain
|
||||
@@ -680,10 +832,10 @@ mod test {
|
||||
);
|
||||
|
||||
(0..3).for_each(|initial_deposit_index| {
|
||||
state.eth1_deposit_index = initial_deposit_index as u64;
|
||||
*state.eth1_deposit_index_mut() = initial_deposit_index as u64;
|
||||
|
||||
(initial_deposit_index..deposits.len()).for_each(|i| {
|
||||
state.eth1_data.deposit_count = i as u64;
|
||||
state.eth1_data_mut().deposit_count = i as u64;
|
||||
|
||||
let deposits_for_inclusion = eth1_chain
|
||||
.deposits_for_block_inclusion(&state, &Eth1Data::default(), spec)
|
||||
@@ -725,18 +877,19 @@ mod test {
|
||||
|
||||
let eth1_chain = get_eth1_chain();
|
||||
|
||||
assert_eq!(
|
||||
eth1_chain.use_dummy_backend, false,
|
||||
assert!(
|
||||
!eth1_chain.use_dummy_backend,
|
||||
"test should not use dummy backend"
|
||||
);
|
||||
|
||||
let state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
|
||||
let state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
|
||||
|
||||
let a = eth1_chain
|
||||
.eth1_data_for_block_production(&state, &spec)
|
||||
.eth1_data_for_block_production(&state, spec)
|
||||
.expect("should produce default eth1 data vote");
|
||||
assert_eq!(
|
||||
a, state.eth1_data,
|
||||
a,
|
||||
*state.eth1_data(),
|
||||
"default vote should be same as state.eth1_data"
|
||||
);
|
||||
}
|
||||
@@ -749,16 +902,16 @@ mod test {
|
||||
|
||||
let eth1_chain = get_eth1_chain();
|
||||
|
||||
assert_eq!(
|
||||
eth1_chain.use_dummy_backend, false,
|
||||
assert!(
|
||||
!eth1_chain.use_dummy_backend,
|
||||
"test should not use dummy backend"
|
||||
);
|
||||
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
|
||||
|
||||
state.slot = Slot::from(slots_per_eth1_voting_period * 10);
|
||||
*state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10);
|
||||
let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block;
|
||||
let voting_period_start = get_voting_period_start_seconds(&state, &spec);
|
||||
let voting_period_start = get_voting_period_start_seconds(&state, spec);
|
||||
let start_eth1_block = voting_period_start - follow_distance_seconds * 2;
|
||||
let end_eth1_block = voting_period_start - follow_distance_seconds;
|
||||
|
||||
@@ -778,7 +931,7 @@ mod test {
|
||||
});
|
||||
|
||||
let vote = eth1_chain
|
||||
.eth1_data_for_block_production(&state, &spec)
|
||||
.eth1_data_for_block_production(&state, spec)
|
||||
.expect("should produce default eth1 data vote");
|
||||
|
||||
assert_eq!(
|
||||
@@ -808,7 +961,7 @@ mod test {
|
||||
get_votes_to_consider(
|
||||
blocks.iter(),
|
||||
get_voting_period_start_seconds(&state, spec),
|
||||
&spec,
|
||||
spec,
|
||||
),
|
||||
HashMap::new()
|
||||
);
|
||||
@@ -822,8 +975,8 @@ mod test {
|
||||
let eth1_follow_distance = spec.eth1_follow_distance;
|
||||
|
||||
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
|
||||
state.genesis_time = 0;
|
||||
state.slot = Slot::from(slots_per_eth1_voting_period * 10);
|
||||
*state.genesis_time_mut() = 0;
|
||||
*state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10);
|
||||
|
||||
let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block;
|
||||
let voting_period_start = get_voting_period_start_seconds(&state, &spec);
|
||||
@@ -887,10 +1040,7 @@ mod test {
|
||||
|
||||
let votes_to_consider = get_eth1_data_vec(slots, 0);
|
||||
|
||||
let votes = collect_valid_votes(
|
||||
&state,
|
||||
&HashMap::from_iter(votes_to_consider.clone().into_iter()),
|
||||
);
|
||||
let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect());
|
||||
assert_eq!(
|
||||
votes.len(),
|
||||
0,
|
||||
@@ -906,17 +1056,15 @@ mod test {
|
||||
|
||||
let votes_to_consider = get_eth1_data_vec(slots, 0);
|
||||
|
||||
state.eth1_data_votes = votes_to_consider[0..slots as usize / 4]
|
||||
*state.eth1_data_votes_mut() = votes_to_consider[0..slots as usize / 4]
|
||||
.iter()
|
||||
.map(|(eth1_data, _)| eth1_data)
|
||||
.cloned()
|
||||
.collect::<Vec<_>>()
|
||||
.into();
|
||||
|
||||
let votes = collect_valid_votes(
|
||||
&state,
|
||||
&HashMap::from_iter(votes_to_consider.clone().into_iter()),
|
||||
);
|
||||
let votes =
|
||||
collect_valid_votes(&state, &votes_to_consider.clone().into_iter().collect());
|
||||
assert_votes!(
|
||||
votes,
|
||||
votes_to_consider[0..slots as usize / 4].to_vec(),
|
||||
@@ -937,17 +1085,14 @@ mod test {
|
||||
.expect("should have some eth1 data")
|
||||
.clone();
|
||||
|
||||
state.eth1_data_votes = vec![duplicate_eth1_data.clone(); 4]
|
||||
*state.eth1_data_votes_mut() = vec![duplicate_eth1_data.clone(); 4]
|
||||
.iter()
|
||||
.map(|(eth1_data, _)| eth1_data)
|
||||
.cloned()
|
||||
.collect::<Vec<_>>()
|
||||
.into();
|
||||
|
||||
let votes = collect_valid_votes(
|
||||
&state,
|
||||
&HashMap::from_iter(votes_to_consider.clone().into_iter()),
|
||||
);
|
||||
let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect());
|
||||
assert_votes!(
|
||||
votes,
|
||||
// There should only be one value if there's a duplicate
|
||||
@@ -995,8 +1140,7 @@ mod test {
|
||||
assert_eq!(
|
||||
// Favour the highest block number when there are no votes.
|
||||
vote_data(&no_votes[2]),
|
||||
find_winning_vote(Eth1DataVoteCount::from_iter(no_votes.into_iter()))
|
||||
.expect("should find winner")
|
||||
find_winning_vote(no_votes.into_iter().collect()).expect("should find winner")
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1007,8 +1151,7 @@ mod test {
|
||||
assert_eq!(
|
||||
// Favour the highest block number when there are equal votes.
|
||||
vote_data(&votes[2]),
|
||||
find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
|
||||
.expect("should find winner")
|
||||
find_winning_vote(votes.into_iter().collect()).expect("should find winner")
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1019,8 +1162,7 @@ mod test {
|
||||
assert_eq!(
|
||||
// Favour the highest vote over the highest block number.
|
||||
vote_data(&votes[3]),
|
||||
find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
|
||||
.expect("should find winner")
|
||||
find_winning_vote(votes.into_iter().collect()).expect("should find winner")
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1031,8 +1173,7 @@ mod test {
|
||||
assert_eq!(
|
||||
// Favour the highest block number for tying votes.
|
||||
vote_data(&votes[3]),
|
||||
find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
|
||||
.expect("should find winner")
|
||||
find_winning_vote(votes.into_iter().collect()).expect("should find winner")
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1043,8 +1184,7 @@ mod test {
|
||||
assert_eq!(
|
||||
// Favour the highest block number for tying votes.
|
||||
vote_data(&votes[0]),
|
||||
find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
|
||||
.expect("should find winner")
|
||||
find_winning_vote(votes.into_iter().collect()).expect("should find winner")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,147 +1,152 @@
|
||||
use bus::Bus;
|
||||
use parking_lot::Mutex;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use slog::{error, Logger};
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use types::{Attestation, Epoch, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockHash};
|
||||
pub use websocket_server::WebSocketSender;
|
||||
pub use eth2::types::{EventKind, SseBlock, SseFinalizedCheckpoint, SseHead};
|
||||
use slog::{trace, Logger};
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::broadcast::{error::SendError, Receiver, Sender};
|
||||
use types::EthSpec;
|
||||
|
||||
pub trait EventHandler<T: EthSpec>: Sized + Send + Sync {
|
||||
fn register(&self, kind: EventKind<T>) -> Result<(), String>;
|
||||
}
|
||||
const DEFAULT_CHANNEL_CAPACITY: usize = 16;
|
||||
|
||||
pub struct NullEventHandler<T: EthSpec>(PhantomData<T>);
|
||||
|
||||
impl<T: EthSpec> EventHandler<T> for WebSocketSender<T> {
|
||||
fn register(&self, kind: EventKind<T>) -> Result<(), String> {
|
||||
self.send_string(
|
||||
serde_json::to_string(&kind)
|
||||
.map_err(|e| format!("Unable to serialize event: {:?}", e))?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ServerSentEvents<T: EthSpec> {
|
||||
// Bus<> is itself Sync + Send. We use Mutex<> here only because of the surrounding code does
|
||||
// not enforce mutability statically (i.e. relies on interior mutability).
|
||||
head_changed_queue: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
|
||||
pub struct ServerSentEventHandler<T: EthSpec> {
|
||||
attestation_tx: Sender<EventKind<T>>,
|
||||
block_tx: Sender<EventKind<T>>,
|
||||
finalized_tx: Sender<EventKind<T>>,
|
||||
head_tx: Sender<EventKind<T>>,
|
||||
exit_tx: Sender<EventKind<T>>,
|
||||
chain_reorg_tx: Sender<EventKind<T>>,
|
||||
contribution_tx: Sender<EventKind<T>>,
|
||||
late_head: Sender<EventKind<T>>,
|
||||
block_reward_tx: Sender<EventKind<T>>,
|
||||
log: Logger,
|
||||
_phantom: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T: EthSpec> ServerSentEvents<T> {
|
||||
pub fn new(log: Logger) -> (Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>) {
|
||||
let bus = Bus::new(T::slots_per_epoch() as usize);
|
||||
let mutex = Mutex::new(bus);
|
||||
let arc = Arc::new(mutex);
|
||||
let this = Self {
|
||||
head_changed_queue: arc.clone(),
|
||||
log,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
(this, arc)
|
||||
impl<T: EthSpec> ServerSentEventHandler<T> {
|
||||
pub fn new(log: Logger) -> Self {
|
||||
Self::new_with_capacity(log, DEFAULT_CHANNEL_CAPACITY)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> EventHandler<T> for ServerSentEvents<T> {
|
||||
fn register(&self, kind: EventKind<T>) -> Result<(), String> {
|
||||
match kind {
|
||||
EventKind::BeaconHeadChanged {
|
||||
current_head_beacon_block_root,
|
||||
..
|
||||
} => {
|
||||
let mut guard = self.head_changed_queue.lock();
|
||||
if guard
|
||||
.try_broadcast(current_head_beacon_block_root.into())
|
||||
.is_err()
|
||||
{
|
||||
error!(
|
||||
self.log,
|
||||
"Head change streaming queue full";
|
||||
"dropped_change" => format!("{}", current_head_beacon_block_root),
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
_ => Ok(()),
|
||||
pub fn new_with_capacity(log: Logger, capacity: usize) -> Self {
|
||||
let (attestation_tx, _) = broadcast::channel(capacity);
|
||||
let (block_tx, _) = broadcast::channel(capacity);
|
||||
let (finalized_tx, _) = broadcast::channel(capacity);
|
||||
let (head_tx, _) = broadcast::channel(capacity);
|
||||
let (exit_tx, _) = broadcast::channel(capacity);
|
||||
let (chain_reorg_tx, _) = broadcast::channel(capacity);
|
||||
let (contribution_tx, _) = broadcast::channel(capacity);
|
||||
let (late_head, _) = broadcast::channel(capacity);
|
||||
let (block_reward_tx, _) = broadcast::channel(capacity);
|
||||
|
||||
Self {
|
||||
attestation_tx,
|
||||
block_tx,
|
||||
finalized_tx,
|
||||
head_tx,
|
||||
exit_tx,
|
||||
chain_reorg_tx,
|
||||
contribution_tx,
|
||||
late_head,
|
||||
block_reward_tx,
|
||||
log,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// An event handler that pushes events to both the websockets handler and the SSE handler.
|
||||
// Named after the unix `tee` command. Meant as a temporary solution before ditching WebSockets
|
||||
// completely once SSE functions well enough.
|
||||
pub struct TeeEventHandler<E: EthSpec> {
|
||||
websockets_handler: WebSocketSender<E>,
|
||||
sse_handler: ServerSentEvents<E>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> TeeEventHandler<E> {
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn new(
|
||||
log: Logger,
|
||||
websockets_handler: WebSocketSender<E>,
|
||||
) -> Result<(Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>), String> {
|
||||
let (sse_handler, bus) = ServerSentEvents::new(log);
|
||||
let result = Self {
|
||||
websockets_handler,
|
||||
sse_handler,
|
||||
pub fn register(&self, kind: EventKind<T>) {
|
||||
let result = match kind {
|
||||
EventKind::Attestation(attestation) => self
|
||||
.attestation_tx
|
||||
.send(EventKind::Attestation(attestation))
|
||||
.map(|count| trace!(self.log, "Registering server-sent attestation event"; "receiver_count" => count)),
|
||||
EventKind::Block(block) => self.block_tx.send(EventKind::Block(block))
|
||||
.map(|count| trace!(self.log, "Registering server-sent block event"; "receiver_count" => count)),
|
||||
EventKind::FinalizedCheckpoint(checkpoint) => self.finalized_tx
|
||||
.send(EventKind::FinalizedCheckpoint(checkpoint))
|
||||
.map(|count| trace!(self.log, "Registering server-sent finalized checkpoint event"; "receiver_count" => count)),
|
||||
EventKind::Head(head) => self.head_tx.send(EventKind::Head(head))
|
||||
.map(|count| trace!(self.log, "Registering server-sent head event"; "receiver_count" => count)),
|
||||
EventKind::VoluntaryExit(exit) => self.exit_tx.send(EventKind::VoluntaryExit(exit))
|
||||
.map(|count| trace!(self.log, "Registering server-sent voluntary exit event"; "receiver_count" => count)),
|
||||
EventKind::ChainReorg(reorg) => self.chain_reorg_tx.send(EventKind::ChainReorg(reorg))
|
||||
.map(|count| trace!(self.log, "Registering server-sent chain reorg event"; "receiver_count" => count)),
|
||||
EventKind::ContributionAndProof(contribution_and_proof) => self.contribution_tx.send(EventKind::ContributionAndProof(contribution_and_proof))
|
||||
.map(|count| trace!(self.log, "Registering server-sent contribution and proof event"; "receiver_count" => count)),
|
||||
EventKind::LateHead(late_head) => self.late_head.send(EventKind::LateHead(late_head))
|
||||
.map(|count| trace!(self.log, "Registering server-sent late head event"; "receiver_count" => count)),
|
||||
EventKind::BlockReward(block_reward) => self.block_reward_tx.send(EventKind::BlockReward(block_reward))
|
||||
.map(|count| trace!(self.log, "Registering server-sent contribution and proof event"; "receiver_count" => count)),
|
||||
};
|
||||
Ok((result, bus))
|
||||
if let Err(SendError(event)) = result {
|
||||
trace!(self.log, "No receivers registered to listen for event"; "event" => ?event);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn subscribe_attestation(&self) -> Receiver<EventKind<T>> {
|
||||
self.attestation_tx.subscribe()
|
||||
}
|
||||
|
||||
pub fn subscribe_block(&self) -> Receiver<EventKind<T>> {
|
||||
self.block_tx.subscribe()
|
||||
}
|
||||
|
||||
pub fn subscribe_finalized(&self) -> Receiver<EventKind<T>> {
|
||||
self.finalized_tx.subscribe()
|
||||
}
|
||||
|
||||
pub fn subscribe_head(&self) -> Receiver<EventKind<T>> {
|
||||
self.head_tx.subscribe()
|
||||
}
|
||||
|
||||
pub fn subscribe_exit(&self) -> Receiver<EventKind<T>> {
|
||||
self.exit_tx.subscribe()
|
||||
}
|
||||
|
||||
pub fn subscribe_reorgs(&self) -> Receiver<EventKind<T>> {
|
||||
self.chain_reorg_tx.subscribe()
|
||||
}
|
||||
|
||||
pub fn subscribe_contributions(&self) -> Receiver<EventKind<T>> {
|
||||
self.contribution_tx.subscribe()
|
||||
}
|
||||
|
||||
pub fn subscribe_late_head(&self) -> Receiver<EventKind<T>> {
|
||||
self.late_head.subscribe()
|
||||
}
|
||||
|
||||
pub fn subscribe_block_reward(&self) -> Receiver<EventKind<T>> {
|
||||
self.block_reward_tx.subscribe()
|
||||
}
|
||||
|
||||
pub fn has_attestation_subscribers(&self) -> bool {
|
||||
self.attestation_tx.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_block_subscribers(&self) -> bool {
|
||||
self.block_tx.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_finalized_subscribers(&self) -> bool {
|
||||
self.finalized_tx.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_head_subscribers(&self) -> bool {
|
||||
self.head_tx.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_exit_subscribers(&self) -> bool {
|
||||
self.exit_tx.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_reorg_subscribers(&self) -> bool {
|
||||
self.chain_reorg_tx.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_contribution_subscribers(&self) -> bool {
|
||||
self.contribution_tx.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_late_head_subscribers(&self) -> bool {
|
||||
self.late_head.receiver_count() > 0
|
||||
}
|
||||
|
||||
pub fn has_block_reward_subscribers(&self) -> bool {
|
||||
self.block_reward_tx.receiver_count() > 0
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> EventHandler<E> for TeeEventHandler<E> {
|
||||
fn register(&self, kind: EventKind<E>) -> Result<(), String> {
|
||||
self.websockets_handler.register(kind.clone())?;
|
||||
self.sse_handler.register(kind)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> EventHandler<T> for NullEventHandler<T> {
|
||||
fn register(&self, _kind: EventKind<T>) -> Result<(), String> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Default for NullEventHandler<T> {
|
||||
fn default() -> Self {
|
||||
NullEventHandler(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
#[serde(
|
||||
bound = "T: EthSpec",
|
||||
rename_all = "snake_case",
|
||||
tag = "event",
|
||||
content = "data"
|
||||
)]
|
||||
pub enum EventKind<T: EthSpec> {
|
||||
BeaconHeadChanged {
|
||||
reorg: bool,
|
||||
current_head_beacon_block_root: Hash256,
|
||||
previous_head_beacon_block_root: Hash256,
|
||||
},
|
||||
BeaconFinalization {
|
||||
epoch: Epoch,
|
||||
root: Hash256,
|
||||
},
|
||||
BeaconBlockImported {
|
||||
block_root: Hash256,
|
||||
block: Box<SignedBeaconBlock<T>>,
|
||||
},
|
||||
BeaconBlockRejected {
|
||||
reason: String,
|
||||
block: Box<SignedBeaconBlock<T>>,
|
||||
},
|
||||
BeaconAttestationImported {
|
||||
attestation: Box<Attestation<T>>,
|
||||
},
|
||||
BeaconAttestationRejected {
|
||||
reason: String,
|
||||
attestation: Box<Attestation<T>>,
|
||||
},
|
||||
}
|
||||
|
||||
354
beacon_node/beacon_chain/src/execution_payload.rs
Normal file
354
beacon_node/beacon_chain/src/execution_payload.rs
Normal file
@@ -0,0 +1,354 @@
|
||||
//! This module contains various functions for producing and verifying `ExecutionPayloads`.
|
||||
//!
|
||||
//! Lighthouse tends to do payload tasks in *slightly* different locations to the specification.
|
||||
//! This is because some tasks involve calling out to external servers and it's nice to keep those
|
||||
//! away from our pure `state_processing` and `fork_choice` crates.
|
||||
//!
|
||||
//! So, this module contains functions that one might expect to find in other crates, but they live
|
||||
//! here for good reason.
|
||||
|
||||
use crate::{
|
||||
BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError,
|
||||
ExecutionPayloadError,
|
||||
};
|
||||
use execution_layer::PayloadStatus;
|
||||
use fork_choice::{InvalidationOperation, PayloadVerificationStatus};
|
||||
use proto_array::{Block as ProtoBlock, ExecutionStatus};
|
||||
use slog::debug;
|
||||
use slot_clock::SlotClock;
|
||||
use state_processing::per_block_processing::{
|
||||
compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete,
|
||||
partially_verify_execution_payload,
|
||||
};
|
||||
use types::*;
|
||||
|
||||
/// Verify that `execution_payload` contained by `block` is considered valid by an execution
|
||||
/// engine.
|
||||
///
|
||||
/// ## Specification
|
||||
///
|
||||
/// Equivalent to the `notify_new_payload` function in the merge Beacon Chain Changes, although it
|
||||
/// contains a few extra checks by running `partially_verify_execution_payload` first:
|
||||
///
|
||||
/// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload
|
||||
pub fn notify_new_payload<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
block: BeaconBlockRef<T::EthSpec>,
|
||||
) -> Result<PayloadVerificationStatus, BlockError<T::EthSpec>> {
|
||||
if !is_execution_enabled(state, block.body()) {
|
||||
return Ok(PayloadVerificationStatus::Irrelevant);
|
||||
}
|
||||
|
||||
let execution_payload = block.execution_payload()?;
|
||||
|
||||
// Perform the initial stages of payload verification.
|
||||
//
|
||||
// We will duplicate these checks again during `per_block_processing`, however these checks
|
||||
// are cheap and doing them here ensures we protect the execution payload from junk.
|
||||
partially_verify_execution_payload(state, execution_payload, &chain.spec)
|
||||
.map_err(BlockError::PerBlockProcessingError)?;
|
||||
|
||||
let execution_layer = chain
|
||||
.execution_layer
|
||||
.as_ref()
|
||||
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
|
||||
let new_payload_response = execution_layer
|
||||
.block_on(|execution_layer| execution_layer.notify_new_payload(execution_payload));
|
||||
|
||||
match new_payload_response {
|
||||
Ok(status) => match status {
|
||||
PayloadStatus::Valid => Ok(PayloadVerificationStatus::Verified),
|
||||
PayloadStatus::Syncing | PayloadStatus::Accepted => {
|
||||
Ok(PayloadVerificationStatus::NotVerified)
|
||||
}
|
||||
PayloadStatus::Invalid {
|
||||
latest_valid_hash, ..
|
||||
} => {
|
||||
// This block has not yet been applied to fork choice, so the latest block that was
|
||||
// imported to fork choice was the parent.
|
||||
let latest_root = block.parent_root();
|
||||
chain.process_invalid_execution_payload(
|
||||
&InvalidationOperation::InvalidateMany {
|
||||
head_block_root: latest_root,
|
||||
always_invalidate_head: false,
|
||||
latest_valid_ancestor: latest_valid_hash,
|
||||
},
|
||||
)?;
|
||||
|
||||
Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into())
|
||||
}
|
||||
PayloadStatus::InvalidTerminalBlock { .. } | PayloadStatus::InvalidBlockHash { .. } => {
|
||||
// Returning an error here should be sufficient to invalidate the block. We have no
|
||||
// information to indicate its parent is invalid, so no need to run
|
||||
// `BeaconChain::process_invalid_execution_payload`.
|
||||
Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into())
|
||||
}
|
||||
},
|
||||
Err(e) => Err(ExecutionPayloadError::RequestFailed(e).into()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify that the block which triggers the merge is valid to be imported to fork choice.
|
||||
///
|
||||
/// ## Errors
|
||||
///
|
||||
/// Will return an error when using a pre-merge fork `state`. Ensure to only run this function
|
||||
/// after the merge fork.
|
||||
///
|
||||
/// ## Specification
|
||||
///
|
||||
/// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes:
|
||||
///
|
||||
/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block
|
||||
pub fn validate_merge_block<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
block: BeaconBlockRef<T::EthSpec>,
|
||||
) -> Result<(), BlockError<T::EthSpec>> {
|
||||
let spec = &chain.spec;
|
||||
let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
|
||||
let execution_payload = block.execution_payload()?;
|
||||
|
||||
if spec.terminal_block_hash != ExecutionBlockHash::zero() {
|
||||
if block_epoch < spec.terminal_block_hash_activation_epoch {
|
||||
return Err(ExecutionPayloadError::InvalidActivationEpoch {
|
||||
activation_epoch: spec.terminal_block_hash_activation_epoch,
|
||||
epoch: block_epoch,
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
if execution_payload.parent_hash != spec.terminal_block_hash {
|
||||
return Err(ExecutionPayloadError::InvalidTerminalBlockHash {
|
||||
terminal_block_hash: spec.terminal_block_hash,
|
||||
payload_parent_hash: execution_payload.parent_hash,
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let execution_layer = chain
|
||||
.execution_layer
|
||||
.as_ref()
|
||||
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
|
||||
|
||||
let is_valid_terminal_pow_block = execution_layer
|
||||
.block_on(|execution_layer| {
|
||||
execution_layer.is_valid_terminal_pow_block_hash(execution_payload.parent_hash, spec)
|
||||
})
|
||||
.map_err(ExecutionPayloadError::from)?;
|
||||
|
||||
match is_valid_terminal_pow_block {
|
||||
Some(true) => Ok(()),
|
||||
Some(false) => Err(ExecutionPayloadError::InvalidTerminalPoWBlock {
|
||||
parent_hash: execution_payload.parent_hash,
|
||||
}
|
||||
.into()),
|
||||
None => {
|
||||
let current_slot = chain
|
||||
.slot_clock
|
||||
.now()
|
||||
.ok_or(BeaconChainError::UnableToReadSlot)?;
|
||||
|
||||
// Ensure the block is a candidate for optimistic import.
|
||||
if chain
|
||||
.fork_choice
|
||||
.read()
|
||||
.is_optimistic_candidate_block(
|
||||
current_slot,
|
||||
block.slot(),
|
||||
&block.parent_root(),
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::from)?
|
||||
{
|
||||
debug!(
|
||||
chain.log,
|
||||
"Optimistically accepting terminal block";
|
||||
"block_hash" => ?execution_payload.parent_hash,
|
||||
"msg" => "the terminal block/parent was unavailable"
|
||||
);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate the gossip block's execution_payload according to the checks described here:
|
||||
/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block
|
||||
pub fn validate_execution_payload_for_gossip<T: BeaconChainTypes>(
|
||||
parent_block: &ProtoBlock,
|
||||
block: BeaconBlockRef<'_, T::EthSpec>,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<(), BlockError<T::EthSpec>> {
|
||||
// Only apply this validation if this is a merge beacon block.
|
||||
if let Ok(execution_payload) = block.body().execution_payload() {
|
||||
// This logic should match `is_execution_enabled`. We use only the execution block hash of
|
||||
// the parent here in order to avoid loading the parent state during gossip verification.
|
||||
|
||||
let is_merge_transition_complete = match parent_block.execution_status {
|
||||
// Optimistically declare that an "unknown" status block has completed the merge.
|
||||
ExecutionStatus::Valid(_) | ExecutionStatus::Unknown(_) => true,
|
||||
// It's impossible for an irrelevant block to have completed the merge. It is pre-merge
|
||||
// by definition.
|
||||
ExecutionStatus::Irrelevant(_) => false,
|
||||
// If the parent has an invalid payload then it's impossible to build a valid block upon
|
||||
// it. Reject the block.
|
||||
ExecutionStatus::Invalid(_) => {
|
||||
return Err(BlockError::ParentExecutionPayloadInvalid {
|
||||
parent_root: parent_block.root,
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
if is_merge_transition_complete || execution_payload != &<_>::default() {
|
||||
let expected_timestamp = chain
|
||||
.slot_clock
|
||||
.start_of(block.slot())
|
||||
.map(|d| d.as_secs())
|
||||
.ok_or(BlockError::BeaconChainError(
|
||||
BeaconChainError::UnableToComputeTimeAtSlot,
|
||||
))?;
|
||||
|
||||
// The block's execution payload timestamp is correct with respect to the slot
|
||||
if execution_payload.timestamp != expected_timestamp {
|
||||
return Err(BlockError::ExecutionPayloadError(
|
||||
ExecutionPayloadError::InvalidPayloadTimestamp {
|
||||
expected: expected_timestamp,
|
||||
found: execution_payload.timestamp,
|
||||
},
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Gets an execution payload for inclusion in a block.
|
||||
///
|
||||
/// ## Errors
|
||||
///
|
||||
/// Will return an error when using a pre-merge fork `state`. Ensure to only run this function
|
||||
/// after the merge fork.
|
||||
///
|
||||
/// ## Specification
|
||||
///
|
||||
/// Equivalent to the `get_execution_payload` function in the Validator Guide:
|
||||
///
|
||||
/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal
|
||||
pub fn get_execution_payload<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
proposer_index: u64,
|
||||
) -> Result<ExecutionPayload<T::EthSpec>, BlockProductionError> {
|
||||
Ok(prepare_execution_payload_blocking(chain, state, proposer_index)?.unwrap_or_default())
|
||||
}
|
||||
|
||||
/// Wraps the async `prepare_execution_payload` function as a blocking task.
|
||||
pub fn prepare_execution_payload_blocking<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
proposer_index: u64,
|
||||
) -> Result<Option<ExecutionPayload<T::EthSpec>>, BlockProductionError> {
|
||||
let execution_layer = chain
|
||||
.execution_layer
|
||||
.as_ref()
|
||||
.ok_or(BlockProductionError::ExecutionLayerMissing)?;
|
||||
|
||||
execution_layer
|
||||
.block_on_generic(|_| async {
|
||||
prepare_execution_payload(chain, state, proposer_index).await
|
||||
})
|
||||
.map_err(BlockProductionError::BlockingFailed)?
|
||||
}
|
||||
|
||||
/// Prepares an execution payload for inclusion in a block.
|
||||
///
|
||||
/// Will return `Ok(None)` if the merge fork has occurred, but a terminal block has not been found.
|
||||
///
|
||||
/// ## Errors
|
||||
///
|
||||
/// Will return an error when using a pre-merge fork `state`. Ensure to only run this function
|
||||
/// after the merge fork.
|
||||
///
|
||||
/// ## Specification
|
||||
///
|
||||
/// Equivalent to the `prepare_execution_payload` function in the Validator Guide:
|
||||
///
|
||||
/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal
|
||||
pub async fn prepare_execution_payload<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
proposer_index: u64,
|
||||
) -> Result<Option<ExecutionPayload<T::EthSpec>>, BlockProductionError> {
|
||||
let spec = &chain.spec;
|
||||
let execution_layer = chain
|
||||
.execution_layer
|
||||
.as_ref()
|
||||
.ok_or(BlockProductionError::ExecutionLayerMissing)?;
|
||||
|
||||
let parent_hash = if !is_merge_transition_complete(state) {
|
||||
let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero();
|
||||
let is_activation_epoch_reached =
|
||||
state.current_epoch() >= spec.terminal_block_hash_activation_epoch;
|
||||
|
||||
if is_terminal_block_hash_set && !is_activation_epoch_reached {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let terminal_pow_block_hash = execution_layer
|
||||
.get_terminal_pow_block_hash(spec)
|
||||
.await
|
||||
.map_err(BlockProductionError::TerminalPoWBlockLookupFailed)?;
|
||||
|
||||
if let Some(terminal_pow_block_hash) = terminal_pow_block_hash {
|
||||
terminal_pow_block_hash
|
||||
} else {
|
||||
return Ok(None);
|
||||
}
|
||||
} else {
|
||||
state.latest_execution_payload_header()?.block_hash
|
||||
};
|
||||
|
||||
let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?;
|
||||
let random = *state.get_randao_mix(state.current_epoch())?;
|
||||
let finalized_root = state.finalized_checkpoint().root;
|
||||
|
||||
// The finalized block hash is not included in the specification, however we provide this
|
||||
// parameter so that the execution layer can produce a payload id if one is not already known
|
||||
// (e.g., due to a recent reorg).
|
||||
let finalized_block_hash =
|
||||
if let Some(block) = chain.fork_choice.read().get_block(&finalized_root) {
|
||||
block.execution_status.block_hash()
|
||||
} else {
|
||||
chain
|
||||
.store
|
||||
.get_block(&finalized_root)
|
||||
.map_err(BlockProductionError::FailedToReadFinalizedBlock)?
|
||||
.ok_or(BlockProductionError::MissingFinalizedBlock(finalized_root))?
|
||||
.message()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.ok()
|
||||
.map(|ep| ep.block_hash)
|
||||
};
|
||||
|
||||
// Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter.
|
||||
let execution_payload = execution_layer
|
||||
.get_payload(
|
||||
parent_hash,
|
||||
timestamp,
|
||||
random,
|
||||
finalized_block_hash.unwrap_or_else(ExecutionBlockHash::zero),
|
||||
proposer_index,
|
||||
)
|
||||
.await
|
||||
.map_err(BlockProductionError::GetPayloadFailed)?;
|
||||
|
||||
Ok(Some(execution_payload))
|
||||
}
|
||||
193
beacon_node/beacon_chain/src/fork_revert.rs
Normal file
193
beacon_node/beacon_chain/src/fork_revert.rs
Normal file
@@ -0,0 +1,193 @@
|
||||
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
|
||||
use fork_choice::{ForkChoice, PayloadVerificationStatus};
|
||||
use itertools::process_results;
|
||||
use slog::{info, warn, Logger};
|
||||
use state_processing::state_advance::complete_state_advance;
|
||||
use state_processing::{
|
||||
per_block_processing, per_block_processing::BlockSignatureStrategy, VerifyBlockRoot,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore};
|
||||
use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot};
|
||||
|
||||
const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \
|
||||
consider deleting it by running with the --purge-db flag.";
|
||||
|
||||
/// Revert the head to the last block before the most recent hard fork.
|
||||
///
|
||||
/// This function is destructive and should only be used if there is no viable alternative. It will
|
||||
/// cause the reverted blocks and states to be completely forgotten, lying dormant in the database
|
||||
/// forever.
|
||||
///
|
||||
/// Return the `(head_block_root, head_block)` that should be used post-reversion.
|
||||
pub fn revert_to_fork_boundary<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
|
||||
current_slot: Slot,
|
||||
head_block_root: Hash256,
|
||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
spec: &ChainSpec,
|
||||
log: &Logger,
|
||||
) -> Result<(Hash256, SignedBeaconBlock<E>), String> {
|
||||
let current_fork = spec.fork_name_at_slot::<E>(current_slot);
|
||||
let fork_epoch = spec
|
||||
.fork_epoch(current_fork)
|
||||
.ok_or_else(|| format!("Current fork '{}' never activates", current_fork))?;
|
||||
|
||||
if current_fork == ForkName::Base {
|
||||
return Err(format!(
|
||||
"Cannot revert to before phase0 hard fork. {}",
|
||||
CORRUPT_DB_MESSAGE
|
||||
));
|
||||
}
|
||||
|
||||
warn!(
|
||||
log,
|
||||
"Reverting invalid head block";
|
||||
"target_fork" => %current_fork,
|
||||
"fork_epoch" => fork_epoch,
|
||||
);
|
||||
let block_iter = ParentRootBlockIterator::fork_tolerant(&store, head_block_root);
|
||||
|
||||
process_results(block_iter, |mut iter| {
|
||||
iter.find_map(|(block_root, block)| {
|
||||
if block.slot() < fork_epoch.start_slot(E::slots_per_epoch()) {
|
||||
Some((block_root, block))
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Reverting block";
|
||||
"block_root" => ?block_root,
|
||||
"slot" => block.slot(),
|
||||
);
|
||||
None
|
||||
}
|
||||
})
|
||||
})
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Error fetching blocks to revert: {:?}. {}",
|
||||
e, CORRUPT_DB_MESSAGE
|
||||
)
|
||||
})?
|
||||
.ok_or_else(|| format!("No pre-fork blocks found. {}", CORRUPT_DB_MESSAGE))
|
||||
}
|
||||
|
||||
/// Reset fork choice to the finalized checkpoint of the supplied head state.
|
||||
///
|
||||
/// The supplied `head_block_root` should correspond to the most recently applied block on
|
||||
/// `head_state`.
|
||||
///
|
||||
/// This function avoids quirks of fork choice initialization by replaying all of the blocks from
|
||||
/// the checkpoint to the head.
|
||||
///
|
||||
/// See this issue for details: https://github.com/ethereum/consensus-specs/issues/2566
|
||||
///
|
||||
/// It will fail if the finalized state or any of the blocks to replay are unavailable.
|
||||
///
|
||||
/// WARNING: this function is destructive and causes fork choice to permanently forget all
|
||||
/// chains other than the chain leading to `head_block_root`. It should only be used in extreme
|
||||
/// circumstances when there is no better alternative.
|
||||
pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
|
||||
head_block_root: Hash256,
|
||||
head_state: &BeaconState<E>,
|
||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
|
||||
// Fetch finalized block.
|
||||
let finalized_checkpoint = head_state.finalized_checkpoint();
|
||||
let finalized_block_root = finalized_checkpoint.root;
|
||||
let finalized_block = store
|
||||
.get_block(&finalized_block_root)
|
||||
.map_err(|e| format!("Error loading finalized block: {:?}", e))?
|
||||
.ok_or_else(|| {
|
||||
format!(
|
||||
"Finalized block missing for revert: {:?}",
|
||||
finalized_block_root
|
||||
)
|
||||
})?;
|
||||
|
||||
// Advance finalized state to finalized epoch (to handle skipped slots).
|
||||
let finalized_state_root = finalized_block.state_root();
|
||||
let mut finalized_state = store
|
||||
.get_state(&finalized_state_root, Some(finalized_block.slot()))
|
||||
.map_err(|e| format!("Error loading finalized state: {:?}", e))?
|
||||
.ok_or_else(|| {
|
||||
format!(
|
||||
"Finalized block state missing from database: {:?}",
|
||||
finalized_state_root
|
||||
)
|
||||
})?;
|
||||
let finalized_slot = finalized_checkpoint.epoch.start_slot(E::slots_per_epoch());
|
||||
complete_state_advance(
|
||||
&mut finalized_state,
|
||||
Some(finalized_state_root),
|
||||
finalized_slot,
|
||||
spec,
|
||||
)
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Error advancing finalized state to finalized epoch: {:?}",
|
||||
e
|
||||
)
|
||||
})?;
|
||||
let finalized_snapshot = BeaconSnapshot {
|
||||
beacon_block_root: finalized_block_root,
|
||||
beacon_block: finalized_block,
|
||||
beacon_state: finalized_state,
|
||||
};
|
||||
|
||||
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), &finalized_snapshot);
|
||||
|
||||
let mut fork_choice = ForkChoice::from_anchor(
|
||||
fc_store,
|
||||
finalized_block_root,
|
||||
&finalized_snapshot.beacon_block,
|
||||
&finalized_snapshot.beacon_state,
|
||||
)
|
||||
.map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?;
|
||||
|
||||
// Replay blocks from finalized checkpoint back to head.
|
||||
// We do not replay attestations presently, relying on the absence of other blocks
|
||||
// to guarantee `head_block_root` as the head.
|
||||
let blocks = store
|
||||
.load_blocks_to_replay(finalized_slot + 1, head_state.slot(), head_block_root)
|
||||
.map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?;
|
||||
|
||||
let mut state = finalized_snapshot.beacon_state;
|
||||
for block in blocks {
|
||||
complete_state_advance(&mut state, None, block.slot(), spec)
|
||||
.map_err(|e| format!("State advance failed: {:?}", e))?;
|
||||
|
||||
per_block_processing(
|
||||
&mut state,
|
||||
&block,
|
||||
None,
|
||||
BlockSignatureStrategy::NoVerification,
|
||||
VerifyBlockRoot::True,
|
||||
spec,
|
||||
)
|
||||
.map_err(|e| format!("Error replaying block: {:?}", e))?;
|
||||
|
||||
// Setting this to unverified is the safest solution, since we don't have a way to
|
||||
// retro-actively determine if they were valid or not.
|
||||
//
|
||||
// This scenario is so rare that it seems OK to double-verify some blocks.
|
||||
let payload_verification_status = PayloadVerificationStatus::NotVerified;
|
||||
|
||||
let (block, _) = block.deconstruct();
|
||||
fork_choice
|
||||
.on_block(
|
||||
block.slot(),
|
||||
&block,
|
||||
block.canonical_root(),
|
||||
// Reward proposer boost. We are reinforcing the canonical chain.
|
||||
Duration::from_secs(0),
|
||||
&state,
|
||||
payload_verification_status,
|
||||
spec,
|
||||
)
|
||||
.map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
|
||||
}
|
||||
|
||||
Ok(fork_choice)
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
use parking_lot::RwLock;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use std::collections::HashMap;
|
||||
use std::iter::FromIterator;
|
||||
use types::{Hash256, Slot};
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@@ -15,7 +14,7 @@ pub enum Error {
|
||||
/// In order for this struct to be effective, every single block that is imported must be
|
||||
/// registered here.
|
||||
#[derive(Default, Debug)]
|
||||
pub struct HeadTracker(RwLock<HashMap<Hash256, Slot>>);
|
||||
pub struct HeadTracker(pub RwLock<HashMap<Hash256, Slot>>);
|
||||
|
||||
impl HeadTracker {
|
||||
/// Register a block with `Self`, so it may or may not be included in a `Self::heads` call.
|
||||
@@ -29,13 +28,6 @@ impl HeadTracker {
|
||||
map.insert(block_root, slot);
|
||||
}
|
||||
|
||||
/// Removes abandoned head.
|
||||
pub fn remove_head(&self, block_root: Hash256) {
|
||||
let mut map = self.0.write();
|
||||
debug_assert!(map.contains_key(&block_root));
|
||||
map.remove(&block_root);
|
||||
}
|
||||
|
||||
/// Returns true iff `block_root` is a recognized head.
|
||||
pub fn contains_head(&self, block_root: Hash256) -> bool {
|
||||
self.0.read().contains_key(&block_root)
|
||||
@@ -53,14 +45,7 @@ impl HeadTracker {
|
||||
/// Returns a `SszHeadTracker`, which contains all necessary information to restore the state
|
||||
/// of `Self` at some later point.
|
||||
pub fn to_ssz_container(&self) -> SszHeadTracker {
|
||||
let (roots, slots) = self
|
||||
.0
|
||||
.read()
|
||||
.iter()
|
||||
.map(|(hash, slot)| (*hash, *slot))
|
||||
.unzip();
|
||||
|
||||
SszHeadTracker { roots, slots }
|
||||
SszHeadTracker::from_map(&*self.0.read())
|
||||
}
|
||||
|
||||
/// Creates a new `Self` from the given `SszHeadTracker`, restoring `Self` to the same state of
|
||||
@@ -75,13 +60,12 @@ impl HeadTracker {
|
||||
slots_len,
|
||||
})
|
||||
} else {
|
||||
let map = HashMap::from_iter(
|
||||
ssz_container
|
||||
.roots
|
||||
.iter()
|
||||
.zip(ssz_container.slots.iter())
|
||||
.map(|(root, slot)| (*root, *slot)),
|
||||
);
|
||||
let map = ssz_container
|
||||
.roots
|
||||
.iter()
|
||||
.zip(ssz_container.slots.iter())
|
||||
.map(|(root, slot)| (*root, *slot))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
Ok(Self(RwLock::new(map)))
|
||||
}
|
||||
@@ -103,6 +87,13 @@ pub struct SszHeadTracker {
|
||||
slots: Vec<Slot>,
|
||||
}
|
||||
|
||||
impl SszHeadTracker {
|
||||
pub fn from_map(map: &HashMap<Hash256, Slot>) -> Self {
|
||||
let (roots, slots) = map.iter().map(|(hash, slot)| (*hash, *slot)).unzip();
|
||||
SszHeadTracker { roots, slots }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
@@ -121,14 +112,14 @@ mod test {
|
||||
let mut block: BeaconBlock<E> = BeaconBlock::empty(spec);
|
||||
let block_root = Hash256::from_low_u64_be(i);
|
||||
|
||||
block.slot = Slot::new(i);
|
||||
block.parent_root = if i == 0 {
|
||||
*block.slot_mut() = Slot::new(i);
|
||||
*block.parent_root_mut() = if i == 0 {
|
||||
Hash256::random()
|
||||
} else {
|
||||
Hash256::from_low_u64_be(i - 1)
|
||||
};
|
||||
|
||||
head_tracker.register_block(block_root, block.parent_root, block.slot);
|
||||
head_tracker.register_block(block_root, block.parent_root(), block.slot());
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
@@ -139,9 +130,9 @@ mod test {
|
||||
|
||||
let mut block: BeaconBlock<E> = BeaconBlock::empty(spec);
|
||||
let block_root = Hash256::from_low_u64_be(42);
|
||||
block.slot = Slot::new(15);
|
||||
block.parent_root = Hash256::from_low_u64_be(14);
|
||||
head_tracker.register_block(block_root, block.parent_root, block.slot);
|
||||
*block.slot_mut() = Slot::new(15);
|
||||
*block.parent_root_mut() = Hash256::from_low_u64_be(14);
|
||||
head_tracker.register_block(block_root, block.parent_root(), block.slot());
|
||||
|
||||
let heads = head_tracker.heads();
|
||||
|
||||
|
||||
202
beacon_node/beacon_chain/src/historical_blocks.rs
Normal file
202
beacon_node/beacon_chain/src/historical_blocks.rs
Normal file
@@ -0,0 +1,202 @@
|
||||
use crate::{errors::BeaconChainError as Error, metrics, BeaconChain, BeaconChainTypes};
|
||||
use itertools::Itertools;
|
||||
use slog::debug;
|
||||
use state_processing::{
|
||||
per_block_processing::ParallelSignatureSets,
|
||||
signature_sets::{block_proposal_signature_set_from_parts, Error as SignatureSetError},
|
||||
};
|
||||
use std::borrow::Cow;
|
||||
use std::iter;
|
||||
use std::time::Duration;
|
||||
use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore};
|
||||
use types::{Hash256, SignedBeaconBlock, Slot};
|
||||
|
||||
/// Use a longer timeout on the pubkey cache.
|
||||
///
|
||||
/// It's ok if historical sync is stalled due to writes from forwards block processing.
|
||||
const PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(30);
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum HistoricalBlockError {
|
||||
/// Block is not available (only returned when fetching historic blocks).
|
||||
BlockOutOfRange { slot: Slot, oldest_block_slot: Slot },
|
||||
/// Block root mismatch, caller should retry with different blocks.
|
||||
MismatchedBlockRoot {
|
||||
block_root: Hash256,
|
||||
expected_block_root: Hash256,
|
||||
},
|
||||
/// Bad signature, caller should retry with different blocks.
|
||||
SignatureSet(SignatureSetError),
|
||||
/// Bad signature, caller should retry with different blocks.
|
||||
InvalidSignature,
|
||||
/// Transitory error, caller should retry with the same blocks.
|
||||
ValidatorPubkeyCacheTimeout,
|
||||
/// No historical sync needed.
|
||||
NoAnchorInfo,
|
||||
/// Logic error: should never occur.
|
||||
IndexOutOfBounds,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
/// Store a batch of historical blocks in the database.
|
||||
///
|
||||
/// The `blocks` should be given in slot-ascending order. One of the blocks should have a block
|
||||
/// root corresponding to the `oldest_block_parent` from the store's `AnchorInfo`.
|
||||
///
|
||||
/// The block roots and proposer signatures are verified. If any block doesn't match the parent
|
||||
/// root listed in its successor, then the whole batch will be discarded and
|
||||
/// `MismatchedBlockRoot` will be returned. If any proposer signature is invalid then
|
||||
/// `SignatureSetError` or `InvalidSignature` will be returned.
|
||||
///
|
||||
/// To align with sync we allow some excess blocks with slots greater than or equal to
|
||||
/// `oldest_block_slot` to be provided. They will be ignored without being checked.
|
||||
///
|
||||
/// This function should not be called concurrently with any other function that mutates
|
||||
/// the anchor info (including this function itself). If a concurrent mutation occurs that
|
||||
/// would violate consistency then an `AnchorInfoConcurrentMutation` error will be returned.
|
||||
///
|
||||
/// Return the number of blocks successfully imported.
|
||||
pub fn import_historical_block_batch(
|
||||
&self,
|
||||
blocks: &[SignedBeaconBlock<T::EthSpec>],
|
||||
) -> Result<usize, Error> {
|
||||
let anchor_info = self
|
||||
.store
|
||||
.get_anchor_info()
|
||||
.ok_or(HistoricalBlockError::NoAnchorInfo)?;
|
||||
|
||||
// Take all blocks with slots less than the oldest block slot.
|
||||
let num_relevant =
|
||||
blocks.partition_point(|block| block.slot() < anchor_info.oldest_block_slot);
|
||||
let blocks_to_import = &blocks
|
||||
.get(..num_relevant)
|
||||
.ok_or(HistoricalBlockError::IndexOutOfBounds)?;
|
||||
|
||||
if blocks_to_import.len() != blocks.len() {
|
||||
debug!(
|
||||
self.log,
|
||||
"Ignoring some historic blocks";
|
||||
"oldest_block_slot" => anchor_info.oldest_block_slot,
|
||||
"total_blocks" => blocks.len(),
|
||||
"ignored" => blocks.len().saturating_sub(blocks_to_import.len()),
|
||||
);
|
||||
}
|
||||
|
||||
if blocks_to_import.is_empty() {
|
||||
return Ok(0);
|
||||
}
|
||||
|
||||
let mut expected_block_root = anchor_info.oldest_block_parent;
|
||||
let mut prev_block_slot = anchor_info.oldest_block_slot;
|
||||
let mut chunk_writer =
|
||||
ChunkWriter::<BlockRoots, _, _>::new(&self.store.cold_db, prev_block_slot.as_usize())?;
|
||||
|
||||
let mut cold_batch = Vec::with_capacity(blocks.len());
|
||||
let mut hot_batch = Vec::with_capacity(blocks.len());
|
||||
|
||||
for block in blocks_to_import.iter().rev() {
|
||||
// Check chain integrity.
|
||||
let block_root = block.canonical_root();
|
||||
|
||||
if block_root != expected_block_root {
|
||||
return Err(HistoricalBlockError::MismatchedBlockRoot {
|
||||
block_root,
|
||||
expected_block_root,
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
// Store block in the hot database.
|
||||
hot_batch.push(self.store.block_as_kv_store_op(&block_root, block));
|
||||
|
||||
// Store block roots, including at all skip slots in the freezer DB.
|
||||
for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() {
|
||||
chunk_writer.set(slot, block_root, &mut cold_batch)?;
|
||||
}
|
||||
|
||||
prev_block_slot = block.slot();
|
||||
expected_block_root = block.message().parent_root();
|
||||
|
||||
// If we've reached genesis, add the genesis block root to the batch and set the
|
||||
// anchor slot to 0 to indicate completion.
|
||||
if expected_block_root == self.genesis_block_root {
|
||||
let genesis_slot = self.spec.genesis_slot;
|
||||
chunk_writer.set(
|
||||
genesis_slot.as_usize(),
|
||||
self.genesis_block_root,
|
||||
&mut cold_batch,
|
||||
)?;
|
||||
prev_block_slot = genesis_slot;
|
||||
expected_block_root = Hash256::zero();
|
||||
break;
|
||||
}
|
||||
}
|
||||
chunk_writer.write(&mut cold_batch)?;
|
||||
|
||||
// Verify signatures in one batch, holding the pubkey cache lock for the shortest duration
|
||||
// possible. For each block fetch the parent root from its successor. Slicing from index 1
|
||||
// is safe because we've already checked that `blocks_to_import` is non-empty.
|
||||
let sig_timer = metrics::start_timer(&metrics::BACKFILL_SIGNATURE_TOTAL_TIMES);
|
||||
let setup_timer = metrics::start_timer(&metrics::BACKFILL_SIGNATURE_SETUP_TIMES);
|
||||
let pubkey_cache = self
|
||||
.validator_pubkey_cache
|
||||
.try_read_for(PUBKEY_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(HistoricalBlockError::ValidatorPubkeyCacheTimeout)?;
|
||||
let block_roots = blocks_to_import
|
||||
.get(1..)
|
||||
.ok_or(HistoricalBlockError::IndexOutOfBounds)?
|
||||
.iter()
|
||||
.map(|block| block.parent_root())
|
||||
.chain(iter::once(anchor_info.oldest_block_parent));
|
||||
let signature_set = blocks_to_import
|
||||
.iter()
|
||||
.zip_eq(block_roots)
|
||||
.map(|(block, block_root)| {
|
||||
block_proposal_signature_set_from_parts(
|
||||
block,
|
||||
Some(block_root),
|
||||
block.message().proposer_index(),
|
||||
&self.spec.fork_at_epoch(block.message().epoch()),
|
||||
self.genesis_validators_root,
|
||||
|validator_index| pubkey_cache.get(validator_index).cloned().map(Cow::Owned),
|
||||
&self.spec,
|
||||
)
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(HistoricalBlockError::SignatureSet)
|
||||
.map(ParallelSignatureSets::from)?;
|
||||
drop(pubkey_cache);
|
||||
drop(setup_timer);
|
||||
|
||||
let verify_timer = metrics::start_timer(&metrics::BACKFILL_SIGNATURE_VERIFY_TIMES);
|
||||
if !signature_set.verify() {
|
||||
return Err(HistoricalBlockError::InvalidSignature.into());
|
||||
}
|
||||
drop(verify_timer);
|
||||
drop(sig_timer);
|
||||
|
||||
// Write the I/O batches to disk, writing the blocks themselves first, as it's better
|
||||
// for the hot DB to contain extra blocks than for the cold DB to point to blocks that
|
||||
// do not exist.
|
||||
self.store.hot_db.do_atomically(hot_batch)?;
|
||||
self.store.cold_db.do_atomically(cold_batch)?;
|
||||
|
||||
// Update the anchor.
|
||||
let new_anchor = AnchorInfo {
|
||||
oldest_block_slot: prev_block_slot,
|
||||
oldest_block_parent: expected_block_root,
|
||||
..anchor_info
|
||||
};
|
||||
let backfill_complete = new_anchor.block_backfill_complete();
|
||||
self.store
|
||||
.compare_and_set_anchor_info_with_write(Some(anchor_info), Some(new_anchor))?;
|
||||
|
||||
// If backfill has completed and the chain is configured to reconstruct historic states,
|
||||
// send a message to the background migrator instructing it to begin reconstruction.
|
||||
if backfill_complete && self.config.reconstruct_historic_states {
|
||||
self.store_migrator.process_reconstruction();
|
||||
}
|
||||
|
||||
Ok(blocks_to_import.len())
|
||||
}
|
||||
}
|
||||
@@ -1,43 +1,58 @@
|
||||
#![recursion_limit = "128"] // For lazy-static
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
pub mod attestation_verification;
|
||||
mod attester_cache;
|
||||
mod beacon_chain;
|
||||
mod beacon_fork_choice_store;
|
||||
pub mod beacon_proposer_cache;
|
||||
mod beacon_snapshot;
|
||||
pub mod block_reward;
|
||||
mod block_times_cache;
|
||||
mod block_verification;
|
||||
pub mod builder;
|
||||
pub mod chain_config;
|
||||
mod early_attester_cache;
|
||||
mod errors;
|
||||
pub mod eth1_chain;
|
||||
pub mod events;
|
||||
mod execution_payload;
|
||||
pub mod fork_revert;
|
||||
mod head_tracker;
|
||||
pub mod historical_blocks;
|
||||
mod metrics;
|
||||
pub mod migrate;
|
||||
mod naive_aggregation_pool;
|
||||
mod observed_attestations;
|
||||
mod observed_aggregates;
|
||||
mod observed_attesters;
|
||||
mod observed_block_producers;
|
||||
pub mod observed_operations;
|
||||
mod persisted_beacon_chain;
|
||||
mod persisted_fork_choice;
|
||||
mod pre_finalization_cache;
|
||||
pub mod proposer_prep_service;
|
||||
pub mod schema_change;
|
||||
mod shuffling_cache;
|
||||
mod snapshot_cache;
|
||||
pub mod state_advance_timer;
|
||||
pub mod sync_committee_verification;
|
||||
pub mod test_utils;
|
||||
mod timeout_rw_lock;
|
||||
pub mod validator_monitor;
|
||||
mod validator_pubkey_cache;
|
||||
|
||||
pub use self::beacon_chain::{
|
||||
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, ChainSegmentResult,
|
||||
ForkChoiceError, StateSkipConfig,
|
||||
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
|
||||
ForkChoiceError, HeadInfo, HeadSafetyStatus, StateSkipConfig, WhenSlotSkipped,
|
||||
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
|
||||
};
|
||||
pub use self::beacon_snapshot::BeaconSnapshot;
|
||||
pub use self::chain_config::ChainConfig;
|
||||
pub use self::errors::{BeaconChainError, BlockProductionError};
|
||||
pub use self::historical_blocks::HistoricalBlockError;
|
||||
pub use attestation_verification::Error as AttestationError;
|
||||
pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
|
||||
pub use block_verification::{BlockError, GossipVerifiedBlock};
|
||||
pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBlock};
|
||||
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
|
||||
pub use events::EventHandler;
|
||||
pub use events::ServerSentEventHandler;
|
||||
pub use metrics::scrape_for_metrics;
|
||||
pub use parking_lot;
|
||||
pub use slot_clock;
|
||||
@@ -46,4 +61,5 @@ pub use state_processing::per_block_processing::errors::{
|
||||
ExitValidationError, ProposerSlashingValidationError,
|
||||
};
|
||||
pub use store;
|
||||
pub use timeout_rw_lock::TimeoutRwLock;
|
||||
pub use types;
|
||||
|
||||
@@ -1,8 +1,15 @@
|
||||
use crate::{BeaconChain, BeaconChainTypes};
|
||||
use crate::observed_attesters::SlotSubcommitteeIndex;
|
||||
use crate::types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
|
||||
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use lazy_static::lazy_static;
|
||||
pub use lighthouse_metrics::*;
|
||||
use slot_clock::SlotClock;
|
||||
use std::time::Duration;
|
||||
use types::{BeaconState, Epoch, EthSpec, Hash256, Slot};
|
||||
|
||||
/// The maximum time to wait for the snapshot cache lock during a metrics scrape.
|
||||
const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100);
|
||||
|
||||
lazy_static! {
|
||||
/*
|
||||
* Block Processing
|
||||
@@ -15,6 +22,18 @@ lazy_static! {
|
||||
"beacon_block_processing_successes_total",
|
||||
"Count of blocks processed without error"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE: Result<IntGauge> = try_create_int_gauge(
|
||||
"beacon_block_processing_snapshot_cache_size",
|
||||
"Count snapshots in the snapshot cache"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_block_processing_snapshot_cache_misses",
|
||||
"Count of snapshot cache misses"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_CLONES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_block_processing_snapshot_cache_clones",
|
||||
"Count of snapshot cache clones"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_TIMES: Result<Histogram> =
|
||||
try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing");
|
||||
pub static ref BLOCK_PROCESSING_BLOCK_ROOT: Result<Histogram> = try_create_histogram(
|
||||
@@ -53,6 +72,10 @@ lazy_static! {
|
||||
"beacon_block_processing_attestation_observation_seconds",
|
||||
"Time spent hashing and remembering all the attestations in the block"
|
||||
);
|
||||
pub static ref BLOCK_SYNC_AGGREGATE_SET_BITS: Result<IntGauge> = try_create_int_gauge(
|
||||
"block_sync_aggregate_set_bits",
|
||||
"The number of true bits in the last sync aggregate in a block"
|
||||
);
|
||||
|
||||
/*
|
||||
* Block Production
|
||||
@@ -67,6 +90,30 @@ lazy_static! {
|
||||
);
|
||||
pub static ref BLOCK_PRODUCTION_TIMES: Result<Histogram> =
|
||||
try_create_histogram("beacon_block_production_seconds", "Full runtime of block production");
|
||||
pub static ref BLOCK_PRODUCTION_STATE_LOAD_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_production_state_load_seconds",
|
||||
"Time taken to load the base state for block production"
|
||||
);
|
||||
pub static ref BLOCK_PRODUCTION_SLOT_PROCESS_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_production_slot_process_seconds",
|
||||
"Time taken to advance the state to the block production slot"
|
||||
);
|
||||
pub static ref BLOCK_PRODUCTION_UNAGGREGATED_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_production_unaggregated_seconds",
|
||||
"Time taken to import the naive aggregation pool for block production"
|
||||
);
|
||||
pub static ref BLOCK_PRODUCTION_ATTESTATION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_production_attestation_seconds",
|
||||
"Time taken to pack attestations into a block"
|
||||
);
|
||||
pub static ref BLOCK_PRODUCTION_PROCESS_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_production_process_seconds",
|
||||
"Time taken to process the block produced"
|
||||
);
|
||||
pub static ref BLOCK_PRODUCTION_STATE_ROOT_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_production_state_root_seconds",
|
||||
"Time taken to calculate the block's state root"
|
||||
);
|
||||
|
||||
/*
|
||||
* Block Statistics
|
||||
@@ -76,6 +123,11 @@ lazy_static! {
|
||||
"Number of attestations in a block"
|
||||
);
|
||||
|
||||
pub static ref BLOCK_SIZE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_total_size",
|
||||
"Size of a signed beacon block"
|
||||
);
|
||||
|
||||
/*
|
||||
* Unaggregated Attestation Verification
|
||||
*/
|
||||
@@ -115,10 +167,6 @@ lazy_static! {
|
||||
"beacon_attestation_processing_apply_to_agg_pool",
|
||||
"Time spent applying an attestation to the naive aggregation pool"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_AGG_POOL_MAPS_WRITE_LOCK: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_agg_pool_maps_write_lock",
|
||||
"Time spent waiting for the maps write lock when adding to the agg poll"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_AGG_POOL_PRUNE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_agg_pool_prune",
|
||||
"Time spent for the agg pool to prune"
|
||||
@@ -172,6 +220,26 @@ lazy_static! {
|
||||
"Time spent on the signature verification of attestation processing"
|
||||
);
|
||||
|
||||
/*
|
||||
* Batch Attestation Processing
|
||||
*/
|
||||
pub static ref ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_SETUP_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_batch_agg_signature_setup_times",
|
||||
"Time spent on setting up for the signature verification of batch aggregate processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_BATCH_AGG_SIGNATURE_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_batch_agg_signature_times",
|
||||
"Time spent on the signature verification of batch aggregate attestation processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_batch_unagg_signature_setup_times",
|
||||
"Time spent on setting up for the signature verification of batch unaggregate processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_batch_unagg_signature_times",
|
||||
"Time spent on the signature verification of batch unaggregate attestation processing"
|
||||
);
|
||||
|
||||
/*
|
||||
* Shuffling cache
|
||||
*/
|
||||
@@ -180,21 +248,33 @@ lazy_static! {
|
||||
pub static ref SHUFFLING_CACHE_MISSES: Result<IntCounter> =
|
||||
try_create_int_counter("beacon_shuffling_cache_misses_total", "Count of times shuffling cache fulfils request");
|
||||
|
||||
/*
|
||||
* Early attester cache
|
||||
*/
|
||||
pub static ref BEACON_EARLY_ATTESTER_CACHE_HITS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_early_attester_cache_hits",
|
||||
"Count of times the early attester cache returns an attestation"
|
||||
);
|
||||
|
||||
/*
|
||||
* Attestation Production
|
||||
*/
|
||||
pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_attestation_production_requests_total",
|
||||
"Count of all attestation production requests"
|
||||
);
|
||||
pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_attestation_production_successes_total",
|
||||
"Count of attestations processed without error"
|
||||
);
|
||||
pub static ref ATTESTATION_PRODUCTION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
pub static ref ATTESTATION_PRODUCTION_SECONDS: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_production_seconds",
|
||||
"Full runtime of attestation production"
|
||||
);
|
||||
pub static ref ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS: Result<Histogram> = try_create_histogram(
|
||||
"attestation_production_head_scrape_seconds",
|
||||
"Time taken to read the head state"
|
||||
);
|
||||
pub static ref ATTESTATION_PRODUCTION_CACHE_INTERACTION_SECONDS: Result<Histogram> = try_create_histogram(
|
||||
"attestation_production_cache_interaction_seconds",
|
||||
"Time spent interacting with the attester cache"
|
||||
);
|
||||
pub static ref ATTESTATION_PRODUCTION_CACHE_PRIME_SECONDS: Result<Histogram> = try_create_histogram(
|
||||
"attestation_production_cache_prime_seconds",
|
||||
"Time spent loading a new state from the disk due to a cache miss"
|
||||
);
|
||||
}
|
||||
|
||||
// Second lazy-static block is used to account for macro recursion limit.
|
||||
@@ -218,6 +298,10 @@ lazy_static! {
|
||||
"beacon_fork_choice_reorg_total",
|
||||
"Count of occasions fork choice has switched to a different chain"
|
||||
);
|
||||
pub static ref FORK_CHOICE_REORG_COUNT_INTEROP: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_reorgs_total",
|
||||
"Count of occasions fork choice has switched to a different chain"
|
||||
);
|
||||
pub static ref FORK_CHOICE_TIMES: Result<Histogram> =
|
||||
try_create_histogram("beacon_fork_choice_seconds", "Full runtime of fork choice");
|
||||
pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result<Histogram> =
|
||||
@@ -230,6 +314,10 @@ lazy_static! {
|
||||
"beacon_fork_choice_process_attestation_seconds",
|
||||
"Time taken to add an attestation to fork choice"
|
||||
);
|
||||
pub static ref FORK_CHOICE_SET_HEAD_LAG_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_fork_choice_set_head_lag_times",
|
||||
"Time taken between finding the head and setting the canonical head value"
|
||||
);
|
||||
pub static ref BALANCES_CACHE_HITS: Result<IntCounter> =
|
||||
try_create_int_counter("beacon_balances_cache_hits_total", "Count of times balances cache fulfils request");
|
||||
pub static ref BALANCES_CACHE_MISSES: Result<IntCounter> =
|
||||
@@ -260,6 +348,8 @@ lazy_static! {
|
||||
try_create_histogram("beacon_update_head_seconds", "Time taken to update the canonical head");
|
||||
pub static ref HEAD_STATE_SLOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_slot", "Slot of the block at the head of the chain");
|
||||
pub static ref HEAD_STATE_SLOT_INTEROP: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_slot", "Slot of the block at the head of the chain");
|
||||
pub static ref HEAD_STATE_ROOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_root", "Root of the block at the head of the chain");
|
||||
pub static ref HEAD_STATE_LATEST_BLOCK_SLOT: Result<IntGauge> =
|
||||
@@ -268,18 +358,26 @@ lazy_static! {
|
||||
try_create_int_gauge("beacon_head_state_current_justified_root", "Current justified root at the head of the chain");
|
||||
pub static ref HEAD_STATE_CURRENT_JUSTIFIED_EPOCH: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_current_justified_epoch", "Current justified epoch at the head of the chain");
|
||||
pub static ref HEAD_STATE_CURRENT_JUSTIFIED_EPOCH_INTEROP: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_current_justified_epoch", "Current justified epoch at the head of the chain");
|
||||
pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_previous_justified_root", "Previous justified root at the head of the chain");
|
||||
pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_previous_justified_epoch", "Previous justified epoch at the head of the chain");
|
||||
pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH_INTEROP: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_previous_justified_epoch", "Previous justified epoch at the head of the chain");
|
||||
pub static ref HEAD_STATE_FINALIZED_ROOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_finalized_root", "Finalized root at the head of the chain");
|
||||
pub static ref HEAD_STATE_FINALIZED_EPOCH: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_finalized_epoch", "Finalized epoch at the head of the chain");
|
||||
pub static ref HEAD_STATE_FINALIZED_EPOCH_INTEROP: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_finalized_epoch", "Finalized epoch at the head of the chain");
|
||||
pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_total_validators_total", "Count of validators at the head of the chain");
|
||||
pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_active_validators_total", "Count of active validators at the head of the chain");
|
||||
pub static ref HEAD_STATE_ACTIVE_VALIDATORS_INTEROP: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_current_active_validators", "Count of active validators at the head of the chain");
|
||||
pub static ref HEAD_STATE_VALIDATOR_BALANCES: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_validator_balances_total", "Sum of all validator balances at the head of the chain");
|
||||
pub static ref HEAD_STATE_SLASHED_VALIDATORS: Result<IntGauge> =
|
||||
@@ -288,34 +386,27 @@ lazy_static! {
|
||||
try_create_int_gauge("beacon_head_state_withdrawn_validators_total", "Sum of all validator balances at the head of the chain");
|
||||
pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain");
|
||||
pub static ref HEAD_STATE_ETH1_DEPOSITS_INTEROP: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_processed_deposits_total", "Total Eth1 deposits at the head of the chain");
|
||||
|
||||
/*
|
||||
* Operation Pool
|
||||
*/
|
||||
pub static ref OP_POOL_NUM_ATTESTATIONS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_attestations_total", "Count of attestations in the op pool");
|
||||
pub static ref OP_POOL_NUM_ATTESTATION_DATA: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_attestation_data_total", "Count of attestation data in the op pool");
|
||||
pub static ref OP_POOL_MAX_AGGREGATES_PER_DATA: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_max_aggregates_per_data", "Max aggregates per AttestationData");
|
||||
pub static ref OP_POOL_NUM_ATTESTER_SLASHINGS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_attester_slashings_total", "Count of attester slashings in the op pool");
|
||||
pub static ref OP_POOL_NUM_PROPOSER_SLASHINGS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_proposer_slashings_total", "Count of proposer slashings in the op pool");
|
||||
pub static ref OP_POOL_NUM_VOLUNTARY_EXITS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_voluntary_exits_total", "Count of voluntary exits in the op pool");
|
||||
pub static ref OP_POOL_NUM_SYNC_CONTRIBUTIONS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_op_pool_sync_contributions_total", "Count of sync contributions in the op pool");
|
||||
|
||||
/*
|
||||
* Participation Metrics
|
||||
*/
|
||||
pub static ref PARTICIPATION_PREV_EPOCH_ATTESTER: Result<Gauge> = try_create_float_gauge(
|
||||
"beacon_participation_prev_epoch_attester",
|
||||
"Ratio of attesting balances to total balances"
|
||||
);
|
||||
pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTER: Result<Gauge> = try_create_float_gauge(
|
||||
"beacon_participation_prev_epoch_target_attester",
|
||||
"Ratio of target-attesting balances to total balances"
|
||||
);
|
||||
pub static ref PARTICIPATION_PREV_EPOCH_HEAD_ATTESTER: Result<Gauge> = try_create_float_gauge(
|
||||
"beacon_participation_prev_epoch_head_attester",
|
||||
"Ratio of head-attesting balances to total balances"
|
||||
);
|
||||
|
||||
/*
|
||||
* Attestation Observation Metrics
|
||||
@@ -328,22 +419,548 @@ lazy_static! {
|
||||
"beacon_attn_observation_epoch_aggregators",
|
||||
"Count of aggregators that have been seen by the beacon chain in the previous epoch"
|
||||
);
|
||||
|
||||
/*
|
||||
* Sync Committee Observation Metrics
|
||||
*/
|
||||
pub static ref SYNC_COMM_OBSERVATION_PREV_SLOT_SIGNERS: Result<IntGauge> = try_create_int_gauge(
|
||||
"beacon_sync_comm_observation_slot_signers",
|
||||
"Count of sync committee contributors that have been seen by the beacon chain in the previous slot"
|
||||
);
|
||||
pub static ref SYNC_COMM_OBSERVATION_PREV_SLOT_AGGREGATORS: Result<IntGauge> = try_create_int_gauge(
|
||||
"beacon_sync_comm_observation_slot_aggregators",
|
||||
"Count of sync committee aggregators that have been seen by the beacon chain in the previous slot"
|
||||
);
|
||||
}
|
||||
|
||||
// Third lazy-static block is used to account for macro recursion limit.
|
||||
lazy_static! {
|
||||
/*
|
||||
* Validator Monitor Metrics (balances, etc)
|
||||
*/
|
||||
pub static ref VALIDATOR_MONITOR_BALANCE_GWEI: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_balance_gwei",
|
||||
"The validator's balance in gwei.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_EFFECTIVE_BALANCE_GWEI: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_effective_balance_gwei",
|
||||
"The validator's effective balance in gwei.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_SLASHED: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_slashed",
|
||||
"Set to 1 if the validator is slashed.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_ACTIVE: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_active",
|
||||
"Set to 1 if the validator is active.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_EXITED: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_exited",
|
||||
"Set to 1 if the validator is exited.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_WITHDRAWABLE: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_withdrawable",
|
||||
"Set to 1 if the validator is withdrawable.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_activation_eligibility_epoch",
|
||||
"Set to the epoch where the validator will be eligible for activation.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_ACTIVATION_EPOCH: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_activation_epoch",
|
||||
"Set to the epoch where the validator will activate.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_EXIT_EPOCH: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_exit_epoch",
|
||||
"Set to the epoch where the validator will exit.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_WITHDRAWABLE_EPOCH: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_withdrawable_epoch",
|
||||
"Set to the epoch where the validator will be withdrawable.",
|
||||
&["validator"]
|
||||
);
|
||||
|
||||
/*
|
||||
* Validator Monitor Metrics (per-epoch summaries)
|
||||
*/
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT: Result<IntCounterVec> =
|
||||
try_create_int_counter_vec(
|
||||
"validator_monitor_prev_epoch_on_chain_attester_hit",
|
||||
"Incremented if the validator is flagged as a previous epoch attester \
|
||||
during per epoch processing",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS: Result<IntCounterVec> =
|
||||
try_create_int_counter_vec(
|
||||
"validator_monitor_prev_epoch_on_chain_attester_miss",
|
||||
"Incremented if the validator is not flagged as a previous epoch attester \
|
||||
during per epoch processing",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT: Result<IntCounterVec> =
|
||||
try_create_int_counter_vec(
|
||||
"validator_monitor_prev_epoch_on_chain_head_attester_hit",
|
||||
"Incremented if the validator is flagged as a previous epoch head attester \
|
||||
during per epoch processing",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS: Result<IntCounterVec> =
|
||||
try_create_int_counter_vec(
|
||||
"validator_monitor_prev_epoch_on_chain_head_attester_miss",
|
||||
"Incremented if the validator is not flagged as a previous epoch head attester \
|
||||
during per epoch processing",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT: Result<IntCounterVec> =
|
||||
try_create_int_counter_vec(
|
||||
"validator_monitor_prev_epoch_on_chain_target_attester_hit",
|
||||
"Incremented if the validator is flagged as a previous epoch target attester \
|
||||
during per epoch processing",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS: Result<IntCounterVec> =
|
||||
try_create_int_counter_vec(
|
||||
"validator_monitor_prev_epoch_on_chain_target_attester_miss",
|
||||
"Incremented if the validator is not flagged as a previous epoch target attester \
|
||||
during per epoch processing",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_INCLUSION_DISTANCE: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_on_chain_inclusion_distance",
|
||||
"The attestation inclusion distance calculated during per epoch processing",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_TOTAL: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_attestations_total",
|
||||
"The number of unagg. attestations seen in the previous epoch.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_MIN_DELAY_SECONDS: Result<HistogramVec> =
|
||||
try_create_histogram_vec(
|
||||
"validator_monitor_prev_epoch_attestations_min_delay_seconds",
|
||||
"The min delay between when the validator should send the attestation and when it was received.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_AGGREGATE_INCLUSIONS: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_attestation_aggregate_inclusions",
|
||||
"The count of times an attestation was seen inside an aggregate.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_INCLUSIONS: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_attestation_block_inclusions",
|
||||
"The count of times an attestation was seen inside a block.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_MIN_INCLUSION_DISTANCE: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_attestation_block_min_inclusion_distance",
|
||||
"The minimum inclusion distance observed for the inclusion of an attestation in a block.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_TOTAL: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_beacon_blocks_total",
|
||||
"The number of beacon_blocks seen in the previous epoch.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_MIN_DELAY_SECONDS: Result<HistogramVec> =
|
||||
try_create_histogram_vec(
|
||||
"validator_monitor_prev_epoch_beacon_blocks_min_delay_seconds",
|
||||
"The min delay between when the validator should send the block and when it was received.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_TOTAL: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_aggregates_total",
|
||||
"The number of aggregates seen in the previous epoch.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_MIN_DELAY_SECONDS: Result<HistogramVec> =
|
||||
try_create_histogram_vec(
|
||||
"validator_monitor_prev_epoch_aggregates_min_delay_seconds",
|
||||
"The min delay between when the validator should send the aggregate and when it was received.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_EXITS_TOTAL: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_exits_total",
|
||||
"The number of exits seen in the previous epoch.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_PROPOSER_SLASHINGS_TOTAL: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_proposer_slashings_total",
|
||||
"The number of proposer slashings seen in the previous epoch.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_ATTESTER_SLASHINGS_TOTAL: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_attester_slashings_total",
|
||||
"The number of attester slashings seen in the previous epoch.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_TOTAL: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_sync_committee_messages_total",
|
||||
"The number of sync committee messages seen in the previous epoch.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_MIN_DELAY_SECONDS: Result<HistogramVec> =
|
||||
try_create_histogram_vec(
|
||||
"validator_monitor_prev_epoch_sync_committee_messages_min_delay_seconds",
|
||||
"The min delay between when the validator should send the sync committee message and when it was received.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_INCLUSIONS: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_sync_contribution_inclusions",
|
||||
"The count of times a sync signature was seen inside a sync contribution.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_SIGNATURE_BLOCK_INCLUSIONS: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_sync_signature_block_inclusions",
|
||||
"The count of times a sync signature was seen inside a block.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTIONS_TOTAL: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_prev_epoch_sync_contributions_total",
|
||||
"The number of sync contributions seen in the previous epoch.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_MIN_DELAY_SECONDS: Result<HistogramVec> =
|
||||
try_create_histogram_vec(
|
||||
"validator_monitor_prev_epoch_sync_contribution_min_delay_seconds",
|
||||
"The min delay between when the validator should send the sync contribution and when it was received.",
|
||||
&["validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"validator_monitor_validator_in_current_sync_committee",
|
||||
"Is the validator in the current sync committee (1 for true and 0 for false)",
|
||||
&["validator"]
|
||||
);
|
||||
|
||||
/*
|
||||
* Validator Monitor Metrics (real-time)
|
||||
*/
|
||||
pub static ref VALIDATOR_MONITOR_VALIDATORS_TOTAL: Result<IntGauge> = try_create_int_gauge(
|
||||
"validator_monitor_validators_total",
|
||||
"Count of validators that are specifically monitored by this beacon node"
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_unaggregated_attestation_total",
|
||||
"Number of unaggregated attestations seen",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS: Result<HistogramVec> = try_create_histogram_vec(
|
||||
"validator_monitor_unaggregated_attestation_delay_seconds",
|
||||
"The delay between when the validator should send the attestation and when it was received.",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_sync_committee_messages_total",
|
||||
"Number of sync committee messages seen",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_DELAY_SECONDS: Result<HistogramVec> = try_create_histogram_vec(
|
||||
"validator_monitor_sync_committee_messages_delay_seconds",
|
||||
"The delay between when the validator should send the sync committee message and when it was received.",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_sync_contributions_total",
|
||||
"Number of sync contributions seen",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_DELAY_SECONDS: Result<HistogramVec> = try_create_histogram_vec(
|
||||
"validator_monitor_sync_contributions_delay_seconds",
|
||||
"The delay between when the aggregator should send the sync contribution and when it was received.",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_aggregated_attestation_total",
|
||||
"Number of aggregated attestations seen",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_DELAY_SECONDS: Result<HistogramVec> = try_create_histogram_vec(
|
||||
"validator_monitor_aggregated_attestation_delay_seconds",
|
||||
"The delay between then the validator should send the aggregate and when it was received.",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_attestation_in_aggregate_total",
|
||||
"Number of times an attestation has been seen in an aggregate",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_CONTRIBUTION_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_sync_committee_message_in_contribution_total",
|
||||
"Number of times a sync committee message has been seen in a sync contribution",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_DELAY_SECONDS: Result<HistogramVec> = try_create_histogram_vec(
|
||||
"validator_monitor_attestation_in_aggregate_delay_seconds",
|
||||
"The delay between when the validator should send the aggregate and when it was received.",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_attestation_in_block_total",
|
||||
"Number of times an attestation has been seen in a block",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_BLOCK_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_sync_committee_message_in_block_total",
|
||||
"Number of times a validator's sync committee message has been seen in a sync aggregate",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_DELAY_SLOTS: Result<IntGaugeVec> = try_create_int_gauge_vec(
|
||||
"validator_monitor_attestation_in_block_delay_slots",
|
||||
"The excess slots (beyond the minimum delay) between the attestation slot and the block slot.",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_beacon_block_total",
|
||||
"Number of beacon blocks seen",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_BEACON_BLOCK_DELAY_SECONDS: Result<HistogramVec> = try_create_histogram_vec(
|
||||
"validator_monitor_beacon_block_delay_seconds",
|
||||
"The delay between when the validator should send the block and when it was received.",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_EXIT_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_exit_total",
|
||||
"Number of beacon exits seen",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_PROPOSER_SLASHING_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_proposer_slashing_total",
|
||||
"Number of proposer slashings seen",
|
||||
&["src", "validator"]
|
||||
);
|
||||
pub static ref VALIDATOR_MONITOR_ATTESTER_SLASHING_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||
"validator_monitor_attester_slashing_total",
|
||||
"Number of attester slashings seen",
|
||||
&["src", "validator"]
|
||||
);
|
||||
|
||||
/*
|
||||
* Block Delay Metrics
|
||||
*/
|
||||
pub static ref BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_observed_slot_start_delay_time",
|
||||
"Duration between the start of the block's slot and the time the block was observed.",
|
||||
);
|
||||
pub static ref BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_imported_observed_delay_time",
|
||||
"Duration between the time the block was observed and the time when it was imported.",
|
||||
);
|
||||
pub static ref BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_head_imported_delay_time",
|
||||
"Duration between the time the block was imported and the time when it was set as head.",
|
||||
);
|
||||
pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_head_slot_start_delay_time",
|
||||
"Duration between the start of the block's slot and the time when it was set as head.",
|
||||
);
|
||||
pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_block_head_slot_start_delay_exceeded_total",
|
||||
"Triggered when the duration between the start of the block's slot and the current time \
|
||||
will result in failed attestations.",
|
||||
);
|
||||
|
||||
/*
|
||||
* General block metrics
|
||||
*/
|
||||
pub static ref GOSSIP_BEACON_BLOCK_SKIPPED_SLOTS: Result<IntGauge> =
|
||||
try_create_int_gauge(
|
||||
"gossip_beacon_block_skipped_slots",
|
||||
"For each gossip blocks, the number of skip slots between it and its parent"
|
||||
);
|
||||
}
|
||||
|
||||
// Fourth lazy-static block is used to account for macro recursion limit.
|
||||
lazy_static! {
|
||||
/*
|
||||
* Sync Committee Message Verification
|
||||
*/
|
||||
pub static ref SYNC_MESSAGE_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_sync_committee_message_processing_requests_total",
|
||||
"Count of all sync messages submitted for processing"
|
||||
);
|
||||
pub static ref SYNC_MESSAGE_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_sync_committee_message_processing_successes_total",
|
||||
"Number of sync messages verified for gossip"
|
||||
);
|
||||
pub static ref SYNC_MESSAGE_GOSSIP_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_committee_message_gossip_verification_seconds",
|
||||
"Full runtime of sync contribution gossip verification"
|
||||
);
|
||||
|
||||
/*
|
||||
* Sync Committee Contribution Verification
|
||||
*/
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_sync_contribution_processing_requests_total",
|
||||
"Count of all sync contributions submitted for processing"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_sync_contribution_processing_successes_total",
|
||||
"Number of sync contributions verified for gossip"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_GOSSIP_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_gossip_verification_seconds",
|
||||
"Full runtime of sync contribution gossip verification"
|
||||
);
|
||||
|
||||
/*
|
||||
* General Sync Committee Contribution Processing
|
||||
*/
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_AGG_POOL: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_processing_apply_to_agg_pool",
|
||||
"Time spent applying a sync contribution to the naive aggregation pool"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_PRUNE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_processing_agg_pool_prune",
|
||||
"Time spent for the agg pool to prune"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_INSERT: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_processing_agg_pool_insert",
|
||||
"Time spent for the outer pool.insert() function of agg pool"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CORE_INSERT: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_processing_agg_pool_core_insert",
|
||||
"Time spent for the core map.insert() function of agg pool"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_AGGREGATION: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_processing_agg_pool_aggregation",
|
||||
"Time spent doing signature aggregation when adding to the agg poll"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CREATE_MAP: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_processing_agg_pool_create_map",
|
||||
"Time spent for creating a map for a new slot"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_OP_POOL: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_processing_apply_to_op_pool",
|
||||
"Time spent applying a sync contribution to the block inclusion pool"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_SETUP_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_processing_signature_setup_seconds",
|
||||
"Time spent on setting up for the signature verification of sync contribution processing"
|
||||
);
|
||||
pub static ref SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_contribution_processing_signature_seconds",
|
||||
"Time spent on the signature verification of sync contribution processing"
|
||||
);
|
||||
|
||||
/*
|
||||
* General Sync Committee Contribution Processing
|
||||
*/
|
||||
pub static ref SYNC_MESSAGE_PROCESSING_SIGNATURE_SETUP_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_committee_message_processing_signature_setup_seconds",
|
||||
"Time spent on setting up for the signature verification of sync message processing"
|
||||
);
|
||||
pub static ref SYNC_MESSAGE_PROCESSING_SIGNATURE_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_sync_committee_message_processing_signature_seconds",
|
||||
"Time spent on the signature verification of sync message processing"
|
||||
);
|
||||
|
||||
/*
|
||||
* Checkpoint sync & backfill
|
||||
*/
|
||||
pub static ref BACKFILL_SIGNATURE_SETUP_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_backfill_signature_setup_seconds",
|
||||
"Time spent constructing the signature set during backfill sync"
|
||||
);
|
||||
pub static ref BACKFILL_SIGNATURE_VERIFY_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_backfill_signature_verify_seconds",
|
||||
"Time spent verifying the signature set during backfill sync"
|
||||
);
|
||||
pub static ref BACKFILL_SIGNATURE_TOTAL_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_backfill_signature_total_seconds",
|
||||
"Time spent verifying the signature set during backfill sync, including setup"
|
||||
);
|
||||
|
||||
/*
|
||||
* Pre-finalization block cache.
|
||||
*/
|
||||
pub static ref PRE_FINALIZATION_BLOCK_CACHE_SIZE: Result<IntGauge> =
|
||||
try_create_int_gauge(
|
||||
"beacon_pre_finalization_block_cache_size",
|
||||
"Number of pre-finalization block roots cached for quick rejection"
|
||||
);
|
||||
pub static ref PRE_FINALIZATION_BLOCK_LOOKUP_COUNT: Result<IntGauge> =
|
||||
try_create_int_gauge(
|
||||
"beacon_pre_finalization_block_lookup_count",
|
||||
"Number of block roots subject to single block lookups"
|
||||
);
|
||||
}
|
||||
|
||||
/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
|
||||
/// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`.
|
||||
pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
|
||||
if let Ok(head) = beacon_chain.head() {
|
||||
scrape_head_state::<T>(&head.beacon_state, head.beacon_state_root)
|
||||
}
|
||||
let _ = beacon_chain.with_head(|head| {
|
||||
scrape_head_state(&head.beacon_state, head.beacon_state_root());
|
||||
Ok::<_, BeaconChainError>(())
|
||||
});
|
||||
|
||||
if let Some(slot) = beacon_chain.slot_clock.now() {
|
||||
scrape_attestation_observation(slot, beacon_chain);
|
||||
scrape_sync_committee_observation(slot, beacon_chain);
|
||||
}
|
||||
|
||||
let attestation_stats = beacon_chain.op_pool.attestation_stats();
|
||||
|
||||
if let Some(snapshot_cache) = beacon_chain
|
||||
.snapshot_cache
|
||||
.try_write_for(SNAPSHOT_CACHE_TIMEOUT)
|
||||
{
|
||||
set_gauge(
|
||||
&BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE,
|
||||
snapshot_cache.len() as i64,
|
||||
)
|
||||
}
|
||||
|
||||
if let Some((size, num_lookups)) = beacon_chain.pre_finalization_block_cache.metrics() {
|
||||
set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_CACHE_SIZE, size);
|
||||
set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_LOOKUP_COUNT, num_lookups);
|
||||
}
|
||||
|
||||
set_gauge_by_usize(
|
||||
&OP_POOL_NUM_ATTESTATIONS,
|
||||
beacon_chain.op_pool.num_attestations(),
|
||||
attestation_stats.num_attestations,
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&OP_POOL_NUM_ATTESTATION_DATA,
|
||||
attestation_stats.num_attestation_data,
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&OP_POOL_MAX_AGGREGATES_PER_DATA,
|
||||
attestation_stats.max_aggregates_per_data,
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&OP_POOL_NUM_ATTESTER_SLASHINGS,
|
||||
@@ -357,67 +974,108 @@ pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
|
||||
&OP_POOL_NUM_VOLUNTARY_EXITS,
|
||||
beacon_chain.op_pool.num_voluntary_exits(),
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&OP_POOL_NUM_SYNC_CONTRIBUTIONS,
|
||||
beacon_chain.op_pool.num_sync_contributions(),
|
||||
);
|
||||
|
||||
beacon_chain
|
||||
.validator_monitor
|
||||
.read()
|
||||
.scrape_metrics(&beacon_chain.slot_clock, &beacon_chain.spec);
|
||||
}
|
||||
|
||||
/// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`.
|
||||
fn scrape_head_state<T: BeaconChainTypes>(state: &BeaconState<T::EthSpec>, state_root: Hash256) {
|
||||
set_gauge_by_slot(&HEAD_STATE_SLOT, state.slot);
|
||||
fn scrape_head_state<T: EthSpec>(state: &BeaconState<T>, state_root: Hash256) {
|
||||
set_gauge_by_slot(&HEAD_STATE_SLOT, state.slot());
|
||||
set_gauge_by_slot(&HEAD_STATE_SLOT_INTEROP, state.slot());
|
||||
set_gauge_by_hash(&HEAD_STATE_ROOT, state_root);
|
||||
set_gauge_by_slot(
|
||||
&HEAD_STATE_LATEST_BLOCK_SLOT,
|
||||
state.latest_block_header.slot,
|
||||
state.latest_block_header().slot,
|
||||
);
|
||||
set_gauge_by_hash(
|
||||
&HEAD_STATE_CURRENT_JUSTIFIED_ROOT,
|
||||
state.current_justified_checkpoint.root,
|
||||
state.current_justified_checkpoint().root,
|
||||
);
|
||||
set_gauge_by_epoch(
|
||||
&HEAD_STATE_CURRENT_JUSTIFIED_EPOCH,
|
||||
state.current_justified_checkpoint.epoch,
|
||||
state.current_justified_checkpoint().epoch,
|
||||
);
|
||||
set_gauge_by_epoch(
|
||||
&HEAD_STATE_CURRENT_JUSTIFIED_EPOCH_INTEROP,
|
||||
state.current_justified_checkpoint().epoch,
|
||||
);
|
||||
set_gauge_by_hash(
|
||||
&HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT,
|
||||
state.previous_justified_checkpoint.root,
|
||||
state.previous_justified_checkpoint().root,
|
||||
);
|
||||
set_gauge_by_epoch(
|
||||
&HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH,
|
||||
state.previous_justified_checkpoint.epoch,
|
||||
state.previous_justified_checkpoint().epoch,
|
||||
);
|
||||
set_gauge_by_epoch(
|
||||
&HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH_INTEROP,
|
||||
state.previous_justified_checkpoint().epoch,
|
||||
);
|
||||
set_gauge_by_hash(
|
||||
&HEAD_STATE_FINALIZED_ROOT,
|
||||
state.finalized_checkpoint().root,
|
||||
);
|
||||
set_gauge_by_hash(&HEAD_STATE_FINALIZED_ROOT, state.finalized_checkpoint.root);
|
||||
set_gauge_by_epoch(
|
||||
&HEAD_STATE_FINALIZED_EPOCH,
|
||||
state.finalized_checkpoint.epoch,
|
||||
state.finalized_checkpoint().epoch,
|
||||
);
|
||||
set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators.len());
|
||||
set_gauge_by_u64(&HEAD_STATE_VALIDATOR_BALANCES, state.balances.iter().sum());
|
||||
set_gauge_by_usize(
|
||||
&HEAD_STATE_ACTIVE_VALIDATORS,
|
||||
state
|
||||
.validators
|
||||
.iter()
|
||||
.filter(|v| v.is_active_at(state.current_epoch()))
|
||||
.count(),
|
||||
set_gauge_by_epoch(
|
||||
&HEAD_STATE_FINALIZED_EPOCH_INTEROP,
|
||||
state.finalized_checkpoint().epoch,
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&HEAD_STATE_SLASHED_VALIDATORS,
|
||||
state.validators.iter().filter(|v| v.slashed).count(),
|
||||
set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators().len());
|
||||
set_gauge_by_u64(
|
||||
&HEAD_STATE_VALIDATOR_BALANCES,
|
||||
state.balances().iter().sum(),
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&HEAD_STATE_WITHDRAWN_VALIDATORS,
|
||||
state
|
||||
.validators
|
||||
.iter()
|
||||
.filter(|v| v.is_withdrawable_at(state.current_epoch()))
|
||||
.count(),
|
||||
set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index());
|
||||
set_gauge_by_u64(
|
||||
&HEAD_STATE_ETH1_DEPOSITS_INTEROP,
|
||||
state.eth1_data().deposit_count,
|
||||
);
|
||||
set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index);
|
||||
set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators().len());
|
||||
set_gauge_by_u64(
|
||||
&HEAD_STATE_VALIDATOR_BALANCES,
|
||||
state.balances().iter().sum(),
|
||||
);
|
||||
|
||||
let mut num_active: usize = 0;
|
||||
let mut num_slashed: usize = 0;
|
||||
let mut num_withdrawn: usize = 0;
|
||||
|
||||
for v in state.validators() {
|
||||
if v.is_active_at(state.current_epoch()) {
|
||||
num_active += 1;
|
||||
}
|
||||
|
||||
if v.slashed {
|
||||
num_slashed += 1;
|
||||
}
|
||||
|
||||
if v.is_withdrawable_at(state.current_epoch()) {
|
||||
num_withdrawn += 1;
|
||||
}
|
||||
}
|
||||
|
||||
set_gauge_by_usize(&HEAD_STATE_ACTIVE_VALIDATORS, num_active);
|
||||
set_gauge_by_usize(&HEAD_STATE_ACTIVE_VALIDATORS_INTEROP, num_active);
|
||||
set_gauge_by_usize(&HEAD_STATE_SLASHED_VALIDATORS, num_slashed);
|
||||
set_gauge_by_usize(&HEAD_STATE_WITHDRAWN_VALIDATORS, num_withdrawn);
|
||||
}
|
||||
|
||||
fn scrape_attestation_observation<T: BeaconChainTypes>(slot_now: Slot, chain: &BeaconChain<T>) {
|
||||
let prev_epoch = slot_now.epoch(T::EthSpec::slots_per_epoch()) - 1;
|
||||
|
||||
if let Some(count) = chain
|
||||
.observed_attesters
|
||||
.observed_gossip_attesters
|
||||
.read()
|
||||
.observed_validator_count(prev_epoch)
|
||||
{
|
||||
set_gauge_by_usize(&ATTN_OBSERVATION_PREV_EPOCH_ATTESTERS, count);
|
||||
@@ -425,12 +1083,41 @@ fn scrape_attestation_observation<T: BeaconChainTypes>(slot_now: Slot, chain: &B
|
||||
|
||||
if let Some(count) = chain
|
||||
.observed_aggregators
|
||||
.read()
|
||||
.observed_validator_count(prev_epoch)
|
||||
{
|
||||
set_gauge_by_usize(&ATTN_OBSERVATION_PREV_EPOCH_AGGREGATORS, count);
|
||||
}
|
||||
}
|
||||
|
||||
fn scrape_sync_committee_observation<T: BeaconChainTypes>(slot_now: Slot, chain: &BeaconChain<T>) {
|
||||
let prev_slot = slot_now - 1;
|
||||
|
||||
let contributors = chain.observed_sync_contributors.read();
|
||||
let mut contributor_sum = 0;
|
||||
for i in 0..SYNC_COMMITTEE_SUBNET_COUNT {
|
||||
if let Some(count) =
|
||||
contributors.observed_validator_count(SlotSubcommitteeIndex::new(prev_slot, i))
|
||||
{
|
||||
contributor_sum += count;
|
||||
}
|
||||
}
|
||||
drop(contributors);
|
||||
set_gauge_by_usize(&SYNC_COMM_OBSERVATION_PREV_SLOT_SIGNERS, contributor_sum);
|
||||
|
||||
let sync_aggregators = chain.observed_sync_aggregators.read();
|
||||
let mut aggregator_sum = 0;
|
||||
for i in 0..SYNC_COMMITTEE_SUBNET_COUNT {
|
||||
if let Some(count) =
|
||||
sync_aggregators.observed_validator_count(SlotSubcommitteeIndex::new(prev_slot, i))
|
||||
{
|
||||
aggregator_sum += count;
|
||||
}
|
||||
}
|
||||
drop(sync_aggregators);
|
||||
set_gauge_by_usize(&SYNC_COMM_OBSERVATION_PREV_SLOT_AGGREGATORS, aggregator_sum);
|
||||
}
|
||||
|
||||
fn set_gauge_by_slot(gauge: &Result<IntGauge>, value: Slot) {
|
||||
set_gauge(gauge, value.as_u64() as i64);
|
||||
}
|
||||
|
||||
@@ -1,76 +1,375 @@
|
||||
use crate::beacon_chain::BEACON_CHAIN_DB_KEY;
|
||||
use crate::errors::BeaconChainError;
|
||||
use crate::head_tracker::HeadTracker;
|
||||
use crate::head_tracker::{HeadTracker, SszHeadTracker};
|
||||
use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT};
|
||||
use parking_lot::Mutex;
|
||||
use slog::{debug, warn, Logger};
|
||||
use slog::{debug, error, info, warn, Logger};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::mem;
|
||||
use std::sync::mpsc;
|
||||
use std::sync::Arc;
|
||||
use std::sync::{mpsc, Arc};
|
||||
use std::thread;
|
||||
use store::hot_cold_store::{process_finalization, HotColdDBError};
|
||||
use store::iter::{ParentRootBlockIterator, RootsIterator};
|
||||
use store::{Error, ItemStore, StoreOp};
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
use store::hot_cold_store::{migrate_database, HotColdDBError};
|
||||
use store::iter::RootsIterator;
|
||||
use store::{Error, ItemStore, StoreItem, StoreOp};
|
||||
pub use store::{HotColdDB, MemoryStore};
|
||||
use types::*;
|
||||
use types::{BeaconState, EthSpec, Hash256, Slot};
|
||||
use types::{
|
||||
BeaconState, BeaconStateError, BeaconStateHash, Checkpoint, Epoch, EthSpec, Hash256,
|
||||
SignedBeaconBlockHash, Slot,
|
||||
};
|
||||
|
||||
/// Trait for migration processes that update the database upon finalization.
|
||||
pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
|
||||
Send + Sync + 'static
|
||||
{
|
||||
fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self;
|
||||
/// Compact at least this frequently, finalization permitting (7 days).
|
||||
const MAX_COMPACTION_PERIOD_SECONDS: u64 = 604800;
|
||||
/// Compact at *most* this frequently, to prevent over-compaction during sync (2 hours).
|
||||
const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200;
|
||||
/// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`.
|
||||
const COMPACTION_FINALITY_DISTANCE: u64 = 1024;
|
||||
|
||||
fn process_finalization(
|
||||
/// The background migrator runs a thread to perform pruning and migrate state from the hot
|
||||
/// to the cold database.
|
||||
pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
#[allow(clippy::type_complexity)]
|
||||
tx_thread: Option<Mutex<(mpsc::Sender<Notification>, thread::JoinHandle<()>)>>,
|
||||
/// Genesis block root, for persisting the `PersistedBeaconChain`.
|
||||
genesis_block_root: Hash256,
|
||||
log: Logger,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq)]
|
||||
pub struct MigratorConfig {
|
||||
pub blocking: bool,
|
||||
}
|
||||
|
||||
impl MigratorConfig {
|
||||
pub fn blocking(mut self) -> Self {
|
||||
self.blocking = true;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Pruning can be successful, or in rare cases deferred to a later point.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum PruningOutcome {
|
||||
/// The pruning succeeded and updated the pruning checkpoint from `old_finalized_checkpoint`.
|
||||
Successful {
|
||||
old_finalized_checkpoint: Checkpoint,
|
||||
},
|
||||
DeferredConcurrentMutation,
|
||||
}
|
||||
|
||||
/// Logic errors that can occur during pruning, none of these should ever happen.
|
||||
#[derive(Debug)]
|
||||
pub enum PruningError {
|
||||
IncorrectFinalizedState {
|
||||
state_slot: Slot,
|
||||
new_finalized_slot: Slot,
|
||||
},
|
||||
MissingInfoForCanonicalChain {
|
||||
slot: Slot,
|
||||
},
|
||||
UnexpectedEqualStateRoots,
|
||||
UnexpectedUnequalStateRoots,
|
||||
}
|
||||
|
||||
/// Message sent to the migration thread containing the information it needs to run.
|
||||
pub enum Notification {
|
||||
Finalization(FinalizationNotification),
|
||||
Reconstruction,
|
||||
}
|
||||
|
||||
pub struct FinalizationNotification {
|
||||
finalized_state_root: BeaconStateHash,
|
||||
finalized_checkpoint: Checkpoint,
|
||||
head_tracker: Arc<HeadTracker>,
|
||||
genesis_block_root: Hash256,
|
||||
}
|
||||
|
||||
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Hot, Cold> {
|
||||
/// Create a new `BackgroundMigrator` and spawn its thread if necessary.
|
||||
pub fn new(
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
config: MigratorConfig,
|
||||
genesis_block_root: Hash256,
|
||||
log: Logger,
|
||||
) -> Self {
|
||||
let tx_thread = if config.blocking {
|
||||
None
|
||||
} else {
|
||||
Some(Mutex::new(Self::spawn_thread(db.clone(), log.clone())))
|
||||
};
|
||||
Self {
|
||||
db,
|
||||
tx_thread,
|
||||
genesis_block_root,
|
||||
log,
|
||||
}
|
||||
}
|
||||
|
||||
/// Process a finalized checkpoint from the `BeaconChain`.
|
||||
///
|
||||
/// If successful, all forks descending from before the `finalized_checkpoint` will be
|
||||
/// pruned, and the split point of the database will be advanced to the slot of the finalized
|
||||
/// checkpoint.
|
||||
pub fn process_finalization(
|
||||
&self,
|
||||
_state_root: Hash256,
|
||||
_new_finalized_state: BeaconState<E>,
|
||||
_max_finality_distance: u64,
|
||||
_head_tracker: Arc<HeadTracker>,
|
||||
_old_finalized_block_hash: SignedBeaconBlockHash,
|
||||
_new_finalized_block_hash: SignedBeaconBlockHash,
|
||||
finalized_state_root: BeaconStateHash,
|
||||
finalized_checkpoint: Checkpoint,
|
||||
head_tracker: Arc<HeadTracker>,
|
||||
) -> Result<(), BeaconChainError> {
|
||||
let notif = FinalizationNotification {
|
||||
finalized_state_root,
|
||||
finalized_checkpoint,
|
||||
head_tracker,
|
||||
genesis_block_root: self.genesis_block_root,
|
||||
};
|
||||
|
||||
// Send to background thread if configured, otherwise run in foreground.
|
||||
if let Some(Notification::Finalization(notif)) =
|
||||
self.send_background_notification(Notification::Finalization(notif))
|
||||
{
|
||||
Self::run_migration(self.db.clone(), notif, &self.log);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn process_reconstruction(&self) {
|
||||
if let Some(Notification::Reconstruction) =
|
||||
self.send_background_notification(Notification::Reconstruction)
|
||||
{
|
||||
Self::run_reconstruction(self.db.clone(), &self.log);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn run_reconstruction(db: Arc<HotColdDB<E, Hot, Cold>>, log: &Logger) {
|
||||
if let Err(e) = db.reconstruct_historic_states() {
|
||||
error!(
|
||||
log,
|
||||
"State reconstruction failed";
|
||||
"error" => ?e,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// If configured to run in the background, send `notif` to the background thread.
|
||||
///
|
||||
/// Return `None` if the message was sent to the background thread, `Some(notif)` otherwise.
|
||||
#[must_use = "Message is not processed when this function returns `Some`"]
|
||||
fn send_background_notification(&self, notif: Notification) -> Option<Notification> {
|
||||
// Async path, on the background thread.
|
||||
if let Some(tx_thread) = &self.tx_thread {
|
||||
let (ref mut tx, ref mut thread) = *tx_thread.lock();
|
||||
|
||||
// Restart the background thread if it has crashed.
|
||||
if let Err(tx_err) = tx.send(notif) {
|
||||
let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone());
|
||||
|
||||
*tx = new_tx;
|
||||
let old_thread = mem::replace(thread, new_thread);
|
||||
|
||||
// Join the old thread, which will probably have panicked, or may have
|
||||
// halted normally just now as a result of us dropping the old `mpsc::Sender`.
|
||||
if let Err(thread_err) = old_thread.join() {
|
||||
warn!(
|
||||
self.log,
|
||||
"Migration thread died, so it was restarted";
|
||||
"reason" => format!("{:?}", thread_err)
|
||||
);
|
||||
}
|
||||
|
||||
// Retry at most once, we could recurse but that would risk overflowing the stack.
|
||||
let _ = tx.send(tx_err.0);
|
||||
}
|
||||
None
|
||||
// Synchronous path, on the current thread.
|
||||
} else {
|
||||
Some(notif)
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform the actual work of `process_finalization`.
|
||||
fn run_migration(
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
notif: FinalizationNotification,
|
||||
log: &Logger,
|
||||
) {
|
||||
debug!(log, "Database consolidation started");
|
||||
|
||||
let finalized_state_root = notif.finalized_state_root;
|
||||
|
||||
let finalized_state = match db.get_state(&finalized_state_root.into(), None) {
|
||||
Ok(Some(state)) => state,
|
||||
other => {
|
||||
error!(
|
||||
log,
|
||||
"Migrator failed to load state";
|
||||
"state_root" => ?finalized_state_root,
|
||||
"error" => ?other
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let old_finalized_checkpoint = match Self::prune_abandoned_forks(
|
||||
db.clone(),
|
||||
notif.head_tracker,
|
||||
finalized_state_root,
|
||||
&finalized_state,
|
||||
notif.finalized_checkpoint,
|
||||
notif.genesis_block_root,
|
||||
log,
|
||||
) {
|
||||
Ok(PruningOutcome::Successful {
|
||||
old_finalized_checkpoint,
|
||||
}) => old_finalized_checkpoint,
|
||||
Ok(PruningOutcome::DeferredConcurrentMutation) => {
|
||||
warn!(
|
||||
log,
|
||||
"Pruning deferred because of a concurrent mutation";
|
||||
"message" => "this is expected only very rarely!"
|
||||
);
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(log, "Block pruning failed"; "error" => format!("{:?}", e));
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
match migrate_database(db.clone(), finalized_state_root.into(), &finalized_state) {
|
||||
Ok(()) => {}
|
||||
Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
|
||||
debug!(
|
||||
log,
|
||||
"Database migration postponed, unaligned finalized block";
|
||||
"slot" => slot.as_u64()
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
log,
|
||||
"Database migration failed";
|
||||
"error" => format!("{:?}", e)
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// Finally, compact the database so that new free space is properly reclaimed.
|
||||
if let Err(e) = Self::run_compaction(
|
||||
db,
|
||||
old_finalized_checkpoint.epoch,
|
||||
notif.finalized_checkpoint.epoch,
|
||||
log,
|
||||
) {
|
||||
warn!(log, "Database compaction failed"; "error" => format!("{:?}", e));
|
||||
}
|
||||
|
||||
debug!(log, "Database consolidation complete");
|
||||
}
|
||||
|
||||
/// Spawn a new child thread to run the migration process.
|
||||
///
|
||||
/// Return a channel handle for sending requests to the thread.
|
||||
fn spawn_thread(
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
log: Logger,
|
||||
) -> (mpsc::Sender<Notification>, thread::JoinHandle<()>) {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let thread = thread::spawn(move || {
|
||||
while let Ok(notif) = rx.recv() {
|
||||
// Read the rest of the messages in the channel, preferring any reconstruction
|
||||
// notification, or the finalization notification with the greatest finalized epoch.
|
||||
let notif =
|
||||
rx.try_iter()
|
||||
.fold(notif, |best, other: Notification| match (&best, &other) {
|
||||
(Notification::Reconstruction, _)
|
||||
| (_, Notification::Reconstruction) => Notification::Reconstruction,
|
||||
(
|
||||
Notification::Finalization(fin1),
|
||||
Notification::Finalization(fin2),
|
||||
) => {
|
||||
if fin2.finalized_checkpoint.epoch > fin1.finalized_checkpoint.epoch
|
||||
{
|
||||
other
|
||||
} else {
|
||||
best
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
match notif {
|
||||
Notification::Reconstruction => Self::run_reconstruction(db.clone(), &log),
|
||||
Notification::Finalization(fin) => Self::run_migration(db.clone(), fin, &log),
|
||||
}
|
||||
}
|
||||
});
|
||||
(tx, thread)
|
||||
}
|
||||
|
||||
/// Traverses live heads and prunes blocks and states of chains that we know can't be built
|
||||
/// upon because finalization would prohibit it. This is an optimisation intended to save disk
|
||||
/// upon because finalization would prohibit it. This is an optimisation intended to save disk
|
||||
/// space.
|
||||
///
|
||||
/// Assumptions:
|
||||
/// * It is called after every finalization.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn prune_abandoned_forks(
|
||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
head_tracker: Arc<HeadTracker>,
|
||||
old_finalized_block_hash: SignedBeaconBlockHash,
|
||||
new_finalized_block_hash: SignedBeaconBlockHash,
|
||||
new_finalized_slot: Slot,
|
||||
) -> Result<(), BeaconChainError> {
|
||||
// There will never be any blocks to prune if there is only a single head in the chain.
|
||||
if head_tracker.heads().len() == 1 {
|
||||
return Ok(());
|
||||
new_finalized_state_hash: BeaconStateHash,
|
||||
new_finalized_state: &BeaconState<E>,
|
||||
new_finalized_checkpoint: Checkpoint,
|
||||
genesis_block_root: Hash256,
|
||||
log: &Logger,
|
||||
) -> Result<PruningOutcome, BeaconChainError> {
|
||||
let old_finalized_checkpoint =
|
||||
store
|
||||
.load_pruning_checkpoint()?
|
||||
.unwrap_or_else(|| Checkpoint {
|
||||
epoch: Epoch::new(0),
|
||||
root: Hash256::zero(),
|
||||
});
|
||||
|
||||
let old_finalized_slot = old_finalized_checkpoint
|
||||
.epoch
|
||||
.start_slot(E::slots_per_epoch());
|
||||
let new_finalized_slot = new_finalized_checkpoint
|
||||
.epoch
|
||||
.start_slot(E::slots_per_epoch());
|
||||
let new_finalized_block_hash = new_finalized_checkpoint.root.into();
|
||||
|
||||
// The finalized state must be for the epoch boundary slot, not the slot of the finalized
|
||||
// block.
|
||||
if new_finalized_state.slot() != new_finalized_slot {
|
||||
return Err(PruningError::IncorrectFinalizedState {
|
||||
state_slot: new_finalized_state.slot(),
|
||||
new_finalized_slot,
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
let old_finalized_slot = store
|
||||
.get_block(&old_finalized_block_hash.into())?
|
||||
.ok_or_else(|| BeaconChainError::MissingBeaconBlock(old_finalized_block_hash.into()))?
|
||||
.slot();
|
||||
|
||||
// Collect hashes from new_finalized_block back to old_finalized_block (inclusive)
|
||||
let mut found_block = false; // hack for `take_until`
|
||||
let newly_finalized_blocks: HashMap<SignedBeaconBlockHash, Slot> =
|
||||
ParentRootBlockIterator::new(&*store, new_finalized_block_hash.into())
|
||||
.take_while(|result| match result {
|
||||
Ok((block_hash, _)) => {
|
||||
if found_block {
|
||||
false
|
||||
} else {
|
||||
found_block |= *block_hash == old_finalized_block_hash.into();
|
||||
true
|
||||
}
|
||||
}
|
||||
Err(_) => true,
|
||||
debug!(
|
||||
log,
|
||||
"Starting database pruning";
|
||||
"old_finalized_epoch" => old_finalized_checkpoint.epoch,
|
||||
"new_finalized_epoch" => new_finalized_checkpoint.epoch,
|
||||
);
|
||||
// For each slot between the new finalized checkpoint and the old finalized checkpoint,
|
||||
// collect the beacon block root and state root of the canonical chain.
|
||||
let newly_finalized_chain: HashMap<Slot, (SignedBeaconBlockHash, BeaconStateHash)> =
|
||||
std::iter::once(Ok((
|
||||
new_finalized_slot,
|
||||
(new_finalized_block_hash, new_finalized_state_hash),
|
||||
)))
|
||||
.chain(RootsIterator::new(&store, new_finalized_state).map(|res| {
|
||||
res.map(|(block_root, state_root, slot)| {
|
||||
(slot, (block_root.into(), state_root.into()))
|
||||
})
|
||||
.map(|result| result.map(|(block_hash, block)| (block_hash.into(), block.slot())))
|
||||
.collect::<Result<_, _>>()?;
|
||||
}))
|
||||
.take_while(|res| {
|
||||
res.as_ref()
|
||||
.map_or(true, |(slot, _)| *slot >= old_finalized_slot)
|
||||
})
|
||||
.collect::<Result<_, _>>()?;
|
||||
|
||||
// We don't know which blocks are shared among abandoned chains, so we buffer and delete
|
||||
// everything in one fell swoop.
|
||||
@@ -78,284 +377,237 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
|
||||
let mut abandoned_states: HashSet<(Slot, BeaconStateHash)> = HashSet::new();
|
||||
let mut abandoned_heads: HashSet<Hash256> = HashSet::new();
|
||||
|
||||
for (head_hash, head_slot) in head_tracker.heads() {
|
||||
let mut potentially_abandoned_head: Option<Hash256> = Some(head_hash);
|
||||
let mut potentially_abandoned_blocks: Vec<(
|
||||
Slot,
|
||||
Option<SignedBeaconBlockHash>,
|
||||
Option<BeaconStateHash>,
|
||||
)> = Vec::new();
|
||||
let heads = head_tracker.heads();
|
||||
debug!(
|
||||
log,
|
||||
"Extra pruning information";
|
||||
"old_finalized_root" => format!("{:?}", old_finalized_checkpoint.root),
|
||||
"new_finalized_root" => format!("{:?}", new_finalized_checkpoint.root),
|
||||
"head_count" => heads.len(),
|
||||
);
|
||||
|
||||
let head_state_hash = store
|
||||
.get_block(&head_hash)?
|
||||
.ok_or_else(|| BeaconStateError::MissingBeaconBlock(head_hash.into()))?
|
||||
.state_root();
|
||||
|
||||
let iter = std::iter::once(Ok((head_hash, head_state_hash, head_slot)))
|
||||
.chain(RootsIterator::from_block(Arc::clone(&store), head_hash)?);
|
||||
for maybe_tuple in iter {
|
||||
let (block_hash, state_hash, slot) = maybe_tuple?;
|
||||
if slot < old_finalized_slot {
|
||||
// We must assume here any candidate chains include old_finalized_block_hash,
|
||||
// i.e. there aren't any forks starting at a block that is a strict ancestor of
|
||||
// old_finalized_block_hash.
|
||||
break;
|
||||
for (head_hash, head_slot) in heads {
|
||||
// Load head block. If it fails with a decode error, it's likely a reverted block,
|
||||
// so delete it from the head tracker but leave it and its states in the database
|
||||
// This is suboptimal as it wastes disk space, but it's difficult to fix. A re-sync
|
||||
// can be used to reclaim the space.
|
||||
let head_state_root = match store.get_block(&head_hash) {
|
||||
Ok(Some(block)) => block.state_root(),
|
||||
Ok(None) => {
|
||||
return Err(BeaconStateError::MissingBeaconBlock(head_hash.into()).into())
|
||||
}
|
||||
match newly_finalized_blocks.get(&block_hash.into()).copied() {
|
||||
// Block is not finalized, mark it and its state for deletion
|
||||
Err(Error::SszDecodeError(e)) => {
|
||||
warn!(
|
||||
log,
|
||||
"Forgetting invalid head block";
|
||||
"block_root" => ?head_hash,
|
||||
"error" => ?e,
|
||||
);
|
||||
abandoned_heads.insert(head_hash);
|
||||
continue;
|
||||
}
|
||||
Err(e) => return Err(e.into()),
|
||||
};
|
||||
|
||||
let mut potentially_abandoned_head = Some(head_hash);
|
||||
let mut potentially_abandoned_blocks = vec![];
|
||||
|
||||
// Iterate backwards from this head, staging blocks and states for deletion.
|
||||
let iter = std::iter::once(Ok((head_hash, head_state_root, head_slot)))
|
||||
.chain(RootsIterator::from_block(&store, head_hash)?);
|
||||
|
||||
for maybe_tuple in iter {
|
||||
let (block_root, state_root, slot) = maybe_tuple?;
|
||||
let block_root = SignedBeaconBlockHash::from(block_root);
|
||||
let state_root = BeaconStateHash::from(state_root);
|
||||
|
||||
match newly_finalized_chain.get(&slot) {
|
||||
// If there's no information about a slot on the finalized chain, then
|
||||
// it should be because it's ahead of the new finalized slot. Stage
|
||||
// the fork's block and state for possible deletion.
|
||||
None => {
|
||||
potentially_abandoned_blocks.push((
|
||||
slot,
|
||||
Some(block_hash.into()),
|
||||
Some(state_hash.into()),
|
||||
));
|
||||
if slot > new_finalized_slot {
|
||||
potentially_abandoned_blocks.push((
|
||||
slot,
|
||||
Some(block_root),
|
||||
Some(state_root),
|
||||
));
|
||||
} else if slot >= old_finalized_slot {
|
||||
return Err(PruningError::MissingInfoForCanonicalChain { slot }.into());
|
||||
} else {
|
||||
// We must assume here any candidate chains include the old finalized
|
||||
// checkpoint, i.e. there aren't any forks starting at a block that is a
|
||||
// strict ancestor of old_finalized_checkpoint.
|
||||
warn!(
|
||||
log,
|
||||
"Found a chain that should already have been pruned";
|
||||
"head_block_root" => format!("{:?}", head_hash),
|
||||
"head_slot" => head_slot,
|
||||
);
|
||||
potentially_abandoned_head.take();
|
||||
break;
|
||||
}
|
||||
}
|
||||
Some(finalized_slot) => {
|
||||
// Block root is finalized, and we have reached the slot it was finalized
|
||||
// at: we've hit a shared part of the chain.
|
||||
if finalized_slot == slot {
|
||||
// The first finalized block of a candidate chain lies after (in terms
|
||||
// of slots order) the newly finalized block. It's not a candidate for
|
||||
// prunning.
|
||||
if finalized_slot == new_finalized_slot {
|
||||
Some((finalized_block_root, finalized_state_root)) => {
|
||||
// This fork descends from a newly finalized block, we can stop.
|
||||
if block_root == *finalized_block_root {
|
||||
// Sanity check: if the slot and block root match, then the
|
||||
// state roots should match too.
|
||||
if state_root != *finalized_state_root {
|
||||
return Err(PruningError::UnexpectedUnequalStateRoots.into());
|
||||
}
|
||||
|
||||
// If the fork descends from the whole finalized chain,
|
||||
// do not prune it. Otherwise continue to delete all
|
||||
// of the blocks and states that have been staged for
|
||||
// deletion so far.
|
||||
if slot == new_finalized_slot {
|
||||
potentially_abandoned_blocks.clear();
|
||||
potentially_abandoned_head.take();
|
||||
}
|
||||
|
||||
// If there are skipped slots on the fork to be pruned, then
|
||||
// we will have just staged the common block for deletion.
|
||||
// Unstage it.
|
||||
else {
|
||||
for (_, block_root, _) in
|
||||
potentially_abandoned_blocks.iter_mut().rev()
|
||||
{
|
||||
if block_root.as_ref() == Some(finalized_block_root) {
|
||||
*block_root = None;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
// Block root is finalized, but we're at a skip slot: delete the state only.
|
||||
else {
|
||||
} else {
|
||||
if state_root == *finalized_state_root {
|
||||
return Err(PruningError::UnexpectedEqualStateRoots.into());
|
||||
}
|
||||
potentially_abandoned_blocks.push((
|
||||
slot,
|
||||
None,
|
||||
Some(state_hash.into()),
|
||||
Some(block_root),
|
||||
Some(state_root),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
abandoned_heads.extend(potentially_abandoned_head.into_iter());
|
||||
if !potentially_abandoned_blocks.is_empty() {
|
||||
if let Some(abandoned_head) = potentially_abandoned_head {
|
||||
debug!(
|
||||
log,
|
||||
"Pruning head";
|
||||
"head_block_root" => format!("{:?}", abandoned_head),
|
||||
"head_slot" => head_slot,
|
||||
);
|
||||
abandoned_heads.insert(abandoned_head);
|
||||
abandoned_blocks.extend(
|
||||
potentially_abandoned_blocks
|
||||
.iter()
|
||||
.filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash),
|
||||
);
|
||||
abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map(
|
||||
|(slot, _, maybe_state_hash)| match maybe_state_hash {
|
||||
None => None,
|
||||
Some(state_hash) => Some((*slot, *state_hash)),
|
||||
},
|
||||
|(slot, _, maybe_state_hash)| maybe_state_hash.map(|sr| (*slot, sr)),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// Update the head tracker before the database, so that we maintain the invariant
|
||||
// that a block present in the head tracker is present in the database.
|
||||
// See https://github.com/sigp/lighthouse/issues/1557
|
||||
let mut head_tracker_lock = head_tracker.0.write();
|
||||
|
||||
// Check that all the heads to be deleted are still present. The absence of any
|
||||
// head indicates a race, that will likely resolve itself, so we defer pruning until
|
||||
// later.
|
||||
for head_hash in &abandoned_heads {
|
||||
if !head_tracker_lock.contains_key(head_hash) {
|
||||
return Ok(PruningOutcome::DeferredConcurrentMutation);
|
||||
}
|
||||
}
|
||||
|
||||
// Then remove them for real.
|
||||
for head_hash in abandoned_heads {
|
||||
head_tracker_lock.remove(&head_hash);
|
||||
}
|
||||
|
||||
let batch: Vec<StoreOp<E>> = abandoned_blocks
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.map(StoreOp::DeleteBlock)
|
||||
.chain(
|
||||
abandoned_states
|
||||
.into_iter()
|
||||
.map(|(slot, state_hash)| StoreOp::DeleteState(state_hash, slot)),
|
||||
.map(|(slot, state_hash)| StoreOp::DeleteState(state_hash.into(), Some(slot))),
|
||||
)
|
||||
.collect();
|
||||
store.do_atomically(batch)?;
|
||||
for head_hash in abandoned_heads.into_iter() {
|
||||
head_tracker.remove_head(head_hash);
|
||||
|
||||
let mut kv_batch = store.convert_to_kv_batch(&batch)?;
|
||||
|
||||
// Persist the head in case the process is killed or crashes here. This prevents
|
||||
// the head tracker reverting after our mutation above.
|
||||
let persisted_head = PersistedBeaconChain {
|
||||
_canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT,
|
||||
genesis_block_root,
|
||||
ssz_head_tracker: SszHeadTracker::from_map(&*head_tracker_lock),
|
||||
};
|
||||
drop(head_tracker_lock);
|
||||
kv_batch.push(persisted_head.as_kv_store_op(BEACON_CHAIN_DB_KEY));
|
||||
|
||||
// Persist the new finalized checkpoint as the pruning checkpoint.
|
||||
kv_batch.push(store.pruning_checkpoint_store_op(new_finalized_checkpoint));
|
||||
|
||||
store.hot_db.do_atomically(kv_batch)?;
|
||||
debug!(log, "Database pruning complete");
|
||||
|
||||
Ok(PruningOutcome::Successful {
|
||||
old_finalized_checkpoint,
|
||||
})
|
||||
}
|
||||
|
||||
/// Compact the database if it has been more than `COMPACTION_PERIOD_SECONDS` since it
|
||||
/// was last compacted.
|
||||
pub fn run_compaction(
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
old_finalized_epoch: Epoch,
|
||||
new_finalized_epoch: Epoch,
|
||||
log: &Logger,
|
||||
) -> Result<(), Error> {
|
||||
if !db.compact_on_prune() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let last_compaction_timestamp = db
|
||||
.load_compaction_timestamp()?
|
||||
.unwrap_or_else(|| Duration::from_secs(0));
|
||||
let start_time = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or(last_compaction_timestamp);
|
||||
let seconds_since_last_compaction = start_time
|
||||
.checked_sub(last_compaction_timestamp)
|
||||
.as_ref()
|
||||
.map_or(0, Duration::as_secs);
|
||||
|
||||
if seconds_since_last_compaction > MAX_COMPACTION_PERIOD_SECONDS
|
||||
|| (new_finalized_epoch - old_finalized_epoch > COMPACTION_FINALITY_DISTANCE
|
||||
&& seconds_since_last_compaction > MIN_COMPACTION_PERIOD_SECONDS)
|
||||
{
|
||||
info!(
|
||||
log,
|
||||
"Starting database compaction";
|
||||
"old_finalized_epoch" => old_finalized_epoch,
|
||||
"new_finalized_epoch" => new_finalized_epoch,
|
||||
);
|
||||
db.compact()?;
|
||||
|
||||
let finish_time = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or(start_time);
|
||||
db.store_compaction_timestamp(finish_time)?;
|
||||
|
||||
info!(log, "Database compaction complete");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Migrator that does nothing, for stores that don't need migration.
|
||||
pub struct NullMigrator;
|
||||
|
||||
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold> for NullMigrator {
|
||||
fn new(_: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
|
||||
NullMigrator
|
||||
}
|
||||
}
|
||||
|
||||
/// Migrator that immediately calls the store's migration function, blocking the current execution.
|
||||
///
|
||||
/// Mostly useful for tests.
|
||||
pub struct BlockingMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
|
||||
for BlockingMigrator<E, Hot, Cold>
|
||||
{
|
||||
fn new(db: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
|
||||
BlockingMigrator { db }
|
||||
}
|
||||
|
||||
fn process_finalization(
|
||||
&self,
|
||||
state_root: Hash256,
|
||||
new_finalized_state: BeaconState<E>,
|
||||
_max_finality_distance: u64,
|
||||
head_tracker: Arc<HeadTracker>,
|
||||
old_finalized_block_hash: SignedBeaconBlockHash,
|
||||
new_finalized_block_hash: SignedBeaconBlockHash,
|
||||
) {
|
||||
if let Err(e) = process_finalization(self.db.clone(), state_root, &new_finalized_state) {
|
||||
// This migrator is only used for testing, so we just log to stderr without a logger.
|
||||
eprintln!("Migration error: {:?}", e);
|
||||
}
|
||||
|
||||
if let Err(e) = Self::prune_abandoned_forks(
|
||||
self.db.clone(),
|
||||
head_tracker,
|
||||
old_finalized_block_hash,
|
||||
new_finalized_block_hash,
|
||||
new_finalized_state.slot,
|
||||
) {
|
||||
eprintln!("Pruning error: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type MpscSender<E> = mpsc::Sender<(
|
||||
Hash256,
|
||||
BeaconState<E>,
|
||||
Arc<HeadTracker>,
|
||||
SignedBeaconBlockHash,
|
||||
SignedBeaconBlockHash,
|
||||
Slot,
|
||||
)>;
|
||||
|
||||
/// Migrator that runs a background thread to migrate state from the hot to the cold database.
|
||||
pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
|
||||
log: Logger,
|
||||
}
|
||||
|
||||
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
|
||||
for BackgroundMigrator<E, Hot, Cold>
|
||||
{
|
||||
fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self {
|
||||
let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone()));
|
||||
Self { db, tx_thread, log }
|
||||
}
|
||||
|
||||
/// Perform the freezing operation on the database,
|
||||
fn process_finalization(
|
||||
&self,
|
||||
finalized_state_root: Hash256,
|
||||
new_finalized_state: BeaconState<E>,
|
||||
max_finality_distance: u64,
|
||||
head_tracker: Arc<HeadTracker>,
|
||||
old_finalized_block_hash: SignedBeaconBlockHash,
|
||||
new_finalized_block_hash: SignedBeaconBlockHash,
|
||||
) {
|
||||
if !self.needs_migration(new_finalized_state.slot, max_finality_distance) {
|
||||
return;
|
||||
}
|
||||
|
||||
let (ref mut tx, ref mut thread) = *self.tx_thread.lock();
|
||||
|
||||
let new_finalized_slot = new_finalized_state.slot;
|
||||
if let Err(tx_err) = tx.send((
|
||||
finalized_state_root,
|
||||
new_finalized_state,
|
||||
head_tracker,
|
||||
old_finalized_block_hash,
|
||||
new_finalized_block_hash,
|
||||
new_finalized_slot,
|
||||
)) {
|
||||
let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone());
|
||||
|
||||
drop(mem::replace(tx, new_tx));
|
||||
let old_thread = mem::replace(thread, new_thread);
|
||||
|
||||
// Join the old thread, which will probably have panicked, or may have
|
||||
// halted normally just now as a result of us dropping the old `mpsc::Sender`.
|
||||
if let Err(thread_err) = old_thread.join() {
|
||||
warn!(
|
||||
self.log,
|
||||
"Migration thread died, so it was restarted";
|
||||
"reason" => format!("{:?}", thread_err)
|
||||
);
|
||||
}
|
||||
|
||||
// Retry at most once, we could recurse but that would risk overflowing the stack.
|
||||
let _ = tx.send(tx_err.0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Hot, Cold> {
|
||||
/// Return true if a migration needs to be performed, given a new `finalized_slot`.
|
||||
fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool {
|
||||
let finality_distance = finalized_slot - self.db.get_split_slot();
|
||||
finality_distance > max_finality_distance
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
/// Spawn a new child thread to run the migration process.
|
||||
///
|
||||
/// Return a channel handle for sending new finalized states to the thread.
|
||||
fn spawn_thread(
|
||||
db: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
log: Logger,
|
||||
) -> (
|
||||
mpsc::Sender<(
|
||||
Hash256,
|
||||
BeaconState<E>,
|
||||
Arc<HeadTracker>,
|
||||
SignedBeaconBlockHash,
|
||||
SignedBeaconBlockHash,
|
||||
Slot,
|
||||
)>,
|
||||
thread::JoinHandle<()>,
|
||||
) {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let thread = thread::spawn(move || {
|
||||
while let Ok((
|
||||
state_root,
|
||||
state,
|
||||
head_tracker,
|
||||
old_finalized_block_hash,
|
||||
new_finalized_block_hash,
|
||||
new_finalized_slot,
|
||||
)) = rx.recv()
|
||||
{
|
||||
match process_finalization(db.clone(), state_root, &state) {
|
||||
Ok(()) => {}
|
||||
Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
|
||||
debug!(
|
||||
log,
|
||||
"Database migration postponed, unaligned finalized block";
|
||||
"slot" => slot.as_u64()
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
log,
|
||||
"Database migration failed";
|
||||
"error" => format!("{:?}", e)
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
match Self::prune_abandoned_forks(
|
||||
db.clone(),
|
||||
head_tracker,
|
||||
old_finalized_block_hash,
|
||||
new_finalized_block_hash,
|
||||
new_finalized_slot,
|
||||
) {
|
||||
Ok(()) => {}
|
||||
Err(e) => warn!(log, "Block pruning failed: {:?}", e),
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
(tx, thread)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,18 @@
|
||||
use crate::metrics;
|
||||
use std::collections::HashMap;
|
||||
use types::{Attestation, AttestationData, EthSpec, Slot};
|
||||
use tree_hash::TreeHash;
|
||||
use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
|
||||
use types::slot_data::SlotData;
|
||||
use types::sync_committee_contribution::SyncContributionData;
|
||||
use types::{Attestation, AttestationData, EthSpec, Hash256, Slot, SyncCommitteeContribution};
|
||||
|
||||
type AttestationDataRoot = Hash256;
|
||||
type SyncDataRoot = Hash256;
|
||||
|
||||
/// The number of slots that will be stored in the pool.
|
||||
///
|
||||
/// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all attestations
|
||||
/// at slots less than `4` will be dropped and any future attestation with a slot less than `4`
|
||||
/// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all items
|
||||
/// at slots less than `4` will be dropped and any future item with a slot less than `4`
|
||||
/// will be refused.
|
||||
const SLOTS_RETAINED: usize = 3;
|
||||
|
||||
@@ -14,51 +21,98 @@ const SLOTS_RETAINED: usize = 3;
|
||||
/// This is a DoS protection measure.
|
||||
const MAX_ATTESTATIONS_PER_SLOT: usize = 16_384;
|
||||
|
||||
/// Returned upon successfully inserting an attestation into the pool.
|
||||
/// Returned upon successfully inserting an item into the pool.
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum InsertOutcome {
|
||||
/// The `attestation.data` had not been seen before and was added to the pool.
|
||||
NewAttestationData { committee_index: usize },
|
||||
/// A validator signature for the given `attestation.data` was already known. No changes were
|
||||
/// The item had not been seen before and was added to the pool.
|
||||
NewItemInserted { committee_index: usize },
|
||||
/// A validator signature for the given item's `Data` was already known. No changes were
|
||||
/// made.
|
||||
SignatureAlreadyKnown { committee_index: usize },
|
||||
/// The `attestation.data` was known, but a signature for the given validator was not yet
|
||||
/// The item's `Data` was known, but a signature for the given validator was not yet
|
||||
/// known. The signature was aggregated into the pool.
|
||||
SignatureAggregated { committee_index: usize },
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
/// The given `attestation.data.slot` was too low to be stored. No changes were made.
|
||||
/// The given `data.slot` was too low to be stored. No changes were made.
|
||||
SlotTooLow {
|
||||
slot: Slot,
|
||||
lowest_permissible_slot: Slot,
|
||||
},
|
||||
/// The given `attestation.aggregation_bits` field was empty.
|
||||
/// The given `aggregation_bits` field was empty.
|
||||
NoAggregationBitsSet,
|
||||
/// The given `attestation.aggregation_bits` field had more than one signature. The number of
|
||||
/// The given `aggregation_bits` field had more than one signature. The number of
|
||||
/// signatures found is included.
|
||||
MoreThanOneAggregationBitSet(usize),
|
||||
/// We have reached the maximum number of unique `AttestationData` that can be stored in a
|
||||
/// We have reached the maximum number of unique items that can be stored in a
|
||||
/// slot. This is a DoS protection function.
|
||||
ReachedMaxAttestationsPerSlot(usize),
|
||||
/// The given `attestation.aggregation_bits` field had a different length to the one currently
|
||||
ReachedMaxItemsPerSlot(usize),
|
||||
/// The given `aggregation_bits` field had a different length to the one currently
|
||||
/// stored. This indicates a fairly serious error somewhere in the code that called this
|
||||
/// function.
|
||||
InconsistentBitfieldLengths,
|
||||
/// The given `attestation` was for the incorrect slot. This is an internal error.
|
||||
IncorrectSlot { expected: Slot, attestation: Slot },
|
||||
/// The given item was for the incorrect slot. This is an internal error.
|
||||
IncorrectSlot { expected: Slot, actual: Slot },
|
||||
}
|
||||
|
||||
/// Implemented for items in the `NaiveAggregationPool`. Requires that items implement `SlotData`,
|
||||
/// which means they have an associated slot. This handles aggregation of items that are inserted.
|
||||
pub trait AggregateMap {
|
||||
/// `Key` should be a hash of `Data`.
|
||||
type Key;
|
||||
|
||||
/// The item stored in the map
|
||||
type Value: Clone + SlotData;
|
||||
|
||||
/// The unique fields of `Value`, hashed to create `Key`.
|
||||
type Data: SlotData;
|
||||
|
||||
/// Create a new `AggregateMap` with capacity `initial_capacity`.
|
||||
fn new(initial_capacity: usize) -> Self;
|
||||
|
||||
/// Insert a `Value` into `Self`, returning a result.
|
||||
fn insert(&mut self, value: &Self::Value) -> Result<InsertOutcome, Error>;
|
||||
|
||||
/// Get a `Value` from `Self` based on `Data`.
|
||||
fn get(&self, data: &Self::Data) -> Option<Self::Value>;
|
||||
|
||||
/// Get a reference to the inner `HashMap`.
|
||||
fn get_map(&self) -> &HashMap<Self::Key, Self::Value>;
|
||||
|
||||
/// Get a `Value` from `Self` based on `Key`, which is a hash of `Data`.
|
||||
fn get_by_root(&self, root: &Self::Key) -> Option<&Self::Value>;
|
||||
|
||||
/// The number of items store in `Self`.
|
||||
fn len(&self) -> usize;
|
||||
|
||||
/// Start a timer observing inserts.
|
||||
fn start_insert_timer() -> Option<metrics::HistogramTimer>;
|
||||
|
||||
/// Start a timer observing the time it takes to create a new map for a new slot.
|
||||
fn start_create_map_timer() -> Option<metrics::HistogramTimer>;
|
||||
|
||||
/// Start a timer observing the time it takes to prune the pool.
|
||||
fn start_prune_timer() -> Option<metrics::HistogramTimer>;
|
||||
|
||||
/// The default capacity of `Self`.
|
||||
fn default_capacity() -> usize;
|
||||
}
|
||||
|
||||
/// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all
|
||||
/// `attestation` are from the same slot.
|
||||
struct AggregatedAttestationMap<E: EthSpec> {
|
||||
map: HashMap<AttestationData, Attestation<E>>,
|
||||
pub struct AggregatedAttestationMap<E: EthSpec> {
|
||||
map: HashMap<AttestationDataRoot, Attestation<E>>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AggregatedAttestationMap<E> {
|
||||
impl<E: EthSpec> AggregateMap for AggregatedAttestationMap<E> {
|
||||
type Key = AttestationDataRoot;
|
||||
type Value = Attestation<E>;
|
||||
type Data = AttestationData;
|
||||
|
||||
/// Create an empty collection with the given `initial_capacity`.
|
||||
pub fn new(initial_capacity: usize) -> Self {
|
||||
fn new(initial_capacity: usize) -> Self {
|
||||
Self {
|
||||
map: HashMap::with_capacity(initial_capacity),
|
||||
}
|
||||
@@ -67,7 +121,7 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
|
||||
/// Insert an attestation into `self`, aggregating it into the pool.
|
||||
///
|
||||
/// The given attestation (`a`) must only have one signature.
|
||||
pub fn insert(&mut self, a: &Attestation<E>) -> Result<InsertOutcome, Error> {
|
||||
fn insert(&mut self, a: &Self::Value) -> Result<InsertOutcome, Error> {
|
||||
let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT);
|
||||
|
||||
let set_bits = a
|
||||
@@ -81,13 +135,15 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
|
||||
let committee_index = set_bits
|
||||
.first()
|
||||
.copied()
|
||||
.ok_or_else(|| Error::NoAggregationBitsSet)?;
|
||||
.ok_or(Error::NoAggregationBitsSet)?;
|
||||
|
||||
if set_bits.len() > 1 {
|
||||
return Err(Error::MoreThanOneAggregationBitSet(set_bits.len()));
|
||||
}
|
||||
|
||||
if let Some(existing_attestation) = self.map.get_mut(&a.data) {
|
||||
let attestation_data_root = a.data.tree_hash_root();
|
||||
|
||||
if let Some(existing_attestation) = self.map.get_mut(&attestation_data_root) {
|
||||
if existing_attestation
|
||||
.aggregation_bits
|
||||
.get(committee_index)
|
||||
@@ -102,60 +158,190 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
|
||||
}
|
||||
} else {
|
||||
if self.map.len() >= MAX_ATTESTATIONS_PER_SLOT {
|
||||
return Err(Error::ReachedMaxAttestationsPerSlot(
|
||||
MAX_ATTESTATIONS_PER_SLOT,
|
||||
));
|
||||
return Err(Error::ReachedMaxItemsPerSlot(MAX_ATTESTATIONS_PER_SLOT));
|
||||
}
|
||||
|
||||
self.map.insert(a.data.clone(), a.clone());
|
||||
Ok(InsertOutcome::NewAttestationData { committee_index })
|
||||
self.map.insert(attestation_data_root, a.clone());
|
||||
Ok(InsertOutcome::NewItemInserted { committee_index })
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an aggregated `Attestation` with the given `data`, if any.
|
||||
///
|
||||
/// The given `a.data.slot` must match the slot that `self` was initialized with.
|
||||
pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> {
|
||||
Ok(self.map.get(data).cloned())
|
||||
fn get(&self, data: &Self::Data) -> Option<Self::Value> {
|
||||
self.map.get(&data.tree_hash_root()).cloned()
|
||||
}
|
||||
|
||||
/// Iterate all attestations in `self`.
|
||||
pub fn iter(&self) -> impl Iterator<Item = &Attestation<E>> {
|
||||
self.map.iter().map(|(_key, attestation)| attestation)
|
||||
fn get_map(&self) -> &HashMap<Self::Key, Self::Value> {
|
||||
&self.map
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
/// Returns an aggregated `Attestation` with the given `root`, if any.
|
||||
fn get_by_root(&self, root: &Self::Key) -> Option<&Self::Value> {
|
||||
self.map.get(root)
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.map.len()
|
||||
}
|
||||
|
||||
fn start_insert_timer() -> Option<metrics::HistogramTimer> {
|
||||
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_INSERT)
|
||||
}
|
||||
|
||||
fn start_create_map_timer() -> Option<metrics::HistogramTimer> {
|
||||
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP)
|
||||
}
|
||||
|
||||
fn start_prune_timer() -> Option<metrics::HistogramTimer> {
|
||||
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_PRUNE)
|
||||
}
|
||||
|
||||
/// Use the `TARGET_COMMITTEE_SIZE`.
|
||||
///
|
||||
/// Note: hard-coded until `TARGET_COMMITTEE_SIZE` is available via `EthSpec`.
|
||||
fn default_capacity() -> usize {
|
||||
128
|
||||
}
|
||||
}
|
||||
|
||||
/// A pool of `Attestation` that is specially designed to store "unaggregated" attestations from
|
||||
/// the native aggregation scheme.
|
||||
/// A collection of `SyncCommitteeContribution`, keyed by their `SyncContributionData`. Enforces that all
|
||||
/// contributions are from the same slot.
|
||||
pub struct SyncContributionAggregateMap<E: EthSpec> {
|
||||
map: HashMap<SyncDataRoot, SyncCommitteeContribution<E>>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AggregateMap for SyncContributionAggregateMap<E> {
|
||||
type Key = SyncDataRoot;
|
||||
type Value = SyncCommitteeContribution<E>;
|
||||
type Data = SyncContributionData;
|
||||
|
||||
/// Create an empty collection with the given `initial_capacity`.
|
||||
fn new(initial_capacity: usize) -> Self {
|
||||
Self {
|
||||
map: HashMap::with_capacity(initial_capacity),
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a sync committee contribution into `self`, aggregating it into the pool.
|
||||
///
|
||||
/// The given sync contribution must only have one signature.
|
||||
fn insert(
|
||||
&mut self,
|
||||
contribution: &SyncCommitteeContribution<E>,
|
||||
) -> Result<InsertOutcome, Error> {
|
||||
let _timer =
|
||||
metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CORE_INSERT);
|
||||
|
||||
let set_bits = contribution
|
||||
.aggregation_bits
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(_i, bit)| *bit)
|
||||
.map(|(i, _bit)| i)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let committee_index = set_bits
|
||||
.first()
|
||||
.copied()
|
||||
.ok_or(Error::NoAggregationBitsSet)?;
|
||||
|
||||
if set_bits.len() > 1 {
|
||||
return Err(Error::MoreThanOneAggregationBitSet(set_bits.len()));
|
||||
}
|
||||
|
||||
let sync_data_root = SyncContributionData::from_contribution(contribution).tree_hash_root();
|
||||
|
||||
if let Some(existing_contribution) = self.map.get_mut(&sync_data_root) {
|
||||
if existing_contribution
|
||||
.aggregation_bits
|
||||
.get(committee_index)
|
||||
.map_err(|_| Error::InconsistentBitfieldLengths)?
|
||||
{
|
||||
Ok(InsertOutcome::SignatureAlreadyKnown { committee_index })
|
||||
} else {
|
||||
let _timer = metrics::start_timer(
|
||||
&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_AGGREGATION,
|
||||
);
|
||||
existing_contribution.aggregate(contribution);
|
||||
Ok(InsertOutcome::SignatureAggregated { committee_index })
|
||||
}
|
||||
} else {
|
||||
if self.map.len() >= E::sync_committee_size() {
|
||||
return Err(Error::ReachedMaxItemsPerSlot(E::sync_committee_size()));
|
||||
}
|
||||
|
||||
self.map.insert(sync_data_root, contribution.clone());
|
||||
Ok(InsertOutcome::NewItemInserted { committee_index })
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an aggregated `SyncCommitteeContribution` with the given `data`, if any.
|
||||
///
|
||||
/// The given `data.slot` must match the slot that `self` was initialized with.
|
||||
fn get(&self, data: &SyncContributionData) -> Option<SyncCommitteeContribution<E>> {
|
||||
self.map.get(&data.tree_hash_root()).cloned()
|
||||
}
|
||||
|
||||
fn get_map(&self) -> &HashMap<SyncDataRoot, SyncCommitteeContribution<E>> {
|
||||
&self.map
|
||||
}
|
||||
|
||||
/// Returns an aggregated `SyncCommitteeContribution` with the given `root`, if any.
|
||||
fn get_by_root(&self, root: &SyncDataRoot) -> Option<&SyncCommitteeContribution<E>> {
|
||||
self.map.get(root)
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.map.len()
|
||||
}
|
||||
|
||||
fn start_insert_timer() -> Option<metrics::HistogramTimer> {
|
||||
metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_INSERT)
|
||||
}
|
||||
|
||||
fn start_create_map_timer() -> Option<metrics::HistogramTimer> {
|
||||
metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CREATE_MAP)
|
||||
}
|
||||
|
||||
fn start_prune_timer() -> Option<metrics::HistogramTimer> {
|
||||
metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_PRUNE)
|
||||
}
|
||||
|
||||
/// Default to `SYNC_COMMITTEE_SUBNET_COUNT`.
|
||||
fn default_capacity() -> usize {
|
||||
SYNC_COMMITTEE_SUBNET_COUNT as usize
|
||||
}
|
||||
}
|
||||
|
||||
/// A pool of `Attestation` or `SyncCommitteeContribution` that is specially designed to store
|
||||
/// "unaggregated" messages from the native aggregation scheme.
|
||||
///
|
||||
/// **The `NaiveAggregationPool` does not do any signature or attestation verification. It assumes
|
||||
/// that all `Attestation` objects provided are valid.**
|
||||
/// **The `NaiveAggregationPool` does not do any verification. It assumes that all `Attestation`
|
||||
/// or `SyncCommitteeContribution` objects provided are valid.**
|
||||
///
|
||||
/// ## Details
|
||||
///
|
||||
/// The pool sorts the `Attestation` by `attestation.data.slot`, then by `attestation.data`.
|
||||
/// The pool sorts the items by `slot`, then by `Data`.
|
||||
///
|
||||
/// As each unaggregated attestation is added it is aggregated with any existing `attestation` with
|
||||
/// the same `AttestationData`. Considering that the pool only accepts attestations with a single
|
||||
/// As each item is added it is aggregated with any existing item with the same `Data`. Considering
|
||||
/// that the pool only accepts attestations or sync contributions with a single
|
||||
/// signature, there should only ever be a single aggregated `Attestation` for any given
|
||||
/// `AttestationData`.
|
||||
/// `AttestationData` or a single `SyncCommitteeContribution` for any given `SyncContributionData`.
|
||||
///
|
||||
/// The pool has a capacity for `SLOTS_RETAINED` slots, when a new `attestation.data.slot` is
|
||||
/// The pool has a capacity for `SLOTS_RETAINED` slots, when a new `slot` is
|
||||
/// provided, the oldest slot is dropped and replaced with the new slot. The pool can also be
|
||||
/// pruned by supplying a `current_slot`; all existing attestations with a slot lower than
|
||||
/// `current_slot - SLOTS_RETAINED` will be removed and any future attestation with a slot lower
|
||||
/// than that will also be refused. Pruning is done automatically based upon the attestations it
|
||||
/// pruned by supplying a `current_slot`; all existing items with a slot lower than
|
||||
/// `current_slot - SLOTS_RETAINED` will be removed and any future item with a slot lower
|
||||
/// than that will also be refused. Pruning is done automatically based upon the items it
|
||||
/// receives and it can be triggered manually.
|
||||
pub struct NaiveAggregationPool<E: EthSpec> {
|
||||
pub struct NaiveAggregationPool<T: AggregateMap> {
|
||||
lowest_permissible_slot: Slot,
|
||||
maps: HashMap<Slot, AggregatedAttestationMap<E>>,
|
||||
maps: HashMap<Slot, T>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> Default for NaiveAggregationPool<E> {
|
||||
impl<T: AggregateMap> Default for NaiveAggregationPool<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
lowest_permissible_slot: Slot::new(0),
|
||||
@@ -164,20 +350,20 @@ impl<E: EthSpec> Default for NaiveAggregationPool<E> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> NaiveAggregationPool<E> {
|
||||
/// Insert an attestation into `self`, aggregating it into the pool.
|
||||
impl<T: AggregateMap> NaiveAggregationPool<T> {
|
||||
/// Insert an item into `self`, aggregating it into the pool.
|
||||
///
|
||||
/// The given attestation (`a`) must only have one signature and have an
|
||||
/// `attestation.data.slot` that is not lower than `self.lowest_permissible_slot`.
|
||||
/// The given item must only have one signature and have an
|
||||
/// `slot` that is not lower than `self.lowest_permissible_slot`.
|
||||
///
|
||||
/// The pool may be pruned if the given `attestation.data` has a slot higher than any
|
||||
/// The pool may be pruned if the given item has a slot higher than any
|
||||
/// previously seen.
|
||||
pub fn insert(&mut self, attestation: &Attestation<E>) -> Result<InsertOutcome, Error> {
|
||||
let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_INSERT);
|
||||
let slot = attestation.data.slot;
|
||||
pub fn insert(&mut self, item: &T::Value) -> Result<InsertOutcome, Error> {
|
||||
let _timer = T::start_insert_timer();
|
||||
let slot = item.get_slot();
|
||||
let lowest_permissible_slot = self.lowest_permissible_slot;
|
||||
|
||||
// Reject any attestations that are too old.
|
||||
// Reject any items that are too old.
|
||||
if slot < lowest_permissible_slot {
|
||||
return Err(Error::SlotTooLow {
|
||||
slot,
|
||||
@@ -185,14 +371,10 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
|
||||
});
|
||||
}
|
||||
|
||||
let lock_timer =
|
||||
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_MAPS_WRITE_LOCK);
|
||||
drop(lock_timer);
|
||||
|
||||
let outcome = if let Some(map) = self.maps.get_mut(&slot) {
|
||||
map.insert(attestation)
|
||||
map.insert(item)
|
||||
} else {
|
||||
let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP);
|
||||
let _timer = T::start_create_map_timer();
|
||||
// To avoid re-allocations, try and determine a rough initial capacity for the new item
|
||||
// by obtaining the mean size of all items in earlier epoch.
|
||||
let (count, sum) = self
|
||||
@@ -204,12 +386,11 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
|
||||
.map(|(_slot, map)| map.len())
|
||||
.fold((0, 0), |(count, sum), len| (count + 1, sum + len));
|
||||
|
||||
// Use the mainnet default committee size if we can't determine an average.
|
||||
let initial_capacity = sum.checked_div(count).unwrap_or(128);
|
||||
let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity);
|
||||
|
||||
let mut item = AggregatedAttestationMap::new(initial_capacity);
|
||||
let outcome = item.insert(attestation);
|
||||
self.maps.insert(slot, item);
|
||||
let mut aggregate_map = T::new(initial_capacity);
|
||||
let outcome = aggregate_map.insert(item);
|
||||
self.maps.insert(slot, aggregate_map);
|
||||
|
||||
outcome
|
||||
};
|
||||
@@ -219,27 +400,36 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
|
||||
outcome
|
||||
}
|
||||
|
||||
/// Returns an aggregated `Attestation` with the given `data`, if any.
|
||||
pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> {
|
||||
/// Returns the total number of items stored in `self`.
|
||||
pub fn num_items(&self) -> usize {
|
||||
self.maps.iter().map(|(_, map)| map.len()).sum()
|
||||
}
|
||||
|
||||
/// Returns an aggregated `T::Value` with the given `T::Data`, if any.
|
||||
pub fn get(&self, data: &T::Data) -> Option<T::Value> {
|
||||
self.maps
|
||||
.iter()
|
||||
.find(|(slot, _map)| **slot == data.slot)
|
||||
.map(|(_slot, map)| map.get(data))
|
||||
.unwrap_or_else(|| Ok(None))
|
||||
.get(&data.get_slot())
|
||||
.and_then(|map| map.get(data))
|
||||
}
|
||||
|
||||
/// Iterate all attestations in all slots of `self`.
|
||||
pub fn iter(&self) -> impl Iterator<Item = &Attestation<E>> {
|
||||
self.maps.iter().map(|(_slot, map)| map.iter()).flatten()
|
||||
/// Returns an aggregated `T::Value` with the given `slot` and `root`, if any.
|
||||
pub fn get_by_slot_and_root(&self, slot: Slot, root: &T::Key) -> Option<T::Value> {
|
||||
self.maps
|
||||
.get(&slot)
|
||||
.and_then(|map| map.get_by_root(root).cloned())
|
||||
}
|
||||
|
||||
/// Removes any attestations with a slot lower than `current_slot` and bars any future
|
||||
/// attestations with a slot lower than `current_slot - SLOTS_RETAINED`.
|
||||
/// Iterate all items in all slots of `self`.
|
||||
pub fn iter(&self) -> impl Iterator<Item = &T::Value> {
|
||||
self.maps.values().flat_map(|map| map.get_map().values())
|
||||
}
|
||||
|
||||
/// Removes any items with a slot lower than `current_slot` and bars any future
|
||||
/// items with a slot lower than `current_slot - SLOTS_RETAINED`.
|
||||
pub fn prune(&mut self, current_slot: Slot) {
|
||||
let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_PRUNE);
|
||||
let _timer = T::start_prune_timer();
|
||||
|
||||
// Taking advantage of saturating subtraction on `Slot`.
|
||||
let lowest_permissible_slot = current_slot - Slot::from(SLOTS_RETAINED);
|
||||
let lowest_permissible_slot = current_slot.saturating_sub(Slot::from(SLOTS_RETAINED));
|
||||
|
||||
// No need to prune if the lowest permissible slot has not changed and the queue length is
|
||||
// less than the maximum
|
||||
@@ -280,9 +470,10 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use ssz_types::BitList;
|
||||
use store::BitVector;
|
||||
use types::{
|
||||
test_utils::{generate_deterministic_keypair, test_random_instance},
|
||||
Fork, Hash256,
|
||||
Fork, Hash256, SyncCommitteeMessage,
|
||||
};
|
||||
|
||||
type E = types::MainnetEthSpec;
|
||||
@@ -294,7 +485,14 @@ mod tests {
|
||||
a
|
||||
}
|
||||
|
||||
fn sign(a: &mut Attestation<E>, i: usize, genesis_validators_root: Hash256) {
|
||||
fn get_sync_contribution(slot: Slot) -> SyncCommitteeContribution<E> {
|
||||
let mut a: SyncCommitteeContribution<E> = test_random_instance();
|
||||
a.slot = slot;
|
||||
a.aggregation_bits = BitVector::new();
|
||||
a
|
||||
}
|
||||
|
||||
fn sign_attestation(a: &mut Attestation<E>, i: usize, genesis_validators_root: Hash256) {
|
||||
a.sign(
|
||||
&generate_deterministic_keypair(i).sk,
|
||||
i,
|
||||
@@ -305,190 +503,294 @@ mod tests {
|
||||
.expect("should sign attestation");
|
||||
}
|
||||
|
||||
fn unset_bit(a: &mut Attestation<E>, i: usize) {
|
||||
fn sign_sync_contribution(
|
||||
a: &mut SyncCommitteeContribution<E>,
|
||||
i: usize,
|
||||
genesis_validators_root: Hash256,
|
||||
) {
|
||||
let sync_message = SyncCommitteeMessage::new::<E>(
|
||||
a.slot,
|
||||
a.beacon_block_root,
|
||||
i as u64,
|
||||
&generate_deterministic_keypair(i).sk,
|
||||
&Fork::default(),
|
||||
genesis_validators_root,
|
||||
&E::default_spec(),
|
||||
);
|
||||
let signed_contribution: SyncCommitteeContribution<E> =
|
||||
SyncCommitteeContribution::from_message(&sync_message, a.subcommittee_index, i)
|
||||
.unwrap();
|
||||
|
||||
a.aggregate(&signed_contribution);
|
||||
}
|
||||
|
||||
fn unset_attestation_bit(a: &mut Attestation<E>, i: usize) {
|
||||
a.aggregation_bits
|
||||
.set(i, false)
|
||||
.expect("should unset aggregation bit")
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_attestation() {
|
||||
let mut a = get_attestation(Slot::new(0));
|
||||
|
||||
let mut pool = NaiveAggregationPool::default();
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Err(Error::NoAggregationBitsSet),
|
||||
"should not accept attestation without any signatures"
|
||||
);
|
||||
|
||||
sign(&mut a, 0, Hash256::random());
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Ok(InsertOutcome::NewAttestationData { committee_index: 0 }),
|
||||
"should accept new attestation"
|
||||
);
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Ok(InsertOutcome::SignatureAlreadyKnown { committee_index: 0 }),
|
||||
"should acknowledge duplicate signature"
|
||||
);
|
||||
|
||||
let retrieved = pool
|
||||
.get(&a.data)
|
||||
.expect("should not error while getting attestation")
|
||||
.expect("should get an attestation");
|
||||
assert_eq!(
|
||||
retrieved, a,
|
||||
"retrieved attestation should equal the one inserted"
|
||||
);
|
||||
|
||||
sign(&mut a, 1, Hash256::random());
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Err(Error::MoreThanOneAggregationBitSet(2)),
|
||||
"should not accept attestation with multiple signatures"
|
||||
);
|
||||
fn unset_sync_contribution_bit(a: &mut SyncCommitteeContribution<E>, i: usize) {
|
||||
a.aggregation_bits
|
||||
.set(i, false)
|
||||
.expect("should unset aggregation bit")
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multiple_attestations() {
|
||||
let mut a_0 = get_attestation(Slot::new(0));
|
||||
let mut a_1 = a_0.clone();
|
||||
|
||||
let genesis_validators_root = Hash256::random();
|
||||
sign(&mut a_0, 0, genesis_validators_root);
|
||||
sign(&mut a_1, 1, genesis_validators_root);
|
||||
|
||||
let mut pool = NaiveAggregationPool::default();
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a_0),
|
||||
Ok(InsertOutcome::NewAttestationData { committee_index: 0 }),
|
||||
"should accept a_0"
|
||||
);
|
||||
assert_eq!(
|
||||
pool.insert(&a_1),
|
||||
Ok(InsertOutcome::SignatureAggregated { committee_index: 1 }),
|
||||
"should accept a_1"
|
||||
);
|
||||
|
||||
let retrieved = pool
|
||||
.get(&a_0.data)
|
||||
.expect("should not error while getting attestation")
|
||||
.expect("should get an attestation");
|
||||
|
||||
let mut a_01 = a_0.clone();
|
||||
a_01.aggregate(&a_1);
|
||||
|
||||
assert_eq!(
|
||||
retrieved, a_01,
|
||||
"retrieved attestation should be aggregated"
|
||||
);
|
||||
|
||||
/*
|
||||
* Throw a different attestation data in there and ensure it isn't aggregated
|
||||
*/
|
||||
|
||||
let mut a_different = a_0.clone();
|
||||
let different_root = Hash256::from_low_u64_be(1337);
|
||||
unset_bit(&mut a_different, 0);
|
||||
sign(&mut a_different, 2, genesis_validators_root);
|
||||
assert_ne!(a_different.data.beacon_block_root, different_root);
|
||||
a_different.data.beacon_block_root = different_root;
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a_different),
|
||||
Ok(InsertOutcome::NewAttestationData { committee_index: 2 }),
|
||||
"should accept a_different"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
pool.get(&a_0.data)
|
||||
.expect("should not error while getting attestation")
|
||||
.expect("should get an attestation"),
|
||||
retrieved,
|
||||
"should not have aggregated different attestation data"
|
||||
);
|
||||
fn mutate_attestation_block_root(a: &mut Attestation<E>, block_root: Hash256) {
|
||||
a.data.beacon_block_root = block_root
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn auto_pruning() {
|
||||
let mut base = get_attestation(Slot::new(0));
|
||||
sign(&mut base, 0, Hash256::random());
|
||||
fn mutate_attestation_slot(a: &mut Attestation<E>, slot: Slot) {
|
||||
a.data.slot = slot
|
||||
}
|
||||
|
||||
let mut pool = NaiveAggregationPool::default();
|
||||
fn attestation_block_root_comparator(a: &Attestation<E>, block_root: Hash256) -> bool {
|
||||
a.data.beacon_block_root == block_root
|
||||
}
|
||||
|
||||
for i in 0..SLOTS_RETAINED * 2 {
|
||||
let slot = Slot::from(i);
|
||||
let mut a = base.clone();
|
||||
a.data.slot = slot;
|
||||
fn key_from_attestation(a: &Attestation<E>) -> AttestationData {
|
||||
a.data.clone()
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Ok(InsertOutcome::NewAttestationData { committee_index: 0 }),
|
||||
"should accept new attestation"
|
||||
);
|
||||
fn mutate_sync_contribution_block_root(
|
||||
a: &mut SyncCommitteeContribution<E>,
|
||||
block_root: Hash256,
|
||||
) {
|
||||
a.beacon_block_root = block_root
|
||||
}
|
||||
|
||||
if i < SLOTS_RETAINED {
|
||||
let len = i + 1;
|
||||
assert_eq!(pool.maps.len(), len, "the pool should have length {}", len);
|
||||
} else {
|
||||
assert_eq!(
|
||||
pool.maps.len(),
|
||||
SLOTS_RETAINED,
|
||||
"the pool should have length SLOTS_RETAINED"
|
||||
);
|
||||
fn mutate_sync_contribution_slot(a: &mut SyncCommitteeContribution<E>, slot: Slot) {
|
||||
a.slot = slot
|
||||
}
|
||||
|
||||
let mut pool_slots = pool
|
||||
.maps
|
||||
.iter()
|
||||
.map(|(slot, _map)| *slot)
|
||||
.collect::<Vec<_>>();
|
||||
fn sync_contribution_block_root_comparator(
|
||||
a: &SyncCommitteeContribution<E>,
|
||||
block_root: Hash256,
|
||||
) -> bool {
|
||||
a.beacon_block_root == block_root
|
||||
}
|
||||
|
||||
pool_slots.sort_unstable();
|
||||
fn key_from_sync_contribution(a: &SyncCommitteeContribution<E>) -> SyncContributionData {
|
||||
SyncContributionData::from_contribution(a)
|
||||
}
|
||||
|
||||
macro_rules! test_suite {
|
||||
(
|
||||
$mod_name: ident,
|
||||
$get_method_name: ident,
|
||||
$sign_method_name: ident,
|
||||
$unset_method_name: ident,
|
||||
$block_root_mutator: ident,
|
||||
$slot_mutator: ident,
|
||||
$block_root_comparator: ident,
|
||||
$key_getter: ident,
|
||||
$map_type: ident,
|
||||
$item_limit: expr
|
||||
) => {
|
||||
#[cfg(test)]
|
||||
mod $mod_name {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn single_item() {
|
||||
let mut a = $get_method_name(Slot::new(0));
|
||||
|
||||
let mut pool: NaiveAggregationPool<$map_type<E>> =
|
||||
NaiveAggregationPool::default();
|
||||
|
||||
for (j, pool_slot) in pool_slots.iter().enumerate() {
|
||||
let expected_slot = slot - (SLOTS_RETAINED - 1 - j) as u64;
|
||||
assert_eq!(
|
||||
*pool_slot, expected_slot,
|
||||
"the slot of the map should be {}",
|
||||
expected_slot
|
||||
)
|
||||
pool.insert(&a),
|
||||
Err(Error::NoAggregationBitsSet),
|
||||
"should not accept item without any signatures"
|
||||
);
|
||||
|
||||
$sign_method_name(&mut a, 0, Hash256::random());
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Ok(InsertOutcome::NewItemInserted { committee_index: 0 }),
|
||||
"should accept new item"
|
||||
);
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Ok(InsertOutcome::SignatureAlreadyKnown { committee_index: 0 }),
|
||||
"should acknowledge duplicate signature"
|
||||
);
|
||||
|
||||
let retrieved = pool
|
||||
.get(&$key_getter(&a))
|
||||
.expect("should not error while getting item");
|
||||
assert_eq!(retrieved, a, "retrieved item should equal the one inserted");
|
||||
|
||||
$sign_method_name(&mut a, 1, Hash256::random());
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Err(Error::MoreThanOneAggregationBitSet(2)),
|
||||
"should not accept item with multiple signatures"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multiple_items() {
|
||||
let mut a_0 = $get_method_name(Slot::new(0));
|
||||
let mut a_1 = a_0.clone();
|
||||
|
||||
let genesis_validators_root = Hash256::random();
|
||||
$sign_method_name(&mut a_0, 0, genesis_validators_root);
|
||||
$sign_method_name(&mut a_1, 1, genesis_validators_root);
|
||||
|
||||
let mut pool: NaiveAggregationPool<$map_type<E>> =
|
||||
NaiveAggregationPool::default();
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a_0),
|
||||
Ok(InsertOutcome::NewItemInserted { committee_index: 0 }),
|
||||
"should accept a_0"
|
||||
);
|
||||
assert_eq!(
|
||||
pool.insert(&a_1),
|
||||
Ok(InsertOutcome::SignatureAggregated { committee_index: 1 }),
|
||||
"should accept a_1"
|
||||
);
|
||||
|
||||
let retrieved = pool
|
||||
.get(&$key_getter(&a_0))
|
||||
.expect("should not error while getting attestation");
|
||||
|
||||
let mut a_01 = a_0.clone();
|
||||
a_01.aggregate(&a_1);
|
||||
|
||||
assert_eq!(retrieved, a_01, "retrieved item should be aggregated");
|
||||
|
||||
/*
|
||||
* Throw different data in there and ensure it isn't aggregated
|
||||
*/
|
||||
|
||||
let mut a_different = a_0.clone();
|
||||
let different_root = Hash256::from_low_u64_be(1337);
|
||||
$unset_method_name(&mut a_different, 0);
|
||||
$sign_method_name(&mut a_different, 2, genesis_validators_root);
|
||||
assert!(!$block_root_comparator(&a_different, different_root));
|
||||
$block_root_mutator(&mut a_different, different_root);
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a_different),
|
||||
Ok(InsertOutcome::NewItemInserted { committee_index: 2 }),
|
||||
"should accept a_different"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
pool.get(&$key_getter(&a_0))
|
||||
.expect("should not error while getting item"),
|
||||
retrieved,
|
||||
"should not have aggregated different items with different data"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn auto_pruning_item() {
|
||||
let mut base = $get_method_name(Slot::new(0));
|
||||
$sign_method_name(&mut base, 0, Hash256::random());
|
||||
|
||||
let mut pool: NaiveAggregationPool<$map_type<E>> =
|
||||
NaiveAggregationPool::default();
|
||||
|
||||
for i in 0..SLOTS_RETAINED * 2 {
|
||||
let slot = Slot::from(i);
|
||||
let mut a = base.clone();
|
||||
$slot_mutator(&mut a, slot);
|
||||
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Ok(InsertOutcome::NewItemInserted { committee_index: 0 }),
|
||||
"should accept new item"
|
||||
);
|
||||
|
||||
if i < SLOTS_RETAINED {
|
||||
let len = i + 1;
|
||||
assert_eq!(pool.maps.len(), len, "the pool should have length {}", len);
|
||||
} else {
|
||||
assert_eq!(
|
||||
pool.maps.len(),
|
||||
SLOTS_RETAINED,
|
||||
"the pool should have length SLOTS_RETAINED"
|
||||
);
|
||||
|
||||
let mut pool_slots = pool
|
||||
.maps
|
||||
.iter()
|
||||
.map(|(slot, _map)| *slot)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
pool_slots.sort_unstable();
|
||||
|
||||
for (j, pool_slot) in pool_slots.iter().enumerate() {
|
||||
let expected_slot = slot - (SLOTS_RETAINED - 1 - j) as u64;
|
||||
assert_eq!(
|
||||
*pool_slot, expected_slot,
|
||||
"the slot of the map should be {}",
|
||||
expected_slot
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn max_items() {
|
||||
let mut base = $get_method_name(Slot::new(0));
|
||||
$sign_method_name(&mut base, 0, Hash256::random());
|
||||
|
||||
let mut pool: NaiveAggregationPool<$map_type<E>> =
|
||||
NaiveAggregationPool::default();
|
||||
|
||||
for i in 0..=$item_limit {
|
||||
let mut a = base.clone();
|
||||
$block_root_mutator(&mut a, Hash256::from_low_u64_be(i as u64));
|
||||
|
||||
if i < $item_limit {
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Ok(InsertOutcome::NewItemInserted { committee_index: 0 }),
|
||||
"should accept item below limit"
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Err(Error::ReachedMaxItemsPerSlot($item_limit)),
|
||||
"should not accept item above limit"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn max_attestations() {
|
||||
let mut base = get_attestation(Slot::new(0));
|
||||
sign(&mut base, 0, Hash256::random());
|
||||
test_suite! {
|
||||
attestation_tests,
|
||||
get_attestation,
|
||||
sign_attestation,
|
||||
unset_attestation_bit,
|
||||
mutate_attestation_block_root,
|
||||
mutate_attestation_slot,
|
||||
attestation_block_root_comparator,
|
||||
key_from_attestation,
|
||||
AggregatedAttestationMap,
|
||||
MAX_ATTESTATIONS_PER_SLOT
|
||||
}
|
||||
|
||||
let mut pool = NaiveAggregationPool::default();
|
||||
|
||||
for i in 0..=MAX_ATTESTATIONS_PER_SLOT {
|
||||
let mut a = base.clone();
|
||||
a.data.beacon_block_root = Hash256::from_low_u64_be(i as u64);
|
||||
|
||||
if i < MAX_ATTESTATIONS_PER_SLOT {
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Ok(InsertOutcome::NewAttestationData { committee_index: 0 }),
|
||||
"should accept attestation below limit"
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
pool.insert(&a),
|
||||
Err(Error::ReachedMaxAttestationsPerSlot(
|
||||
MAX_ATTESTATIONS_PER_SLOT
|
||||
)),
|
||||
"should not accept attestation above limit"
|
||||
);
|
||||
}
|
||||
}
|
||||
test_suite! {
|
||||
sync_contribution_tests,
|
||||
get_sync_contribution,
|
||||
sign_sync_contribution,
|
||||
unset_sync_contribution_bit,
|
||||
mutate_sync_contribution_block_root,
|
||||
mutate_sync_contribution_slot,
|
||||
sync_contribution_block_root_comparator,
|
||||
key_from_sync_contribution,
|
||||
SyncContributionAggregateMap,
|
||||
E::sync_committee_size()
|
||||
}
|
||||
}
|
||||
|
||||
507
beacon_node/beacon_chain/src/observed_aggregates.rs
Normal file
507
beacon_node/beacon_chain/src/observed_aggregates.rs
Normal file
@@ -0,0 +1,507 @@
|
||||
//! Provides an `ObservedAggregates` struct which allows us to reject aggregated attestations or
|
||||
//! sync committee contributions if we've already seen them.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::marker::PhantomData;
|
||||
use tree_hash::TreeHash;
|
||||
use types::consts::altair::{
|
||||
SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE,
|
||||
};
|
||||
use types::slot_data::SlotData;
|
||||
use types::{Attestation, EthSpec, Hash256, Slot, SyncCommitteeContribution};
|
||||
|
||||
pub type ObservedSyncContributions<E> = ObservedAggregates<SyncCommitteeContribution<E>, E>;
|
||||
pub type ObservedAggregateAttestations<E> = ObservedAggregates<Attestation<E>, E>;
|
||||
|
||||
/// A trait use to associate capacity constants with the type being stored in `ObservedAggregates`.
|
||||
pub trait Consts {
|
||||
/// The default capacity of items stored per slot, in a single `SlotHashSet`.
|
||||
const DEFAULT_PER_SLOT_CAPACITY: usize;
|
||||
|
||||
/// The maximum number of slots
|
||||
fn max_slot_capacity() -> usize;
|
||||
|
||||
/// The maximum number of items stored per slot, in a single `SlotHashSet`.
|
||||
fn max_per_slot_capacity() -> usize;
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Consts for Attestation<T> {
|
||||
/// Use 128 as it's the target committee size for the mainnet spec. This is perhaps a little
|
||||
/// wasteful for the minimal spec, but considering it's approx. 128 * 32 bytes we're not wasting
|
||||
/// much.
|
||||
const DEFAULT_PER_SLOT_CAPACITY: usize = 128;
|
||||
|
||||
/// We need to keep attestations for each slot of the current epoch.
|
||||
fn max_slot_capacity() -> usize {
|
||||
T::slots_per_epoch() as usize
|
||||
}
|
||||
|
||||
/// As a DoS protection measure, the maximum number of distinct `Attestations` or
|
||||
/// `SyncCommitteeContributions` that will be recorded for each slot.
|
||||
///
|
||||
/// Currently this is set to ~524k. If we say that each entry is 40 bytes (Hash256 (32 bytes) + an
|
||||
/// 8 byte hash) then this comes to about 20mb per slot. If we're storing 34 of these slots, then
|
||||
/// we're at 680mb. This is a lot of memory usage, but probably not a show-stopper for most
|
||||
/// reasonable hardware.
|
||||
///
|
||||
/// Upstream conditions should strongly restrict the amount of attestations that can show up in
|
||||
/// this pool. The maximum size with respect to upstream restrictions is more likely on the order
|
||||
/// of the number of validators.
|
||||
fn max_per_slot_capacity() -> usize {
|
||||
1 << 19 // 524,288
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Consts for SyncCommitteeContribution<T> {
|
||||
/// Set to `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE * SYNC_COMMITTEE_SUBNET_COUNT`. This is the
|
||||
/// expected number of aggregators per slot across all subcommittees.
|
||||
const DEFAULT_PER_SLOT_CAPACITY: usize =
|
||||
(SYNC_COMMITTEE_SUBNET_COUNT * TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE) as usize;
|
||||
|
||||
/// We only need to keep contributions related to the current slot.
|
||||
fn max_slot_capacity() -> usize {
|
||||
1
|
||||
}
|
||||
|
||||
/// We should never receive more aggregates than there are sync committee participants.
|
||||
fn max_per_slot_capacity() -> usize {
|
||||
T::sync_committee_size()
|
||||
}
|
||||
}
|
||||
|
||||
/// The result of attempting to observe an item's root in an `ObservedAggregates` container.
#[derive(Debug, PartialEq)]
pub enum ObserveOutcome {
    /// This item was already known.
    AlreadyKnown,
    /// This was the first time this item was observed.
    New,
}
|
||||
|
||||
/// Errors raised when observing or querying items in the pool.
#[derive(Debug, PartialEq)]
pub enum Error {
    /// The item's slot is below the lowest slot still tracked; it is too old to check.
    SlotTooLow {
        slot: Slot,
        lowest_permissible_slot: Slot,
    },
    /// The function to obtain a set index failed, this is an internal error.
    InvalidSetIndex(usize),
    /// We have reached the maximum number of unique items that can be observed in a slot.
    /// This is a DoS protection function.
    ReachedMaxObservationsPerSlot(usize),
    /// The item's slot did not match the slot of the `SlotHashSet` it was checked against.
    IncorrectSlot {
        expected: Slot,
        attestation: Slot,
    },
}
|
||||
|
||||
/// A `HashSet` that contains entries related to some `Slot`.
struct SlotHashSet {
    // The roots observed during `slot`.
    set: HashSet<Hash256>,
    // The slot that every entry in `set` relates to.
    slot: Slot,
    // Hard cap on `set.len()`; inserts beyond this are rejected (DoS protection).
    max_capacity: usize,
}
|
||||
|
||||
impl SlotHashSet {
|
||||
pub fn new(slot: Slot, initial_capacity: usize, max_capacity: usize) -> Self {
|
||||
Self {
|
||||
slot,
|
||||
set: HashSet::with_capacity(initial_capacity),
|
||||
max_capacity,
|
||||
}
|
||||
}
|
||||
|
||||
/// Store the items in self so future observations recognise its existence.
|
||||
pub fn observe_item<T: SlotData>(
|
||||
&mut self,
|
||||
item: &T,
|
||||
root: Hash256,
|
||||
) -> Result<ObserveOutcome, Error> {
|
||||
if item.get_slot() != self.slot {
|
||||
return Err(Error::IncorrectSlot {
|
||||
expected: self.slot,
|
||||
attestation: item.get_slot(),
|
||||
});
|
||||
}
|
||||
|
||||
if self.set.contains(&root) {
|
||||
Ok(ObserveOutcome::AlreadyKnown)
|
||||
} else {
|
||||
// Here we check to see if this slot has reached the maximum observation count.
|
||||
//
|
||||
// The resulting behaviour is that we are no longer able to successfully observe new
|
||||
// items, however we will continue to return `is_known` values. We could also
|
||||
// disable `is_known`, however then we would stop forwarding items across the
|
||||
// gossip network and I think that this is a worse case than sending some invalid ones.
|
||||
// The underlying libp2p network is responsible for removing duplicate messages, so
|
||||
// this doesn't risk a broadcast loop.
|
||||
if self.set.len() >= self.max_capacity {
|
||||
return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity));
|
||||
}
|
||||
|
||||
self.set.insert(root);
|
||||
|
||||
Ok(ObserveOutcome::New)
|
||||
}
|
||||
}
|
||||
|
||||
/// Indicates if `item` has been observed before.
|
||||
pub fn is_known<T: SlotData>(&self, item: &T, root: Hash256) -> Result<bool, Error> {
|
||||
if item.get_slot() != self.slot {
|
||||
return Err(Error::IncorrectSlot {
|
||||
expected: self.slot,
|
||||
attestation: item.get_slot(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(self.set.contains(&root))
|
||||
}
|
||||
|
||||
/// The number of observed items in `self`.
|
||||
pub fn len(&self) -> usize {
|
||||
self.set.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores the roots of objects for some number of `Slots`, so we can determine if
/// these have previously been seen on the network.
pub struct ObservedAggregates<T: TreeHash + SlotData + Consts, E: EthSpec> {
    // Items with a slot below this value are rejected as too old.
    lowest_permissible_slot: Slot,
    // One `SlotHashSet` per tracked slot, in no particular order.
    sets: Vec<SlotHashSet>,
    // Marker for the `EthSpec` type parameter; carries no runtime data.
    _phantom_spec: PhantomData<E>,
    // Marker tying this container to one item type (e.g. `Attestation`); carries no runtime data.
    _phantom_tree_hash: PhantomData<T>,
}
|
||||
|
||||
impl<T: TreeHash + SlotData + Consts, E: EthSpec> Default for ObservedAggregates<T, E> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
lowest_permissible_slot: Slot::new(0),
|
||||
sets: vec![],
|
||||
_phantom_spec: PhantomData,
|
||||
_phantom_tree_hash: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> {
|
||||
/// Store the root of `item` in `self`.
|
||||
///
|
||||
/// `root` must equal `item.tree_hash_root()`.
|
||||
pub fn observe_item(
|
||||
&mut self,
|
||||
item: &T,
|
||||
root_opt: Option<Hash256>,
|
||||
) -> Result<ObserveOutcome, Error> {
|
||||
let index = self.get_set_index(item.get_slot())?;
|
||||
let root = root_opt.unwrap_or_else(|| item.tree_hash_root());
|
||||
|
||||
self.sets
|
||||
.get_mut(index)
|
||||
.ok_or(Error::InvalidSetIndex(index))
|
||||
.and_then(|set| set.observe_item(item, root))
|
||||
}
|
||||
|
||||
/// Check to see if the `root` of `item` is in self.
|
||||
///
|
||||
/// `root` must equal `a.tree_hash_root()`.
|
||||
pub fn is_known(&mut self, item: &T, root: Hash256) -> Result<bool, Error> {
|
||||
let index = self.get_set_index(item.get_slot())?;
|
||||
|
||||
self.sets
|
||||
.get(index)
|
||||
.ok_or(Error::InvalidSetIndex(index))
|
||||
.and_then(|set| set.is_known(item, root))
|
||||
}
|
||||
|
||||
/// The maximum number of slots that items are stored for.
|
||||
fn max_capacity(&self) -> u64 {
|
||||
// We add `2` in order to account for one slot either side of the range due to
|
||||
// `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
|
||||
(T::max_slot_capacity() + 2) as u64
|
||||
}
|
||||
|
||||
/// Removes any items with a slot lower than `current_slot` and bars any future
|
||||
/// item with a slot lower than `current_slot - SLOTS_RETAINED`.
|
||||
pub fn prune(&mut self, current_slot: Slot) {
|
||||
let lowest_permissible_slot = current_slot.saturating_sub(self.max_capacity() - 1);
|
||||
|
||||
self.sets.retain(|set| set.slot >= lowest_permissible_slot);
|
||||
|
||||
self.lowest_permissible_slot = lowest_permissible_slot;
|
||||
}
|
||||
|
||||
/// Returns the index of `self.set` that matches `slot`.
|
||||
///
|
||||
/// If there is no existing set for this slot one will be created. If `self.sets.len() >=
|
||||
/// Self::max_capacity()`, the set with the lowest slot will be replaced.
|
||||
fn get_set_index(&mut self, slot: Slot) -> Result<usize, Error> {
|
||||
let lowest_permissible_slot = self.lowest_permissible_slot;
|
||||
|
||||
if slot < lowest_permissible_slot {
|
||||
return Err(Error::SlotTooLow {
|
||||
slot,
|
||||
lowest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
// Prune the pool if this item indicates that the current slot has advanced.
|
||||
if lowest_permissible_slot + self.max_capacity() < slot + 1 {
|
||||
self.prune(slot)
|
||||
}
|
||||
|
||||
if let Some(index) = self.sets.iter().position(|set| set.slot == slot) {
|
||||
return Ok(index);
|
||||
}
|
||||
|
||||
// To avoid re-allocations, try and determine a rough initial capacity for the new set
|
||||
// by obtaining the mean size of all items in earlier epoch.
|
||||
let (count, sum) = self
|
||||
.sets
|
||||
.iter()
|
||||
// Only include slots that are less than the given slot in the average. This should
|
||||
// generally avoid including recent slots that are still "filling up".
|
||||
.filter(|set| set.slot < slot)
|
||||
.map(|set| set.len())
|
||||
.fold((0, 0), |(count, sum), len| (count + 1, sum + len));
|
||||
// If we are unable to determine an average, just use the `self.default_per_slot_capacity`.
|
||||
let initial_capacity = sum
|
||||
.checked_div(count)
|
||||
.unwrap_or(T::DEFAULT_PER_SLOT_CAPACITY);
|
||||
|
||||
if self.sets.len() < self.max_capacity() as usize || self.sets.is_empty() {
|
||||
let index = self.sets.len();
|
||||
self.sets.push(SlotHashSet::new(
|
||||
slot,
|
||||
initial_capacity,
|
||||
T::max_per_slot_capacity(),
|
||||
));
|
||||
return Ok(index);
|
||||
}
|
||||
|
||||
let index = self
|
||||
.sets
|
||||
.iter()
|
||||
.enumerate()
|
||||
.min_by_key(|(_i, set)| set.slot)
|
||||
.map(|(i, _set)| i)
|
||||
.expect("sets cannot be empty due to previous .is_empty() check");
|
||||
|
||||
self.sets[index] = SlotHashSet::new(slot, initial_capacity, T::max_per_slot_capacity());
|
||||
|
||||
Ok(index)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[cfg(not(debug_assertions))]
mod tests {
    use super::*;
    use tree_hash::TreeHash;
    use types::{test_utils::test_random_instance, Hash256};

    type E = types::MainnetEthSpec;

    /// A random attestation with its slot and beacon block root pinned to the given values.
    fn get_attestation(slot: Slot, beacon_block_root: u64) -> Attestation<E> {
        let mut a: Attestation<E> = test_random_instance();
        a.data.slot = slot;
        a.data.beacon_block_root = Hash256::from_low_u64_be(beacon_block_root);
        a
    }

    /// A random sync contribution with its slot and beacon block root pinned to the given values.
    fn get_sync_contribution(slot: Slot, beacon_block_root: u64) -> SyncCommitteeContribution<E> {
        let mut a: SyncCommitteeContribution<E> = test_random_instance();
        a.slot = slot;
        a.beacon_block_root = Hash256::from_low_u64_be(beacon_block_root);
        a
    }

    // Generates an identical suite of tests for each observed-aggregates type, parameterised by
    // the container type and the item-constructor function above.
    macro_rules! test_suite {
        ($mod_name: ident, $type: ident, $method_name: ident) => {
            #[cfg(test)]
            mod $mod_name {
                use super::*;

                const NUM_ELEMENTS: usize = 8;

                /// Observe `NUM_ELEMENTS` distinct items at `slot`, checking unknown-then-known
                /// transitions for each.
                fn single_slot_test(store: &mut $type<E>, slot: Slot) {
                    let items = (0..NUM_ELEMENTS as u64)
                        .map(|i| $method_name(slot, i))
                        .collect::<Vec<_>>();

                    for a in &items {
                        assert_eq!(
                            store.is_known(a, a.tree_hash_root()),
                            Ok(false),
                            "should indicate an unknown attestation is unknown"
                        );
                        assert_eq!(
                            store.observe_item(a, None),
                            Ok(ObserveOutcome::New),
                            "should observe new attestation"
                        );
                    }

                    for a in &items {
                        assert_eq!(
                            store.is_known(a, a.tree_hash_root()),
                            Ok(true),
                            "should indicate a known attestation is known"
                        );
                        assert_eq!(
                            store.observe_item(a, Some(a.tree_hash_root())),
                            Ok(ObserveOutcome::AlreadyKnown),
                            "should acknowledge an existing attestation"
                        );
                    }
                }

                #[test]
                fn single_slot() {
                    let mut store = $type::default();

                    single_slot_test(&mut store, Slot::new(0));

                    assert_eq!(store.sets.len(), 1, "should have a single set stored");
                    assert_eq!(
                        store.sets[0].len(),
                        NUM_ELEMENTS,
                        "set should have NUM_ELEMENTS elements"
                    );
                }

                #[test]
                fn multiple_contiguous_slots() {
                    let mut store = $type::default();
                    let max_cap = store.max_capacity();

                    for i in 0..max_cap * 3 {
                        let slot = Slot::new(i);

                        single_slot_test(&mut store, slot);

                        /*
                         * Ensure that the number of sets is correct.
                         */

                        if i < max_cap {
                            assert_eq!(
                                store.sets.len(),
                                i as usize + 1,
                                "should have a {} sets stored",
                                i + 1
                            );
                        } else {
                            assert_eq!(
                                store.sets.len(),
                                max_cap as usize,
                                "should have max_capacity sets stored"
                            );
                        }

                        /*
                         * Ensure that each set contains the correct number of elements.
                         */

                        for set in &store.sets[..] {
                            assert_eq!(
                                set.len(),
                                NUM_ELEMENTS,
                                "each store should have NUM_ELEMENTS elements"
                            )
                        }

                        /*
                         * Ensure that all the sets have the expected slots
                         */

                        let mut store_slots =
                            store.sets.iter().map(|set| set.slot).collect::<Vec<_>>();

                        assert!(
                            store_slots.len() <= store.max_capacity() as usize,
                            "store size should not exceed max"
                        );

                        store_slots.sort_unstable();

                        let expected_slots = (i.saturating_sub(max_cap - 1)..=i)
                            .map(Slot::new)
                            .collect::<Vec<_>>();

                        assert_eq!(expected_slots, store_slots, "should have expected slots");
                    }
                }

                #[test]
                fn multiple_non_contiguous_slots() {
                    let mut store = $type::default();
                    let max_cap = store.max_capacity();

                    let to_skip = vec![1_u64, 2, 3, 5, 6, 29, 30, 31, 32, 64];
                    // `slots` is already filtered, so no per-iteration skip check is needed below.
                    let slots = (0..max_cap * 3)
                        .filter(|i| !to_skip.contains(i))
                        .collect::<Vec<_>>();

                    for &i in &slots {
                        let slot = Slot::from(i);

                        single_slot_test(&mut store, slot);

                        /*
                         * Ensure that each set contains the correct number of elements.
                         */

                        for set in &store.sets[..] {
                            assert_eq!(
                                set.len(),
                                NUM_ELEMENTS,
                                "each store should have NUM_ELEMENTS elements"
                            )
                        }

                        /*
                         * Ensure that all the sets have the expected slots
                         */

                        let mut store_slots =
                            store.sets.iter().map(|set| set.slot).collect::<Vec<_>>();

                        store_slots.sort_unstable();

                        assert!(
                            store_slots.len() <= store.max_capacity() as usize,
                            "store size should not exceed max"
                        );

                        let lowest = store.lowest_permissible_slot.as_u64();
                        let highest = slot.as_u64();
                        let expected_slots = (lowest..=highest)
                            .filter(|i| !to_skip.contains(i))
                            .map(Slot::new)
                            .collect::<Vec<_>>();

                        assert_eq!(
                            expected_slots,
                            &store_slots[..],
                            "should have expected slots"
                        );
                    }
                }
            }
        };
    }
    test_suite!(
        observed_sync_aggregates,
        ObservedSyncContributions,
        get_sync_contribution
    );
    test_suite!(
        observed_aggregate_attestations,
        ObservedAggregateAttestations,
        get_attestation
    );
}
|
||||
@@ -1,443 +0,0 @@
|
||||
//! Provides an `ObservedAttestations` struct which allows us to reject aggregated attestations if
|
||||
//! we've already seen the aggregated attestation.
|
||||
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::HashSet;
|
||||
use std::marker::PhantomData;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{Attestation, EthSpec, Hash256, Slot};
|
||||
|
||||
/// As a DoS protection measure, the maximum number of distinct `Attestations` that will be
|
||||
/// recorded for each slot.
|
||||
///
|
||||
/// Currently this is set to ~524k. If we say that each entry is 40 bytes (Hash256 (32 bytes) + an
|
||||
/// 8 byte hash) then this comes to about 20mb per slot. If we're storing 34 of these slots, then
|
||||
/// we're at 680mb. This is a lot of memory usage, but probably not a show-stopper for most
|
||||
/// reasonable hardware.
|
||||
///
|
||||
/// Upstream conditions should strongly restrict the amount of attestations that can show up in
|
||||
/// this pool. The maximum size with respect to upstream restrictions is more likely on the order
|
||||
/// of the number of validators.
|
||||
const MAX_OBSERVATIONS_PER_SLOT: usize = 1 << 19; // 524,288
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum ObserveOutcome {
|
||||
/// This attestation was already known.
|
||||
AlreadyKnown,
|
||||
/// This was the first time this attestation was observed.
|
||||
New,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
SlotTooLow {
|
||||
slot: Slot,
|
||||
lowest_permissible_slot: Slot,
|
||||
},
|
||||
/// The function to obtain a set index failed, this is an internal error.
|
||||
InvalidSetIndex(usize),
|
||||
/// We have reached the maximum number of unique `Attestation` that can be observed in a slot.
|
||||
/// This is a DoS protection function.
|
||||
ReachedMaxObservationsPerSlot(usize),
|
||||
IncorrectSlot {
|
||||
expected: Slot,
|
||||
attestation: Slot,
|
||||
},
|
||||
}
|
||||
|
||||
/// A `HashSet` that contains entries related to some `Slot`.
|
||||
struct SlotHashSet {
|
||||
set: HashSet<Hash256>,
|
||||
slot: Slot,
|
||||
}
|
||||
|
||||
impl SlotHashSet {
|
||||
pub fn new(slot: Slot, initial_capacity: usize) -> Self {
|
||||
Self {
|
||||
slot,
|
||||
set: HashSet::with_capacity(initial_capacity),
|
||||
}
|
||||
}
|
||||
|
||||
/// Store the attestation in self so future observations recognise its existence.
|
||||
pub fn observe_attestation<E: EthSpec>(
|
||||
&mut self,
|
||||
a: &Attestation<E>,
|
||||
root: Hash256,
|
||||
) -> Result<ObserveOutcome, Error> {
|
||||
if a.data.slot != self.slot {
|
||||
return Err(Error::IncorrectSlot {
|
||||
expected: self.slot,
|
||||
attestation: a.data.slot,
|
||||
});
|
||||
}
|
||||
|
||||
if self.set.contains(&root) {
|
||||
Ok(ObserveOutcome::AlreadyKnown)
|
||||
} else {
|
||||
// Here we check to see if this slot has reached the maximum observation count.
|
||||
//
|
||||
// The resulting behaviour is that we are no longer able to successfully observe new
|
||||
// attestations, however we will continue to return `is_known` values. We could also
|
||||
// disable `is_known`, however then we would stop forwarding attestations across the
|
||||
// gossip network and I think that this is a worse case than sending some invalid ones.
|
||||
// The underlying libp2p network is responsible for removing duplicate messages, so
|
||||
// this doesn't risk a broadcast loop.
|
||||
if self.set.len() >= MAX_OBSERVATIONS_PER_SLOT {
|
||||
return Err(Error::ReachedMaxObservationsPerSlot(
|
||||
MAX_OBSERVATIONS_PER_SLOT,
|
||||
));
|
||||
}
|
||||
|
||||
self.set.insert(root);
|
||||
|
||||
Ok(ObserveOutcome::New)
|
||||
}
|
||||
}
|
||||
|
||||
/// Indicates if `a` has been observed before.
|
||||
pub fn is_known<E: EthSpec>(&self, a: &Attestation<E>, root: Hash256) -> Result<bool, Error> {
|
||||
if a.data.slot != self.slot {
|
||||
return Err(Error::IncorrectSlot {
|
||||
expected: self.slot,
|
||||
attestation: a.data.slot,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(self.set.contains(&root))
|
||||
}
|
||||
|
||||
/// The number of observed attestations in `self`.
|
||||
pub fn len(&self) -> usize {
|
||||
self.set.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores the roots of `Attestation` objects for some number of `Slots`, so we can determine if
|
||||
/// these have previously been seen on the network.
|
||||
pub struct ObservedAttestations<E: EthSpec> {
|
||||
lowest_permissible_slot: RwLock<Slot>,
|
||||
sets: RwLock<Vec<SlotHashSet>>,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> Default for ObservedAttestations<E> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
lowest_permissible_slot: RwLock::new(Slot::new(0)),
|
||||
sets: RwLock::new(vec![]),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ObservedAttestations<E> {
|
||||
/// Store the root of `a` in `self`.
|
||||
///
|
||||
/// `root` must equal `a.tree_hash_root()`.
|
||||
pub fn observe_attestation(
|
||||
&self,
|
||||
a: &Attestation<E>,
|
||||
root_opt: Option<Hash256>,
|
||||
) -> Result<ObserveOutcome, Error> {
|
||||
let index = self.get_set_index(a.data.slot)?;
|
||||
let root = root_opt.unwrap_or_else(|| a.tree_hash_root());
|
||||
|
||||
self.sets
|
||||
.write()
|
||||
.get_mut(index)
|
||||
.ok_or_else(|| Error::InvalidSetIndex(index))
|
||||
.and_then(|set| set.observe_attestation(a, root))
|
||||
}
|
||||
|
||||
/// Check to see if the `root` of `a` is in self.
|
||||
///
|
||||
/// `root` must equal `a.tree_hash_root()`.
|
||||
pub fn is_known(&self, a: &Attestation<E>, root: Hash256) -> Result<bool, Error> {
|
||||
let index = self.get_set_index(a.data.slot)?;
|
||||
|
||||
self.sets
|
||||
.read()
|
||||
.get(index)
|
||||
.ok_or_else(|| Error::InvalidSetIndex(index))
|
||||
.and_then(|set| set.is_known(a, root))
|
||||
}
|
||||
|
||||
/// The maximum number of slots that attestations are stored for.
|
||||
fn max_capacity(&self) -> u64 {
|
||||
// We add `2` in order to account for one slot either side of the range due to
|
||||
// `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
|
||||
E::slots_per_epoch() + 2
|
||||
}
|
||||
|
||||
/// Removes any attestations with a slot lower than `current_slot` and bars any future
|
||||
/// attestations with a slot lower than `current_slot - SLOTS_RETAINED`.
|
||||
pub fn prune(&self, current_slot: Slot) {
|
||||
// Taking advantage of saturating subtraction on `Slot`.
|
||||
let lowest_permissible_slot = current_slot - (self.max_capacity() - 1);
|
||||
|
||||
self.sets
|
||||
.write()
|
||||
.retain(|set| set.slot >= lowest_permissible_slot);
|
||||
|
||||
*self.lowest_permissible_slot.write() = lowest_permissible_slot;
|
||||
}
|
||||
|
||||
/// Returns the index of `self.set` that matches `slot`.
|
||||
///
|
||||
/// If there is no existing set for this slot one will be created. If `self.sets.len() >=
|
||||
/// Self::max_capacity()`, the set with the lowest slot will be replaced.
|
||||
fn get_set_index(&self, slot: Slot) -> Result<usize, Error> {
|
||||
let lowest_permissible_slot: Slot = *self.lowest_permissible_slot.read();
|
||||
|
||||
if slot < lowest_permissible_slot {
|
||||
return Err(Error::SlotTooLow {
|
||||
slot,
|
||||
lowest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
// Prune the pool if this attestation indicates that the current slot has advanced.
|
||||
if lowest_permissible_slot + self.max_capacity() < slot + 1 {
|
||||
self.prune(slot)
|
||||
}
|
||||
|
||||
let mut sets = self.sets.write();
|
||||
|
||||
if let Some(index) = sets.iter().position(|set| set.slot == slot) {
|
||||
return Ok(index);
|
||||
}
|
||||
|
||||
// To avoid re-allocations, try and determine a rough initial capacity for the new set
|
||||
// by obtaining the mean size of all items in earlier epoch.
|
||||
let (count, sum) = sets
|
||||
.iter()
|
||||
// Only include slots that are less than the given slot in the average. This should
|
||||
// generally avoid including recent slots that are still "filling up".
|
||||
.filter(|set| set.slot < slot)
|
||||
.map(|set| set.len())
|
||||
.fold((0, 0), |(count, sum), len| (count + 1, sum + len));
|
||||
// If we are unable to determine an average, just use 128 as it's the target committee
|
||||
// size for the mainnet spec. This is perhaps a little wasteful for the minimal spec,
|
||||
// but considering it's approx. 128 * 32 bytes we're not wasting much.
|
||||
let initial_capacity = sum.checked_div(count).unwrap_or(128);
|
||||
|
||||
if sets.len() < self.max_capacity() as usize || sets.is_empty() {
|
||||
let index = sets.len();
|
||||
sets.push(SlotHashSet::new(slot, initial_capacity));
|
||||
return Ok(index);
|
||||
}
|
||||
|
||||
let index = sets
|
||||
.iter()
|
||||
.enumerate()
|
||||
.min_by_key(|(_i, set)| set.slot)
|
||||
.map(|(i, _set)| i)
|
||||
.expect("sets cannot be empty due to previous .is_empty() check");
|
||||
|
||||
sets[index] = SlotHashSet::new(slot, initial_capacity);
|
||||
|
||||
Ok(index)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[cfg(not(debug_assertions))]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{test_utils::test_random_instance, Hash256};
|
||||
|
||||
type E = types::MainnetEthSpec;
|
||||
|
||||
const NUM_ELEMENTS: usize = 8;
|
||||
|
||||
fn get_attestation(slot: Slot, beacon_block_root: u64) -> Attestation<E> {
|
||||
let mut a: Attestation<E> = test_random_instance();
|
||||
a.data.slot = slot;
|
||||
a.data.beacon_block_root = Hash256::from_low_u64_be(beacon_block_root);
|
||||
a
|
||||
}
|
||||
|
||||
fn single_slot_test(store: &ObservedAttestations<E>, slot: Slot) {
|
||||
let attestations = (0..NUM_ELEMENTS as u64)
|
||||
.map(|i| get_attestation(slot, i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for a in &attestations {
|
||||
assert_eq!(
|
||||
store.is_known(a, a.tree_hash_root()),
|
||||
Ok(false),
|
||||
"should indicate an unknown attestation is unknown"
|
||||
);
|
||||
assert_eq!(
|
||||
store.observe_attestation(a, None),
|
||||
Ok(ObserveOutcome::New),
|
||||
"should observe new attestation"
|
||||
);
|
||||
}
|
||||
|
||||
for a in &attestations {
|
||||
assert_eq!(
|
||||
store.is_known(a, a.tree_hash_root()),
|
||||
Ok(true),
|
||||
"should indicate a known attestation is known"
|
||||
);
|
||||
assert_eq!(
|
||||
store.observe_attestation(a, Some(a.tree_hash_root())),
|
||||
Ok(ObserveOutcome::AlreadyKnown),
|
||||
"should acknowledge an existing attestation"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_slot() {
|
||||
let store = ObservedAttestations::default();
|
||||
|
||||
single_slot_test(&store, Slot::new(0));
|
||||
|
||||
assert_eq!(
|
||||
store.sets.read().len(),
|
||||
1,
|
||||
"should have a single set stored"
|
||||
);
|
||||
assert_eq!(
|
||||
store.sets.read()[0].len(),
|
||||
NUM_ELEMENTS,
|
||||
"set should have NUM_ELEMENTS elements"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mulitple_contiguous_slots() {
|
||||
let store = ObservedAttestations::default();
|
||||
let max_cap = store.max_capacity();
|
||||
|
||||
for i in 0..max_cap * 3 {
|
||||
let slot = Slot::new(i);
|
||||
|
||||
single_slot_test(&store, slot);
|
||||
|
||||
/*
|
||||
* Ensure that the number of sets is correct.
|
||||
*/
|
||||
|
||||
if i < max_cap {
|
||||
assert_eq!(
|
||||
store.sets.read().len(),
|
||||
i as usize + 1,
|
||||
"should have a {} sets stored",
|
||||
i + 1
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
store.sets.read().len(),
|
||||
max_cap as usize,
|
||||
"should have max_capacity sets stored"
|
||||
);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that each set contains the correct number of elements.
|
||||
*/
|
||||
|
||||
for set in &store.sets.read()[..] {
|
||||
assert_eq!(
|
||||
set.len(),
|
||||
NUM_ELEMENTS,
|
||||
"each store should have NUM_ELEMENTS elements"
|
||||
)
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that all the sets have the expected slots
|
||||
*/
|
||||
|
||||
let mut store_slots = store
|
||||
.sets
|
||||
.read()
|
||||
.iter()
|
||||
.map(|set| set.slot)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert!(
|
||||
store_slots.len() <= store.max_capacity() as usize,
|
||||
"store size should not exceed max"
|
||||
);
|
||||
|
||||
store_slots.sort_unstable();
|
||||
|
||||
let expected_slots = (i.saturating_sub(max_cap - 1)..=i)
|
||||
.map(Slot::new)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert_eq!(expected_slots, store_slots, "should have expected slots");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mulitple_non_contiguous_slots() {
|
||||
let store = ObservedAttestations::default();
|
||||
let max_cap = store.max_capacity();
|
||||
|
||||
let to_skip = vec![1_u64, 2, 3, 5, 6, 29, 30, 31, 32, 64];
|
||||
let slots = (0..max_cap * 3)
|
||||
.into_iter()
|
||||
.filter(|i| !to_skip.contains(i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for &i in &slots {
|
||||
if to_skip.contains(&i) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let slot = Slot::from(i);
|
||||
|
||||
single_slot_test(&store, slot);
|
||||
|
||||
/*
|
||||
* Ensure that each set contains the correct number of elements.
|
||||
*/
|
||||
|
||||
for set in &store.sets.read()[..] {
|
||||
assert_eq!(
|
||||
set.len(),
|
||||
NUM_ELEMENTS,
|
||||
"each store should have NUM_ELEMENTS elements"
|
||||
)
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that all the sets have the expected slots
|
||||
*/
|
||||
|
||||
let mut store_slots = store
|
||||
.sets
|
||||
.read()
|
||||
.iter()
|
||||
.map(|set| set.slot)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
store_slots.sort_unstable();
|
||||
|
||||
assert!(
|
||||
store_slots.len() <= store.max_capacity() as usize,
|
||||
"store size should not exceed max"
|
||||
);
|
||||
|
||||
let lowest = store.lowest_permissible_slot.read().as_u64();
|
||||
let highest = slot.as_u64();
|
||||
let expected_slots = (lowest..=highest)
|
||||
.filter(|i| !to_skip.contains(i))
|
||||
.map(Slot::new)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert_eq!(
|
||||
expected_slots,
|
||||
&store_slots[..],
|
||||
"should have expected slots"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,15 +5,44 @@
|
||||
//! the same epoch.
|
||||
//! - `ObservedAggregators`: allows filtering aggregated attestations from the same aggregators in
|
||||
//! the same epoch
|
||||
//!
|
||||
//! Provides an additional two structs that help us filter out sync committee message and
|
||||
//! contribution gossip from validators that have already published messages this slot:
|
||||
//!
|
||||
//! - `ObservedSyncContributors`: allows filtering sync committee messages from the same validator in
|
||||
//! the same slot.
|
||||
//! - `ObservedSyncAggregators`: allows filtering sync committee contributions from the same aggregators in
|
||||
//! the same slot and in the same subcommittee.
|
||||
|
||||
use crate::types::consts::altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE;
|
||||
use bitvec::vec::BitVec;
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::hash::Hash;
|
||||
use std::marker::PhantomData;
|
||||
use types::{Attestation, Epoch, EthSpec, Unsigned};
|
||||
use types::slot_data::SlotData;
|
||||
use types::{Epoch, EthSpec, Slot, Unsigned};
|
||||
|
||||
pub type ObservedAttesters<E> = AutoPruningContainer<EpochBitfield, E>;
|
||||
pub type ObservedAggregators<E> = AutoPruningContainer<EpochHashSet, E>;
|
||||
/// The maximum capacity of the `AutoPruningEpochContainer`.
|
||||
///
|
||||
/// Fits the next, current and previous epochs. We require the next epoch due to the
|
||||
/// `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. We require the previous epoch since the specification
|
||||
/// declares:
|
||||
///
|
||||
/// ```ignore
|
||||
/// aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE
|
||||
/// >= current_slot >= aggregate.data.slot
|
||||
/// ```
|
||||
///
|
||||
/// This means that during the current epoch we will always accept an attestation
|
||||
/// from at least one slot in the previous epoch.
|
||||
pub const MAX_CACHED_EPOCHS: u64 = 3;
|
||||
|
||||
pub type ObservedAttesters<E> = AutoPruningEpochContainer<EpochBitfield, E>;
|
||||
pub type ObservedSyncContributors<E> =
|
||||
AutoPruningSlotContainer<SlotSubcommitteeIndex, SyncContributorSlotHashSet<E>, E>;
|
||||
pub type ObservedAggregators<E> = AutoPruningEpochContainer<EpochHashSet, E>;
|
||||
pub type ObservedSyncAggregators<E> =
|
||||
AutoPruningSlotContainer<SlotSubcommitteeIndex, SyncAggregatorSlotHashSet, E>;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
@@ -21,7 +50,11 @@ pub enum Error {
|
||||
epoch: Epoch,
|
||||
lowest_permissible_epoch: Epoch,
|
||||
},
|
||||
/// We have reached the maximum number of unique `Attestation` that can be observed in a slot.
|
||||
SlotTooLow {
|
||||
slot: Slot,
|
||||
lowest_permissible_slot: Slot,
|
||||
},
|
||||
/// We have reached the maximum number of unique items that can be observed in a slot.
|
||||
/// This is a DoS protection function.
|
||||
ReachedMaxObservationsPerSlot(usize),
|
||||
/// The function to obtain a set index failed, this is an internal error.
|
||||
@@ -49,7 +82,8 @@ pub trait Item {
|
||||
fn contains(&self, validator_index: usize) -> bool;
|
||||
}
|
||||
|
||||
/// Stores a `BitVec` that represents which validator indices have attested during an epoch.
|
||||
/// Stores a `BitVec` that represents which validator indices have attested or sent sync committee
|
||||
/// signatures during an epoch.
|
||||
pub struct EpochBitfield {
|
||||
bitfield: BitVec,
|
||||
}
|
||||
@@ -100,7 +134,7 @@ impl Item for EpochBitfield {
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores a `HashSet` of which validator indices have created an aggregate attestation during an
|
||||
/// Stores a `HashSet` of which validator indices have created an aggregate during an
|
||||
/// epoch.
|
||||
pub struct EpochHashSet {
|
||||
set: HashSet<usize>,
|
||||
@@ -139,6 +173,84 @@ impl Item for EpochHashSet {
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores a `HashSet` of which validator indices have created a sync aggregate during a
|
||||
/// slot.
|
||||
pub struct SyncContributorSlotHashSet<E> {
|
||||
set: HashSet<usize>,
|
||||
phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> Item for SyncContributorSlotHashSet<E> {
|
||||
fn with_capacity(capacity: usize) -> Self {
|
||||
Self {
|
||||
set: HashSet::with_capacity(capacity),
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Defaults to the `SYNC_SUBCOMMITTEE_SIZE`.
|
||||
fn default_capacity() -> usize {
|
||||
E::sync_subcommittee_size()
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.set.len()
|
||||
}
|
||||
|
||||
fn validator_count(&self) -> usize {
|
||||
self.set.len()
|
||||
}
|
||||
|
||||
/// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was
|
||||
/// already in the set.
|
||||
fn insert(&mut self, validator_index: usize) -> bool {
|
||||
!self.set.insert(validator_index)
|
||||
}
|
||||
|
||||
/// Returns `true` if the `validator_index` is in the set.
|
||||
fn contains(&self, validator_index: usize) -> bool {
|
||||
self.set.contains(&validator_index)
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores a `HashSet` of which validator indices have created a sync aggregate during a
|
||||
/// slot.
|
||||
pub struct SyncAggregatorSlotHashSet {
|
||||
set: HashSet<usize>,
|
||||
}
|
||||
|
||||
impl Item for SyncAggregatorSlotHashSet {
|
||||
fn with_capacity(capacity: usize) -> Self {
|
||||
Self {
|
||||
set: HashSet::with_capacity(capacity),
|
||||
}
|
||||
}
|
||||
|
||||
/// Defaults to the `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE`.
|
||||
fn default_capacity() -> usize {
|
||||
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE as usize
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.set.len()
|
||||
}
|
||||
|
||||
fn validator_count(&self) -> usize {
|
||||
self.set.len()
|
||||
}
|
||||
|
||||
/// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was
|
||||
/// already in the set.
|
||||
fn insert(&mut self, validator_index: usize) -> bool {
|
||||
!self.set.insert(validator_index)
|
||||
}
|
||||
|
||||
/// Returns `true` if the `validator_index` is in the set.
|
||||
fn contains(&self, validator_index: usize) -> bool {
|
||||
self.set.contains(&validator_index)
|
||||
}
|
||||
}
|
||||
|
||||
/// A container that stores some number of `T` items.
|
||||
///
|
||||
/// This container is "auto-pruning" since it gets an idea of the current slot by which
|
||||
@@ -147,49 +259,46 @@ impl Item for EpochHashSet {
|
||||
/// attestations with an epoch prior to `a.data.target.epoch - 32` will be cleared from the cache.
|
||||
///
|
||||
/// `T` should be set to a `EpochBitfield` or `EpochHashSet`.
|
||||
pub struct AutoPruningContainer<T, E: EthSpec> {
|
||||
lowest_permissible_epoch: RwLock<Epoch>,
|
||||
items: RwLock<HashMap<Epoch, T>>,
|
||||
pub struct AutoPruningEpochContainer<T, E: EthSpec> {
|
||||
lowest_permissible_epoch: Epoch,
|
||||
items: HashMap<Epoch, T>,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<T, E: EthSpec> Default for AutoPruningContainer<T, E> {
|
||||
impl<T, E: EthSpec> Default for AutoPruningEpochContainer<T, E> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
lowest_permissible_epoch: RwLock::new(Epoch::new(0)),
|
||||
items: RwLock::new(HashMap::new()),
|
||||
lowest_permissible_epoch: Epoch::new(0),
|
||||
items: HashMap::new(),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Item, E: EthSpec> AutoPruningContainer<T, E> {
|
||||
impl<T: Item, E: EthSpec> AutoPruningEpochContainer<T, E> {
|
||||
/// Observe that `validator_index` has produced attestation `a`. Returns `Ok(true)` if `a` has
|
||||
/// previously been observed for `validator_index`.
|
||||
///
|
||||
/// ## Errors
|
||||
///
|
||||
/// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`.
|
||||
/// - `a.data.target.slot` is earlier than `self.earliest_permissible_slot`.
|
||||
/// - `a.data.target.slot` is earlier than `self.lowest_permissible_slot`.
|
||||
pub fn observe_validator(
|
||||
&self,
|
||||
a: &Attestation<E>,
|
||||
&mut self,
|
||||
epoch: Epoch,
|
||||
validator_index: usize,
|
||||
) -> Result<bool, Error> {
|
||||
self.sanitize_request(a, validator_index)?;
|
||||
|
||||
let epoch = a.data.target.epoch;
|
||||
self.sanitize_request(epoch, validator_index)?;
|
||||
|
||||
self.prune(epoch);
|
||||
|
||||
let mut items = self.items.write();
|
||||
|
||||
if let Some(item) = items.get_mut(&epoch) {
|
||||
if let Some(item) = self.items.get_mut(&epoch) {
|
||||
Ok(item.insert(validator_index))
|
||||
} else {
|
||||
// To avoid re-allocations, try and determine a rough initial capacity for the new item
|
||||
// by obtaining the mean size of all items in earlier epoch.
|
||||
let (count, sum) = items
|
||||
let (count, sum) = self
|
||||
.items
|
||||
.iter()
|
||||
// Only include epochs that are less than the given slot in the average. This should
|
||||
// generally avoid including recent epochs that are still "filling up".
|
||||
@@ -201,7 +310,7 @@ impl<T: Item, E: EthSpec> AutoPruningContainer<T, E> {
|
||||
|
||||
let mut item = T::with_capacity(initial_capacity);
|
||||
item.insert(validator_index);
|
||||
items.insert(epoch, item);
|
||||
self.items.insert(epoch, item);
|
||||
|
||||
Ok(false)
|
||||
}
|
||||
@@ -213,18 +322,17 @@ impl<T: Item, E: EthSpec> AutoPruningContainer<T, E> {
|
||||
/// ## Errors
|
||||
///
|
||||
/// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`.
|
||||
/// - `a.data.target.slot` is earlier than `self.earliest_permissible_slot`.
|
||||
/// - `a.data.target.slot` is earlier than `self.lowest_permissible_slot`.
|
||||
pub fn validator_has_been_observed(
|
||||
&self,
|
||||
a: &Attestation<E>,
|
||||
epoch: Epoch,
|
||||
validator_index: usize,
|
||||
) -> Result<bool, Error> {
|
||||
self.sanitize_request(a, validator_index)?;
|
||||
self.sanitize_request(epoch, validator_index)?;
|
||||
|
||||
let exists = self
|
||||
.items
|
||||
.read()
|
||||
.get(&a.data.target.epoch)
|
||||
.get(&epoch)
|
||||
.map_or(false, |item| item.contains(validator_index));
|
||||
|
||||
Ok(exists)
|
||||
@@ -233,19 +341,15 @@ impl<T: Item, E: EthSpec> AutoPruningContainer<T, E> {
|
||||
/// Returns the number of validators that have been observed at the given `epoch`. Returns
|
||||
/// `None` if `self` does not have a cache for that epoch.
|
||||
pub fn observed_validator_count(&self, epoch: Epoch) -> Option<usize> {
|
||||
self.items
|
||||
.read()
|
||||
.get(&epoch)
|
||||
.map(|item| item.validator_count())
|
||||
self.items.get(&epoch).map(|item| item.validator_count())
|
||||
}
|
||||
|
||||
fn sanitize_request(&self, a: &Attestation<E>, validator_index: usize) -> Result<(), Error> {
|
||||
fn sanitize_request(&self, epoch: Epoch, validator_index: usize) -> Result<(), Error> {
|
||||
if validator_index > E::ValidatorRegistryLimit::to_usize() {
|
||||
return Err(Error::ValidatorIndexTooHigh(validator_index));
|
||||
}
|
||||
|
||||
let epoch = a.data.target.epoch;
|
||||
let lowest_permissible_epoch: Epoch = *self.lowest_permissible_epoch.read();
|
||||
let lowest_permissible_epoch = self.lowest_permissible_epoch;
|
||||
if epoch < lowest_permissible_epoch {
|
||||
return Err(Error::EpochTooLow {
|
||||
epoch,
|
||||
@@ -258,11 +362,7 @@ impl<T: Item, E: EthSpec> AutoPruningContainer<T, E> {
|
||||
|
||||
/// The maximum number of epochs stored in `self`.
|
||||
fn max_capacity(&self) -> u64 {
|
||||
// The current epoch and the previous epoch. This is sufficient whilst
|
||||
// GOSSIP_CLOCK_DISPARITY is 1/2 a slot or less:
|
||||
//
|
||||
// https://github.com/ethereum/eth2.0-specs/pull/1706#issuecomment-610151808
|
||||
2
|
||||
MAX_CACHED_EPOCHS
|
||||
}
|
||||
|
||||
/// Updates `self` with the current epoch, removing all attestations that become expired
|
||||
@@ -270,90 +370,251 @@ impl<T: Item, E: EthSpec> AutoPruningContainer<T, E> {
|
||||
///
|
||||
/// Also sets `self.lowest_permissible_epoch` with relation to `current_epoch` and
|
||||
/// `Self::max_capacity`.
|
||||
pub fn prune(&self, current_epoch: Epoch) {
|
||||
// Taking advantage of saturating subtraction on `Slot`.
|
||||
let lowest_permissible_epoch = current_epoch - (self.max_capacity().saturating_sub(1));
|
||||
pub fn prune(&mut self, current_epoch: Epoch) {
|
||||
let lowest_permissible_epoch =
|
||||
current_epoch.saturating_sub(self.max_capacity().saturating_sub(1));
|
||||
|
||||
*self.lowest_permissible_epoch.write() = lowest_permissible_epoch;
|
||||
self.lowest_permissible_epoch = lowest_permissible_epoch;
|
||||
|
||||
self.items
|
||||
.write()
|
||||
.retain(|epoch, _item| *epoch >= lowest_permissible_epoch);
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
/// Returns the `lowest_permissible_epoch`. Used in tests.
|
||||
pub(crate) fn get_lowest_permissible(&self) -> Epoch {
|
||||
self.lowest_permissible_epoch
|
||||
}
|
||||
|
||||
/// Returns `true` if the given `index` has been stored in `self` at `epoch`.
|
||||
///
|
||||
/// This is useful for doppelganger detection.
|
||||
pub fn index_seen_at_epoch(&self, index: usize, epoch: Epoch) -> bool {
|
||||
self.items
|
||||
.get(&epoch)
|
||||
.map(|item| item.contains(index))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// A container that stores some number of `V` items.
|
||||
///
|
||||
/// This container is "auto-pruning" since it gets an idea of the current slot by which
|
||||
/// sync contributions are provided to it and prunes old entries based upon that. For example, if
|
||||
/// `Self::max_capacity == 3` and an attestation with `data.slot` is supplied, then all
|
||||
/// sync contributions with an epoch prior to `data.slot - 3` will be cleared from the cache.
|
||||
///
|
||||
/// `V` should be set to a `SyncAggregatorSlotHashSet` or a `SyncContributorSlotHashSet`.
|
||||
pub struct AutoPruningSlotContainer<K: SlotData + Eq + Hash, V, E: EthSpec> {
|
||||
lowest_permissible_slot: Slot,
|
||||
items: HashMap<K, V>,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<K: SlotData + Eq + Hash, V, E: EthSpec> Default for AutoPruningSlotContainer<K, V, E> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
lowest_permissible_slot: Slot::new(0),
|
||||
items: HashMap::new(),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: SlotData + Eq + Hash, V: Item, E: EthSpec> AutoPruningSlotContainer<K, V, E> {
|
||||
/// Observe that `validator_index` has produced a sync committee message. Returns `Ok(true)` if
|
||||
/// the sync committee message has previously been observed for `validator_index`.
|
||||
///
|
||||
/// ## Errors
|
||||
///
|
||||
/// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`.
|
||||
/// - `key.slot` is earlier than `self.lowest_permissible_slot`.
|
||||
pub fn observe_validator(&mut self, key: K, validator_index: usize) -> Result<bool, Error> {
|
||||
let slot = key.get_slot();
|
||||
self.sanitize_request(slot, validator_index)?;
|
||||
|
||||
self.prune(slot);
|
||||
|
||||
if let Some(item) = self.items.get_mut(&key) {
|
||||
Ok(item.insert(validator_index))
|
||||
} else {
|
||||
// To avoid re-allocations, try and determine a rough initial capacity for the new item
|
||||
// by obtaining the mean size of all items in earlier slot.
|
||||
let (count, sum) = self
|
||||
.items
|
||||
.iter()
|
||||
// Only include slots that are less than the given slot in the average. This should
|
||||
// generally avoid including recent slots that are still "filling up".
|
||||
.filter(|(item_key, _item)| item_key.get_slot() < slot)
|
||||
.map(|(_, item)| item.len())
|
||||
.fold((0, 0), |(count, sum), len| (count + 1, sum + len));
|
||||
|
||||
let initial_capacity = sum.checked_div(count).unwrap_or_else(V::default_capacity);
|
||||
|
||||
let mut item = V::with_capacity(initial_capacity);
|
||||
item.insert(validator_index);
|
||||
self.items.insert(key, item);
|
||||
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `Ok(true)` if the `validator_index` has already produced a conflicting sync committee message.
|
||||
///
|
||||
/// ## Errors
|
||||
///
|
||||
/// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`.
|
||||
/// - `key.slot` is earlier than `self.lowest_permissible_slot`.
|
||||
pub fn validator_has_been_observed(
|
||||
&self,
|
||||
key: K,
|
||||
validator_index: usize,
|
||||
) -> Result<bool, Error> {
|
||||
self.sanitize_request(key.get_slot(), validator_index)?;
|
||||
|
||||
let exists = self
|
||||
.items
|
||||
.get(&key)
|
||||
.map_or(false, |item| item.contains(validator_index));
|
||||
|
||||
Ok(exists)
|
||||
}
|
||||
|
||||
/// Returns the number of validators that have been observed at the given `slot`. Returns
|
||||
/// `None` if `self` does not have a cache for that slot.
|
||||
pub fn observed_validator_count(&self, key: K) -> Option<usize> {
|
||||
self.items.get(&key).map(|item| item.validator_count())
|
||||
}
|
||||
|
||||
fn sanitize_request(&self, slot: Slot, validator_index: usize) -> Result<(), Error> {
|
||||
if validator_index > E::ValidatorRegistryLimit::to_usize() {
|
||||
return Err(Error::ValidatorIndexTooHigh(validator_index));
|
||||
}
|
||||
|
||||
let lowest_permissible_slot = self.lowest_permissible_slot;
|
||||
if slot < lowest_permissible_slot {
|
||||
return Err(Error::SlotTooLow {
|
||||
slot,
|
||||
lowest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// The maximum number of slots stored in `self`.
|
||||
fn max_capacity(&self) -> u64 {
|
||||
// The next, current and previous slots. We require the next slot due to the
|
||||
// `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
|
||||
3
|
||||
}
|
||||
|
||||
/// Updates `self` with the current slot, removing all sync committee messages that become expired
|
||||
/// relative to `Self::max_capacity`.
|
||||
///
|
||||
/// Also sets `self.lowest_permissible_slot` with relation to `current_slot` and
|
||||
/// `Self::max_capacity`.
|
||||
pub fn prune(&mut self, current_slot: Slot) {
|
||||
let lowest_permissible_slot =
|
||||
current_slot.saturating_sub(self.max_capacity().saturating_sub(1));
|
||||
|
||||
self.lowest_permissible_slot = lowest_permissible_slot;
|
||||
|
||||
self.items
|
||||
.retain(|key, _item| key.get_slot() >= lowest_permissible_slot);
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
/// Returns the `lowest_permissible_slot`. Used in tests.
|
||||
pub(crate) fn get_lowest_permissible(&self) -> Slot {
|
||||
self.lowest_permissible_slot
|
||||
}
|
||||
}
|
||||
|
||||
/// This is used to key information about sync committee aggregators. We require the
|
||||
/// `subcommittee_index` because it is possible that a validator can aggregate for multiple
|
||||
/// subcommittees in the same slot.
|
||||
#[derive(Eq, PartialEq, Hash, Clone, Copy, PartialOrd, Ord, Debug)]
|
||||
pub struct SlotSubcommitteeIndex {
|
||||
slot: Slot,
|
||||
subcommittee_index: u64,
|
||||
}
|
||||
|
||||
impl SlotData for SlotSubcommitteeIndex {
|
||||
fn get_slot(&self) -> Slot {
|
||||
self.slot
|
||||
}
|
||||
}
|
||||
|
||||
impl SlotSubcommitteeIndex {
|
||||
pub fn new(slot: Slot, subcommittee_index: u64) -> Self {
|
||||
Self {
|
||||
slot,
|
||||
subcommittee_index,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
macro_rules! test_suite {
|
||||
type E = types::MainnetEthSpec;
|
||||
|
||||
macro_rules! test_suite_epoch {
|
||||
($mod_name: ident, $type: ident) => {
|
||||
#[cfg(test)]
|
||||
mod $mod_name {
|
||||
use super::*;
|
||||
use types::test_utils::test_random_instance;
|
||||
|
||||
type E = types::MainnetEthSpec;
|
||||
fn single_period_test(store: &mut $type<E>, period: Epoch) {
|
||||
let validator_indices = [0, 1, 2, 3, 5, 6, 7, 18, 22];
|
||||
|
||||
fn get_attestation(epoch: Epoch) -> Attestation<E> {
|
||||
let mut a: Attestation<E> = test_random_instance();
|
||||
a.data.target.epoch = epoch;
|
||||
a
|
||||
}
|
||||
|
||||
fn single_epoch_test(store: &$type<E>, epoch: Epoch) {
|
||||
let attesters = [0, 1, 2, 3, 5, 6, 7, 18, 22];
|
||||
let a = &get_attestation(epoch);
|
||||
|
||||
for &i in &attesters {
|
||||
for &i in &validator_indices {
|
||||
assert_eq!(
|
||||
store.validator_has_been_observed(a, i),
|
||||
store.validator_has_been_observed(period, i),
|
||||
Ok(false),
|
||||
"should indicate an unknown attestation is unknown"
|
||||
"should indicate an unknown item is unknown"
|
||||
);
|
||||
assert_eq!(
|
||||
store.observe_validator(a, i),
|
||||
store.observe_validator(period, i),
|
||||
Ok(false),
|
||||
"should observe new attestation"
|
||||
"should observe new item"
|
||||
);
|
||||
}
|
||||
|
||||
for &i in &attesters {
|
||||
for &i in &validator_indices {
|
||||
assert_eq!(
|
||||
store.validator_has_been_observed(a, i),
|
||||
store.validator_has_been_observed(period, i),
|
||||
Ok(true),
|
||||
"should indicate a known attestation is known"
|
||||
"should indicate a known item is known"
|
||||
);
|
||||
assert_eq!(
|
||||
store.observe_validator(a, i),
|
||||
store.observe_validator(period, i),
|
||||
Ok(true),
|
||||
"should acknowledge an existing attestation"
|
||||
"should acknowledge an existing item"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_epoch() {
|
||||
let store = $type::default();
|
||||
fn single_period() {
|
||||
let mut store = $type::default();
|
||||
|
||||
single_epoch_test(&store, Epoch::new(0));
|
||||
single_period_test(&mut store, Epoch::new(0));
|
||||
|
||||
assert_eq!(
|
||||
store.items.read().len(),
|
||||
1,
|
||||
"should have a single bitfield stored"
|
||||
);
|
||||
assert_eq!(store.items.len(), 1, "should have a single bitfield stored");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mulitple_contiguous_epochs() {
|
||||
let store = $type::default();
|
||||
fn mulitple_contiguous_periods() {
|
||||
let mut store = $type::default();
|
||||
let max_cap = store.max_capacity();
|
||||
|
||||
for i in 0..max_cap * 3 {
|
||||
let epoch = Epoch::new(i);
|
||||
let period = Epoch::new(i);
|
||||
|
||||
single_epoch_test(&store, epoch);
|
||||
single_period_test(&mut store, period);
|
||||
|
||||
/*
|
||||
* Ensure that the number of sets is correct.
|
||||
@@ -361,14 +622,14 @@ mod tests {
|
||||
|
||||
if i < max_cap {
|
||||
assert_eq!(
|
||||
store.items.read().len(),
|
||||
store.items.len(),
|
||||
i as usize + 1,
|
||||
"should have a {} items stored",
|
||||
i + 1
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
store.items.read().len(),
|
||||
store.items.len(),
|
||||
max_cap as usize,
|
||||
"should have max_capacity items stored"
|
||||
);
|
||||
@@ -378,76 +639,77 @@ mod tests {
|
||||
* Ensure that all the sets have the expected slots
|
||||
*/
|
||||
|
||||
let mut store_epochs = store
|
||||
let mut store_periods = store
|
||||
.items
|
||||
.read()
|
||||
.iter()
|
||||
.map(|(epoch, _set)| *epoch)
|
||||
.map(|(period, _set)| *period)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert!(
|
||||
store_epochs.len() <= store.max_capacity() as usize,
|
||||
store_periods.len() <= store.max_capacity() as usize,
|
||||
"store size should not exceed max"
|
||||
);
|
||||
|
||||
store_epochs.sort_unstable();
|
||||
store_periods.sort_unstable();
|
||||
|
||||
let expected_epochs = (i.saturating_sub(max_cap - 1)..=i)
|
||||
let expected_periods = (i.saturating_sub(max_cap - 1)..=i)
|
||||
.map(Epoch::new)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert_eq!(expected_epochs, store_epochs, "should have expected slots");
|
||||
assert_eq!(
|
||||
expected_periods, store_periods,
|
||||
"should have expected slots"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mulitple_non_contiguous_epochs() {
|
||||
let store = $type::default();
|
||||
fn mulitple_non_contiguous_periods() {
|
||||
let mut store = $type::default();
|
||||
let max_cap = store.max_capacity();
|
||||
|
||||
let to_skip = vec![1_u64, 3, 4, 5];
|
||||
let epochs = (0..max_cap * 3)
|
||||
let periods = (0..max_cap * 3)
|
||||
.into_iter()
|
||||
.filter(|i| !to_skip.contains(i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for &i in &epochs {
|
||||
for &i in &periods {
|
||||
if to_skip.contains(&i) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let epoch = Epoch::from(i);
|
||||
let period = Epoch::from(i);
|
||||
|
||||
single_epoch_test(&store, epoch);
|
||||
single_period_test(&mut store, period);
|
||||
|
||||
/*
|
||||
* Ensure that all the sets have the expected slots
|
||||
*/
|
||||
|
||||
let mut store_epochs = store
|
||||
let mut store_periods = store
|
||||
.items
|
||||
.read()
|
||||
.iter()
|
||||
.map(|(epoch, _)| *epoch)
|
||||
.map(|(period, _)| *period)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
store_epochs.sort_unstable();
|
||||
store_periods.sort_unstable();
|
||||
|
||||
assert!(
|
||||
store_epochs.len() <= store.max_capacity() as usize,
|
||||
store_periods.len() <= store.max_capacity() as usize,
|
||||
"store size should not exceed max"
|
||||
);
|
||||
|
||||
let lowest = store.lowest_permissible_epoch.read().as_u64();
|
||||
let highest = epoch.as_u64();
|
||||
let expected_epochs = (lowest..=highest)
|
||||
let lowest = store.get_lowest_permissible().as_u64();
|
||||
let highest = period.as_u64();
|
||||
let expected_periods = (lowest..=highest)
|
||||
.filter(|i| !to_skip.contains(i))
|
||||
.map(Epoch::new)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert_eq!(
|
||||
expected_epochs,
|
||||
&store_epochs[..],
|
||||
expected_periods,
|
||||
&store_periods[..],
|
||||
"should have expected epochs"
|
||||
);
|
||||
}
|
||||
@@ -456,6 +718,285 @@ mod tests {
|
||||
};
|
||||
}
|
||||
|
||||
test_suite!(observed_attesters, ObservedAttesters);
|
||||
test_suite!(observed_aggregators, ObservedAggregators);
|
||||
test_suite_epoch!(observed_attesters, ObservedAttesters);
|
||||
test_suite_epoch!(observed_aggregators, ObservedAggregators);
|
||||
|
||||
macro_rules! test_suite_slot {
|
||||
($mod_name: ident, $type: ident) => {
|
||||
#[cfg(test)]
|
||||
mod $mod_name {
|
||||
use super::*;
|
||||
|
||||
fn single_period_test(store: &mut $type<E>, key: SlotSubcommitteeIndex) {
|
||||
let validator_indices = [0, 1, 2, 3, 5, 6, 7, 18, 22];
|
||||
|
||||
for &i in &validator_indices {
|
||||
assert_eq!(
|
||||
store.validator_has_been_observed(key, i),
|
||||
Ok(false),
|
||||
"should indicate an unknown item is unknown"
|
||||
);
|
||||
assert_eq!(
|
||||
store.observe_validator(key, i),
|
||||
Ok(false),
|
||||
"should observe new item"
|
||||
);
|
||||
}
|
||||
|
||||
for &i in &validator_indices {
|
||||
assert_eq!(
|
||||
store.validator_has_been_observed(key, i),
|
||||
Ok(true),
|
||||
"should indicate a known item is known"
|
||||
);
|
||||
assert_eq!(
|
||||
store.observe_validator(key, i),
|
||||
Ok(true),
|
||||
"should acknowledge an existing item"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_period() {
|
||||
let mut store = $type::default();
|
||||
|
||||
single_period_test(&mut store, SlotSubcommitteeIndex::new(Slot::new(0), 0));
|
||||
|
||||
assert_eq!(store.items.len(), 1, "should have a single bitfield stored");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_period_multiple_subcommittees() {
|
||||
let mut store = $type::default();
|
||||
|
||||
single_period_test(&mut store, SlotSubcommitteeIndex::new(Slot::new(0), 0));
|
||||
single_period_test(&mut store, SlotSubcommitteeIndex::new(Slot::new(0), 1));
|
||||
single_period_test(&mut store, SlotSubcommitteeIndex::new(Slot::new(0), 2));
|
||||
|
||||
assert_eq!(store.items.len(), 3, "should have three hash sets stored");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mulitple_contiguous_periods_same_subcommittee() {
|
||||
let mut store = $type::default();
|
||||
let max_cap = store.max_capacity();
|
||||
|
||||
for i in 0..max_cap * 3 {
|
||||
let period = SlotSubcommitteeIndex::new(Slot::new(i), 0);
|
||||
|
||||
single_period_test(&mut store, period);
|
||||
|
||||
/*
|
||||
* Ensure that the number of sets is correct.
|
||||
*/
|
||||
|
||||
if i < max_cap {
|
||||
assert_eq!(
|
||||
store.items.len(),
|
||||
i as usize + 1,
|
||||
"should have a {} items stored",
|
||||
i + 1
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
store.items.len(),
|
||||
max_cap as usize,
|
||||
"should have max_capacity items stored"
|
||||
);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that all the sets have the expected slots
|
||||
*/
|
||||
|
||||
let mut store_periods = store
|
||||
.items
|
||||
.iter()
|
||||
.map(|(period, _set)| *period)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert!(
|
||||
store_periods.len() <= store.max_capacity() as usize,
|
||||
"store size should not exceed max"
|
||||
);
|
||||
|
||||
store_periods.sort_unstable();
|
||||
|
||||
let expected_periods = (i.saturating_sub(max_cap - 1)..=i)
|
||||
.map(|i| SlotSubcommitteeIndex::new(Slot::new(i), 0))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert_eq!(
|
||||
expected_periods, store_periods,
|
||||
"should have expected slots"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mulitple_non_contiguous_periods_same_subcommitte() {
|
||||
let mut store = $type::default();
|
||||
let max_cap = store.max_capacity();
|
||||
|
||||
let to_skip = vec![1_u64, 3, 4, 5];
|
||||
let periods = (0..max_cap * 3)
|
||||
.into_iter()
|
||||
.filter(|i| !to_skip.contains(i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for &i in &periods {
|
||||
if to_skip.contains(&i) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let period = SlotSubcommitteeIndex::new(Slot::from(i), 0);
|
||||
|
||||
single_period_test(&mut store, period);
|
||||
|
||||
/*
|
||||
* Ensure that all the sets have the expected slots
|
||||
*/
|
||||
|
||||
let mut store_periods = store
|
||||
.items
|
||||
.iter()
|
||||
.map(|(period, _)| *period)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
store_periods.sort_unstable();
|
||||
|
||||
assert!(
|
||||
store_periods.len() <= store.max_capacity() as usize,
|
||||
"store size should not exceed max"
|
||||
);
|
||||
|
||||
let lowest = store.get_lowest_permissible().as_u64();
|
||||
let highest = period.slot.as_u64();
|
||||
let expected_periods = (lowest..=highest)
|
||||
.filter(|i| !to_skip.contains(i))
|
||||
.map(|i| SlotSubcommitteeIndex::new(Slot::new(i), 0))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert_eq!(
|
||||
expected_periods,
|
||||
&store_periods[..],
|
||||
"should have expected epochs"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mulitple_contiguous_periods_different_subcommittee() {
|
||||
let mut store = $type::default();
|
||||
let max_cap = store.max_capacity();
|
||||
|
||||
for i in 0..max_cap * 3 {
|
||||
let period = SlotSubcommitteeIndex::new(Slot::new(i), i);
|
||||
|
||||
single_period_test(&mut store, period);
|
||||
|
||||
/*
|
||||
* Ensure that the number of sets is correct.
|
||||
*/
|
||||
|
||||
if i < max_cap {
|
||||
assert_eq!(
|
||||
store.items.len(),
|
||||
i as usize + 1,
|
||||
"should have a {} items stored",
|
||||
i + 1
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
store.items.len(),
|
||||
max_cap as usize,
|
||||
"should have max_capacity items stored"
|
||||
);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that all the sets have the expected slots
|
||||
*/
|
||||
|
||||
let mut store_periods = store
|
||||
.items
|
||||
.iter()
|
||||
.map(|(period, _set)| *period)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert!(
|
||||
store_periods.len() <= store.max_capacity() as usize,
|
||||
"store size should not exceed max"
|
||||
);
|
||||
|
||||
store_periods.sort_unstable();
|
||||
|
||||
let expected_periods = (i.saturating_sub(max_cap - 1)..=i)
|
||||
.map(|i| SlotSubcommitteeIndex::new(Slot::new(i), i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert_eq!(
|
||||
expected_periods, store_periods,
|
||||
"should have expected slots"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mulitple_non_contiguous_periods_different_subcommitte() {
|
||||
let mut store = $type::default();
|
||||
let max_cap = store.max_capacity();
|
||||
|
||||
let to_skip = vec![1_u64, 3, 4, 5];
|
||||
let periods = (0..max_cap * 3)
|
||||
.into_iter()
|
||||
.filter(|i| !to_skip.contains(i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for &i in &periods {
|
||||
if to_skip.contains(&i) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let period = SlotSubcommitteeIndex::new(Slot::from(i), i);
|
||||
|
||||
single_period_test(&mut store, period);
|
||||
|
||||
/*
|
||||
* Ensure that all the sets have the expected slots
|
||||
*/
|
||||
|
||||
let mut store_periods = store
|
||||
.items
|
||||
.iter()
|
||||
.map(|(period, _)| *period)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
store_periods.sort_unstable();
|
||||
|
||||
assert!(
|
||||
store_periods.len() <= store.max_capacity() as usize,
|
||||
"store size should not exceed max"
|
||||
);
|
||||
|
||||
let lowest = store.get_lowest_permissible().as_u64();
|
||||
let highest = period.slot.as_u64();
|
||||
let expected_periods = (lowest..=highest)
|
||||
.filter(|i| !to_skip.contains(i))
|
||||
.map(|i| SlotSubcommitteeIndex::new(Slot::new(i), i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert_eq!(
|
||||
expected_periods,
|
||||
&store_periods[..],
|
||||
"should have expected epochs"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
test_suite_slot!(observed_sync_contributors, ObservedSyncContributors);
|
||||
test_suite_slot!(observed_sync_aggregators, ObservedSyncAggregators);
|
||||
}
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
//! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from
|
||||
//! validators that have already produced a block.
|
||||
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::marker::PhantomData;
|
||||
use types::{BeaconBlock, EthSpec, Slot, Unsigned};
|
||||
use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned};
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
@@ -27,8 +26,8 @@ pub enum Error {
|
||||
/// active_validator_count`, however in reality that is more like `slots_since_finality *
|
||||
/// known_distinct_shufflings` which is much smaller.
|
||||
pub struct ObservedBlockProducers<E: EthSpec> {
|
||||
finalized_slot: RwLock<Slot>,
|
||||
items: RwLock<HashMap<Slot, HashSet<u64>>>,
|
||||
finalized_slot: Slot,
|
||||
items: HashMap<Slot, HashSet<u64>>,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
@@ -36,8 +35,8 @@ impl<E: EthSpec> Default for ObservedBlockProducers<E> {
|
||||
/// Instantiates `Self` with `finalized_slot == 0`.
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
finalized_slot: RwLock::new(Slot::new(0)),
|
||||
items: RwLock::new(HashMap::new()),
|
||||
finalized_slot: Slot::new(0),
|
||||
items: HashMap::new(),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
@@ -53,15 +52,14 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
|
||||
///
|
||||
/// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
|
||||
/// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
|
||||
pub fn observe_proposer(&self, block: &BeaconBlock<E>) -> Result<bool, Error> {
|
||||
pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> {
|
||||
self.sanitize_block(block)?;
|
||||
|
||||
let did_not_exist = self
|
||||
.items
|
||||
.write()
|
||||
.entry(block.slot)
|
||||
.entry(block.slot())
|
||||
.or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize()))
|
||||
.insert(block.proposer_index);
|
||||
.insert(block.proposer_index());
|
||||
|
||||
Ok(!did_not_exist)
|
||||
}
|
||||
@@ -74,28 +72,27 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
|
||||
///
|
||||
/// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
|
||||
/// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
|
||||
pub fn proposer_has_been_observed(&self, block: &BeaconBlock<E>) -> Result<bool, Error> {
|
||||
pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> {
|
||||
self.sanitize_block(block)?;
|
||||
|
||||
let exists = self
|
||||
.items
|
||||
.read()
|
||||
.get(&block.slot)
|
||||
.map_or(false, |set| set.contains(&block.proposer_index));
|
||||
.get(&block.slot())
|
||||
.map_or(false, |set| set.contains(&block.proposer_index()));
|
||||
|
||||
Ok(exists)
|
||||
}
|
||||
|
||||
/// Returns `Ok(())` if the given `block` is sane.
|
||||
fn sanitize_block(&self, block: &BeaconBlock<E>) -> Result<(), Error> {
|
||||
if block.proposer_index > E::ValidatorRegistryLimit::to_u64() {
|
||||
return Err(Error::ValidatorIndexTooHigh(block.proposer_index));
|
||||
fn sanitize_block(&self, block: BeaconBlockRef<'_, E>) -> Result<(), Error> {
|
||||
if block.proposer_index() >= E::ValidatorRegistryLimit::to_u64() {
|
||||
return Err(Error::ValidatorIndexTooHigh(block.proposer_index()));
|
||||
}
|
||||
|
||||
let finalized_slot = *self.finalized_slot.read();
|
||||
if finalized_slot > 0 && block.slot <= finalized_slot {
|
||||
let finalized_slot = self.finalized_slot;
|
||||
if finalized_slot > 0 && block.slot() <= finalized_slot {
|
||||
return Err(Error::FinalizedBlock {
|
||||
slot: block.slot,
|
||||
slot: block.slot(),
|
||||
finalized_slot,
|
||||
});
|
||||
}
|
||||
@@ -109,44 +106,51 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
|
||||
/// equal to or less than `finalized_slot`.
|
||||
///
|
||||
/// No-op if `finalized_slot == 0`.
|
||||
pub fn prune(&self, finalized_slot: Slot) {
|
||||
pub fn prune(&mut self, finalized_slot: Slot) {
|
||||
if finalized_slot == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
*self.finalized_slot.write() = finalized_slot;
|
||||
self.items
|
||||
.write()
|
||||
.retain(|slot, _set| *slot > finalized_slot);
|
||||
self.finalized_slot = finalized_slot;
|
||||
self.items.retain(|slot, _set| *slot > finalized_slot);
|
||||
}
|
||||
|
||||
/// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`.
|
||||
///
|
||||
/// This is useful for doppelganger detection.
|
||||
pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool {
|
||||
self.items.iter().any(|(slot, producers)| {
|
||||
slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use types::MainnetEthSpec;
|
||||
use types::{BeaconBlock, MainnetEthSpec};
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
|
||||
fn get_block(slot: u64, proposer: u64) -> BeaconBlock<E> {
|
||||
let mut block = BeaconBlock::empty(&E::default_spec());
|
||||
block.slot = slot.into();
|
||||
block.proposer_index = proposer;
|
||||
*block.slot_mut() = slot.into();
|
||||
*block.proposer_index_mut() = proposer;
|
||||
block
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pruning() {
|
||||
let cache = ObservedBlockProducers::default();
|
||||
let mut cache = ObservedBlockProducers::default();
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.read().len(), 0, "no slots should be present");
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 0, "no slots should be present");
|
||||
|
||||
// Slot 0, proposer 0
|
||||
let block_a = &get_block(0, 0);
|
||||
let block_a = get_block(0, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_a),
|
||||
cache.observe_proposer(block_a.to_ref()),
|
||||
Ok(false),
|
||||
"can observe proposer, indicates proposer unobserved"
|
||||
);
|
||||
@@ -155,16 +159,11 @@ mod tests {
|
||||
* Preconditions.
|
||||
*/
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 1, "only one slot should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
@@ -178,16 +177,11 @@ mod tests {
|
||||
|
||||
cache.prune(Slot::new(0));
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 1, "only one slot should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
@@ -201,21 +195,21 @@ mod tests {
|
||||
|
||||
cache.prune(E::slots_per_epoch().into());
|
||||
assert_eq!(
|
||||
*cache.finalized_slot.read(),
|
||||
cache.finalized_slot,
|
||||
Slot::from(E::slots_per_epoch()),
|
||||
"finalized slot is updated"
|
||||
);
|
||||
assert_eq!(cache.items.read().len(), 0, "no items left");
|
||||
assert_eq!(cache.items.len(), 0, "no items left");
|
||||
|
||||
/*
|
||||
* Check that we can't insert a finalized block
|
||||
*/
|
||||
|
||||
// First slot of finalized epoch, proposer 0
|
||||
let block_b = &get_block(E::slots_per_epoch(), 0);
|
||||
let block_b = get_block(E::slots_per_epoch(), 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_b),
|
||||
cache.observe_proposer(block_b.to_ref()),
|
||||
Err(Error::FinalizedBlock {
|
||||
slot: E::slots_per_epoch().into(),
|
||||
finalized_slot: E::slots_per_epoch().into(),
|
||||
@@ -223,7 +217,7 @@ mod tests {
|
||||
"cant insert finalized block"
|
||||
);
|
||||
|
||||
assert_eq!(cache.items.read().len(), 0, "block was not added");
|
||||
assert_eq!(cache.items.len(), 0, "block was not added");
|
||||
|
||||
/*
|
||||
* Check that we _can_ insert a non-finalized block
|
||||
@@ -232,23 +226,18 @@ mod tests {
|
||||
let three_epochs = E::slots_per_epoch() * 3;
|
||||
|
||||
// First slot of finalized epoch, proposer 0
|
||||
let block_b = &get_block(three_epochs, 0);
|
||||
let block_b = get_block(three_epochs, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_b),
|
||||
cache.observe_proposer(block_b.to_ref()),
|
||||
Ok(false),
|
||||
"can insert non-finalized block"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(cache.items.len(), 1, "only one slot should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(three_epochs))
|
||||
.expect("the three epochs slot should be present")
|
||||
.len(),
|
||||
@@ -264,20 +253,15 @@ mod tests {
|
||||
cache.prune(two_epochs.into());
|
||||
|
||||
assert_eq!(
|
||||
*cache.finalized_slot.read(),
|
||||
cache.finalized_slot,
|
||||
Slot::from(two_epochs),
|
||||
"finalized slot is updated"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(cache.items.len(), 1, "only one slot should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(three_epochs))
|
||||
.expect("the three epochs slot should be present")
|
||||
.len(),
|
||||
@@ -288,42 +272,37 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn simple_observations() {
|
||||
let cache = ObservedBlockProducers::default();
|
||||
let mut cache = ObservedBlockProducers::default();
|
||||
|
||||
// Slot 0, proposer 0
|
||||
let block_a = &get_block(0, 0);
|
||||
let block_a = get_block(0, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_a),
|
||||
cache.proposer_has_been_observed(block_a.to_ref()),
|
||||
Ok(false),
|
||||
"no observation in empty cache"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_a),
|
||||
cache.observe_proposer(block_a.to_ref()),
|
||||
Ok(false),
|
||||
"can observe proposer, indicates proposer unobserved"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_a),
|
||||
cache.proposer_has_been_observed(block_a.to_ref()),
|
||||
Ok(true),
|
||||
"observed block is indicated as true"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_a),
|
||||
cache.observe_proposer(block_a.to_ref()),
|
||||
Ok(true),
|
||||
"observing again indicates true"
|
||||
);
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(
|
||||
cache.items.read().len(),
|
||||
1,
|
||||
"only one slot should be present"
|
||||
);
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 1, "only one slot should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
@@ -332,35 +311,34 @@ mod tests {
|
||||
);
|
||||
|
||||
// Slot 1, proposer 0
|
||||
let block_b = &get_block(1, 0);
|
||||
let block_b = get_block(1, 0);
|
||||
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_b),
|
||||
cache.proposer_has_been_observed(block_b.to_ref()),
|
||||
Ok(false),
|
||||
"no observation for new slot"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_b),
|
||||
cache.observe_proposer(block_b.to_ref()),
|
||||
Ok(false),
|
||||
"can observe proposer for new slot, indicates proposer unobserved"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_b),
|
||||
cache.proposer_has_been_observed(block_b.to_ref()),
|
||||
Ok(true),
|
||||
"observed block in slot 1 is indicated as true"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_b),
|
||||
cache.observe_proposer(block_b.to_ref()),
|
||||
Ok(true),
|
||||
"observing slot 1 again indicates true"
|
||||
);
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.read().len(), 2, "two slots should be present");
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 2, "two slots should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
@@ -370,7 +348,6 @@ mod tests {
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(1))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
@@ -379,35 +356,34 @@ mod tests {
|
||||
);
|
||||
|
||||
// Slot 0, proposer 1
|
||||
let block_c = &get_block(0, 1);
|
||||
let block_c = get_block(0, 1);
|
||||
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_c),
|
||||
cache.proposer_has_been_observed(block_c.to_ref()),
|
||||
Ok(false),
|
||||
"no observation for new proposer"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_c),
|
||||
cache.observe_proposer(block_c.to_ref()),
|
||||
Ok(false),
|
||||
"can observe new proposer, indicates proposer unobserved"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.proposer_has_been_observed(block_c),
|
||||
cache.proposer_has_been_observed(block_c.to_ref()),
|
||||
Ok(true),
|
||||
"observed new proposer block is indicated as true"
|
||||
);
|
||||
assert_eq!(
|
||||
cache.observe_proposer(block_c),
|
||||
cache.observe_proposer(block_c.to_ref()),
|
||||
Ok(true),
|
||||
"observing new proposer again indicates true"
|
||||
);
|
||||
|
||||
assert_eq!(*cache.finalized_slot.read(), 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.read().len(), 2, "two slots should be present");
|
||||
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
|
||||
assert_eq!(cache.items.len(), 2, "two slots should be present");
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(0))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
@@ -417,7 +393,6 @@ mod tests {
|
||||
assert_eq!(
|
||||
cache
|
||||
.items
|
||||
.read()
|
||||
.get(&Slot::new(1))
|
||||
.expect("slot zero should be present")
|
||||
.len(),
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
use derivative::Derivative;
|
||||
use parking_lot::Mutex;
|
||||
use smallvec::SmallVec;
|
||||
use state_processing::{SigVerifiedOp, VerifyOperation};
|
||||
use std::collections::HashSet;
|
||||
use std::iter::FromIterator;
|
||||
use std::marker::PhantomData;
|
||||
use types::{
|
||||
AttesterSlashing, BeaconState, ChainSpec, EthSpec, ProposerSlashing, SignedVoluntaryExit,
|
||||
@@ -25,7 +23,7 @@ pub struct ObservedOperations<T: ObservableOperation<E>, E: EthSpec> {
|
||||
/// For attester slashings, this is the set of all validators who would be slashed by
|
||||
/// previously seen attester slashings, i.e. those validators in the intersection of
|
||||
/// `attestation_1.attester_indices` and `attestation_2.attester_indices`.
|
||||
observed_validator_indices: Mutex<HashSet<u64>>,
|
||||
observed_validator_indices: HashSet<u64>,
|
||||
_phantom: PhantomData<(T, E)>,
|
||||
}
|
||||
|
||||
@@ -58,10 +56,18 @@ impl<E: EthSpec> ObservableOperation<E> for ProposerSlashing {
|
||||
|
||||
impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
|
||||
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
|
||||
let attestation_1_indices =
|
||||
HashSet::<u64>::from_iter(self.attestation_1.attesting_indices.iter().copied());
|
||||
let attestation_2_indices =
|
||||
HashSet::<u64>::from_iter(self.attestation_2.attesting_indices.iter().copied());
|
||||
let attestation_1_indices = self
|
||||
.attestation_1
|
||||
.attesting_indices
|
||||
.iter()
|
||||
.copied()
|
||||
.collect::<HashSet<u64>>();
|
||||
let attestation_2_indices = self
|
||||
.attestation_2
|
||||
.attesting_indices
|
||||
.iter()
|
||||
.copied()
|
||||
.collect::<HashSet<u64>>();
|
||||
attestation_1_indices
|
||||
.intersection(&attestation_2_indices)
|
||||
.copied()
|
||||
@@ -71,12 +77,12 @@ impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
|
||||
|
||||
impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> {
|
||||
pub fn verify_and_observe(
|
||||
&self,
|
||||
&mut self,
|
||||
op: T,
|
||||
head_state: &BeaconState<E>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<ObservationOutcome<T>, T::Error> {
|
||||
let mut observed_validator_indices = self.observed_validator_indices.lock();
|
||||
let observed_validator_indices = &mut self.observed_validator_indices;
|
||||
let new_validator_indices = op.observed_validators();
|
||||
|
||||
// If all of the new validator indices have been previously observed, short-circuit
|
||||
|
||||
@@ -4,9 +4,19 @@ use ssz_derive::{Decode, Encode};
|
||||
use store::{DBColumn, Error as StoreError, StoreItem};
|
||||
use types::Hash256;
|
||||
|
||||
/// Dummy value to use for the canonical head block root, see below.
|
||||
pub const DUMMY_CANONICAL_HEAD_BLOCK_ROOT: Hash256 = Hash256::repeat_byte(0xff);
|
||||
|
||||
#[derive(Clone, Encode, Decode)]
|
||||
pub struct PersistedBeaconChain {
|
||||
pub canonical_head_block_root: Hash256,
|
||||
/// This value is ignored to resolve the issue described here:
|
||||
///
|
||||
/// https://github.com/sigp/lighthouse/pull/1639
|
||||
///
|
||||
/// Its removal is tracked here:
|
||||
///
|
||||
/// https://github.com/sigp/lighthouse/issues/1784
|
||||
pub _canonical_head_block_root: Hash256,
|
||||
pub genesis_block_root: Hash256,
|
||||
pub ssz_head_tracker: SszHeadTracker,
|
||||
}
|
||||
|
||||
@@ -1,25 +1,47 @@
|
||||
use crate::beacon_fork_choice_store::PersistedForkChoiceStore as ForkChoiceStore;
|
||||
use fork_choice::PersistedForkChoice as ForkChoice;
|
||||
use crate::beacon_fork_choice_store::{
|
||||
PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8,
|
||||
};
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use store::{DBColumn, Error, StoreItem};
|
||||
use superstruct::superstruct;
|
||||
|
||||
#[derive(Encode, Decode)]
|
||||
// If adding a new version you should update this type alias and fix the breakages.
|
||||
pub type PersistedForkChoice = PersistedForkChoiceV8;
|
||||
|
||||
#[superstruct(
|
||||
variants(V1, V7, V8),
|
||||
variant_attributes(derive(Encode, Decode)),
|
||||
no_enum
|
||||
)]
|
||||
pub struct PersistedForkChoice {
|
||||
pub fork_choice: ForkChoice,
|
||||
pub fork_choice_store: ForkChoiceStore,
|
||||
pub fork_choice: fork_choice::PersistedForkChoice,
|
||||
#[superstruct(only(V1))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV1,
|
||||
#[superstruct(only(V7))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV7,
|
||||
#[superstruct(only(V8))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV8,
|
||||
}
|
||||
|
||||
impl StoreItem for PersistedForkChoice {
|
||||
fn db_column() -> DBColumn {
|
||||
DBColumn::ForkChoice
|
||||
}
|
||||
macro_rules! impl_store_item {
|
||||
($type:ty) => {
|
||||
impl StoreItem for $type {
|
||||
fn db_column() -> DBColumn {
|
||||
DBColumn::ForkChoice
|
||||
}
|
||||
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
self.as_ssz_bytes()
|
||||
}
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
self.as_ssz_bytes()
|
||||
}
|
||||
|
||||
fn from_store_bytes(bytes: &[u8]) -> std::result::Result<Self, Error> {
|
||||
Self::from_ssz_bytes(bytes).map_err(Into::into)
|
||||
}
|
||||
fn from_store_bytes(bytes: &[u8]) -> std::result::Result<Self, Error> {
|
||||
Self::from_ssz_bytes(bytes).map_err(Into::into)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_store_item!(PersistedForkChoiceV1);
|
||||
impl_store_item!(PersistedForkChoiceV7);
|
||||
impl_store_item!(PersistedForkChoiceV8);
|
||||
|
||||
119
beacon_node/beacon_chain/src/pre_finalization_cache.rs
Normal file
119
beacon_node/beacon_chain/src/pre_finalization_cache.rs
Normal file
@@ -0,0 +1,119 @@
|
||||
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use itertools::process_results;
|
||||
use lru::LruCache;
|
||||
use parking_lot::Mutex;
|
||||
use slog::debug;
|
||||
use std::time::Duration;
|
||||
use types::Hash256;
|
||||
|
||||
const BLOCK_ROOT_CACHE_LIMIT: usize = 512;
|
||||
const LOOKUP_LIMIT: usize = 8;
|
||||
const METRICS_TIMEOUT: Duration = Duration::from_millis(100);
|
||||
|
||||
/// Cache for rejecting attestations to blocks from before finalization.
|
||||
///
|
||||
/// It stores a collection of block roots that are pre-finalization and therefore not known to fork
|
||||
/// choice in `verify_head_block_is_known` during attestation processing.
|
||||
#[derive(Default)]
|
||||
pub struct PreFinalizationBlockCache {
|
||||
cache: Mutex<Cache>,
|
||||
}
|
||||
|
||||
struct Cache {
|
||||
/// Set of block roots that are known to be pre-finalization.
|
||||
block_roots: LruCache<Hash256, ()>,
|
||||
/// Set of block roots that are the subject of single block lookups.
|
||||
in_progress_lookups: LruCache<Hash256, ()>,
|
||||
}
|
||||
|
||||
impl Default for Cache {
|
||||
fn default() -> Self {
|
||||
Cache {
|
||||
block_roots: LruCache::new(BLOCK_ROOT_CACHE_LIMIT),
|
||||
in_progress_lookups: LruCache::new(LOOKUP_LIMIT),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
/// Check whether the block with `block_root` is known to be pre-finalization.
|
||||
///
|
||||
/// The provided `block_root` is assumed to be unknown to fork choice. I.e., it
|
||||
/// is not known to be a descendant of the finalized block.
|
||||
///
|
||||
/// Return `true` if the attestation to this block should be rejected outright,
|
||||
/// return `false` if more information is needed from a single-block-lookup.
|
||||
pub fn is_pre_finalization_block(&self, block_root: Hash256) -> Result<bool, BeaconChainError> {
|
||||
let mut cache = self.pre_finalization_block_cache.cache.lock();
|
||||
|
||||
// Check the cache to see if we already know this pre-finalization block root.
|
||||
if cache.block_roots.contains(&block_root) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// Avoid repeating the disk lookup for blocks that are already subject to a network lookup.
|
||||
// Sync will take care of de-duplicating the single block lookups.
|
||||
if cache.in_progress_lookups.contains(&block_root) {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// 1. Check memory for a recent pre-finalization block.
|
||||
let is_recent_finalized_block = self.with_head(|head| {
|
||||
process_results(
|
||||
head.beacon_state.rev_iter_block_roots(&self.spec),
|
||||
|mut iter| iter.any(|(_, root)| root == block_root),
|
||||
)
|
||||
.map_err(BeaconChainError::BeaconStateError)
|
||||
})?;
|
||||
if is_recent_finalized_block {
|
||||
cache.block_roots.put(block_root, ());
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// 2. Check on disk.
|
||||
if self.store.get_block(&block_root)?.is_some() {
|
||||
cache.block_roots.put(block_root, ());
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// 3. Check the network with a single block lookup.
|
||||
cache.in_progress_lookups.put(block_root, ());
|
||||
if cache.in_progress_lookups.len() == LOOKUP_LIMIT {
|
||||
// NOTE: we expect this to occur sometimes if a lot of blocks that we look up fail to be
|
||||
// imported for reasons other than being pre-finalization. The cache will eventually
|
||||
// self-repair in this case by replacing old entries with new ones until all the failed
|
||||
// blocks have been flushed out. Solving this issue isn't as simple as hooking the
|
||||
// beacon processor's functions that handle failed blocks because we need the block root
|
||||
// and it has been erased from the `BlockError` by that point.
|
||||
debug!(
|
||||
self.log,
|
||||
"Pre-finalization lookup cache is full";
|
||||
);
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
pub fn pre_finalization_block_rejected(&self, block_root: Hash256) {
|
||||
// Future requests can know that this block is invalid without having to look it up again.
|
||||
let mut cache = self.pre_finalization_block_cache.cache.lock();
|
||||
cache.in_progress_lookups.pop(&block_root);
|
||||
cache.block_roots.put(block_root, ());
|
||||
}
|
||||
}
|
||||
|
||||
impl PreFinalizationBlockCache {
|
||||
pub fn block_processed(&self, block_root: Hash256) {
|
||||
// Future requests will find this block in fork choice, so no need to cache it in the
|
||||
// ongoing lookup cache any longer.
|
||||
self.cache.lock().in_progress_lookups.pop(&block_root);
|
||||
}
|
||||
|
||||
pub fn contains(&self, block_root: Hash256) -> bool {
|
||||
self.cache.lock().block_roots.contains(&block_root)
|
||||
}
|
||||
|
||||
pub fn metrics(&self) -> Option<(usize, usize)> {
|
||||
let cache = self.cache.try_lock_for(METRICS_TIMEOUT)?;
|
||||
Some((cache.block_roots.len(), cache.in_progress_lookups.len()))
|
||||
}
|
||||
}
|
||||
74
beacon_node/beacon_chain/src/proposer_prep_service.rs
Normal file
74
beacon_node/beacon_chain/src/proposer_prep_service.rs
Normal file
@@ -0,0 +1,74 @@
|
||||
use crate::{BeaconChain, BeaconChainTypes};
|
||||
use slog::{debug, error};
|
||||
use slot_clock::SlotClock;
|
||||
use std::sync::Arc;
|
||||
use task_executor::TaskExecutor;
|
||||
use tokio::time::sleep;
|
||||
|
||||
/// At 12s slot times, the means that the payload preparation routine will run 4s before the start
|
||||
/// of each slot (`12 / 3 = 4`).
|
||||
pub const PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR: u32 = 3;
|
||||
|
||||
/// Spawns a routine which ensures the EL is provided advance notice of any block producers.
|
||||
///
|
||||
/// This routine will run once per slot, at `slot_duration / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR`
|
||||
/// before the start of each slot.
|
||||
///
|
||||
/// The service will not be started if there is no `execution_layer` on the `chain`.
|
||||
pub fn start_proposer_prep_service<T: BeaconChainTypes>(
|
||||
executor: TaskExecutor,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
) {
|
||||
// Avoid spawning the service if there's no EL, it'll just error anyway.
|
||||
if chain.execution_layer.is_some() {
|
||||
executor.clone().spawn(
|
||||
async move { proposer_prep_service(executor, chain).await },
|
||||
"proposer_prep_service",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Loop indefinitely, calling `BeaconChain::prepare_beacon_proposer_async` at an interval.
|
||||
async fn proposer_prep_service<T: BeaconChainTypes>(
|
||||
executor: TaskExecutor,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
) {
|
||||
let slot_duration = chain.slot_clock.slot_duration();
|
||||
|
||||
loop {
|
||||
match chain.slot_clock.duration_to_next_slot() {
|
||||
Some(duration) => {
|
||||
let additional_delay = slot_duration
|
||||
- chain.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR;
|
||||
sleep(duration + additional_delay).await;
|
||||
|
||||
debug!(
|
||||
chain.log,
|
||||
"Proposer prepare routine firing";
|
||||
);
|
||||
|
||||
let inner_chain = chain.clone();
|
||||
executor.spawn(
|
||||
async move {
|
||||
if let Err(e) = inner_chain.prepare_beacon_proposer_async().await {
|
||||
error!(
|
||||
inner_chain.log,
|
||||
"Proposer prepare routine failed";
|
||||
"error" => ?e
|
||||
);
|
||||
}
|
||||
},
|
||||
"proposer_prep_update",
|
||||
);
|
||||
|
||||
continue;
|
||||
}
|
||||
None => {
|
||||
error!(chain.log, "Failed to read slot clock");
|
||||
// If we can't read the slot clock, just wait another slot.
|
||||
sleep(slot_duration).await;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
212
beacon_node/beacon_chain/src/schema_change.rs
Normal file
212
beacon_node/beacon_chain/src/schema_change.rs
Normal file
@@ -0,0 +1,212 @@
|
||||
//! Utilities for managing database schema changes.
|
||||
mod migration_schema_v6;
|
||||
mod migration_schema_v7;
|
||||
mod migration_schema_v8;
|
||||
mod types;
|
||||
|
||||
use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY};
|
||||
use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7};
|
||||
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
|
||||
use operation_pool::{PersistedOperationPool, PersistedOperationPoolBase};
|
||||
use slog::{warn, Logger};
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use store::config::OnDiskStoreConfig;
|
||||
use store::hot_cold_store::{HotColdDB, HotColdDBError};
|
||||
use store::metadata::{SchemaVersion, CONFIG_KEY, CURRENT_SCHEMA_VERSION};
|
||||
use store::{DBColumn, Error as StoreError, ItemStore, StoreItem};
|
||||
|
||||
const PUBKEY_CACHE_FILENAME: &str = "pubkey_cache.ssz";
|
||||
|
||||
/// Migrate the database from one schema version to another, applying all requisite mutations.
|
||||
pub fn migrate_schema<T: BeaconChainTypes>(
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
datadir: &Path,
|
||||
from: SchemaVersion,
|
||||
to: SchemaVersion,
|
||||
log: Logger,
|
||||
) -> Result<(), StoreError> {
|
||||
match (from, to) {
|
||||
// Migrating from the current schema version to iself is always OK, a no-op.
|
||||
(_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()),
|
||||
// Migrate across multiple versions by recursively migrating one step at a time.
|
||||
(_, _) if from.as_u64() + 1 < to.as_u64() => {
|
||||
let next = SchemaVersion(from.as_u64() + 1);
|
||||
migrate_schema::<T>(db.clone(), datadir, from, next, log.clone())?;
|
||||
migrate_schema::<T>(db, datadir, next, to, log)
|
||||
}
|
||||
// Migration from v0.3.0 to v0.3.x, adding the temporary states column.
|
||||
// Nothing actually needs to be done, but once a DB uses v2 it shouldn't go back.
|
||||
(SchemaVersion(1), SchemaVersion(2)) => {
|
||||
db.store_schema_version(to)?;
|
||||
Ok(())
|
||||
}
|
||||
// Migration for removing the pubkey cache.
|
||||
(SchemaVersion(2), SchemaVersion(3)) => {
|
||||
let pk_cache_path = datadir.join(PUBKEY_CACHE_FILENAME);
|
||||
|
||||
// Load from file, store to DB.
|
||||
ValidatorPubkeyCache::<T>::load_from_file(&pk_cache_path)
|
||||
.and_then(|cache| ValidatorPubkeyCache::convert(cache, db.clone()))
|
||||
.map_err(|e| StoreError::SchemaMigrationError(format!("{:?}", e)))?;
|
||||
|
||||
db.store_schema_version(to)?;
|
||||
|
||||
// Delete cache file now that keys are stored in the DB.
|
||||
fs::remove_file(&pk_cache_path).map_err(|e| {
|
||||
StoreError::SchemaMigrationError(format!(
|
||||
"unable to delete {}: {:?}",
|
||||
pk_cache_path.display(),
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Migration for adding sync committee contributions to the persisted op pool.
|
||||
(SchemaVersion(3), SchemaVersion(4)) => {
|
||||
// Deserialize from what exists in the database using the `PersistedOperationPoolBase`
|
||||
// variant and convert it to the Altair variant.
|
||||
let pool_opt = db
|
||||
.get_item::<PersistedOperationPoolBase<T::EthSpec>>(&OP_POOL_DB_KEY)?
|
||||
.map(PersistedOperationPool::Base)
|
||||
.map(PersistedOperationPool::base_to_altair);
|
||||
|
||||
if let Some(pool) = pool_opt {
|
||||
// Store the converted pool under the same key.
|
||||
db.put_item::<PersistedOperationPool<T::EthSpec>>(&OP_POOL_DB_KEY, &pool)?;
|
||||
}
|
||||
|
||||
db.store_schema_version(to)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Migration for weak subjectivity sync support and clean up of `OnDiskStoreConfig` (#1784).
|
||||
(SchemaVersion(4), SchemaVersion(5)) => {
|
||||
if let Some(OnDiskStoreConfigV4 {
|
||||
slots_per_restore_point,
|
||||
..
|
||||
}) = db.hot_db.get(&CONFIG_KEY)?
|
||||
{
|
||||
let new_config = OnDiskStoreConfig {
|
||||
slots_per_restore_point,
|
||||
};
|
||||
db.hot_db.put(&CONFIG_KEY, &new_config)?;
|
||||
}
|
||||
|
||||
db.store_schema_version(to)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Migration for adding `execution_status` field to the fork choice store.
|
||||
(SchemaVersion(5), SchemaVersion(6)) => {
|
||||
// Database operations to be done atomically
|
||||
let mut ops = vec![];
|
||||
|
||||
// The top-level `PersistedForkChoice` struct is still V1 but will have its internal
|
||||
// bytes for the fork choice updated to V6.
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(mut persisted_fork_choice) = fork_choice_opt {
|
||||
migration_schema_v6::update_execution_statuses::<T>(&mut persisted_fork_choice)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
|
||||
// Store the converted fork choice store under the same key.
|
||||
ops.push(persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// 1. Add `proposer_boost_root`.
|
||||
// 2. Update `justified_epoch` to `justified_checkpoint` and `finalized_epoch` to
|
||||
// `finalized_checkpoint`.
|
||||
// 3. This migration also includes a potential update to the justified
|
||||
// checkpoint in case the fork choice store's justified checkpoint and finalized checkpoint
|
||||
// combination does not actually exist for any blocks in fork choice. This was possible in
|
||||
// the consensus spec prior to v1.1.6.
|
||||
//
|
||||
// Relevant issues:
|
||||
//
|
||||
// https://github.com/sigp/lighthouse/issues/2741
|
||||
// https://github.com/ethereum/consensus-specs/pull/2727
|
||||
// https://github.com/ethereum/consensus-specs/pull/2730
|
||||
(SchemaVersion(6), SchemaVersion(7)) => {
|
||||
// Database operations to be done atomically
|
||||
let mut ops = vec![];
|
||||
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(persisted_fork_choice_v1) = fork_choice_opt {
|
||||
// This migrates the `PersistedForkChoiceStore`, adding the `proposer_boost_root` field.
|
||||
let mut persisted_fork_choice_v7 = persisted_fork_choice_v1.into();
|
||||
|
||||
let result = migration_schema_v7::update_fork_choice::<T>(
|
||||
&mut persisted_fork_choice_v7,
|
||||
db.clone(),
|
||||
);
|
||||
|
||||
// Fall back to re-initializing fork choice from an anchor state if necessary.
|
||||
if let Err(e) = result {
|
||||
warn!(log, "Unable to migrate to database schema 7, re-initializing fork choice"; "error" => ?e);
|
||||
migration_schema_v7::update_with_reinitialized_fork_choice::<T>(
|
||||
&mut persisted_fork_choice_v7,
|
||||
db.clone(),
|
||||
)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
}
|
||||
|
||||
// Store the converted fork choice store under the same key.
|
||||
ops.push(persisted_fork_choice_v7.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Migration to add an `epoch` key to the fork choice's balances cache.
|
||||
(SchemaVersion(7), SchemaVersion(8)) => {
|
||||
let mut ops = vec![];
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV7>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(fork_choice) = fork_choice_opt {
|
||||
let updated_fork_choice =
|
||||
migration_schema_v8::update_fork_choice::<T>(fork_choice, db.clone())?;
|
||||
|
||||
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Anything else is an error.
|
||||
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
|
||||
target_version: to,
|
||||
current_version: from,
|
||||
}
|
||||
.into()),
|
||||
}
|
||||
}
|
||||
|
||||
// Store config used in v4 schema and earlier. Kept so that old databases can still be
// decoded during migration.
#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
pub struct OnDiskStoreConfigV4 {
    // Restore-point spacing, in slots (name-derived; semantics defined by the store config).
    pub slots_per_restore_point: u64,
    // Leading underscore marks this as unused; presumably retained only so the legacy SSZ
    // byte layout still decodes — TODO confirm against the v4 store config.
    pub _block_cache_size: usize,
}
|
||||
|
||||
impl StoreItem for OnDiskStoreConfigV4 {
    /// Stored under the beacon metadata column.
    fn db_column() -> DBColumn {
        DBColumn::BeaconMeta
    }

    /// Serialize to SSZ bytes for storage.
    fn as_store_bytes(&self) -> Vec<u8> {
        self.as_ssz_bytes()
    }

    /// Decode from SSZ bytes; decode failures are converted into a `StoreError` via `?`.
    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
        Ok(Self::from_ssz_bytes(bytes)?)
    }
}
|
||||
74
beacon_node/beacon_chain/src/schema_change/README.md
Normal file
74
beacon_node/beacon_chain/src/schema_change/README.md
Normal file
@@ -0,0 +1,74 @@
|
||||
Database Schema Migrations
|
||||
====
|
||||
|
||||
This document is an attempt to record some best practices and design conventions for applying
|
||||
database schema migrations within Lighthouse.
|
||||
|
||||
## General Structure
|
||||
|
||||
If you make a breaking change to an on-disk data structure you need to increment the
|
||||
`SCHEMA_VERSION` in `beacon_node/store/src/metadata.rs` and add a migration from the previous
|
||||
version to the new version.
|
||||
|
||||
The entry-point for database migrations is in `schema_change.rs`, _not_ `migrate.rs` (which deals
|
||||
with finalization). Supporting code for a specific migration may be added in
|
||||
`schema_change/migration_schema_vX.rs`, where `X` is the version being migrated _to_.
|
||||
|
||||
## Combining Schema Changes
|
||||
|
||||
Schema changes may be combined if they are part of the same pull request to
|
||||
`unstable`. Once a schema version is defined in `unstable` we should not apply changes to it
|
||||
without incrementing the version. This prevents conflicts between versions that appear to be the
|
||||
same. This allows us to deploy `unstable` to nodes without having to worry about needing to resync
|
||||
because of a sneaky schema change.
|
||||
|
||||
Changing the on-disk structure for a version _before_ it is merged to `unstable` is OK. You will
|
||||
just have to handle manually resyncing any test nodes (use checkpoint sync).
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
Prefer to name versions of structs by _the version at which the change was introduced_. For example
|
||||
if you add a field to `Foo` in v9, call the previous version `FooV1` (assuming this is `Foo`'s first
|
||||
migration) and write a schema change that migrates from `FooV1` to `FooV9`.
|
||||
|
||||
Prefer to use explicit version names in `schema_change.rs` and the `schema_change` module. To
|
||||
interface with the outside world, either:
|
||||
|
||||
1. Define a type alias to the latest version, e.g. `pub type Foo = FooV9`, or
|
||||
2. Define a mapping from the latest version to the version used elsewhere, e.g.
|
||||
```rust
|
||||
impl From<FooV9> for Foo {}
|
||||
```
|
||||
|
||||
Avoid names like:
|
||||
|
||||
* `LegacyFoo`
|
||||
* `OldFoo`
|
||||
* `FooWithoutX`
|
||||
|
||||
## First-version vs Last-version
|
||||
|
||||
Previously the schema migration code would name types by the _last_ version at which they were
|
||||
valid. For example if `Foo` changed in `V9` then we would name the two variants `FooV8` and `FooV9`.
|
||||
The problem with this scheme is that if `Foo` changes again in the future at say v12 then `FooV9` would
|
||||
need to be renamed to `FooV11`, which is annoying. Using the _first_ valid version as described
|
||||
above does not have this issue.
|
||||
|
||||
## Using SuperStruct
|
||||
|
||||
If possible, consider using [`superstruct`](https://crates.io/crates/superstruct) to handle data
|
||||
structure changes between versions.
|
||||
|
||||
* Use `superstruct(no_enum)` to avoid generating an unnecessary top-level enum.
|
||||
|
||||
## Example
|
||||
|
||||
A field is added to `Foo` in v9, and there are two variants: `FooV1` and `FooV9`. There is a
|
||||
migration from `FooV1` to `FooV9`. `Foo` is aliased to `FooV9`.
|
||||
|
||||
Some time later another field is added to `Foo` in v12. A new `FooV12` is created, along with a
|
||||
migration from `FooV9` to `FooV12`. The primary `Foo` type gets re-aliased to `FooV12`. The previous
|
||||
migration from V1 to V9 shouldn't break because the schema migration refers to `FooV9` explicitly
|
||||
rather than `Foo`. Due to the re-aliasing (or re-mapping) the compiler will check every usage
|
||||
of `Foo` to make sure that it still makes sense with `FooV12`.
|
||||
|
||||
@@ -0,0 +1,28 @@
|
||||
//! These functions and structs are only relevant to the database migration from schema 5 to 6.
|
||||
use crate::persisted_fork_choice::PersistedForkChoiceV1;
|
||||
use crate::schema_change::types::{SszContainerV1, SszContainerV6};
|
||||
use crate::BeaconChainTypes;
|
||||
use ssz::four_byte_option_impl;
|
||||
use ssz::{Decode, Encode};
|
||||
|
||||
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
|
||||
// selector.
|
||||
four_byte_option_impl!(four_byte_option_usize, usize);
|
||||
|
||||
/// Migrate the fork choice's serialized proto-array from the V1 SSZ layout to the V6 layout.
///
/// The raw `proto_array_bytes` inside `persisted_fork_choice` are decoded as `SszContainerV1`,
/// converted to `SszContainerV6`, and re-serialized in place. Returns a human-readable error
/// string if the legacy bytes fail to decode.
pub(crate) fn update_execution_statuses<T: BeaconChainTypes>(
    persisted_fork_choice: &mut PersistedForkChoiceV1,
) -> Result<(), String> {
    let ssz_container_v1 =
        SszContainerV1::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes)
            .map_err(|e| {
                format!(
                    "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
                    e
                )
            })?;

    // The V1 -> V6 conversion populates the new `execution_status` field on every node.
    let ssz_container_v6: SszContainerV6 = ssz_container_v1.into();

    // Write the upgraded bytes back into the same struct; the outer container is unchanged.
    persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v6.as_ssz_bytes();
    Ok(())
}
|
||||
@@ -0,0 +1,327 @@
|
||||
//! These functions and structs are only relevant to the database migration from schema 6 to 7.
|
||||
use crate::beacon_chain::BeaconChainTypes;
|
||||
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7};
|
||||
use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7};
|
||||
use crate::schema_change::types::{ProtoNodeV6, SszContainerV6, SszContainerV7};
|
||||
use crate::types::{Checkpoint, Epoch, Hash256};
|
||||
use crate::types::{EthSpec, Slot};
|
||||
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
|
||||
use fork_choice::ForkChoice;
|
||||
use proto_array::{core::ProtoNode, core::SszContainer, ProtoArrayForkChoice};
|
||||
use ssz::four_byte_option_impl;
|
||||
use ssz::{Decode, Encode};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use store::hot_cold_store::HotColdDB;
|
||||
use store::iter::BlockRootsIterator;
|
||||
use store::Error as StoreError;
|
||||
|
||||
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
|
||||
// selector.
|
||||
four_byte_option_impl!(four_byte_option_usize, usize);
|
||||
|
||||
/// This method is used to re-initialize fork choice from the finalized state in case we hit an
/// error during this migration.
///
/// Loads the block and state at the store's finalized checkpoint, rebuilds a
/// `BeaconForkChoiceStore` anchored there, and overwrites the persisted fork choice with a
/// freshly constructed one. All errors are stringified for the migration error path.
pub(crate) fn update_with_reinitialized_fork_choice<T: BeaconChainTypes>(
    persisted_fork_choice: &mut PersistedForkChoiceV7,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<(), String> {
    // Anchor at the finalized checkpoint recorded in the (already converted) V7 store.
    let anchor_block_root = persisted_fork_choice
        .fork_choice_store
        .finalized_checkpoint
        .root;
    let anchor_block = db
        .get_block(&anchor_block_root)
        .map_err(|e| format!("{:?}", e))?
        .ok_or_else(|| "Missing anchor beacon block".to_string())?;
    let anchor_state = db
        .get_state(&anchor_block.state_root(), Some(anchor_block.slot()))
        .map_err(|e| format!("{:?}", e))?
        .ok_or_else(|| "Missing anchor beacon state".to_string())?;
    let snapshot = BeaconSnapshot {
        beacon_block: anchor_block,
        beacon_block_root: anchor_block_root,
        beacon_state: anchor_state,
    };
    let store = BeaconForkChoiceStore::get_forkchoice_store(db, &snapshot);
    // Build an entirely new fork choice from the anchor; the old proto-array bytes are dropped.
    let fork_choice = ForkChoice::from_anchor(
        store,
        anchor_block_root,
        &snapshot.beacon_block,
        &snapshot.beacon_state,
    )
    .map_err(|e| format!("{:?}", e))?;
    persisted_fork_choice.fork_choice = fork_choice.to_persisted();
    Ok(())
}
|
||||
|
||||
/// Migrate the persisted fork choice from the V6 proto-array layout to V7.
///
/// Decodes the legacy container, converts per-node justified/finalized *epochs* into
/// *checkpoints* (resolving roots via `update_checkpoints`), then repairs the store's
/// justified checkpoint if it disagrees with the proto-array nodes.
pub(crate) fn update_fork_choice<T: BeaconChainTypes>(
    persisted_fork_choice: &mut PersistedForkChoiceV7,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<(), StoreError> {
    // `PersistedForkChoice` stores the `ProtoArray` as a `Vec<u8>`. Deserialize these
    // bytes assuming the legacy struct, and transform them to the new struct before
    // re-serializing.
    let ssz_container_v6 =
        SszContainerV6::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes)
            .map_err(|e| {
                StoreError::SchemaMigrationError(format!(
                    "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
                    e
                ))
            })?;

    // Clone the V6 proto nodes in order to maintain information about `node.justified_epoch`
    // and `node.finalized_epoch`, which the V7 container no longer carries.
    let nodes_v6 = ssz_container_v6.nodes.clone();

    let justified_checkpoint = persisted_fork_choice.fork_choice_store.justified_checkpoint;
    let finalized_checkpoint = persisted_fork_choice.fork_choice_store.finalized_checkpoint;

    // These transformations instantiate `node.justified_checkpoint` and `node.finalized_checkpoint`
    // to `None`; they are filled in per-node by `update_checkpoints` below.
    let ssz_container_v7: SszContainerV7 =
        ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint);
    let ssz_container: SszContainer = ssz_container_v7.into();
    let mut fork_choice: ProtoArrayForkChoice = ssz_container.into();

    update_checkpoints::<T>(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db)
        .map_err(StoreError::SchemaMigrationError)?;

    // Update the justified checkpoint in the store in case we have a discrepancy
    // between the store and the proto array nodes.
    update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice)
        .map_err(StoreError::SchemaMigrationError)?;

    Ok(())
}
|
||||
|
||||
/// Identity and location of a head block within the proto array.
struct HeadInfo {
    /// Index of the head node in the proto array's `nodes` vector.
    index: usize,
    /// Block root of the head.
    root: Hash256,
    /// Slot of the head block.
    slot: Slot,
}
|
||||
|
||||
/// Populate the (initially `None`) `justified_checkpoint` / `finalized_checkpoint` on every
/// proto node reachable from a head that descends from `finalized_root`.
///
/// For each head this runs two passes over the ancestor chain: the first collects the set of
/// epochs that need a root resolved, the second writes the resolved checkpoints back into the
/// nodes. `nodes_v6` supplies the legacy per-node epochs at matching indices.
fn update_checkpoints<T: BeaconChainTypes>(
    finalized_root: Hash256,
    nodes_v6: &[ProtoNodeV6],
    fork_choice: &mut ProtoArrayForkChoice,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<(), String> {
    let heads = find_finalized_descendant_heads(finalized_root, fork_choice);

    // For each head, first gather all epochs we will need to find justified or finalized roots for.
    for head in heads {
        // `relevant_epochs` are epochs for which we will need to find the root at the start slot.
        // We don't need to worry about whether they are finalized or justified epochs.
        let mut relevant_epochs = HashSet::new();
        let relevant_epoch_finder = |index, _: &mut ProtoNode| {
            // The V6 and V7 arrays are index-aligned, so the legacy epochs live at `index`.
            let (justified_epoch, finalized_epoch) = nodes_v6
                .get(index)
                .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch))
                .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?;
            relevant_epochs.insert(justified_epoch);
            relevant_epochs.insert(finalized_epoch);
            Ok(())
        };

        apply_to_chain_of_ancestors(
            finalized_root,
            head.index,
            fork_choice,
            relevant_epoch_finder,
        )?;

        // find the block roots associated with each relevant epoch.
        let roots_by_epoch =
            map_relevant_epochs_to_roots::<T>(head.root, head.slot, relevant_epochs, db.clone())?;

        // Apply this mutator to the chain of descendants from this head, adding justified
        // and finalized checkpoints for each.
        let node_mutator = |index, node: &mut ProtoNode| {
            let (justified_epoch, finalized_epoch) = nodes_v6
                .get(index)
                .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch))
                .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?;

            // Update the checkpoints only if they haven't already been populated.
            // (A node shared by several heads is only resolved once.)
            if node.justified_checkpoint.is_none() {
                let justified_checkpoint =
                    roots_by_epoch
                        .get(&justified_epoch)
                        .map(|&root| Checkpoint {
                            epoch: justified_epoch,
                            root,
                        });
                node.justified_checkpoint = justified_checkpoint;
            }
            if node.finalized_checkpoint.is_none() {
                let finalized_checkpoint =
                    roots_by_epoch
                        .get(&finalized_epoch)
                        .map(|&root| Checkpoint {
                            epoch: finalized_epoch,
                            root,
                        });
                node.finalized_checkpoint = finalized_checkpoint;
            }

            Ok(())
        };

        apply_to_chain_of_ancestors(finalized_root, head.index, fork_choice, node_mutator)?;
    }
    Ok(())
}
|
||||
|
||||
/// Converts the given `HashSet<Epoch>` to a `Vec<Epoch>` then reverse sorts by `Epoch`. Next, a
/// single `BlockRootsIterator` is created which is used to iterate backwards from the given
/// `head_root` and `head_slot`, finding the block root at the start slot of each epoch.
///
/// The reverse sort matters: the iterator only moves backwards, so epochs must be visited
/// newest-first for a single pass to find them all.
fn map_relevant_epochs_to_roots<T: BeaconChainTypes>(
    head_root: Hash256,
    head_slot: Slot,
    epochs: HashSet<Epoch>,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<HashMap<Epoch, Hash256>, String> {
    // Convert the `HashSet` to a `Vec` and reverse sort the epochs.
    let mut relevant_epochs = epochs.into_iter().collect::<Vec<_>>();
    relevant_epochs.sort_unstable_by(|a, b| b.cmp(a));

    // Iterate backwards from the given `head_root` and `head_slot` and find the block root at each epoch.
    // The head itself is prepended since the iterator starts at its parent.
    let mut iter = std::iter::once(Ok((head_root, head_slot)))
        .chain(BlockRootsIterator::from_block(&db, head_root).map_err(|e| format!("{:?}", e))?);
    let mut roots_by_epoch = HashMap::new();
    for epoch in relevant_epochs {
        let start_slot = epoch.start_slot(T::EthSpec::slots_per_epoch());

        // `find_map` advances the shared iterator, so later (older) epochs continue from here.
        let root = iter
            .find_map(|next| match next {
                Ok((root, slot)) => (slot == start_slot).then(|| Ok(root)),
                Err(e) => Some(Err(format!("{:?}", e))),
            })
            .transpose()?
            .ok_or_else(|| "Justified root not found".to_string())?;
        roots_by_epoch.insert(epoch, root);
    }
    Ok(roots_by_epoch)
}
|
||||
|
||||
/// Applies a mutator to every node in a chain, starting from the node at the given
/// `head_index` and iterating through ancestors until the `finalized_root` is reached.
///
/// The mutator receives the node's index and a mutable reference to the node itself; the
/// finalized node *is* mutated before iteration stops. Errors from the mutator propagate.
fn apply_to_chain_of_ancestors<F>(
    finalized_root: Hash256,
    head_index: usize,
    fork_choice: &mut ProtoArrayForkChoice,
    mut node_mutator: F,
) -> Result<(), String>
where
    F: FnMut(usize, &mut ProtoNode) -> Result<(), String>,
{
    let head = fork_choice
        .core_proto_array_mut()
        .nodes
        .get_mut(head_index)
        .ok_or_else(|| "Head index not found in proto nodes".to_string())?;

    node_mutator(head_index, head)?;

    // Walk the parent links one node at a time; each `get_mut` re-borrows the array so only
    // one node is mutably borrowed at once.
    let mut parent_index_opt = head.parent;
    let mut parent_opt =
        parent_index_opt.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index));

    // Iterate backwards through all parents until there is no reference to a parent or we reach
    // the `finalized_root` node.
    while let (Some(parent), Some(parent_index)) = (parent_opt, parent_index_opt) {
        node_mutator(parent_index, parent)?;

        // Break out of this while loop *after* the `node_mutator` has been applied to the finalized
        // node.
        if parent.root == finalized_root {
            break;
        }

        // Update parent values
        parent_index_opt = parent.parent;
        parent_opt = parent_index_opt
            .and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index));
    }
    Ok(())
}
|
||||
|
||||
/// Finds all heads by finding all nodes in the proto array that are not referenced as parents. Then
|
||||
/// checks that these nodes are descendants of the finalized root in order to determine if they are
|
||||
/// relevant.
|
||||
fn find_finalized_descendant_heads(
|
||||
finalized_root: Hash256,
|
||||
fork_choice: &ProtoArrayForkChoice,
|
||||
) -> Vec<HeadInfo> {
|
||||
let nodes_referenced_as_parents: HashSet<usize> = fork_choice
|
||||
.core_proto_array()
|
||||
.nodes
|
||||
.iter()
|
||||
.filter_map(|node| node.parent)
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
fork_choice
|
||||
.core_proto_array()
|
||||
.nodes
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(index, node)| {
|
||||
(!nodes_referenced_as_parents.contains(&index)
|
||||
&& fork_choice.is_descendant(finalized_root, node.root))
|
||||
.then(|| HeadInfo {
|
||||
index,
|
||||
root: node.root,
|
||||
slot: node.slot,
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
/// Repair a justified/finalized checkpoint combination that may not exist on any node.
///
/// Scans the proto nodes whose `finalized_checkpoint` matches the store's, picks the one with
/// the highest justified epoch, and writes that justified checkpoint into both the proto array
/// and the persisted store, then re-serializes the proto array bytes. Errors if no node carries
/// the store's finalized checkpoint.
fn update_store_justified_checkpoint(
    persisted_fork_choice: &mut PersistedForkChoiceV7,
    fork_choice: &mut ProtoArrayForkChoice,
) -> Result<(), String> {
    let justified_checkpoint = fork_choice
        .core_proto_array()
        .nodes
        .iter()
        .filter_map(|node| {
            // Only nodes that share the store's finalized checkpoint are candidates; the
            // `.flatten()` drops nodes whose justified checkpoint is still `None`.
            (node.finalized_checkpoint
                == Some(persisted_fork_choice.fork_choice_store.finalized_checkpoint))
            .then(|| node.justified_checkpoint)
            .flatten()
        })
        .max_by_key(|justified_checkpoint| justified_checkpoint.epoch)
        .ok_or("Proto node with current finalized checkpoint not found")?;

    fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint;
    // Persist the mutated proto array alongside the updated store checkpoint.
    persisted_fork_choice.fork_choice.proto_array_bytes = fork_choice.as_bytes();
    persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint;
    Ok(())
}
|
||||
|
||||
// Add a zero `proposer_boost_root` when migrating from V1-6 to V7. Every other field is
// carried across unchanged.
impl From<PersistedForkChoiceStoreV1> for PersistedForkChoiceStoreV7 {
    fn from(other: PersistedForkChoiceStoreV1) -> Self {
        Self {
            balances_cache: other.balances_cache,
            time: other.time,
            finalized_checkpoint: other.finalized_checkpoint,
            justified_checkpoint: other.justified_checkpoint,
            justified_balances: other.justified_balances,
            best_justified_checkpoint: other.best_justified_checkpoint,
            // New in V7; zero means "no proposer boost".
            proposer_boost_root: Hash256::zero(),
        }
    }
}
|
||||
|
||||
impl From<PersistedForkChoiceV1> for PersistedForkChoiceV7 {
    /// Upgrade the outer persisted struct; only the store is converted, the raw fork choice
    /// bytes are moved across untouched (they are migrated separately).
    fn from(other: PersistedForkChoiceV1) -> Self {
        Self {
            fork_choice: other.fork_choice,
            fork_choice_store: other.fork_choice_store.into(),
        }
    }
}
|
||||
@@ -0,0 +1,50 @@
|
||||
use crate::beacon_chain::BeaconChainTypes;
|
||||
use crate::beacon_fork_choice_store::{
|
||||
BalancesCacheV8, CacheItemV8, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8,
|
||||
};
|
||||
use crate::persisted_fork_choice::{PersistedForkChoiceV7, PersistedForkChoiceV8};
|
||||
use std::sync::Arc;
|
||||
use store::{Error as StoreError, HotColdDB};
|
||||
use types::EthSpec;
|
||||
|
||||
/// Migrate `PersistedForkChoiceV7` to V8 by rebuilding the balances cache so that every item
/// carries an `epoch`. Cache entries whose block is no longer in the database are dropped.
pub fn update_fork_choice<T: BeaconChainTypes>(
    fork_choice: PersistedForkChoiceV7,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<PersistedForkChoiceV8, StoreError> {
    // Destructure so every V7 field is either moved across or explicitly rebuilt.
    let PersistedForkChoiceStoreV7 {
        balances_cache,
        time,
        finalized_checkpoint,
        justified_checkpoint,
        justified_balances,
        best_justified_checkpoint,
        proposer_boost_root,
    } = fork_choice.fork_choice_store;
    // Start with an empty V8 cache and re-populate it from the old items below.
    let mut fork_choice_store = PersistedForkChoiceStoreV8 {
        balances_cache: BalancesCacheV8::default(),
        time,
        finalized_checkpoint,
        justified_checkpoint,
        justified_balances,
        best_justified_checkpoint,
        proposer_boost_root,
    };

    // Add epochs to the balances cache. It's safe to just use the block's epoch because
    // before schema v8 the cache would always miss on skipped slots.
    for item in balances_cache.items {
        // Drop any blocks that aren't found, they're presumably too old and this is only a cache.
        if let Some(block) = db.get_block(&item.block_root)? {
            fork_choice_store.balances_cache.items.push(CacheItemV8 {
                block_root: item.block_root,
                epoch: block.slot().epoch(T::EthSpec::slots_per_epoch()),
                balances: item.balances,
            });
        }
    }

    Ok(PersistedForkChoiceV8 {
        fork_choice: fork_choice.fork_choice,
        fork_choice_store,
    })
}
|
||||
192
beacon_node/beacon_chain/src/schema_change/types.rs
Normal file
192
beacon_node/beacon_chain/src/schema_change/types.rs
Normal file
@@ -0,0 +1,192 @@
|
||||
use crate::types::{AttestationShufflingId, Checkpoint, Epoch, Hash256, Slot};
|
||||
use proto_array::core::{ProposerBoost, ProtoNode, SszContainer, VoteTracker};
|
||||
use proto_array::ExecutionStatus;
|
||||
use ssz::four_byte_option_impl;
|
||||
use ssz::Encode;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use superstruct::superstruct;
|
||||
|
||||
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
|
||||
// selector.
|
||||
four_byte_option_impl!(four_byte_option_usize, usize);
|
||||
four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint);
|
||||
|
||||
/// Historical layouts of the proto-array node, used only by schema migrations.
///
/// Per the `only(...)` attributes: V1 and V6 carry justified/finalized *epochs*, V7 replaces
/// them with optional *checkpoints*; `execution_status` exists from V6 onward.
#[superstruct(
    variants(V1, V6, V7),
    variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)),
    no_enum
)]
pub struct ProtoNode {
    pub slot: Slot,
    pub state_root: Hash256,
    pub target_root: Hash256,
    pub current_epoch_shuffling_id: AttestationShufflingId,
    pub next_epoch_shuffling_id: AttestationShufflingId,
    pub root: Hash256,
    // Legacy four-byte union selector for SSZ compatibility with the old encoding.
    #[ssz(with = "four_byte_option_usize")]
    pub parent: Option<usize>,
    #[superstruct(only(V1, V6))]
    pub justified_epoch: Epoch,
    #[superstruct(only(V1, V6))]
    pub finalized_epoch: Epoch,
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V7))]
    pub justified_checkpoint: Option<Checkpoint>,
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V7))]
    pub finalized_checkpoint: Option<Checkpoint>,
    pub weight: u64,
    #[ssz(with = "four_byte_option_usize")]
    pub best_child: Option<usize>,
    #[ssz(with = "four_byte_option_usize")]
    pub best_descendant: Option<usize>,
    #[superstruct(only(V6, V7))]
    pub execution_status: ExecutionStatus,
}
|
||||
|
||||
impl Into<ProtoNodeV6> for ProtoNodeV1 {
|
||||
fn into(self) -> ProtoNodeV6 {
|
||||
ProtoNodeV6 {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_epoch: self.justified_epoch,
|
||||
finalized_epoch: self.finalized_epoch,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
// We set the following execution value as if the block is a pre-merge-fork block. This
|
||||
// is safe as long as we never import a merge block with the old version of proto-array.
|
||||
// This will be safe since we can't actually process merge blocks until we've made this
|
||||
// change to fork choice.
|
||||
execution_status: ExecutionStatus::irrelevant(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<ProtoNodeV7> for ProtoNodeV6 {
|
||||
fn into(self) -> ProtoNodeV7 {
|
||||
ProtoNodeV7 {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_checkpoint: None,
|
||||
finalized_checkpoint: None,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
execution_status: self.execution_status,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<ProtoNode> for ProtoNodeV7 {
|
||||
fn into(self) -> ProtoNode {
|
||||
ProtoNode {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
execution_status: self.execution_status,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Historical layouts of the serialized proto-array container, used only by schema
/// migrations. V1/V6 store justified/finalized *epochs*; V7 stores *checkpoints* and adds
/// `previous_proposer_boost`. The `nodes` element type tracks the matching `ProtoNode`
/// variant.
#[superstruct(
    variants(V1, V6, V7),
    variant_attributes(derive(Encode, Decode)),
    no_enum
)]
// NOTE(review): with `no_enum` this outer derive looks redundant next to
// `variant_attributes` — confirm whether superstruct still consumes it.
#[derive(Encode, Decode)]
pub struct SszContainer {
    pub votes: Vec<VoteTracker>,
    pub balances: Vec<u64>,
    pub prune_threshold: usize,
    #[superstruct(only(V1, V6))]
    pub justified_epoch: Epoch,
    #[superstruct(only(V1, V6))]
    pub finalized_epoch: Epoch,
    #[superstruct(only(V7))]
    pub justified_checkpoint: Checkpoint,
    #[superstruct(only(V7))]
    pub finalized_checkpoint: Checkpoint,
    #[superstruct(only(V1))]
    pub nodes: Vec<ProtoNodeV1>,
    #[superstruct(only(V6))]
    pub nodes: Vec<ProtoNodeV6>,
    #[superstruct(only(V7))]
    pub nodes: Vec<ProtoNodeV7>,
    pub indices: Vec<(Hash256, usize)>,
    #[superstruct(only(V7))]
    pub previous_proposer_boost: ProposerBoost,
}
|
||||
|
||||
impl Into<SszContainerV6> for SszContainerV1 {
|
||||
fn into(self) -> SszContainerV6 {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainerV6 {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_epoch: self.justified_epoch,
|
||||
finalized_epoch: self.finalized_epoch,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SszContainerV6 {
    /// Upgrade to V7, replacing the container-level epochs with the supplied checkpoints.
    ///
    /// This cannot be a plain `From` impl because the caller must provide the store's
    /// justified and finalized checkpoints. Per-node checkpoints start as `None` (see the
    /// node-level V6 -> V7 conversion) and are resolved later in the migration.
    pub(crate) fn into_ssz_container_v7(
        self,
        justified_checkpoint: Checkpoint,
        finalized_checkpoint: Checkpoint,
    ) -> SszContainerV7 {
        let nodes = self.nodes.into_iter().map(Into::into).collect();

        SszContainerV7 {
            votes: self.votes,
            balances: self.balances,
            prune_threshold: self.prune_threshold,
            justified_checkpoint,
            finalized_checkpoint,
            nodes,
            indices: self.indices,
            // New in V7; no boost has been applied yet.
            previous_proposer_boost: ProposerBoost::default(),
        }
    }
}
|
||||
|
||||
impl Into<SszContainer> for SszContainerV7 {
|
||||
fn into(self) -> SszContainer {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainer {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
previous_proposer_boost: self.previous_proposer_boost,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
use crate::metrics;
|
||||
use lru::LruCache;
|
||||
use types::{beacon_state::CommitteeCache, Epoch, Hash256};
|
||||
use types::{beacon_state::CommitteeCache, AttestationShufflingId, Epoch, Hash256};
|
||||
|
||||
/// The size of the LRU cache that stores committee caches for quicker verification.
|
||||
///
|
||||
@@ -14,7 +14,7 @@ const CACHE_SIZE: usize = 16;
|
||||
/// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like
|
||||
/// a find/replace error.
|
||||
pub struct ShufflingCache {
|
||||
cache: LruCache<(Epoch, Hash256), CommitteeCache>,
|
||||
cache: LruCache<AttestationShufflingId, CommitteeCache>,
|
||||
}
|
||||
|
||||
impl ShufflingCache {
|
||||
@@ -24,8 +24,8 @@ impl ShufflingCache {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get(&mut self, epoch: Epoch, root: Hash256) -> Option<&CommitteeCache> {
|
||||
let opt = self.cache.get(&(epoch, root));
|
||||
pub fn get(&mut self, key: &AttestationShufflingId) -> Option<&CommitteeCache> {
|
||||
let opt = self.cache.get(key);
|
||||
|
||||
if opt.is_some() {
|
||||
metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS);
|
||||
@@ -36,11 +36,40 @@ impl ShufflingCache {
|
||||
opt
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, epoch: Epoch, root: Hash256, committee_cache: &CommitteeCache) {
|
||||
let key = (epoch, root);
|
||||
pub fn contains(&self, key: &AttestationShufflingId) -> bool {
|
||||
self.cache.contains(key)
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, key: AttestationShufflingId, committee_cache: &CommitteeCache) {
|
||||
if !self.cache.contains(&key) {
|
||||
self.cache.put(key, committee_cache.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Contains the shuffling IDs for a beacon block: the attestation shufflings for the block's
/// current and next epochs, plus the block root they were derived from.
pub struct BlockShufflingIds {
    /// Shuffling ID for the block's current epoch.
    pub current: AttestationShufflingId,
    /// Shuffling ID for the epoch after `current`.
    pub next: AttestationShufflingId,
    /// Root of the block these shufflings belong to.
    pub block_root: Hash256,
}
|
||||
|
||||
impl BlockShufflingIds {
|
||||
/// Returns the shuffling ID for the given epoch.
|
||||
///
|
||||
/// Returns `None` if `epoch` is prior to `self.current.shuffling_epoch`.
|
||||
pub fn id_for_epoch(&self, epoch: Epoch) -> Option<AttestationShufflingId> {
|
||||
if epoch == self.current.shuffling_epoch {
|
||||
Some(self.current.clone())
|
||||
} else if epoch == self.next.shuffling_epoch {
|
||||
Some(self.next.clone())
|
||||
} else if epoch > self.next.shuffling_epoch {
|
||||
Some(AttestationShufflingId::from_components(
|
||||
epoch,
|
||||
self.block_root,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,140 @@
|
||||
use crate::BeaconSnapshot;
|
||||
use itertools::process_results;
|
||||
use std::cmp;
|
||||
use types::{Epoch, EthSpec, Hash256};
|
||||
use std::time::Duration;
|
||||
use types::{
|
||||
beacon_state::CloneConfig, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock,
|
||||
Slot,
|
||||
};
|
||||
|
||||
/// The default size of the cache.
|
||||
pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4;
|
||||
|
||||
/// The minimum block delay to clone the state in the cache instead of removing it.
|
||||
/// This helps keep block processing fast during re-orgs from late blocks.
|
||||
const MINIMUM_BLOCK_DELAY_FOR_CLONE: Duration = Duration::from_secs(6);
|
||||
|
||||
/// This snapshot is to be used for verifying a child of `self.beacon_block`.
|
||||
#[derive(Debug)]
|
||||
pub struct PreProcessingSnapshot<T: EthSpec> {
|
||||
/// This state is equivalent to the `self.beacon_block.state_root()` state that has been
|
||||
/// advanced forward one slot using `per_slot_processing`. This state is "primed and ready" for
|
||||
/// the application of another block.
|
||||
pub pre_state: BeaconState<T>,
|
||||
/// This value is only set to `Some` if the `pre_state` was *not* advanced forward.
|
||||
pub beacon_state_root: Option<Hash256>,
|
||||
pub beacon_block: SignedBeaconBlock<T>,
|
||||
pub beacon_block_root: Hash256,
|
||||
}
|
||||
|
||||
impl<T: EthSpec> From<BeaconSnapshot<T>> for PreProcessingSnapshot<T> {
|
||||
fn from(snapshot: BeaconSnapshot<T>) -> Self {
|
||||
let beacon_state_root = Some(snapshot.beacon_state_root());
|
||||
Self {
|
||||
pre_state: snapshot.beacon_state,
|
||||
beacon_state_root,
|
||||
beacon_block: snapshot.beacon_block,
|
||||
beacon_block_root: snapshot.beacon_block_root,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> CacheItem<T> {
|
||||
pub fn new_without_pre_state(snapshot: BeaconSnapshot<T>) -> Self {
|
||||
Self {
|
||||
beacon_block: snapshot.beacon_block,
|
||||
beacon_block_root: snapshot.beacon_block_root,
|
||||
beacon_state: snapshot.beacon_state,
|
||||
pre_state: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn clone_to_snapshot_with(&self, clone_config: CloneConfig) -> BeaconSnapshot<T> {
|
||||
BeaconSnapshot {
|
||||
beacon_state: self.beacon_state.clone_with(clone_config),
|
||||
beacon_block: self.beacon_block.clone(),
|
||||
beacon_block_root: self.beacon_block_root,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn into_pre_state(self) -> PreProcessingSnapshot<T> {
|
||||
// Do not include the beacon state root if the state has been advanced.
|
||||
let beacon_state_root =
|
||||
Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none());
|
||||
|
||||
PreProcessingSnapshot {
|
||||
beacon_block: self.beacon_block,
|
||||
beacon_block_root: self.beacon_block_root,
|
||||
pre_state: self.pre_state.unwrap_or(self.beacon_state),
|
||||
beacon_state_root,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn clone_as_pre_state(&self) -> PreProcessingSnapshot<T> {
|
||||
// Do not include the beacon state root if the state has been advanced.
|
||||
let beacon_state_root =
|
||||
Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none());
|
||||
|
||||
PreProcessingSnapshot {
|
||||
beacon_block: self.beacon_block.clone(),
|
||||
beacon_block_root: self.beacon_block_root,
|
||||
pre_state: self
|
||||
.pre_state
|
||||
.as_ref()
|
||||
.map_or_else(|| self.beacon_state.clone(), |pre_state| pre_state.clone()),
|
||||
beacon_state_root,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The information required for block production.
|
||||
pub struct BlockProductionPreState<T: EthSpec> {
|
||||
/// This state may or may not have been advanced forward a single slot.
|
||||
///
|
||||
/// See the documentation in the `crate::state_advance_timer` module for more information.
|
||||
pub pre_state: BeaconState<T>,
|
||||
/// This value will only be `Some` if `self.pre_state` was **not** advanced forward a single
|
||||
/// slot.
|
||||
///
|
||||
/// This value can be used to avoid tree-hashing the state during the first call to
|
||||
/// `per_slot_processing`.
|
||||
pub state_root: Option<Hash256>,
|
||||
}
|
||||
|
||||
pub enum StateAdvance<T: EthSpec> {
|
||||
/// The cache does not contain the supplied block root.
|
||||
BlockNotFound,
|
||||
/// The cache contains the supplied block root but the state has already been advanced.
|
||||
AlreadyAdvanced,
|
||||
/// The cache contains the supplied block root and the state has not yet been advanced.
|
||||
State {
|
||||
state: Box<BeaconState<T>>,
|
||||
state_root: Hash256,
|
||||
block_slot: Slot,
|
||||
},
|
||||
}
|
||||
|
||||
/// The item stored in the `SnapshotCache`.
|
||||
pub struct CacheItem<T: EthSpec> {
|
||||
beacon_block: SignedBeaconBlock<T>,
|
||||
beacon_block_root: Hash256,
|
||||
/// This state is equivalent to `self.beacon_block.state_root()`.
|
||||
beacon_state: BeaconState<T>,
|
||||
/// This state is equivalent to `self.beacon_state` that has had `per_slot_processing` applied
|
||||
/// to it. This state assists in optimizing block processing.
|
||||
pre_state: Option<BeaconState<T>>,
|
||||
}
|
||||
|
||||
impl<T: EthSpec> Into<BeaconSnapshot<T>> for CacheItem<T> {
|
||||
fn into(self) -> BeaconSnapshot<T> {
|
||||
BeaconSnapshot {
|
||||
beacon_state: self.beacon_state,
|
||||
beacon_block: self.beacon_block,
|
||||
beacon_block_root: self.beacon_block_root,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Provides a cache of `BeaconSnapshot` that is intended primarily for block processing.
|
||||
///
|
||||
/// ## Cache Queuing
|
||||
@@ -20,7 +150,7 @@ pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4;
|
||||
pub struct SnapshotCache<T: EthSpec> {
|
||||
max_len: usize,
|
||||
head_block_root: Hash256,
|
||||
snapshots: Vec<BeaconSnapshot<T>>,
|
||||
snapshots: Vec<CacheItem<T>>,
|
||||
}
|
||||
|
||||
impl<T: EthSpec> SnapshotCache<T> {
|
||||
@@ -31,15 +161,57 @@ impl<T: EthSpec> SnapshotCache<T> {
|
||||
Self {
|
||||
max_len: cmp::max(max_len, 1),
|
||||
head_block_root: head.beacon_block_root,
|
||||
snapshots: vec![head],
|
||||
snapshots: vec![CacheItem::new_without_pre_state(head)],
|
||||
}
|
||||
}
|
||||
|
||||
/// The block roots of all snapshots contained in `self`.
|
||||
pub fn beacon_block_roots(&self) -> Vec<Hash256> {
|
||||
self.snapshots.iter().map(|s| s.beacon_block_root).collect()
|
||||
}
|
||||
|
||||
/// The number of snapshots contained in `self`.
|
||||
pub fn len(&self) -> usize {
|
||||
self.snapshots.len()
|
||||
}
|
||||
|
||||
/// Insert a snapshot, potentially removing an existing snapshot if `self` is at capacity (see
|
||||
/// struct-level documentation for more info).
|
||||
pub fn insert(&mut self, snapshot: BeaconSnapshot<T>) {
|
||||
pub fn insert(
|
||||
&mut self,
|
||||
snapshot: BeaconSnapshot<T>,
|
||||
pre_state: Option<BeaconState<T>>,
|
||||
spec: &ChainSpec,
|
||||
) {
|
||||
let parent_root = snapshot.beacon_block.message().parent_root();
|
||||
let item = CacheItem {
|
||||
beacon_block: snapshot.beacon_block,
|
||||
beacon_block_root: snapshot.beacon_block_root,
|
||||
beacon_state: snapshot.beacon_state,
|
||||
pre_state,
|
||||
};
|
||||
|
||||
// Remove the grandparent of the block that was just inserted.
|
||||
//
|
||||
// Assuming it's unlikely to see re-orgs deeper than one block, this method helps keep the
|
||||
// cache small by removing any states that already have more than one descendant.
|
||||
//
|
||||
// Remove the grandparent first to free up room in the cache.
|
||||
let grandparent_result =
|
||||
process_results(item.beacon_state.rev_iter_block_roots(spec), |iter| {
|
||||
iter.map(|(_slot, root)| root)
|
||||
.find(|root| *root != item.beacon_block_root && *root != parent_root)
|
||||
});
|
||||
if let Ok(Some(grandparent_root)) = grandparent_result {
|
||||
let head_block_root = self.head_block_root;
|
||||
self.snapshots.retain(|snapshot| {
|
||||
let root = snapshot.beacon_block_root;
|
||||
root == head_block_root || root != grandparent_root
|
||||
});
|
||||
}
|
||||
|
||||
if self.snapshots.len() < self.max_len {
|
||||
self.snapshots.push(snapshot);
|
||||
self.snapshots.push(item);
|
||||
} else {
|
||||
let insert_at = self
|
||||
.snapshots
|
||||
@@ -47,7 +219,7 @@ impl<T: EthSpec> SnapshotCache<T> {
|
||||
.enumerate()
|
||||
.filter_map(|(i, snapshot)| {
|
||||
if snapshot.beacon_block_root != self.head_block_root {
|
||||
Some((i, snapshot.beacon_state.slot))
|
||||
Some((i, snapshot.beacon_state.slot()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@@ -56,32 +228,125 @@ impl<T: EthSpec> SnapshotCache<T> {
|
||||
.map(|(i, _slot)| i);
|
||||
|
||||
if let Some(i) = insert_at {
|
||||
self.snapshots[i] = snapshot;
|
||||
self.snapshots[i] = item;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// If there is a snapshot with `block_root`, remove and return it.
|
||||
pub fn try_remove(&mut self, block_root: Hash256) -> Option<BeaconSnapshot<T>> {
|
||||
/// If available, returns a `CacheItem` that should be used for importing/processing a block.
|
||||
/// The method will remove the block from `self`, carrying across any caches that may or may not
|
||||
/// be built.
|
||||
///
|
||||
/// In the event the block being processed was observed late, clone the cache instead of
|
||||
/// moving it. This allows us to process the next block quickly in the case of a re-org.
|
||||
/// Additionally, if the slot was skipped, clone the cache. This ensures blocks that are
|
||||
/// later than 1 slot still have access to the cache and can be processed quickly.
|
||||
pub fn get_state_for_block_processing(
|
||||
&mut self,
|
||||
block_root: Hash256,
|
||||
block_slot: Slot,
|
||||
block_delay: Option<Duration>,
|
||||
spec: &ChainSpec,
|
||||
) -> Option<(PreProcessingSnapshot<T>, bool)> {
|
||||
self.snapshots
|
||||
.iter()
|
||||
.position(|snapshot| snapshot.beacon_block_root == block_root)
|
||||
.map(|i| self.snapshots.remove(i))
|
||||
.map(|i| {
|
||||
if let Some(cache) = self.snapshots.get(i) {
|
||||
if block_slot > cache.beacon_block.slot() + 1 {
|
||||
return (cache.clone_as_pre_state(), true);
|
||||
}
|
||||
if let Some(delay) = block_delay {
|
||||
if delay >= MINIMUM_BLOCK_DELAY_FOR_CLONE
|
||||
&& delay <= Duration::from_secs(spec.seconds_per_slot) * 4
|
||||
{
|
||||
return (cache.clone_as_pre_state(), true);
|
||||
}
|
||||
}
|
||||
}
|
||||
(self.snapshots.remove(i).into_pre_state(), false)
|
||||
})
|
||||
}
|
||||
|
||||
/// If there is a snapshot with `block_root`, clone it (with only the committee caches) and
|
||||
/// return the clone.
|
||||
pub fn get_cloned(&self, block_root: Hash256) -> Option<BeaconSnapshot<T>> {
|
||||
/// If available, obtains a clone of a `BeaconState` that should be used for block production.
|
||||
/// The clone will use `CloneConfig:all()`, ensuring any tree-hash cache is cloned too.
|
||||
///
|
||||
/// ## Note
|
||||
///
|
||||
/// This method clones the `BeaconState` (instead of removing it) since we assume that any block
|
||||
/// we produce will soon be pushed to the `BeaconChain` for importing/processing. Keeping a copy
|
||||
/// of that `BeaconState` in `self` will greatly help with import times.
|
||||
pub fn get_state_for_block_production(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
) -> Option<BlockProductionPreState<T>> {
|
||||
self.snapshots
|
||||
.iter()
|
||||
.find(|snapshot| snapshot.beacon_block_root == block_root)
|
||||
.map(|snapshot| snapshot.clone_with_only_committee_caches())
|
||||
.map(|snapshot| {
|
||||
if let Some(pre_state) = &snapshot.pre_state {
|
||||
BlockProductionPreState {
|
||||
pre_state: pre_state.clone_with(CloneConfig::all()),
|
||||
state_root: None,
|
||||
}
|
||||
} else {
|
||||
BlockProductionPreState {
|
||||
pre_state: snapshot.beacon_state.clone_with(CloneConfig::all()),
|
||||
state_root: Some(snapshot.beacon_block.state_root()),
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// If there is a snapshot with `block_root`, clone it and return the clone.
|
||||
pub fn get_cloned(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
clone_config: CloneConfig,
|
||||
) -> Option<BeaconSnapshot<T>> {
|
||||
self.snapshots
|
||||
.iter()
|
||||
.find(|snapshot| snapshot.beacon_block_root == block_root)
|
||||
.map(|snapshot| snapshot.clone_to_snapshot_with(clone_config))
|
||||
}
|
||||
|
||||
pub fn get_for_state_advance(&mut self, block_root: Hash256) -> StateAdvance<T> {
|
||||
if let Some(snapshot) = self
|
||||
.snapshots
|
||||
.iter_mut()
|
||||
.find(|snapshot| snapshot.beacon_block_root == block_root)
|
||||
{
|
||||
if snapshot.pre_state.is_some() {
|
||||
StateAdvance::AlreadyAdvanced
|
||||
} else {
|
||||
let cloned = snapshot
|
||||
.beacon_state
|
||||
.clone_with(CloneConfig::committee_caches_only());
|
||||
|
||||
StateAdvance::State {
|
||||
state: Box::new(std::mem::replace(&mut snapshot.beacon_state, cloned)),
|
||||
state_root: snapshot.beacon_block.state_root(),
|
||||
block_slot: snapshot.beacon_block.slot(),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
StateAdvance::BlockNotFound
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_pre_state(&mut self, block_root: Hash256, state: BeaconState<T>) -> Option<()> {
|
||||
self.snapshots
|
||||
.iter_mut()
|
||||
.find(|snapshot| snapshot.beacon_block_root == block_root)
|
||||
.map(|snapshot| {
|
||||
snapshot.pre_state = Some(state);
|
||||
})
|
||||
}
|
||||
|
||||
/// Removes all snapshots from the queue that are less than or equal to the finalized epoch.
|
||||
pub fn prune(&mut self, finalized_epoch: Epoch) {
|
||||
self.snapshots.retain(|snapshot| {
|
||||
snapshot.beacon_state.slot > finalized_epoch.start_slot(T::slots_per_epoch())
|
||||
snapshot.beacon_state.slot() > finalized_epoch.start_slot(T::slots_per_epoch())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -97,34 +362,48 @@ impl<T: EthSpec> SnapshotCache<T> {
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType};
|
||||
use types::{
|
||||
test_utils::{generate_deterministic_keypair, TestingBeaconStateBuilder},
|
||||
BeaconBlock, Epoch, MainnetEthSpec, SignedBeaconBlock, Slot,
|
||||
test_utils::generate_deterministic_keypair, BeaconBlock, Epoch, MainnetEthSpec,
|
||||
SignedBeaconBlock, Slot,
|
||||
};
|
||||
|
||||
fn get_harness() -> BeaconChainHarness<EphemeralHarnessType<MainnetEthSpec>> {
|
||||
let harness = BeaconChainHarness::builder(MainnetEthSpec)
|
||||
.default_spec()
|
||||
.deterministic_keypairs(1)
|
||||
.fresh_ephemeral_store()
|
||||
.build();
|
||||
|
||||
harness.advance_slot();
|
||||
|
||||
harness
|
||||
}
|
||||
|
||||
const CACHE_SIZE: usize = 4;
|
||||
|
||||
fn get_snapshot(i: u64) -> BeaconSnapshot<MainnetEthSpec> {
|
||||
let spec = MainnetEthSpec::default_spec();
|
||||
|
||||
let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(1, &spec);
|
||||
let (beacon_state, _keypairs) = state_builder.build();
|
||||
let beacon_state = get_harness().chain.head_beacon_state().unwrap();
|
||||
|
||||
let signed_beacon_block = SignedBeaconBlock::from_block(
|
||||
BeaconBlock::empty(&spec),
|
||||
generate_deterministic_keypair(0)
|
||||
.sk
|
||||
.sign(Hash256::from_low_u64_be(42)),
|
||||
);
|
||||
|
||||
BeaconSnapshot {
|
||||
beacon_state,
|
||||
beacon_state_root: Hash256::from_low_u64_be(i),
|
||||
beacon_block: SignedBeaconBlock {
|
||||
message: BeaconBlock::empty(&spec),
|
||||
signature: generate_deterministic_keypair(0)
|
||||
.sk
|
||||
.sign(Hash256::from_low_u64_be(42)),
|
||||
},
|
||||
beacon_block: signed_beacon_block,
|
||||
beacon_block_root: Hash256::from_low_u64_be(i),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn insert_get_prune_update() {
|
||||
let spec = MainnetEthSpec::default_spec();
|
||||
let mut cache = SnapshotCache::new(CACHE_SIZE, get_snapshot(0));
|
||||
|
||||
// Insert a bunch of entries in the cache. It should look like this:
|
||||
@@ -138,9 +417,10 @@ mod test {
|
||||
let mut snapshot = get_snapshot(i);
|
||||
|
||||
// Each snapshot should be one slot into an epoch, with each snapshot one epoch apart.
|
||||
snapshot.beacon_state.slot = Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1);
|
||||
*snapshot.beacon_state.slot_mut() =
|
||||
Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1);
|
||||
|
||||
cache.insert(snapshot);
|
||||
cache.insert(snapshot, None, &spec);
|
||||
|
||||
assert_eq!(
|
||||
cache.snapshots.len(),
|
||||
@@ -158,36 +438,51 @@ mod test {
|
||||
// 2 2
|
||||
// 3 3
|
||||
assert_eq!(cache.snapshots.len(), CACHE_SIZE);
|
||||
cache.insert(get_snapshot(42));
|
||||
cache.insert(get_snapshot(42), None, &spec);
|
||||
assert_eq!(cache.snapshots.len(), CACHE_SIZE);
|
||||
|
||||
assert!(
|
||||
cache.try_remove(Hash256::from_low_u64_be(1)).is_none(),
|
||||
cache
|
||||
.get_state_for_block_processing(
|
||||
Hash256::from_low_u64_be(1),
|
||||
Slot::new(0),
|
||||
None,
|
||||
&spec
|
||||
)
|
||||
.is_none(),
|
||||
"the snapshot with the lowest slot should have been removed during the insert function"
|
||||
);
|
||||
assert!(cache.get_cloned(Hash256::from_low_u64_be(1)).is_none());
|
||||
assert!(cache
|
||||
.get_cloned(Hash256::from_low_u64_be(1), CloneConfig::none())
|
||||
.is_none());
|
||||
|
||||
assert!(
|
||||
assert_eq!(
|
||||
cache
|
||||
.get_cloned(Hash256::from_low_u64_be(0))
|
||||
.get_cloned(Hash256::from_low_u64_be(0), CloneConfig::none())
|
||||
.expect("the head should still be in the cache")
|
||||
.beacon_block_root
|
||||
== Hash256::from_low_u64_be(0),
|
||||
.beacon_block_root,
|
||||
Hash256::from_low_u64_be(0),
|
||||
"get_cloned should get the correct snapshot"
|
||||
);
|
||||
assert!(
|
||||
assert_eq!(
|
||||
cache
|
||||
.try_remove(Hash256::from_low_u64_be(0))
|
||||
.get_state_for_block_processing(
|
||||
Hash256::from_low_u64_be(0),
|
||||
Slot::new(0),
|
||||
None,
|
||||
&spec
|
||||
)
|
||||
.expect("the head should still be in the cache")
|
||||
.beacon_block_root
|
||||
== Hash256::from_low_u64_be(0),
|
||||
"try_remove should get the correct snapshot"
|
||||
.0
|
||||
.beacon_block_root,
|
||||
Hash256::from_low_u64_be(0),
|
||||
"get_state_for_block_processing should get the correct snapshot"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
cache.snapshots.len(),
|
||||
CACHE_SIZE - 1,
|
||||
"try_remove should shorten the cache"
|
||||
"get_state_for_block_processing should shorten the cache"
|
||||
);
|
||||
|
||||
// Prune the cache. Afterwards it should look like:
|
||||
@@ -203,17 +498,23 @@ mod test {
|
||||
|
||||
// Over-fill the cache so it needs to eject some old values on insert.
|
||||
for i in 0..CACHE_SIZE as u64 {
|
||||
cache.insert(get_snapshot(u64::max_value() - i));
|
||||
cache.insert(get_snapshot(u64::max_value() - i), None, &spec);
|
||||
}
|
||||
|
||||
// Ensure that the new head value was not removed from the cache.
|
||||
assert!(
|
||||
assert_eq!(
|
||||
cache
|
||||
.try_remove(Hash256::from_low_u64_be(2))
|
||||
.get_state_for_block_processing(
|
||||
Hash256::from_low_u64_be(2),
|
||||
Slot::new(0),
|
||||
None,
|
||||
&spec
|
||||
)
|
||||
.expect("the new head should still be in the cache")
|
||||
.beacon_block_root
|
||||
== Hash256::from_low_u64_be(2),
|
||||
"try_remove should get the correct snapshot"
|
||||
.0
|
||||
.beacon_block_root,
|
||||
Hash256::from_low_u64_be(2),
|
||||
"get_state_for_block_processing should get the correct snapshot"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
394
beacon_node/beacon_chain/src/state_advance_timer.rs
Normal file
394
beacon_node/beacon_chain/src/state_advance_timer.rs
Normal file
@@ -0,0 +1,394 @@
|
||||
//! Provides a timer which runs in the tail-end of each slot and maybe advances the state of the
|
||||
//! head block forward a single slot.
|
||||
//!
|
||||
//! This provides an optimization with the following benefits:
|
||||
//!
|
||||
//! 1. Removes the burden of a single, mandatory `per_slot_processing` call from the leading-edge of
|
||||
//! block processing. This helps import blocks faster.
|
||||
//! 2. Allows the node to learn of the shuffling for the next epoch, before the first block from
|
||||
//! that epoch has arrived. This helps reduce gossip block propagation times.
|
||||
//!
|
||||
//! The downsides to this optimization are:
|
||||
//!
|
||||
//! 1. We are required to store an additional `BeaconState` for the head block. This consumes
|
||||
//! memory.
|
||||
//! 2. There's a possibility that the head block is never built upon, causing wasted CPU cycles.
|
||||
use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
|
||||
use crate::{
|
||||
beacon_chain::{ATTESTATION_CACHE_LOCK_TIMEOUT, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT},
|
||||
snapshot_cache::StateAdvance,
|
||||
BeaconChain, BeaconChainError, BeaconChainTypes,
|
||||
};
|
||||
use slog::{debug, error, warn, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use state_processing::per_slot_processing;
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
};
|
||||
use task_executor::TaskExecutor;
|
||||
use tokio::time::sleep;
|
||||
use types::{AttestationShufflingId, EthSpec, Hash256, RelativeEpoch, Slot};
|
||||
|
||||
/// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform
|
||||
/// the state advancement.
|
||||
///
|
||||
/// This avoids doing unnecessary work whilst the node is syncing or has perhaps been put to sleep
|
||||
/// for some period of time.
|
||||
const MAX_ADVANCE_DISTANCE: u64 = 4;
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Error {
|
||||
BeaconChain(BeaconChainError),
|
||||
HeadMissingFromSnapshotCache(Hash256),
|
||||
MaxDistanceExceeded {
|
||||
current_slot: Slot,
|
||||
head_slot: Slot,
|
||||
},
|
||||
StateAlreadyAdvanced {
|
||||
block_root: Hash256,
|
||||
},
|
||||
BadStateSlot {
|
||||
_state_slot: Slot,
|
||||
_block_slot: Slot,
|
||||
},
|
||||
}
|
||||
|
||||
impl From<BeaconChainError> for Error {
|
||||
fn from(e: BeaconChainError) -> Self {
|
||||
Self::BeaconChain(e)
|
||||
}
|
||||
}
|
||||
|
||||
/// Provides a simple thread-safe lock to be used for task co-ordination. Practically equivalent to
|
||||
/// `Mutex<()>`.
|
||||
#[derive(Clone)]
|
||||
struct Lock(Arc<AtomicBool>);
|
||||
|
||||
impl Lock {
|
||||
/// Instantiate an unlocked self.
|
||||
pub fn new() -> Self {
|
||||
Self(Arc::new(AtomicBool::new(false)))
|
||||
}
|
||||
|
||||
/// Lock self, returning `true` if the lock was already set.
|
||||
pub fn lock(&self) -> bool {
|
||||
self.0.fetch_or(true, Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Unlock self.
|
||||
pub fn unlock(&self) {
|
||||
self.0.store(false, Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawns the timer described in the module-level documentation.
|
||||
pub fn spawn_state_advance_timer<T: BeaconChainTypes>(
|
||||
executor: TaskExecutor,
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
log: Logger,
|
||||
) {
|
||||
executor.spawn(
|
||||
state_advance_timer(executor.clone(), beacon_chain, log),
|
||||
"state_advance_timer",
|
||||
);
|
||||
}
|
||||
|
||||
/// Provides the timer described in the module-level documentation.
|
||||
async fn state_advance_timer<T: BeaconChainTypes>(
|
||||
executor: TaskExecutor,
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
log: Logger,
|
||||
) {
|
||||
let is_running = Lock::new();
|
||||
let slot_clock = &beacon_chain.slot_clock;
|
||||
let slot_duration = slot_clock.slot_duration();
|
||||
|
||||
loop {
|
||||
match beacon_chain.slot_clock.duration_to_next_slot() {
|
||||
Some(duration) => sleep(duration + (slot_duration / 4) * 3).await,
|
||||
None => {
|
||||
error!(log, "Failed to read slot clock");
|
||||
// If we can't read the slot clock, just wait another slot.
|
||||
sleep(slot_duration).await;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// Only start spawn the state advance task if the lock was previously free.
|
||||
if !is_running.lock() {
|
||||
let log = log.clone();
|
||||
let beacon_chain = beacon_chain.clone();
|
||||
let is_running = is_running.clone();
|
||||
|
||||
executor.spawn_blocking(
|
||||
move || {
|
||||
match advance_head(&beacon_chain, &log) {
|
||||
Ok(()) => (),
|
||||
Err(Error::BeaconChain(e)) => error!(
|
||||
log,
|
||||
"Failed to advance head state";
|
||||
"error" => ?e
|
||||
),
|
||||
Err(Error::StateAlreadyAdvanced { block_root }) => debug!(
|
||||
log,
|
||||
"State already advanced on slot";
|
||||
"block_root" => ?block_root
|
||||
),
|
||||
Err(Error::MaxDistanceExceeded {
|
||||
current_slot,
|
||||
head_slot,
|
||||
}) => debug!(
|
||||
log,
|
||||
"Refused to advance head state";
|
||||
"head_slot" => head_slot,
|
||||
"current_slot" => current_slot,
|
||||
),
|
||||
other => warn!(
|
||||
log,
|
||||
"Did not advance head state";
|
||||
"reason" => ?other
|
||||
),
|
||||
};
|
||||
|
||||
// Permit this blocking task to spawn again, next time the timer fires.
|
||||
is_running.unlock();
|
||||
},
|
||||
"state_advance_blocking",
|
||||
);
|
||||
} else {
|
||||
warn!(
|
||||
log,
|
||||
"State advance routine overloaded";
|
||||
"msg" => "system resources may be overloaded"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads the `snapshot_cache` from the `beacon_chain` and attempts to take a clone of the
|
||||
/// `BeaconState` of the head block. If it obtains this clone, the state will be advanced a single
|
||||
/// slot then placed back in the `snapshot_cache` to be used for block verification.
|
||||
///
|
||||
/// See the module-level documentation for rationale.
|
||||
fn advance_head<T: BeaconChainTypes>(
|
||||
beacon_chain: &BeaconChain<T>,
|
||||
log: &Logger,
|
||||
) -> Result<(), Error> {
|
||||
let current_slot = beacon_chain.slot()?;
|
||||
|
||||
// These brackets ensure that the `head_slot` value is dropped before we run fork choice and
|
||||
// potentially invalidate it.
|
||||
//
|
||||
// Fork-choice is not run *before* this function to avoid unnecessary calls whilst syncing.
|
||||
{
|
||||
let head_slot = beacon_chain.head_info()?.slot;
|
||||
|
||||
// Don't run this when syncing or if lagging too far behind.
|
||||
if head_slot + MAX_ADVANCE_DISTANCE < current_slot {
|
||||
return Err(Error::MaxDistanceExceeded {
|
||||
current_slot,
|
||||
head_slot,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Run fork choice so we get the latest view of the head.
|
||||
//
|
||||
// This is useful since it's quite likely that the last time we ran fork choice was shortly
|
||||
// after receiving the latest gossip block, but not necessarily after we've received the
|
||||
// majority of attestations.
|
||||
beacon_chain.fork_choice()?;
|
||||
|
||||
let head_root = beacon_chain.head_info()?.block_root;
|
||||
|
||||
let (head_slot, head_state_root, mut state) = match beacon_chain
|
||||
.snapshot_cache
|
||||
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(BeaconChainError::SnapshotCacheLockTimeout)?
|
||||
.get_for_state_advance(head_root)
|
||||
{
|
||||
StateAdvance::AlreadyAdvanced => {
|
||||
return Err(Error::StateAlreadyAdvanced {
|
||||
block_root: head_root,
|
||||
})
|
||||
}
|
||||
StateAdvance::BlockNotFound => return Err(Error::HeadMissingFromSnapshotCache(head_root)),
|
||||
StateAdvance::State {
|
||||
state,
|
||||
state_root,
|
||||
block_slot,
|
||||
} => (block_slot, state_root, *state),
|
||||
};
|
||||
|
||||
let initial_slot = state.slot();
|
||||
let initial_epoch = state.current_epoch();
|
||||
|
||||
let state_root = if state.slot() == head_slot {
|
||||
Some(head_state_root)
|
||||
} else {
|
||||
// Protect against advancing a state more than a single slot.
|
||||
//
|
||||
// Advancing more than one slot without storing the intermediate state would corrupt the
|
||||
// database. Future works might store temporary, intermediate states inside this function.
|
||||
return Err(Error::BadStateSlot {
|
||||
_block_slot: head_slot,
|
||||
_state_slot: state.slot(),
|
||||
});
|
||||
};
|
||||
|
||||
// Advance the state a single slot.
|
||||
if let Some(summary) = per_slot_processing(&mut state, state_root, &beacon_chain.spec)
|
||||
.map_err(BeaconChainError::from)?
|
||||
{
|
||||
// Expose Prometheus metrics.
|
||||
if let Err(e) = summary.observe_metrics() {
|
||||
error!(
|
||||
log,
|
||||
"Failed to observe epoch summary metrics";
|
||||
"src" => "state_advance_timer",
|
||||
"error" => ?e
|
||||
);
|
||||
}
|
||||
|
||||
// Only notify the validator monitor for recent blocks.
|
||||
if state.current_epoch() + VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64
|
||||
>= current_slot.epoch(T::EthSpec::slots_per_epoch())
|
||||
{
|
||||
// Potentially create logs/metrics for locally monitored validators.
|
||||
if let Err(e) = beacon_chain
|
||||
.validator_monitor
|
||||
.read()
|
||||
.process_validator_statuses(state.current_epoch(), &summary, &beacon_chain.spec)
|
||||
{
|
||||
error!(
|
||||
log,
|
||||
"Unable to process validator statuses";
|
||||
"error" => ?e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
debug!(
|
||||
log,
|
||||
"Advanced head state one slot";
|
||||
"head_root" => ?head_root,
|
||||
"state_slot" => state.slot(),
|
||||
"current_slot" => current_slot,
|
||||
);
|
||||
|
||||
// Build the current epoch cache, to prepare to compute proposer duties.
|
||||
state
|
||||
.build_committee_cache(RelativeEpoch::Current, &beacon_chain.spec)
|
||||
.map_err(BeaconChainError::from)?;
|
||||
// Build the next epoch cache, to prepare to compute attester duties.
|
||||
state
|
||||
.build_committee_cache(RelativeEpoch::Next, &beacon_chain.spec)
|
||||
.map_err(BeaconChainError::from)?;
|
||||
|
||||
// If the `pre_state` is in a later epoch than `state`, pre-emptively add the proposer shuffling
|
||||
// for the state's current epoch and the committee cache for the state's next epoch.
|
||||
if initial_epoch < state.current_epoch() {
|
||||
// Update the proposer cache.
|
||||
//
|
||||
// We supply the `head_root` as the decision block since the prior `if` statement guarantees
|
||||
// the head root is the latest block from the prior epoch.
|
||||
beacon_chain
|
||||
.beacon_proposer_cache
|
||||
.lock()
|
||||
.insert(
|
||||
state.current_epoch(),
|
||||
head_root,
|
||||
state
|
||||
.get_beacon_proposer_indices(&beacon_chain.spec)
|
||||
.map_err(BeaconChainError::from)?,
|
||||
state.fork(),
|
||||
)
|
||||
.map_err(BeaconChainError::from)?;
|
||||
|
||||
// Update the attester cache.
|
||||
let shuffling_id = AttestationShufflingId::new(head_root, &state, RelativeEpoch::Next)
|
||||
.map_err(BeaconChainError::from)?;
|
||||
let committee_cache = state
|
||||
.committee_cache(RelativeEpoch::Next)
|
||||
.map_err(BeaconChainError::from)?;
|
||||
beacon_chain
|
||||
.shuffling_cache
|
||||
.try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(BeaconChainError::AttestationCacheLockTimeout)?
|
||||
.insert(shuffling_id.clone(), committee_cache);
|
||||
|
||||
debug!(
|
||||
log,
|
||||
"Primed proposer and attester caches";
|
||||
"head_root" => ?head_root,
|
||||
"next_epoch_shuffling_root" => ?shuffling_id.shuffling_decision_block,
|
||||
"state_epoch" => state.current_epoch(),
|
||||
"current_epoch" => current_slot.epoch(T::EthSpec::slots_per_epoch()),
|
||||
);
|
||||
}
|
||||
|
||||
// Apply the state to the attester cache, if the cache deems it interesting.
|
||||
beacon_chain
|
||||
.attester_cache
|
||||
.maybe_cache_state(&state, head_root, &beacon_chain.spec)
|
||||
.map_err(BeaconChainError::from)?;
|
||||
|
||||
let final_slot = state.slot();
|
||||
|
||||
// Insert the advanced state back into the snapshot cache.
|
||||
beacon_chain
|
||||
.snapshot_cache
|
||||
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(BeaconChainError::SnapshotCacheLockTimeout)?
|
||||
.update_pre_state(head_root, state)
|
||||
.ok_or(Error::HeadMissingFromSnapshotCache(head_root))?;
|
||||
|
||||
// If we have moved into the next slot whilst processing the state then this function is going
|
||||
// to become ineffective and likely become a hindrance as we're stealing the tree hash cache
|
||||
// from the snapshot cache (which may force the next block to rebuild a new one).
|
||||
//
|
||||
// If this warning occurs very frequently on well-resourced machines then we should consider
|
||||
// starting it earlier in the slot. Otherwise, it's a good indication that the machine is too
|
||||
// slow/overloaded and will be useful information for the user.
|
||||
let starting_slot = current_slot;
|
||||
let current_slot = beacon_chain.slot()?;
|
||||
if starting_slot < current_slot {
|
||||
warn!(
|
||||
log,
|
||||
"State advance too slow";
|
||||
"head_root" => %head_root,
|
||||
"advanced_slot" => final_slot,
|
||||
"current_slot" => current_slot,
|
||||
"starting_slot" => starting_slot,
|
||||
"msg" => "system resources may be overloaded",
|
||||
);
|
||||
}
|
||||
|
||||
debug!(
|
||||
log,
|
||||
"Completed state advance";
|
||||
"head_root" => ?head_root,
|
||||
"advanced_slot" => final_slot,
|
||||
"initial_slot" => initial_slot,
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn lock() {
|
||||
let lock = Lock::new();
|
||||
assert!(!lock.lock());
|
||||
assert!(lock.lock());
|
||||
assert!(lock.lock());
|
||||
lock.unlock();
|
||||
assert!(!lock.lock());
|
||||
assert!(lock.lock());
|
||||
}
|
||||
}
|
||||
665
beacon_node/beacon_chain/src/sync_committee_verification.rs
Normal file
665
beacon_node/beacon_chain/src/sync_committee_verification.rs
Normal file
@@ -0,0 +1,665 @@
|
||||
//! Provides verification for the following sync committee messages:
|
||||
//!
|
||||
//! - "Unaggregated" `SyncCommitteeMessage` received from either gossip or the HTTP API.
|
||||
//! - "Aggregated" `SignedContributionAndProof` received from gossip or the HTTP API.
|
||||
//!
|
||||
//! For clarity, we define:
|
||||
//!
|
||||
//! - Unaggregated: a `SyncCommitteeMessage` object.
|
||||
//! - Aggregated: a `SignedContributionAndProof` which has zero or more signatures.
|
||||
//! - Note: "zero or more" may soon change to "one or more".
|
||||
//!
|
||||
//! Similar to the `crate::block_verification` module, we try to avoid doing duplicate verification
|
||||
//! work as a sync committee message passes through different stages of verification. We represent these
|
||||
//! different stages of verification with wrapper types. These wrapper-types flow in a particular
|
||||
//! pattern:
|
||||
//!
|
||||
//! ```ignore
|
||||
//! types::SyncCommitteeMessage types::SignedContributionAndProof
|
||||
//! | |
|
||||
//! ▼ ▼
|
||||
//! VerifiedSyncCommitteeMessage VerifiedSyncContribution
|
||||
//! | |
|
||||
//! -------------------------------------
|
||||
//! |
|
||||
//! ▼
|
||||
//! impl SignatureVerifiedSyncContribution
|
||||
//! ```
|
||||
|
||||
use crate::observed_attesters::SlotSubcommitteeIndex;
|
||||
use crate::{
|
||||
beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT},
|
||||
metrics,
|
||||
observed_aggregates::ObserveOutcome,
|
||||
BeaconChain, BeaconChainError, BeaconChainTypes,
|
||||
};
|
||||
use bls::{verify_signature_sets, PublicKeyBytes};
|
||||
use derivative::Derivative;
|
||||
use safe_arith::ArithError;
|
||||
use slot_clock::SlotClock;
|
||||
use state_processing::per_block_processing::errors::SyncCommitteeMessageValidationError;
|
||||
use state_processing::signature_sets::{
|
||||
signed_sync_aggregate_selection_proof_signature_set, signed_sync_aggregate_signature_set,
|
||||
sync_committee_contribution_signature_set_from_pubkeys,
|
||||
sync_committee_message_set_from_pubkeys,
|
||||
};
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
use strum::AsRefStr;
|
||||
use tree_hash::TreeHash;
|
||||
use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
|
||||
use types::slot_data::SlotData;
|
||||
use types::sync_committee::Error as SyncCommitteeError;
|
||||
use types::{
|
||||
sync_committee_contribution::Error as ContributionError, AggregateSignature, BeaconStateError,
|
||||
EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution,
|
||||
SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId,
|
||||
};
|
||||
|
||||
/// Returned when a sync committee contribution was not successfully verified. It might not have been verified for
|
||||
/// two reasons:
|
||||
///
|
||||
/// - The sync committee message is malformed or inappropriate for the context (indicated by all variants
|
||||
/// other than `BeaconChainError`).
|
||||
/// - The application encountered an internal error whilst attempting to determine validity
|
||||
/// (the `BeaconChainError` variant)
|
||||
#[derive(Debug, AsRefStr)]
|
||||
pub enum Error {
|
||||
/// The sync committee message is from a slot that is later than the current slot (with respect to the
|
||||
/// gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
FutureSlot {
|
||||
message_slot: Slot,
|
||||
latest_permissible_slot: Slot,
|
||||
},
|
||||
/// The sync committee message is from a slot that is prior to the earliest permissible slot (with
|
||||
/// respect to the gossip clock disparity).
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
PastSlot {
|
||||
message_slot: Slot,
|
||||
earliest_permissible_slot: Slot,
|
||||
},
|
||||
/// The sync committee message's aggregation bits were empty when they shouldn't be.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
EmptyAggregationBitfield,
|
||||
/// The `selection_proof` on the sync contribution does not elect it as an aggregator.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
InvalidSelectionProof { aggregator_index: u64 },
|
||||
/// The `selection_proof` on the sync committee contribution selects it as a validator, however the
|
||||
/// aggregator index is not in the committee for that sync contribution.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
AggregatorNotInCommittee { aggregator_index: u64 },
|
||||
/// The aggregator index refers to a validator index that we have not seen.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
AggregatorPubkeyUnknown(u64),
|
||||
/// The sync contribution has been seen before; either in a block, on the gossip network or from a
|
||||
/// local validator.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this sync contribution is valid, however we have already observed it and do not
|
||||
/// need to observe it again.
|
||||
SyncContributionAlreadyKnown(Hash256),
|
||||
/// There has already been an aggregation observed for this validator, we refuse to process a
|
||||
/// second.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this sync committee message is valid, however we have already observed an aggregate
|
||||
/// sync committee message from this validator for this epoch and should not observe another.
|
||||
AggregatorAlreadyKnown(u64),
|
||||
/// The aggregator index is higher than the maximum possible validator count.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
UnknownValidatorIndex(usize),
|
||||
/// The public key of the validator has not been seen locally.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this sync committee message is valid, however we have already observed an aggregate
|
||||
/// sync committee message from this validator for this epoch and should not observe another.
|
||||
UnknownValidatorPubkey(PublicKeyBytes),
|
||||
/// A signature on the sync committee message is invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
InvalidSignature,
|
||||
/// We have already observed a signature for the `validator_index` and refuse to process
|
||||
/// another.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// It's unclear if this sync message is valid, however we have already observed a
|
||||
/// signature from this validator for this slot and should not observe
|
||||
/// another.
|
||||
PriorSyncCommitteeMessageKnown { validator_index: u64, slot: Slot },
|
||||
/// The sync committee message was received on an invalid sync committee message subnet.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
InvalidSubnetId {
|
||||
received: SyncSubnetId,
|
||||
expected: Vec<SyncSubnetId>,
|
||||
},
|
||||
/// The sync message failed the `state_processing` verification stage.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// The peer has sent an invalid message.
|
||||
Invalid(SyncCommitteeMessageValidationError),
|
||||
/// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// We were unable to process this sync committee message due to an internal error. It's unclear if the
|
||||
/// sync committee message is valid.
|
||||
BeaconChainError(BeaconChainError),
|
||||
/// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// We were unable to process this sync committee message due to an internal error. It's unclear if the
|
||||
/// sync committee message is valid.
|
||||
BeaconStateError(BeaconStateError),
|
||||
/// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// We were unable to process this sync committee message due to an internal error. It's unclear if the
|
||||
/// sync committee message is valid.
|
||||
InvalidSubcommittee {
|
||||
subcommittee_index: u64,
|
||||
subcommittee_size: u64,
|
||||
},
|
||||
/// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// We were unable to process this sync committee message due to an internal error. It's unclear if the
|
||||
/// sync committee message is valid.
|
||||
ArithError(ArithError),
|
||||
/// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// We were unable to process this sync committee message due to an internal error. It's unclear if the
|
||||
/// sync committee message is valid.
|
||||
ContributionError(ContributionError),
|
||||
/// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid.
|
||||
///
|
||||
/// ## Peer scoring
|
||||
///
|
||||
/// We were unable to process this sync committee message due to an internal error. It's unclear if the
|
||||
/// sync committee message is valid.
|
||||
SyncCommitteeError(SyncCommitteeError),
|
||||
}
|
||||
|
||||
impl From<BeaconChainError> for Error {
|
||||
fn from(e: BeaconChainError) -> Self {
|
||||
Error::BeaconChainError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BeaconStateError> for Error {
|
||||
fn from(e: BeaconStateError) -> Self {
|
||||
Error::BeaconStateError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SyncCommitteeError> for Error {
|
||||
fn from(e: SyncCommitteeError) -> Self {
|
||||
Error::SyncCommitteeError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ArithError> for Error {
|
||||
fn from(e: ArithError) -> Self {
|
||||
Error::ArithError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ContributionError> for Error {
|
||||
fn from(e: ContributionError) -> Self {
|
||||
Error::ContributionError(e)
|
||||
}
|
||||
}
|
||||
|
||||
/// Wraps a `SignedContributionAndProof` that has been verified for propagation on the gossip network.\
|
||||
#[derive(Derivative)]
|
||||
#[derivative(Clone(bound = "T: BeaconChainTypes"))]
|
||||
pub struct VerifiedSyncContribution<T: BeaconChainTypes> {
|
||||
signed_aggregate: SignedContributionAndProof<T::EthSpec>,
|
||||
participant_pubkeys: Vec<PublicKeyBytes>,
|
||||
}
|
||||
|
||||
/// Wraps a `SyncCommitteeMessage` that has been verified for propagation on the gossip network.
|
||||
#[derive(Clone)]
|
||||
pub struct VerifiedSyncCommitteeMessage {
|
||||
sync_message: SyncCommitteeMessage,
|
||||
subnet_positions: HashMap<SyncSubnetId, Vec<usize>>,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> VerifiedSyncContribution<T> {
|
||||
/// Returns `Ok(Self)` if the `signed_aggregate` is valid to be (re)published on the gossip
|
||||
/// network.
|
||||
pub fn verify(
|
||||
signed_aggregate: SignedContributionAndProof<T::EthSpec>,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Self, Error> {
|
||||
let aggregator_index = signed_aggregate.message.aggregator_index;
|
||||
let contribution = &signed_aggregate.message.contribution;
|
||||
let subcommittee_index = contribution.subcommittee_index as usize;
|
||||
|
||||
// Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance.
|
||||
verify_propagation_slot_range(&chain.slot_clock, contribution)?;
|
||||
|
||||
// Validate subcommittee index.
|
||||
if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT {
|
||||
return Err(Error::InvalidSubcommittee {
|
||||
subcommittee_index: contribution.subcommittee_index,
|
||||
subcommittee_size: SYNC_COMMITTEE_SUBNET_COUNT,
|
||||
});
|
||||
}
|
||||
|
||||
// Ensure that the sync committee message has participants.
|
||||
if contribution.aggregation_bits.is_zero() {
|
||||
return Err(Error::EmptyAggregationBitfield);
|
||||
}
|
||||
|
||||
// Ensure the aggregator's pubkey is in the declared subcommittee of the current sync committee
|
||||
let pubkey_bytes = chain
|
||||
.validator_pubkey_bytes(aggregator_index as usize)?
|
||||
.ok_or(Error::UnknownValidatorIndex(aggregator_index as usize))?;
|
||||
let sync_subcommittee_pubkeys = chain
|
||||
.sync_committee_at_next_slot(contribution.get_slot())?
|
||||
.get_subcommittee_pubkeys(subcommittee_index)?;
|
||||
|
||||
if !sync_subcommittee_pubkeys.contains(&pubkey_bytes) {
|
||||
return Err(Error::AggregatorNotInCommittee { aggregator_index });
|
||||
};
|
||||
|
||||
// Ensure the valid sync contribution has not already been seen locally.
|
||||
let contribution_root = contribution.tree_hash_root();
|
||||
if chain
|
||||
.observed_sync_contributions
|
||||
.write()
|
||||
.is_known(contribution, contribution_root)
|
||||
.map_err(|e| Error::BeaconChainError(e.into()))?
|
||||
{
|
||||
return Err(Error::SyncContributionAlreadyKnown(contribution_root));
|
||||
}
|
||||
|
||||
// Ensure there has been no other observed aggregate for the given `aggregator_index`.
|
||||
//
|
||||
// Note: do not observe yet, only observe once the sync contribution has been verified.
|
||||
let observed_key =
|
||||
SlotSubcommitteeIndex::new(contribution.slot, contribution.subcommittee_index);
|
||||
match chain
|
||||
.observed_sync_aggregators
|
||||
.read()
|
||||
.validator_has_been_observed(observed_key, aggregator_index as usize)
|
||||
{
|
||||
Ok(true) => Err(Error::AggregatorAlreadyKnown(aggregator_index)),
|
||||
Ok(false) => Ok(()),
|
||||
Err(e) => Err(BeaconChainError::from(e).into()),
|
||||
}?;
|
||||
|
||||
// Note: this clones the signature which is known to be a relatively slow operation.
|
||||
//
|
||||
// Future optimizations should remove this clone.
|
||||
let selection_proof =
|
||||
SyncSelectionProof::from(signed_aggregate.message.selection_proof.clone());
|
||||
|
||||
if !selection_proof
|
||||
.is_aggregator::<T::EthSpec>()
|
||||
.map_err(|e| Error::BeaconChainError(e.into()))?
|
||||
{
|
||||
return Err(Error::InvalidSelectionProof { aggregator_index });
|
||||
}
|
||||
|
||||
// Gather all validator pubkeys that signed this contribution.
|
||||
let participant_pubkeys = sync_subcommittee_pubkeys
|
||||
.into_iter()
|
||||
.zip(contribution.aggregation_bits.iter())
|
||||
.filter_map(|(pubkey, bit)| bit.then(|| pubkey))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Ensure that all signatures are valid.
|
||||
if !verify_signed_aggregate_signatures(
|
||||
chain,
|
||||
&signed_aggregate,
|
||||
participant_pubkeys.as_slice(),
|
||||
)? {
|
||||
return Err(Error::InvalidSignature);
|
||||
}
|
||||
|
||||
let contribution = &signed_aggregate.message.contribution;
|
||||
let aggregator_index = signed_aggregate.message.aggregator_index;
|
||||
|
||||
// Observe the valid sync contribution so we do not re-process it.
|
||||
//
|
||||
// It's important to double check that the contribution is not already known, otherwise two
|
||||
// contribution processed at the same time could be published.
|
||||
if let ObserveOutcome::AlreadyKnown = chain
|
||||
.observed_sync_contributions
|
||||
.write()
|
||||
.observe_item(contribution, Some(contribution_root))
|
||||
.map_err(|e| Error::BeaconChainError(e.into()))?
|
||||
{
|
||||
return Err(Error::SyncContributionAlreadyKnown(contribution_root));
|
||||
}
|
||||
|
||||
// Observe the aggregator so we don't process another aggregate from them.
|
||||
//
|
||||
// It's important to double check that the sync committee message is not already known, otherwise two
|
||||
// sync committee messages processed at the same time could be published.
|
||||
if chain
|
||||
.observed_sync_aggregators
|
||||
.write()
|
||||
.observe_validator(observed_key, aggregator_index as usize)
|
||||
.map_err(BeaconChainError::from)?
|
||||
{
|
||||
return Err(Error::PriorSyncCommitteeMessageKnown {
|
||||
validator_index: aggregator_index,
|
||||
slot: contribution.slot,
|
||||
});
|
||||
}
|
||||
Ok(VerifiedSyncContribution {
|
||||
signed_aggregate,
|
||||
participant_pubkeys,
|
||||
})
|
||||
}
|
||||
|
||||
/// A helper function to add this aggregate to `beacon_chain.op_pool`.
|
||||
pub fn add_to_pool(self, chain: &BeaconChain<T>) -> Result<(), Error> {
|
||||
chain.add_contribution_to_block_inclusion_pool(self)
|
||||
}
|
||||
|
||||
/// Returns the underlying `contribution` for the `signed_aggregate`.
|
||||
pub fn contribution(self) -> SyncCommitteeContribution<T::EthSpec> {
|
||||
self.signed_aggregate.message.contribution
|
||||
}
|
||||
|
||||
/// Returns the underlying `signed_aggregate`.
|
||||
pub fn aggregate(&self) -> &SignedContributionAndProof<T::EthSpec> {
|
||||
&self.signed_aggregate
|
||||
}
|
||||
|
||||
/// Returns the pubkeys of all validators that are included in the aggregate.
|
||||
pub fn participant_pubkeys(&self) -> &[PublicKeyBytes] {
|
||||
&self.participant_pubkeys
|
||||
}
|
||||
}
|
||||
|
||||
impl VerifiedSyncCommitteeMessage {
|
||||
/// Returns `Ok(Self)` if the `sync_message` is valid to be (re)published on the gossip
|
||||
/// network.
|
||||
///
|
||||
/// `subnet_id` is the subnet from which we received this sync message. This function will
|
||||
/// verify that it was received on the correct subnet.
|
||||
pub fn verify<T: BeaconChainTypes>(
|
||||
sync_message: SyncCommitteeMessage,
|
||||
subnet_id: SyncSubnetId,
|
||||
chain: &BeaconChain<T>,
|
||||
) -> Result<Self, Error> {
|
||||
// Ensure sync committee message is for the current slot (within a
|
||||
// MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
|
||||
//
|
||||
// We do not queue future sync committee messages for later processing.
|
||||
verify_propagation_slot_range(&chain.slot_clock, &sync_message)?;
|
||||
|
||||
// Ensure the `subnet_id` is valid for the given validator.
|
||||
let pubkey = chain
|
||||
.validator_pubkey_bytes(sync_message.validator_index as usize)?
|
||||
.ok_or(Error::UnknownValidatorIndex(
|
||||
sync_message.validator_index as usize,
|
||||
))?;
|
||||
|
||||
let sync_committee = chain.sync_committee_at_next_slot(sync_message.get_slot())?;
|
||||
let subnet_positions = sync_committee.subcommittee_positions_for_public_key(&pubkey)?;
|
||||
|
||||
if !subnet_positions.contains_key(&subnet_id) {
|
||||
return Err(Error::InvalidSubnetId {
|
||||
received: subnet_id,
|
||||
expected: subnet_positions.keys().cloned().collect::<Vec<_>>(),
|
||||
});
|
||||
}
|
||||
|
||||
// The sync committee message is the first valid message received for the participating validator
|
||||
// for the slot, sync_message.slot.
|
||||
let validator_index = sync_message.validator_index;
|
||||
if chain
|
||||
.observed_sync_contributors
|
||||
.read()
|
||||
.validator_has_been_observed(
|
||||
SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()),
|
||||
validator_index as usize,
|
||||
)
|
||||
.map_err(BeaconChainError::from)?
|
||||
{
|
||||
return Err(Error::PriorSyncCommitteeMessageKnown {
|
||||
validator_index,
|
||||
slot: sync_message.slot,
|
||||
});
|
||||
}
|
||||
|
||||
// The aggregate signature of the sync committee message is valid.
|
||||
verify_sync_committee_message(chain, &sync_message, &pubkey)?;
|
||||
|
||||
// Now that the sync committee message has been fully verified, store that we have received a valid
|
||||
// sync committee message from this validator.
|
||||
//
|
||||
// It's important to double check that the sync committee message still hasn't been observed, since
|
||||
// there can be a race-condition if we receive two sync committee messages at the same time and
|
||||
// process them in different threads.
|
||||
if chain
|
||||
.observed_sync_contributors
|
||||
.write()
|
||||
.observe_validator(
|
||||
SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()),
|
||||
validator_index as usize,
|
||||
)
|
||||
.map_err(BeaconChainError::from)?
|
||||
{
|
||||
return Err(Error::PriorSyncCommitteeMessageKnown {
|
||||
validator_index,
|
||||
slot: sync_message.slot,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
sync_message,
|
||||
subnet_positions,
|
||||
})
|
||||
}
|
||||
|
||||
/// A helper function to add this sync committee message to `beacon_chain.naive_sync_aggregation_pool`.
|
||||
pub fn add_to_pool<T: BeaconChainTypes>(self, chain: &BeaconChain<T>) -> Result<Self, Error> {
|
||||
chain.add_to_naive_sync_aggregation_pool(self)
|
||||
}
|
||||
|
||||
/// Returns the subcommittee positions for the sync message, keyed on the `SyncSubnetId` for
|
||||
/// the subnets the signature should be sent on.
|
||||
pub fn subnet_positions(&self) -> &HashMap<SyncSubnetId, Vec<usize>> {
|
||||
&self.subnet_positions
|
||||
}
|
||||
|
||||
/// Returns the wrapped `SyncCommitteeMessage`.
|
||||
pub fn sync_message(&self) -> &SyncCommitteeMessage {
|
||||
&self.sync_message
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify that the `sync_contribution` is within the acceptable gossip propagation range, with reference
|
||||
/// to the current slot of the `chain`.
|
||||
///
|
||||
/// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
|
||||
pub fn verify_propagation_slot_range<S: SlotClock, U: SlotData>(
|
||||
slot_clock: &S,
|
||||
sync_contribution: &U,
|
||||
) -> Result<(), Error> {
|
||||
let message_slot = sync_contribution.get_slot();
|
||||
|
||||
let latest_permissible_slot = slot_clock
|
||||
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
|
||||
.ok_or(BeaconChainError::UnableToReadSlot)?;
|
||||
if message_slot > latest_permissible_slot {
|
||||
return Err(Error::FutureSlot {
|
||||
message_slot,
|
||||
latest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
let earliest_permissible_slot = slot_clock
|
||||
.now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
|
||||
.ok_or(BeaconChainError::UnableToReadSlot)?;
|
||||
|
||||
if message_slot < earliest_permissible_slot {
|
||||
return Err(Error::PastSlot {
|
||||
message_slot,
|
||||
earliest_permissible_slot,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verifies all the signatures in a `SignedContributionAndProof` using BLS batch verification. This
|
||||
/// includes three signatures:
|
||||
///
|
||||
/// - `signed_aggregate.signature`
|
||||
/// - `signed_aggregate.message.selection_proof`
|
||||
/// - `signed_aggregate.message.aggregate.signature`
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// - `Ok(true)`: if all signatures are valid.
|
||||
/// - `Ok(false)`: if one or more signatures are invalid.
|
||||
/// - `Err(e)`: if there was an error preventing signature verification.
|
||||
pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
signed_aggregate: &SignedContributionAndProof<T::EthSpec>,
|
||||
participant_pubkeys: &[PublicKeyBytes],
|
||||
) -> Result<bool, Error> {
|
||||
let pubkey_cache = chain
|
||||
.validator_pubkey_cache
|
||||
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
|
||||
|
||||
let aggregator_index = signed_aggregate.message.aggregator_index;
|
||||
if aggregator_index >= pubkey_cache.len() as u64 {
|
||||
return Err(Error::AggregatorPubkeyUnknown(aggregator_index));
|
||||
}
|
||||
|
||||
let next_slot_epoch =
|
||||
(signed_aggregate.message.contribution.slot + 1).epoch(T::EthSpec::slots_per_epoch());
|
||||
let fork = chain.spec.fork_at_epoch(next_slot_epoch);
|
||||
|
||||
let signature_sets = vec![
|
||||
signed_sync_aggregate_selection_proof_signature_set(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
signed_aggregate,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::SignatureSetError)?,
|
||||
signed_sync_aggregate_signature_set(
|
||||
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
|
||||
signed_aggregate,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::SignatureSetError)?,
|
||||
sync_committee_contribution_signature_set_from_pubkeys::<T::EthSpec, _>(
|
||||
|validator_index| {
|
||||
pubkey_cache
|
||||
.get_pubkey_from_pubkey_bytes(validator_index)
|
||||
.map(Cow::Borrowed)
|
||||
},
|
||||
participant_pubkeys,
|
||||
&signed_aggregate.message.contribution.signature,
|
||||
signed_aggregate
|
||||
.message
|
||||
.contribution
|
||||
.slot
|
||||
.epoch(T::EthSpec::slots_per_epoch()),
|
||||
signed_aggregate.message.contribution.beacon_block_root,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::SignatureSetError)?,
|
||||
];
|
||||
|
||||
Ok(verify_signature_sets(signature_sets.iter()))
|
||||
}
|
||||
|
||||
/// Verifies that the signature of the `sync_message` is valid.
|
||||
pub fn verify_sync_committee_message<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
sync_message: &SyncCommitteeMessage,
|
||||
pubkey_bytes: &PublicKeyBytes,
|
||||
) -> Result<(), Error> {
|
||||
let signature_setup_timer =
|
||||
metrics::start_timer(&metrics::SYNC_MESSAGE_PROCESSING_SIGNATURE_SETUP_TIMES);
|
||||
|
||||
let pubkey_cache = chain
|
||||
.validator_pubkey_cache
|
||||
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
|
||||
|
||||
let pubkey = pubkey_cache
|
||||
.get_pubkey_from_pubkey_bytes(pubkey_bytes)
|
||||
.map(Cow::Borrowed)
|
||||
.ok_or(Error::UnknownValidatorPubkey(*pubkey_bytes))?;
|
||||
|
||||
let next_slot_epoch = (sync_message.get_slot() + 1).epoch(T::EthSpec::slots_per_epoch());
|
||||
let fork = chain.spec.fork_at_epoch(next_slot_epoch);
|
||||
|
||||
let agg_sig = AggregateSignature::from(&sync_message.signature);
|
||||
let signature_set = sync_committee_message_set_from_pubkeys::<T::EthSpec>(
|
||||
pubkey,
|
||||
&agg_sig,
|
||||
sync_message.slot.epoch(T::EthSpec::slots_per_epoch()),
|
||||
sync_message.beacon_block_root,
|
||||
&fork,
|
||||
chain.genesis_validators_root,
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::SignatureSetError)?;
|
||||
|
||||
metrics::stop_timer(signature_setup_timer);
|
||||
|
||||
let _signature_verification_timer =
|
||||
metrics::start_timer(&metrics::SYNC_MESSAGE_PROCESSING_SIGNATURE_TIMES);
|
||||
|
||||
if signature_set.verify() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::InvalidSignature)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,20 +1,48 @@
|
||||
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::time::Duration;
|
||||
|
||||
/// A simple wrapper around `parking_lot::RwLock` that only permits read/write access with a
|
||||
/// time-out (i.e., no indefinitely-blocking operations).
|
||||
///
|
||||
/// Timeouts can be optionally be disabled at runtime for all instances of this type by calling
|
||||
/// `TimeoutRwLock::disable_timeouts()`.
|
||||
pub struct TimeoutRwLock<T>(RwLock<T>);
|
||||
|
||||
const TIMEOUT_LOCKS_ENABLED_DEFAULT: bool = true;
|
||||
static TIMEOUT_LOCKS_ENABLED: AtomicBool = AtomicBool::new(TIMEOUT_LOCKS_ENABLED_DEFAULT);
|
||||
|
||||
impl TimeoutRwLock<()> {
|
||||
pub fn disable_timeouts() {
|
||||
// Use the strongest `SeqCst` ordering for the write, as it should only happen once.
|
||||
TIMEOUT_LOCKS_ENABLED.store(false, Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> TimeoutRwLock<T> {
|
||||
pub fn new(inner: T) -> Self {
|
||||
Self(RwLock::new(inner))
|
||||
}
|
||||
|
||||
fn timeouts_enabled() -> bool {
|
||||
// Use relaxed ordering as it's OK for a few locks to run with timeouts "accidentally",
|
||||
// and we want the atomic check to be as fast as possible.
|
||||
TIMEOUT_LOCKS_ENABLED.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn try_read_for(&self, timeout: Duration) -> Option<RwLockReadGuard<T>> {
|
||||
self.0.try_read_for(timeout)
|
||||
if Self::timeouts_enabled() {
|
||||
self.0.try_read_for(timeout)
|
||||
} else {
|
||||
Some(self.0.read())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn try_write_for(&self, timeout: Duration) -> Option<RwLockWriteGuard<T>> {
|
||||
self.0.try_write_for(timeout)
|
||||
if Self::timeouts_enabled() {
|
||||
self.0.try_write_for(timeout)
|
||||
} else {
|
||||
Some(self.0.write())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
1535
beacon_node/beacon_chain/src/validator_monitor.rs
Normal file
1535
beacon_node/beacon_chain/src/validator_monitor.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,11 +1,13 @@
|
||||
use crate::errors::BeaconChainError;
|
||||
use crate::{BeaconChainTypes, BeaconStore};
|
||||
use ssz::{Decode, DecodeError, Encode};
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryInto;
|
||||
use std::fs::{File, OpenOptions};
|
||||
use std::fs::File;
|
||||
use std::io::{self, Read, Write};
|
||||
use std::path::Path;
|
||||
use types::{BeaconState, EthSpec, PublicKey, PublicKeyBytes, Validator};
|
||||
use store::{DBColumn, Error as StoreError, StoreItem};
|
||||
use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes};
|
||||
|
||||
/// Provides a mapping of `validator_index -> validator_publickey`.
|
||||
///
|
||||
@@ -16,40 +18,37 @@ use types::{BeaconState, EthSpec, PublicKey, PublicKeyBytes, Validator};
|
||||
/// keys in compressed form and they are needed in decompressed form for signature verification.
|
||||
/// Decompression is expensive when many keys are involved.
|
||||
///
|
||||
/// The cache has a `persistence_file` that it uses to maintain a persistent, on-disk
|
||||
/// The cache has a `backing` that it uses to maintain a persistent, on-disk
|
||||
/// copy of itself. This allows it to be restored between process invocations.
|
||||
pub struct ValidatorPubkeyCache {
|
||||
pub struct ValidatorPubkeyCache<T: BeaconChainTypes> {
|
||||
pubkeys: Vec<PublicKey>,
|
||||
indices: HashMap<PublicKeyBytes, usize>,
|
||||
persitence_file: ValidatorPubkeyCacheFile,
|
||||
pubkey_bytes: Vec<PublicKeyBytes>,
|
||||
backing: PubkeyCacheBacking<T>,
|
||||
}
|
||||
|
||||
impl ValidatorPubkeyCache {
|
||||
pub fn load_from_file<P: AsRef<Path>>(path: P) -> Result<Self, BeaconChainError> {
|
||||
ValidatorPubkeyCacheFile::open(&path)
|
||||
.and_then(ValidatorPubkeyCacheFile::into_cache)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
/// Abstraction over on-disk backing.
|
||||
///
|
||||
/// `File` backing is legacy, `Database` is current.
|
||||
enum PubkeyCacheBacking<T: BeaconChainTypes> {
|
||||
File(ValidatorPubkeyCacheFile),
|
||||
Database(BeaconStore<T>),
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
|
||||
/// Create a new public key cache using the keys in `state.validators`.
|
||||
///
|
||||
/// Also creates a new persistence file, returning an error if there is already a file at
|
||||
/// `persistence_path`.
|
||||
pub fn new<T: EthSpec, P: AsRef<Path>>(
|
||||
state: &BeaconState<T>,
|
||||
persistence_path: P,
|
||||
pub fn new(
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
store: BeaconStore<T>,
|
||||
) -> Result<Self, BeaconChainError> {
|
||||
if persistence_path.as_ref().exists() {
|
||||
return Err(BeaconChainError::ValidatorPubkeyCacheFileError(format!(
|
||||
"Persistence file already exists: {:?}",
|
||||
persistence_path.as_ref()
|
||||
)));
|
||||
}
|
||||
|
||||
let mut cache = Self {
|
||||
persitence_file: ValidatorPubkeyCacheFile::create(persistence_path)?,
|
||||
pubkeys: vec![],
|
||||
indices: HashMap::new(),
|
||||
pubkey_bytes: vec![],
|
||||
backing: PubkeyCacheBacking::Database(store),
|
||||
};
|
||||
|
||||
cache.import_new_pubkeys(state)?;
|
||||
@@ -57,33 +56,88 @@ impl ValidatorPubkeyCache {
|
||||
Ok(cache)
|
||||
}
|
||||
|
||||
/// Load the pubkey cache from the given on-disk database.
|
||||
pub fn load_from_store(store: BeaconStore<T>) -> Result<Self, BeaconChainError> {
|
||||
let mut pubkeys = vec![];
|
||||
let mut indices = HashMap::new();
|
||||
let mut pubkey_bytes = vec![];
|
||||
|
||||
for validator_index in 0.. {
|
||||
if let Some(DatabasePubkey(pubkey)) =
|
||||
store.get_item(&DatabasePubkey::key_for_index(validator_index))?
|
||||
{
|
||||
pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?);
|
||||
pubkey_bytes.push(pubkey);
|
||||
indices.insert(pubkey, validator_index);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ValidatorPubkeyCache {
|
||||
pubkeys,
|
||||
indices,
|
||||
pubkey_bytes,
|
||||
backing: PubkeyCacheBacking::Database(store),
|
||||
})
|
||||
}
|
||||
|
||||
/// DEPRECATED: used only for migration
|
||||
pub fn load_from_file<P: AsRef<Path>>(path: P) -> Result<Self, BeaconChainError> {
|
||||
ValidatorPubkeyCacheFile::open(&path)
|
||||
.and_then(ValidatorPubkeyCacheFile::into_cache)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Convert a cache using `File` backing to one using `Database` backing.
|
||||
///
|
||||
/// This will write all of the keys from `existing_cache` to `store`.
|
||||
pub fn convert(existing_cache: Self, store: BeaconStore<T>) -> Result<Self, BeaconChainError> {
|
||||
let mut result = ValidatorPubkeyCache {
|
||||
pubkeys: Vec::with_capacity(existing_cache.pubkeys.len()),
|
||||
indices: HashMap::with_capacity(existing_cache.indices.len()),
|
||||
pubkey_bytes: Vec::with_capacity(existing_cache.indices.len()),
|
||||
backing: PubkeyCacheBacking::Database(store),
|
||||
};
|
||||
result.import(existing_cache.pubkeys.iter().map(PublicKeyBytes::from))?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Scan the given `state` and add any new validator public keys.
|
||||
///
|
||||
/// Does not delete any keys from `self` if they don't appear in `state`.
|
||||
pub fn import_new_pubkeys<T: EthSpec>(
|
||||
pub fn import_new_pubkeys(
|
||||
&mut self,
|
||||
state: &BeaconState<T>,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
) -> Result<(), BeaconChainError> {
|
||||
if state.validators.len() > self.pubkeys.len() {
|
||||
self.import(&state.validators[self.pubkeys.len()..])
|
||||
if state.validators().len() > self.pubkeys.len() {
|
||||
self.import(
|
||||
state.validators()[self.pubkeys.len()..]
|
||||
.iter()
|
||||
.map(|v| v.pubkey),
|
||||
)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds zero or more validators to `self`.
|
||||
fn import(&mut self, validators: &[Validator]) -> Result<(), BeaconChainError> {
|
||||
self.pubkeys.reserve(validators.len());
|
||||
self.indices.reserve(validators.len());
|
||||
fn import<I>(&mut self, validator_keys: I) -> Result<(), BeaconChainError>
|
||||
where
|
||||
I: Iterator<Item = PublicKeyBytes> + ExactSizeIterator,
|
||||
{
|
||||
self.pubkey_bytes.reserve(validator_keys.len());
|
||||
self.pubkeys.reserve(validator_keys.len());
|
||||
self.indices.reserve(validator_keys.len());
|
||||
|
||||
for v in validators.iter() {
|
||||
for pubkey in validator_keys {
|
||||
let i = self.pubkeys.len();
|
||||
|
||||
if self.indices.contains_key(&v.pubkey) {
|
||||
if self.indices.contains_key(&pubkey) {
|
||||
return Err(BeaconChainError::DuplicateValidatorPublicKey);
|
||||
}
|
||||
|
||||
// The item is written to disk (the persistence file) _before_ it is written into
|
||||
// The item is written to disk _before_ it is written into
|
||||
// the local struct.
|
||||
//
|
||||
// This means that a pubkey cache read from disk will always be equivalent to or
|
||||
@@ -92,15 +146,23 @@ impl ValidatorPubkeyCache {
|
||||
// The motivation behind this ordering is that we do not want to have states that
|
||||
// reference a pubkey that is not in our cache. However, it's fine to have pubkeys
|
||||
// that are never referenced in a state.
|
||||
self.persitence_file.append(i, &v.pubkey)?;
|
||||
match &mut self.backing {
|
||||
PubkeyCacheBacking::File(persistence_file) => {
|
||||
persistence_file.append(i, &pubkey)?;
|
||||
}
|
||||
PubkeyCacheBacking::Database(store) => {
|
||||
store.put_item(&DatabasePubkey::key_for_index(i), &DatabasePubkey(pubkey))?;
|
||||
}
|
||||
}
|
||||
|
||||
self.pubkeys.push(
|
||||
(&v.pubkey)
|
||||
(&pubkey)
|
||||
.try_into()
|
||||
.map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?,
|
||||
);
|
||||
self.pubkey_bytes.push(pubkey);
|
||||
|
||||
self.indices.insert(v.pubkey.clone(), i);
|
||||
self.indices.insert(pubkey, i);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -111,6 +173,16 @@ impl ValidatorPubkeyCache {
|
||||
self.pubkeys.get(i)
|
||||
}
|
||||
|
||||
/// Get the `PublicKey` for a validator with `PublicKeyBytes`.
|
||||
pub fn get_pubkey_from_pubkey_bytes(&self, pubkey: &PublicKeyBytes) -> Option<&PublicKey> {
|
||||
self.get_index(pubkey).and_then(|index| self.get(index))
|
||||
}
|
||||
|
||||
/// Get the public key (in bytes form) for a validator with index `i`.
|
||||
pub fn get_pubkey_bytes(&self, i: usize) -> Option<&PublicKeyBytes> {
|
||||
self.pubkey_bytes.get(i)
|
||||
}
|
||||
|
||||
/// Get the index of a validator with `pubkey`.
|
||||
pub fn get_index(&self, pubkey: &PublicKeyBytes) -> Option<usize> {
|
||||
self.indices.get(pubkey).copied()
|
||||
@@ -122,6 +194,31 @@ impl ValidatorPubkeyCache {
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper for a public key stored in the database.
|
||||
///
|
||||
/// Keyed by the validator index as `Hash256::from_low_u64_be(index)`.
|
||||
struct DatabasePubkey(PublicKeyBytes);
|
||||
|
||||
impl StoreItem for DatabasePubkey {
|
||||
fn db_column() -> DBColumn {
|
||||
DBColumn::PubkeyCache
|
||||
}
|
||||
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
self.0.as_ssz_bytes()
|
||||
}
|
||||
|
||||
fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
|
||||
Ok(Self(PublicKeyBytes::from_ssz_bytes(bytes)?))
|
||||
}
|
||||
}
|
||||
|
||||
impl DatabasePubkey {
|
||||
fn key_for_index(index: usize) -> Hash256 {
|
||||
Hash256::from_low_u64_be(index as u64)
|
||||
}
|
||||
}
|
||||
|
||||
/// Allows for maintaining an on-disk copy of the `ValidatorPubkeyCache`. The file is raw SSZ bytes
|
||||
/// (not ASCII encoded).
|
||||
///
|
||||
@@ -144,8 +241,8 @@ enum Error {
|
||||
/// The file read from disk does not have a contiguous list of validator public keys. The file
|
||||
/// has become corrupted.
|
||||
InconsistentIndex {
|
||||
expected: Option<usize>,
|
||||
found: usize,
|
||||
_expected: Option<usize>,
|
||||
_found: usize,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -156,19 +253,9 @@ impl From<Error> for BeaconChainError {
|
||||
}
|
||||
|
||||
impl ValidatorPubkeyCacheFile {
|
||||
/// Creates a file for reading and writing.
|
||||
pub fn create<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
|
||||
OpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.open(path)
|
||||
.map(Self)
|
||||
.map_err(Error::Io)
|
||||
}
|
||||
|
||||
/// Opens an existing file for reading and writing.
|
||||
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
|
||||
OpenOptions::new()
|
||||
File::options()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create(false)
|
||||
@@ -187,7 +274,7 @@ impl ValidatorPubkeyCacheFile {
|
||||
}
|
||||
|
||||
/// Creates a `ValidatorPubkeyCache` by reading and parsing the underlying file.
|
||||
pub fn into_cache(mut self) -> Result<ValidatorPubkeyCache, Error> {
|
||||
pub fn into_cache<T: BeaconChainTypes>(mut self) -> Result<ValidatorPubkeyCache<T>, Error> {
|
||||
let mut bytes = vec![];
|
||||
self.0.read_to_end(&mut bytes).map_err(Error::Io)?;
|
||||
|
||||
@@ -195,18 +282,20 @@ impl ValidatorPubkeyCacheFile {
|
||||
|
||||
let mut last = None;
|
||||
let mut pubkeys = Vec::with_capacity(list.len());
|
||||
let mut indices = HashMap::new();
|
||||
let mut indices = HashMap::with_capacity(list.len());
|
||||
let mut pubkey_bytes = Vec::with_capacity(list.len());
|
||||
|
||||
for (index, pubkey) in list {
|
||||
let expected = last.map(|n| n + 1);
|
||||
if expected.map_or(true, |expected| index == expected) {
|
||||
last = Some(index);
|
||||
pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?);
|
||||
pubkey_bytes.push(pubkey);
|
||||
indices.insert(pubkey, index);
|
||||
} else {
|
||||
return Err(Error::InconsistentIndex {
|
||||
expected,
|
||||
found: index,
|
||||
_expected: expected,
|
||||
_found: index,
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -214,7 +303,8 @@ impl ValidatorPubkeyCacheFile {
|
||||
Ok(ValidatorPubkeyCache {
|
||||
pubkeys,
|
||||
indices,
|
||||
persitence_file: self,
|
||||
pubkey_bytes,
|
||||
backing: PubkeyCacheBacking::File(self),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -231,20 +321,38 @@ fn append_to_file(file: &mut File, index: usize, pubkey: &PublicKeyBytes) -> Res
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType};
|
||||
use logging::test_logger;
|
||||
use std::sync::Arc;
|
||||
use store::HotColdDB;
|
||||
use tempfile::tempdir;
|
||||
use types::{
|
||||
test_utils::{generate_deterministic_keypair, TestingBeaconStateBuilder},
|
||||
BeaconState, EthSpec, Keypair, MainnetEthSpec,
|
||||
test_utils::generate_deterministic_keypair, BeaconState, EthSpec, Keypair, MainnetEthSpec,
|
||||
};
|
||||
|
||||
fn get_state(validator_count: usize) -> (BeaconState<MainnetEthSpec>, Vec<Keypair>) {
|
||||
let spec = MainnetEthSpec::default_spec();
|
||||
let builder =
|
||||
TestingBeaconStateBuilder::from_deterministic_keypairs(validator_count, &spec);
|
||||
builder.build()
|
||||
type E = MainnetEthSpec;
|
||||
type T = EphemeralHarnessType<E>;
|
||||
|
||||
fn get_state(validator_count: usize) -> (BeaconState<E>, Vec<Keypair>) {
|
||||
let harness = BeaconChainHarness::builder(MainnetEthSpec)
|
||||
.default_spec()
|
||||
.deterministic_keypairs(validator_count)
|
||||
.fresh_ephemeral_store()
|
||||
.build();
|
||||
|
||||
harness.advance_slot();
|
||||
|
||||
(harness.get_current_state(), harness.validator_keypairs)
|
||||
}
|
||||
|
||||
fn check_cache_get(cache: &ValidatorPubkeyCache, keypairs: &[Keypair]) {
|
||||
fn get_store() -> BeaconStore<T> {
|
||||
Arc::new(
|
||||
HotColdDB::open_ephemeral(<_>::default(), E::default_spec(), test_logger()).unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
fn check_cache_get(cache: &ValidatorPubkeyCache<T>, keypairs: &[Keypair]) {
|
||||
let validator_count = keypairs.len();
|
||||
|
||||
for i in 0..validator_count + 1 {
|
||||
@@ -275,10 +383,9 @@ mod test {
|
||||
fn basic_operation() {
|
||||
let (state, keypairs) = get_state(8);
|
||||
|
||||
let dir = tempdir().expect("should create tempdir");
|
||||
let path = dir.path().join("cache.ssz");
|
||||
let store = get_store();
|
||||
|
||||
let mut cache = ValidatorPubkeyCache::new(&state, path).expect("should create cache");
|
||||
let mut cache = ValidatorPubkeyCache::new(&state, store).expect("should create cache");
|
||||
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
|
||||
@@ -308,16 +415,16 @@ mod test {
|
||||
fn persistence() {
|
||||
let (state, keypairs) = get_state(8);
|
||||
|
||||
let dir = tempdir().expect("should create tempdir");
|
||||
let path = dir.path().join("cache.ssz");
|
||||
let store = get_store();
|
||||
|
||||
// Create a new cache.
|
||||
let cache = ValidatorPubkeyCache::new(&state, &path).expect("should create cache");
|
||||
let cache = ValidatorPubkeyCache::new(&state, store.clone()).expect("should create cache");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
drop(cache);
|
||||
|
||||
// Re-init the cache from the file.
|
||||
let mut cache = ValidatorPubkeyCache::load_from_file(&path).expect("should open cache");
|
||||
let mut cache =
|
||||
ValidatorPubkeyCache::load_from_store(store.clone()).expect("should open cache");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
|
||||
// Add some more keypairs.
|
||||
@@ -329,7 +436,7 @@ mod test {
|
||||
drop(cache);
|
||||
|
||||
// Re-init the cache from the file.
|
||||
let cache = ValidatorPubkeyCache::load_from_file(&path).expect("should open cache");
|
||||
let cache = ValidatorPubkeyCache::load_from_store(store).expect("should open cache");
|
||||
check_cache_get(&cache, &keypairs[..]);
|
||||
}
|
||||
|
||||
@@ -343,10 +450,10 @@ mod test {
|
||||
append_to_file(&mut file, 0, &pubkey).expect("should write to file");
|
||||
drop(file);
|
||||
|
||||
let cache = ValidatorPubkeyCache::load_from_file(&path).expect("should open cache");
|
||||
let cache = ValidatorPubkeyCache::<T>::load_from_file(&path).expect("should open cache");
|
||||
drop(cache);
|
||||
|
||||
let mut file = OpenOptions::new()
|
||||
let mut file = File::options()
|
||||
.write(true)
|
||||
.append(true)
|
||||
.open(&path)
|
||||
@@ -356,7 +463,7 @@ mod test {
|
||||
drop(file);
|
||||
|
||||
assert!(
|
||||
ValidatorPubkeyCache::load_from_file(&path).is_err(),
|
||||
ValidatorPubkeyCache::<T>::load_from_file(&path).is_err(),
|
||||
"should not parse invalid file"
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,13 +1,8 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use beacon_chain::{
|
||||
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
|
||||
StateSkipConfig,
|
||||
};
|
||||
use store::config::StoreConfig;
|
||||
use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
|
||||
use beacon_chain::{StateSkipConfig, WhenSlotSkipped};
|
||||
use lazy_static::lazy_static;
|
||||
use tree_hash::TreeHash;
|
||||
use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot};
|
||||
|
||||
@@ -25,55 +20,51 @@ lazy_static! {
|
||||
#[test]
|
||||
fn produces_attestations() {
|
||||
let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4;
|
||||
let additional_slots_tested = MainnetEthSpec::slots_per_epoch() * 3;
|
||||
|
||||
let harness = BeaconChainHarness::new(
|
||||
MainnetEthSpec,
|
||||
KEYPAIRS[..].to_vec(),
|
||||
StoreConfig::default(),
|
||||
);
|
||||
|
||||
// Skip past the genesis slot.
|
||||
harness.advance_slot();
|
||||
|
||||
harness.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
let harness = BeaconChainHarness::builder(MainnetEthSpec)
|
||||
.default_spec()
|
||||
.keypairs(KEYPAIRS[..].to_vec())
|
||||
.fresh_ephemeral_store()
|
||||
.mock_execution_layer()
|
||||
.build();
|
||||
|
||||
let chain = &harness.chain;
|
||||
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
assert_eq!(state.slot, num_blocks_produced, "head should have updated");
|
||||
assert_ne!(
|
||||
state.finalized_checkpoint.epoch, 0,
|
||||
"head should have updated"
|
||||
);
|
||||
|
||||
let current_slot = chain.slot().expect("should get slot");
|
||||
|
||||
// Test all valid committee indices for all slots in the chain.
|
||||
for slot in 0..=current_slot.as_u64() + MainnetEthSpec::slots_per_epoch() * 3 {
|
||||
// for slot in 0..=current_slot.as_u64() + MainnetEthSpec::slots_per_epoch() * 3 {
|
||||
for slot in 0..=num_blocks_produced + additional_slots_tested {
|
||||
if slot > 0 && slot <= num_blocks_produced {
|
||||
harness.advance_slot();
|
||||
|
||||
harness.extend_chain(
|
||||
1,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
}
|
||||
|
||||
let slot = Slot::from(slot);
|
||||
let state = chain
|
||||
let mut state = chain
|
||||
.state_at_slot(slot, StateSkipConfig::WithStateRoots)
|
||||
.expect("should get state");
|
||||
|
||||
let block_slot = if slot > current_slot {
|
||||
current_slot
|
||||
} else {
|
||||
let block_slot = if slot <= num_blocks_produced {
|
||||
slot
|
||||
} else {
|
||||
Slot::from(num_blocks_produced)
|
||||
};
|
||||
|
||||
let block = chain
|
||||
.block_at_slot(block_slot)
|
||||
.block_at_slot(block_slot, WhenSlotSkipped::Prev)
|
||||
.expect("should get block")
|
||||
.expect("block should not be skipped");
|
||||
let block_root = block.message.tree_hash_root();
|
||||
let block_root = block.message().tree_hash_root();
|
||||
|
||||
let epoch_boundary_slot = state
|
||||
.current_epoch()
|
||||
.start_slot(MainnetEthSpec::slots_per_epoch());
|
||||
let target_root = if state.slot == epoch_boundary_slot {
|
||||
let target_root = if state.slot() == epoch_boundary_slot {
|
||||
block_root
|
||||
} else {
|
||||
*state
|
||||
@@ -81,6 +72,9 @@ fn produces_attestations() {
|
||||
.expect("should get target block root")
|
||||
};
|
||||
|
||||
state
|
||||
.build_committee_cache(RelativeEpoch::Current, &harness.chain.spec)
|
||||
.unwrap();
|
||||
let committee_cache = state
|
||||
.committee_cache(RelativeEpoch::Current)
|
||||
.expect("should get committee_cache");
|
||||
@@ -118,15 +112,35 @@ fn produces_attestations() {
|
||||
assert_eq!(data.slot, slot, "bad slot");
|
||||
assert_eq!(data.beacon_block_root, block_root, "bad block root");
|
||||
assert_eq!(
|
||||
data.source, state.current_justified_checkpoint,
|
||||
data.source,
|
||||
state.current_justified_checkpoint(),
|
||||
"bad source"
|
||||
);
|
||||
assert_eq!(
|
||||
data.source, state.current_justified_checkpoint,
|
||||
data.source,
|
||||
state.current_justified_checkpoint(),
|
||||
"bad source"
|
||||
);
|
||||
assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");
|
||||
assert_eq!(data.target.root, target_root, "bad target root");
|
||||
|
||||
let early_attestation = {
|
||||
let proto_block = chain.fork_choice.read().get_block(&block_root).unwrap();
|
||||
chain
|
||||
.early_attester_cache
|
||||
.add_head_block(block_root, block.clone(), proto_block, &state, &chain.spec)
|
||||
.unwrap();
|
||||
chain
|
||||
.early_attester_cache
|
||||
.try_attest(slot, index, &chain.spec)
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
attestation, early_attestation,
|
||||
"early attester cache inconsistent"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
9
beacon_node/beacon_chain/tests/main.rs
Normal file
9
beacon_node/beacon_chain/tests/main.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
mod attestation_production;
|
||||
mod attestation_verification;
|
||||
mod block_verification;
|
||||
mod merge;
|
||||
mod op_verification;
|
||||
mod payload_invalidation;
|
||||
mod store_tests;
|
||||
mod sync_committee_verification;
|
||||
mod tests;
|
||||
182
beacon_node/beacon_chain/tests/merge.rs
Normal file
182
beacon_node/beacon_chain/tests/merge.rs
Normal file
@@ -0,0 +1,182 @@
|
||||
#![cfg(not(debug_assertions))] // Tests run too slow in debug.
|
||||
|
||||
use beacon_chain::test_utils::BeaconChainHarness;
|
||||
use execution_layer::test_utils::{generate_pow_block, DEFAULT_TERMINAL_BLOCK};
|
||||
use types::*;
|
||||
|
||||
const VALIDATOR_COUNT: usize = 32;
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
|
||||
fn verify_execution_payload_chain<T: EthSpec>(chain: &[ExecutionPayload<T>]) {
|
||||
let mut prev_ep: Option<ExecutionPayload<T>> = None;
|
||||
|
||||
for ep in chain {
|
||||
assert!(*ep != ExecutionPayload::default());
|
||||
assert!(ep.block_hash != ExecutionBlockHash::zero());
|
||||
|
||||
// Check against previous `ExecutionPayload`.
|
||||
if let Some(prev_ep) = prev_ep {
|
||||
assert_eq!(prev_ep.block_hash, ep.parent_hash);
|
||||
assert_eq!(prev_ep.block_number + 1, ep.block_number);
|
||||
}
|
||||
prev_ep = Some(ep.clone());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
// TODO(merge): This isn't working cause the non-zero values in `initialize_beacon_state_from_eth1`
|
||||
// are causing failed lookups to the execution node. I need to come back to this.
|
||||
#[should_panic]
|
||||
fn merge_with_terminal_block_hash_override() {
|
||||
let altair_fork_epoch = Epoch::new(0);
|
||||
let bellatrix_fork_epoch = Epoch::new(0);
|
||||
|
||||
let mut spec = E::default_spec();
|
||||
spec.altair_fork_epoch = Some(altair_fork_epoch);
|
||||
spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch);
|
||||
|
||||
let genesis_pow_block_hash = generate_pow_block(
|
||||
spec.terminal_total_difficulty,
|
||||
DEFAULT_TERMINAL_BLOCK,
|
||||
0,
|
||||
ExecutionBlockHash::zero(),
|
||||
)
|
||||
.unwrap()
|
||||
.block_hash;
|
||||
|
||||
spec.terminal_block_hash = genesis_pow_block_hash;
|
||||
|
||||
let harness = BeaconChainHarness::builder(E::default())
|
||||
.spec(spec)
|
||||
.deterministic_keypairs(VALIDATOR_COUNT)
|
||||
.fresh_ephemeral_store()
|
||||
.mock_execution_layer()
|
||||
.build();
|
||||
|
||||
assert_eq!(
|
||||
harness
|
||||
.execution_block_generator()
|
||||
.latest_block()
|
||||
.unwrap()
|
||||
.block_hash(),
|
||||
genesis_pow_block_hash,
|
||||
"pre-condition"
|
||||
);
|
||||
|
||||
assert!(
|
||||
harness
|
||||
.chain
|
||||
.head()
|
||||
.unwrap()
|
||||
.beacon_block
|
||||
.as_merge()
|
||||
.is_ok(),
|
||||
"genesis block should be a merge block"
|
||||
);
|
||||
|
||||
let mut execution_payloads = vec![];
|
||||
for i in 0..E::slots_per_epoch() * 3 {
|
||||
harness.extend_slots(1);
|
||||
|
||||
let block = harness.chain.head().unwrap().beacon_block;
|
||||
|
||||
let execution_payload = block.message().body().execution_payload().unwrap().clone();
|
||||
if i == 0 {
|
||||
assert_eq!(execution_payload.block_hash, genesis_pow_block_hash);
|
||||
}
|
||||
execution_payloads.push(execution_payload);
|
||||
}
|
||||
|
||||
verify_execution_payload_chain(&execution_payloads);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn base_altair_merge_with_terminal_block_after_fork() {
|
||||
let altair_fork_epoch = Epoch::new(4);
|
||||
let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch());
|
||||
let bellatrix_fork_epoch = Epoch::new(8);
|
||||
let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch());
|
||||
|
||||
let mut spec = E::default_spec();
|
||||
spec.altair_fork_epoch = Some(altair_fork_epoch);
|
||||
spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch);
|
||||
|
||||
let mut execution_payloads = vec![];
|
||||
|
||||
let harness = BeaconChainHarness::builder(E::default())
|
||||
.spec(spec)
|
||||
.deterministic_keypairs(VALIDATOR_COUNT)
|
||||
.fresh_ephemeral_store()
|
||||
.mock_execution_layer()
|
||||
.build();
|
||||
|
||||
/*
|
||||
* Start with the base fork.
|
||||
*/
|
||||
|
||||
assert!(harness.chain.head().unwrap().beacon_block.as_base().is_ok());
|
||||
|
||||
/*
|
||||
* Do the Altair fork.
|
||||
*/
|
||||
|
||||
harness.extend_to_slot(altair_fork_slot);
|
||||
|
||||
let altair_head = harness.chain.head().unwrap().beacon_block;
|
||||
assert!(altair_head.as_altair().is_ok());
|
||||
assert_eq!(altair_head.slot(), altair_fork_slot);
|
||||
|
||||
/*
|
||||
* Do the merge fork, without a terminal PoW block.
|
||||
*/
|
||||
|
||||
harness.extend_to_slot(merge_fork_slot);
|
||||
|
||||
let merge_head = harness.chain.head().unwrap().beacon_block;
|
||||
assert!(merge_head.as_merge().is_ok());
|
||||
assert_eq!(merge_head.slot(), merge_fork_slot);
|
||||
assert_eq!(
|
||||
*merge_head.message().body().execution_payload().unwrap(),
|
||||
ExecutionPayload::default()
|
||||
);
|
||||
|
||||
/*
|
||||
* Next merge block shouldn't include an exec payload.
|
||||
*/
|
||||
|
||||
harness.extend_slots(1);
|
||||
|
||||
let one_after_merge_head = harness.chain.head().unwrap().beacon_block;
|
||||
assert_eq!(
|
||||
*one_after_merge_head
|
||||
.message()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap(),
|
||||
ExecutionPayload::default()
|
||||
);
|
||||
assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1);
|
||||
|
||||
/*
|
||||
* Trigger the terminal PoW block.
|
||||
*/
|
||||
|
||||
harness
|
||||
.execution_block_generator()
|
||||
.move_to_terminal_block()
|
||||
.unwrap();
|
||||
|
||||
/*
|
||||
* Next merge block should include an exec payload.
|
||||
*/
|
||||
|
||||
for _ in 0..4 {
|
||||
harness.extend_slots(1);
|
||||
|
||||
let block = harness.chain.head().unwrap().beacon_block;
|
||||
execution_payloads.push(block.message().body().execution_payload().unwrap().clone());
|
||||
}
|
||||
|
||||
verify_execution_payload_chain(&execution_payloads);
|
||||
}
|
||||
@@ -2,21 +2,15 @@
|
||||
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use beacon_chain::observed_operations::ObservationOutcome;
|
||||
use beacon_chain::test_utils::{
|
||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
|
||||
test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
|
||||
};
|
||||
use lazy_static::lazy_static;
|
||||
use sloggers::{null::NullLoggerBuilder, Build};
|
||||
use std::sync::Arc;
|
||||
use store::{LevelDB, StoreConfig};
|
||||
use tempfile::{tempdir, TempDir};
|
||||
use types::test_utils::{
|
||||
AttesterSlashingTestTask, ProposerSlashingTestTask, TestingAttesterSlashingBuilder,
|
||||
TestingProposerSlashingBuilder, TestingVoluntaryExitBuilder,
|
||||
};
|
||||
use types::*;
|
||||
|
||||
pub const VALIDATOR_COUNT: usize = 24;
|
||||
@@ -32,23 +26,22 @@ type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
|
||||
type HotColdDB = store::HotColdDB<E, LevelDB<E>, LevelDB<E>>;
|
||||
|
||||
fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
|
||||
let spec = E::default_spec();
|
||||
let spec = test_spec::<E>();
|
||||
let hot_path = db_path.path().join("hot_db");
|
||||
let cold_path = db_path.path().join("cold_db");
|
||||
let config = StoreConfig::default();
|
||||
let log = NullLoggerBuilder.build().expect("logger should build");
|
||||
Arc::new(
|
||||
HotColdDB::open(&hot_path, &cold_path, config, spec, log)
|
||||
.expect("disk store should initialize"),
|
||||
)
|
||||
HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
|
||||
.expect("disk store should initialize")
|
||||
}
|
||||
|
||||
fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {
|
||||
let harness = BeaconChainHarness::new_with_disk_store(
|
||||
MinimalEthSpec,
|
||||
store,
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
);
|
||||
let harness = BeaconChainHarness::builder(MinimalEthSpec)
|
||||
.default_spec()
|
||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||
.fresh_disk_store(store)
|
||||
.mock_execution_layer()
|
||||
.build();
|
||||
harness.advance_slot();
|
||||
harness
|
||||
}
|
||||
@@ -58,7 +51,7 @@ fn voluntary_exit() {
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
|
||||
let spec = &harness.chain.spec;
|
||||
let spec = &harness.chain.spec.clone();
|
||||
|
||||
harness.extend_chain(
|
||||
(E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize,
|
||||
@@ -66,21 +59,13 @@ fn voluntary_exit() {
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
let head_info = harness.chain.head_info().unwrap();
|
||||
|
||||
let make_exit = |validator_index: usize, exit_epoch: u64| {
|
||||
TestingVoluntaryExitBuilder::new(Epoch::new(exit_epoch), validator_index as u64).build(
|
||||
&KEYPAIRS[validator_index].sk,
|
||||
&head_info.fork,
|
||||
head_info.genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
};
|
||||
|
||||
let validator_index1 = VALIDATOR_COUNT - 1;
|
||||
let validator_index2 = VALIDATOR_COUNT - 2;
|
||||
|
||||
let exit1 = make_exit(validator_index1, spec.shard_committee_period);
|
||||
let exit1 = harness.make_voluntary_exit(
|
||||
validator_index1 as u64,
|
||||
Epoch::new(spec.shard_committee_period),
|
||||
);
|
||||
|
||||
// First verification should show it to be fresh.
|
||||
assert!(matches!(
|
||||
@@ -100,14 +85,20 @@ fn voluntary_exit() {
|
||||
));
|
||||
|
||||
// A different exit for the same validator should also be detected as a duplicate.
|
||||
let exit2 = make_exit(validator_index1, spec.shard_committee_period + 1);
|
||||
let exit2 = harness.make_voluntary_exit(
|
||||
validator_index1 as u64,
|
||||
Epoch::new(spec.shard_committee_period + 1),
|
||||
);
|
||||
assert!(matches!(
|
||||
harness.chain.verify_voluntary_exit_for_gossip(exit2),
|
||||
Ok(ObservationOutcome::AlreadyKnown)
|
||||
));
|
||||
|
||||
// Exit for a different validator should be fine.
|
||||
let exit3 = make_exit(validator_index2, spec.shard_committee_period);
|
||||
let exit3 = harness.make_voluntary_exit(
|
||||
validator_index2 as u64,
|
||||
Epoch::new(spec.shard_committee_period),
|
||||
);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
@@ -122,25 +113,11 @@ fn proposer_slashing() {
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
|
||||
let spec = &harness.chain.spec;
|
||||
|
||||
let head_info = harness.chain.head_info().unwrap();
|
||||
|
||||
let validator_index1 = VALIDATOR_COUNT - 1;
|
||||
let validator_index2 = VALIDATOR_COUNT - 2;
|
||||
|
||||
let make_slashing = |validator_index: usize| {
|
||||
TestingProposerSlashingBuilder::double_vote::<E>(
|
||||
ProposerSlashingTestTask::Valid,
|
||||
validator_index as u64,
|
||||
&KEYPAIRS[validator_index].sk,
|
||||
&head_info.fork,
|
||||
head_info.genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
};
|
||||
|
||||
let slashing1 = make_slashing(validator_index1);
|
||||
let slashing1 = harness.make_proposer_slashing(validator_index1 as u64);
|
||||
|
||||
// First slashing for this proposer should be allowed.
|
||||
assert!(matches!(
|
||||
@@ -173,7 +150,7 @@ fn proposer_slashing() {
|
||||
));
|
||||
|
||||
// Proposer slashing for a different index should be accepted
|
||||
let slashing3 = make_slashing(validator_index2);
|
||||
let slashing3 = harness.make_proposer_slashing(validator_index2 as u64);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
@@ -188,9 +165,6 @@ fn attester_slashing() {
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
|
||||
let spec = &harness.chain.spec;
|
||||
|
||||
let head_info = harness.chain.head_info().unwrap();
|
||||
|
||||
// First third of the validators
|
||||
let first_third = (0..VALIDATOR_COUNT as u64 / 3).collect::<Vec<_>>();
|
||||
@@ -201,25 +175,8 @@ fn attester_slashing() {
|
||||
// Last half of the validators
|
||||
let second_half = (VALIDATOR_COUNT as u64 / 2..VALIDATOR_COUNT as u64).collect::<Vec<_>>();
|
||||
|
||||
let signer = |idx: u64, message: &[u8]| {
|
||||
KEYPAIRS[idx as usize]
|
||||
.sk
|
||||
.sign(Hash256::from_slice(&message))
|
||||
};
|
||||
|
||||
let make_slashing = |validators| {
|
||||
TestingAttesterSlashingBuilder::double_vote::<_, E>(
|
||||
AttesterSlashingTestTask::Valid,
|
||||
validators,
|
||||
signer,
|
||||
&head_info.fork,
|
||||
head_info.genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
};
|
||||
|
||||
// Slashing for first third of validators should be accepted.
|
||||
let slashing1 = make_slashing(&first_third);
|
||||
let slashing1 = harness.make_attester_slashing(first_third);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
@@ -229,7 +186,7 @@ fn attester_slashing() {
|
||||
));
|
||||
|
||||
// Overlapping slashing for first half of validators should also be accepted.
|
||||
let slashing2 = make_slashing(&first_half);
|
||||
let slashing2 = harness.make_attester_slashing(first_half);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
@@ -255,7 +212,7 @@ fn attester_slashing() {
|
||||
));
|
||||
|
||||
// Slashing for last half of validators should be accepted (distinct from all existing)
|
||||
let slashing3 = make_slashing(&second_half);
|
||||
let slashing3 = harness.make_attester_slashing(second_half);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
@@ -264,7 +221,7 @@ fn attester_slashing() {
|
||||
ObservationOutcome::New(_)
|
||||
));
|
||||
// Slashing for last third (contained in last half) should be rejected.
|
||||
let slashing4 = make_slashing(&last_third);
|
||||
let slashing4 = harness.make_attester_slashing(last_third);
|
||||
assert!(matches!(
|
||||
harness
|
||||
.chain
|
||||
|
||||
695
beacon_node/beacon_chain/tests/payload_invalidation.rs
Normal file
695
beacon_node/beacon_chain/tests/payload_invalidation.rs
Normal file
@@ -0,0 +1,695 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
use beacon_chain::{
|
||||
test_utils::{BeaconChainHarness, EphemeralHarnessType},
|
||||
BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, StateSkipConfig,
|
||||
WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
|
||||
};
|
||||
use execution_layer::{
|
||||
json_structures::JsonPayloadAttributesV1, ExecutionLayer, PayloadAttributes,
|
||||
};
|
||||
use proto_array::ExecutionStatus;
|
||||
use slot_clock::SlotClock;
|
||||
use task_executor::ShutdownReason;
|
||||
use types::*;
|
||||
|
||||
const VALIDATOR_COUNT: usize = 32;
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
|
||||
#[derive(PartialEq, Clone)]
|
||||
enum Payload {
|
||||
Valid,
|
||||
Invalid {
|
||||
latest_valid_hash: Option<ExecutionBlockHash>,
|
||||
},
|
||||
Syncing,
|
||||
}
|
||||
|
||||
struct InvalidPayloadRig {
|
||||
harness: BeaconChainHarness<EphemeralHarnessType<E>>,
|
||||
enable_attestations: bool,
|
||||
}
|
||||
|
||||
impl InvalidPayloadRig {
|
||||
fn new() -> Self {
|
||||
let mut spec = E::default_spec();
|
||||
spec.altair_fork_epoch = Some(Epoch::new(0));
|
||||
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
|
||||
|
||||
let harness = BeaconChainHarness::builder(MainnetEthSpec)
|
||||
.spec(spec)
|
||||
.deterministic_keypairs(VALIDATOR_COUNT)
|
||||
.mock_execution_layer()
|
||||
.fresh_ephemeral_store()
|
||||
.build();
|
||||
|
||||
// Move to slot 1.
|
||||
harness.advance_slot();
|
||||
|
||||
Self {
|
||||
harness,
|
||||
enable_attestations: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn enable_attestations(mut self) -> Self {
|
||||
self.enable_attestations = true;
|
||||
self
|
||||
}
|
||||
|
||||
fn execution_layer(&self) -> ExecutionLayer {
|
||||
self.harness.chain.execution_layer.clone().unwrap()
|
||||
}
|
||||
|
||||
fn block_hash(&self, block_root: Hash256) -> ExecutionBlockHash {
|
||||
self.harness
|
||||
.chain
|
||||
.get_block(&block_root)
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.message()
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.block_hash
|
||||
}
|
||||
|
||||
fn execution_status(&self, block_root: Hash256) -> ExecutionStatus {
|
||||
self.harness
|
||||
.chain
|
||||
.fork_choice
|
||||
.read()
|
||||
.get_block(&block_root)
|
||||
.unwrap()
|
||||
.execution_status
|
||||
}
|
||||
|
||||
fn fork_choice(&self) {
|
||||
self.harness.chain.fork_choice().unwrap();
|
||||
}
|
||||
|
||||
fn head_info(&self) -> HeadInfo {
|
||||
self.harness.chain.head_info().unwrap()
|
||||
}
|
||||
|
||||
fn previous_payload_attributes(&self) -> PayloadAttributes {
|
||||
let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap();
|
||||
let json = mock_execution_layer
|
||||
.server
|
||||
.take_previous_request()
|
||||
.expect("no previous request");
|
||||
let params = json.get("params").expect("no params");
|
||||
let payload_param_json = params.get(1).expect("no payload param");
|
||||
let attributes: JsonPayloadAttributesV1 =
|
||||
serde_json::from_value(payload_param_json.clone()).unwrap();
|
||||
attributes.into()
|
||||
}
|
||||
|
||||
fn move_to_terminal_block(&self) {
|
||||
let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap();
|
||||
mock_execution_layer
|
||||
.server
|
||||
.execution_block_generator()
|
||||
.move_to_terminal_block()
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec<Hash256> {
|
||||
(0..num_blocks)
|
||||
.map(|_| self.import_block(is_valid.clone()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn move_to_first_justification(&mut self, is_valid: Payload) {
|
||||
let slots_till_justification = E::slots_per_epoch() * 3;
|
||||
self.build_blocks(slots_till_justification, is_valid);
|
||||
|
||||
let justified_checkpoint = self.head_info().current_justified_checkpoint;
|
||||
assert_eq!(justified_checkpoint.epoch, 2);
|
||||
}
|
||||
|
||||
fn import_block(&mut self, is_valid: Payload) -> Hash256 {
|
||||
self.import_block_parametric(is_valid, |error| {
|
||||
matches!(
|
||||
error,
|
||||
BlockError::ExecutionPayloadError(
|
||||
ExecutionPayloadError::RejectedByExecutionEngine { .. }
|
||||
)
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn block_root_at_slot(&self, slot: Slot) -> Option<Hash256> {
|
||||
self.harness
|
||||
.chain
|
||||
.block_root_at_slot(slot, WhenSlotSkipped::None)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn import_block_parametric<F: Fn(&BlockError<E>) -> bool>(
|
||||
&mut self,
|
||||
is_valid: Payload,
|
||||
evaluate_error: F,
|
||||
) -> Hash256 {
|
||||
let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap();
|
||||
|
||||
let head = self.harness.chain.head().unwrap();
|
||||
let state = head.beacon_state;
|
||||
let slot = state.slot() + 1;
|
||||
let (block, post_state) = self.harness.make_block(state, slot);
|
||||
let block_root = block.canonical_root();
|
||||
|
||||
match is_valid {
|
||||
Payload::Valid | Payload::Syncing => {
|
||||
if is_valid == Payload::Syncing {
|
||||
// Importing a payload whilst returning `SYNCING` simulates an EE that obtains
|
||||
// the block via it's own means (e.g., devp2p).
|
||||
let should_import_payload = true;
|
||||
mock_execution_layer
|
||||
.server
|
||||
.all_payloads_syncing(should_import_payload);
|
||||
} else {
|
||||
mock_execution_layer.server.full_payload_verification();
|
||||
}
|
||||
let root = self.harness.process_block(slot, block.clone()).unwrap();
|
||||
|
||||
if self.enable_attestations {
|
||||
let all_validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
|
||||
self.harness.attest_block(
|
||||
&post_state,
|
||||
block.state_root(),
|
||||
block_root.into(),
|
||||
&block,
|
||||
&all_validators,
|
||||
);
|
||||
}
|
||||
|
||||
let execution_status = self.execution_status(root.into());
|
||||
|
||||
match is_valid {
|
||||
Payload::Syncing => assert!(execution_status.is_not_verified()),
|
||||
Payload::Valid => assert!(execution_status.is_valid()),
|
||||
Payload::Invalid { .. } => unreachable!(),
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
self.harness.chain.get_block(&block_root).unwrap().unwrap(),
|
||||
block,
|
||||
"block from db must match block imported"
|
||||
);
|
||||
}
|
||||
Payload::Invalid { latest_valid_hash } => {
|
||||
let latest_valid_hash = latest_valid_hash
|
||||
.unwrap_or_else(|| self.block_hash(block.message().parent_root()));
|
||||
|
||||
mock_execution_layer
|
||||
.server
|
||||
.all_payloads_invalid(latest_valid_hash);
|
||||
|
||||
match self.harness.process_block(slot, block) {
|
||||
Err(error) if evaluate_error(&error) => (),
|
||||
Err(other) => {
|
||||
panic!("evaluate_error returned false with {:?}", other)
|
||||
}
|
||||
Ok(_) => panic!("block with invalid payload was imported"),
|
||||
};
|
||||
|
||||
assert!(
|
||||
self.harness
|
||||
.chain
|
||||
.fork_choice
|
||||
.read()
|
||||
.get_block(&block_root)
|
||||
.is_none(),
|
||||
"invalid block must not exist in fork choice"
|
||||
);
|
||||
assert!(
|
||||
self.harness.chain.get_block(&block_root).unwrap().is_none(),
|
||||
"invalid block cannot be accessed via get_block"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
block_root
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple test of the different import types.
|
||||
#[test]
|
||||
fn valid_invalid_syncing() {
|
||||
let mut rig = InvalidPayloadRig::new();
|
||||
rig.move_to_terminal_block();
|
||||
|
||||
rig.import_block(Payload::Valid);
|
||||
rig.import_block(Payload::Invalid {
|
||||
latest_valid_hash: None,
|
||||
});
|
||||
rig.import_block(Payload::Syncing);
|
||||
}
|
||||
|
||||
/// Ensure that an invalid payload can invalidate its parent too (given the right
|
||||
/// `latest_valid_hash`.
|
||||
#[test]
|
||||
fn invalid_payload_invalidates_parent() {
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
rig.move_to_first_justification(Payload::Syncing);
|
||||
|
||||
let roots = vec![
|
||||
rig.import_block(Payload::Syncing),
|
||||
rig.import_block(Payload::Syncing),
|
||||
rig.import_block(Payload::Syncing),
|
||||
];
|
||||
|
||||
let latest_valid_hash = rig.block_hash(roots[0]);
|
||||
|
||||
rig.import_block(Payload::Invalid {
|
||||
latest_valid_hash: Some(latest_valid_hash),
|
||||
});
|
||||
|
||||
assert!(rig.execution_status(roots[0]).is_valid());
|
||||
assert!(rig.execution_status(roots[1]).is_invalid());
|
||||
assert!(rig.execution_status(roots[2]).is_invalid());
|
||||
|
||||
assert_eq!(rig.head_info().block_root, roots[0]);
|
||||
}
|
||||
|
||||
/// Ensure the client tries to exit when the justified checkpoint is invalidated.
|
||||
#[test]
|
||||
fn justified_checkpoint_becomes_invalid() {
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
rig.move_to_first_justification(Payload::Syncing);
|
||||
|
||||
let justified_checkpoint = rig.head_info().current_justified_checkpoint;
|
||||
let parent_root_of_justified = rig
|
||||
.harness
|
||||
.chain
|
||||
.get_block(&justified_checkpoint.root)
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.parent_root();
|
||||
let parent_hash_of_justified = rig.block_hash(parent_root_of_justified);
|
||||
|
||||
// No service should have triggered a shutdown, yet.
|
||||
assert!(rig.harness.shutdown_reasons().is_empty());
|
||||
|
||||
// Import a block that will invalidate the justified checkpoint.
|
||||
rig.import_block_parametric(
|
||||
Payload::Invalid {
|
||||
latest_valid_hash: Some(parent_hash_of_justified),
|
||||
},
|
||||
|error| {
|
||||
matches!(
|
||||
error,
|
||||
// The block import should fail since the beacon chain knows the justified payload
|
||||
// is invalid.
|
||||
BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. })
|
||||
)
|
||||
},
|
||||
);
|
||||
|
||||
// The beacon chain should have triggered a shutdown.
|
||||
assert_eq!(
|
||||
rig.harness.shutdown_reasons(),
|
||||
vec![ShutdownReason::Failure(
|
||||
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON
|
||||
)]
|
||||
);
|
||||
}
|
||||
|
||||
/// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block.
|
||||
#[test]
|
||||
fn pre_finalized_latest_valid_hash() {
|
||||
let num_blocks = E::slots_per_epoch() * 4;
|
||||
let finalized_epoch = 2;
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
let mut blocks = vec![];
|
||||
blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
|
||||
blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing));
|
||||
|
||||
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
|
||||
|
||||
let pre_finalized_block_root = rig.block_root_at_slot(Slot::new(1)).unwrap();
|
||||
let pre_finalized_block_hash = rig.block_hash(pre_finalized_block_root);
|
||||
|
||||
// No service should have triggered a shutdown, yet.
|
||||
assert!(rig.harness.shutdown_reasons().is_empty());
|
||||
|
||||
// Import a pre-finalized block.
|
||||
rig.import_block(Payload::Invalid {
|
||||
latest_valid_hash: Some(pre_finalized_block_hash),
|
||||
});
|
||||
|
||||
// The latest imported block should be the head.
|
||||
assert_eq!(rig.head_info().block_root, *blocks.last().unwrap());
|
||||
|
||||
// The beacon chain should *not* have triggered a shutdown.
|
||||
assert_eq!(rig.harness.shutdown_reasons(), vec![]);
|
||||
|
||||
// All blocks should still be unverified.
|
||||
for i in E::slots_per_epoch() * finalized_epoch..num_blocks {
|
||||
let slot = Slot::new(i);
|
||||
let root = rig.block_root_at_slot(slot).unwrap();
|
||||
if slot == 1 {
|
||||
assert!(rig.execution_status(root).is_valid());
|
||||
} else {
|
||||
assert!(rig.execution_status(root).is_not_verified());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensure that a `latest_valid_hash` will:
|
||||
///
|
||||
/// - Invalidate descendants of `latest_valid_root`.
|
||||
/// - Validate `latest_valid_root` and its ancestors.
|
||||
#[test]
|
||||
fn latest_valid_hash_will_validate() {
|
||||
const LATEST_VALID_SLOT: u64 = 3;
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
|
||||
let mut blocks = vec![];
|
||||
blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
|
||||
blocks.extend(rig.build_blocks(4, Payload::Syncing));
|
||||
|
||||
let latest_valid_root = rig
|
||||
.block_root_at_slot(Slot::new(LATEST_VALID_SLOT))
|
||||
.unwrap();
|
||||
let latest_valid_hash = rig.block_hash(latest_valid_root);
|
||||
|
||||
rig.import_block(Payload::Invalid {
|
||||
latest_valid_hash: Some(latest_valid_hash),
|
||||
});
|
||||
|
||||
assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT);
|
||||
|
||||
for slot in 0..=5 {
|
||||
let slot = Slot::new(slot);
|
||||
let root = if slot > 0 {
|
||||
// If not the genesis slot, check the blocks we just produced.
|
||||
blocks[slot.as_usize() - 1]
|
||||
} else {
|
||||
// Genesis slot
|
||||
rig.block_root_at_slot(slot).unwrap()
|
||||
};
|
||||
let execution_status = rig.execution_status(root);
|
||||
|
||||
if slot > LATEST_VALID_SLOT {
|
||||
assert!(execution_status.is_invalid())
|
||||
} else if slot == 0 {
|
||||
assert!(execution_status.is_irrelevant())
|
||||
} else {
|
||||
assert!(execution_status.is_valid())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check behaviour when the `latest_valid_hash` is a junk value.
|
||||
#[test]
|
||||
fn latest_valid_hash_is_junk() {
|
||||
let num_blocks = E::slots_per_epoch() * 5;
|
||||
let finalized_epoch = 3;
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
let mut blocks = vec![];
|
||||
blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
|
||||
blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing));
|
||||
|
||||
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
|
||||
|
||||
// No service should have triggered a shutdown, yet.
|
||||
assert!(rig.harness.shutdown_reasons().is_empty());
|
||||
|
||||
let junk_hash = ExecutionBlockHash::repeat_byte(42);
|
||||
rig.import_block(Payload::Invalid {
|
||||
latest_valid_hash: Some(junk_hash),
|
||||
});
|
||||
|
||||
// The latest imported block should be the head.
|
||||
assert_eq!(rig.head_info().block_root, *blocks.last().unwrap());
|
||||
|
||||
// The beacon chain should *not* have triggered a shutdown.
|
||||
assert_eq!(rig.harness.shutdown_reasons(), vec![]);
|
||||
|
||||
// All blocks should still be unverified.
|
||||
for i in E::slots_per_epoch() * finalized_epoch..num_blocks {
|
||||
let slot = Slot::new(i);
|
||||
let root = rig.block_root_at_slot(slot).unwrap();
|
||||
if slot == 1 {
|
||||
assert!(rig.execution_status(root).is_valid());
|
||||
} else {
|
||||
assert!(rig.execution_status(root).is_not_verified());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check that descendants of invalid blocks are also invalidated.
|
||||
#[test]
|
||||
fn invalidates_all_descendants() {
|
||||
let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2;
|
||||
let finalized_epoch = 2;
|
||||
let finalized_slot = E::slots_per_epoch() * 2;
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
|
||||
|
||||
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
|
||||
assert_eq!(rig.head_info().block_root, *blocks.last().unwrap());
|
||||
|
||||
// Apply a block which conflicts with the canonical chain.
|
||||
let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3);
|
||||
let fork_parent_slot = fork_slot - 1;
|
||||
let fork_parent_state = rig
|
||||
.harness
|
||||
.chain
|
||||
.state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots)
|
||||
.unwrap();
|
||||
assert_eq!(fork_parent_state.slot(), fork_parent_slot);
|
||||
let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot);
|
||||
let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap();
|
||||
rig.fork_choice();
|
||||
|
||||
// The latest valid hash will be set to the grandparent of the fork block. This means that the
|
||||
// parent of the fork block will become invalid.
|
||||
let latest_valid_slot = fork_parent_slot - 1;
|
||||
let latest_valid_root = rig
|
||||
.harness
|
||||
.chain
|
||||
.block_root_at_slot(latest_valid_slot, WhenSlotSkipped::None)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(blocks.contains(&latest_valid_root));
|
||||
let latest_valid_hash = rig.block_hash(latest_valid_root);
|
||||
|
||||
// The new block should not become the head, the old head should remain.
|
||||
assert_eq!(rig.head_info().block_root, *blocks.last().unwrap());
|
||||
|
||||
rig.import_block(Payload::Invalid {
|
||||
latest_valid_hash: Some(latest_valid_hash),
|
||||
});
|
||||
|
||||
// The block before the fork should become the head.
|
||||
assert_eq!(rig.head_info().block_root, latest_valid_root);
|
||||
|
||||
// The fork block should be invalidated, even though it's not an ancestor of the block that
|
||||
// triggered the INVALID response from the EL.
|
||||
assert!(rig.execution_status(fork_block_root).is_invalid());
|
||||
|
||||
for root in blocks {
|
||||
let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot();
|
||||
|
||||
// Fork choice doesn't have info about pre-finalization, nothing to check here.
|
||||
if slot < finalized_slot {
|
||||
continue;
|
||||
}
|
||||
|
||||
let execution_status = rig.execution_status(root);
|
||||
if slot <= latest_valid_slot {
|
||||
// Blocks prior to the latest valid hash are valid.
|
||||
assert!(execution_status.is_valid());
|
||||
} else {
|
||||
// Blocks after the latest valid hash are invalid.
|
||||
assert!(execution_status.is_invalid());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check that the head will switch after the canonical branch is invalidated.
|
||||
#[test]
|
||||
fn switches_heads() {
|
||||
let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2;
|
||||
let finalized_epoch = 2;
|
||||
let finalized_slot = E::slots_per_epoch() * 2;
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
|
||||
|
||||
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
|
||||
assert_eq!(rig.head_info().block_root, *blocks.last().unwrap());
|
||||
|
||||
// Apply a block which conflicts with the canonical chain.
|
||||
let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3);
|
||||
let fork_parent_slot = fork_slot - 1;
|
||||
let fork_parent_state = rig
|
||||
.harness
|
||||
.chain
|
||||
.state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots)
|
||||
.unwrap();
|
||||
assert_eq!(fork_parent_state.slot(), fork_parent_slot);
|
||||
let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot);
|
||||
let fork_parent_root = fork_block.parent_root();
|
||||
let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap();
|
||||
rig.fork_choice();
|
||||
|
||||
let latest_valid_slot = fork_parent_slot;
|
||||
let latest_valid_hash = rig.block_hash(fork_parent_root);
|
||||
|
||||
// The new block should not become the head, the old head should remain.
|
||||
assert_eq!(rig.head_info().block_root, *blocks.last().unwrap());
|
||||
|
||||
rig.import_block(Payload::Invalid {
|
||||
latest_valid_hash: Some(latest_valid_hash),
|
||||
});
|
||||
|
||||
// The fork block should become the head.
|
||||
assert_eq!(rig.head_info().block_root, fork_block_root);
|
||||
|
||||
// The fork block has not yet been validated.
|
||||
assert!(rig.execution_status(fork_block_root).is_not_verified());
|
||||
|
||||
for root in blocks {
|
||||
let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot();
|
||||
|
||||
// Fork choice doesn't have info about pre-finalization, nothing to check here.
|
||||
if slot < finalized_slot {
|
||||
continue;
|
||||
}
|
||||
|
||||
let execution_status = rig.execution_status(root);
|
||||
if slot <= latest_valid_slot {
|
||||
// Blocks prior to the latest valid hash are valid.
|
||||
assert!(execution_status.is_valid());
|
||||
} else {
|
||||
// Blocks after the latest valid hash are invalid.
|
||||
assert!(execution_status.is_invalid());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_during_processing() {
|
||||
let mut rig = InvalidPayloadRig::new();
|
||||
rig.move_to_terminal_block();
|
||||
|
||||
let roots = &[
|
||||
rig.import_block(Payload::Valid),
|
||||
rig.import_block(Payload::Invalid {
|
||||
latest_valid_hash: None,
|
||||
}),
|
||||
rig.import_block(Payload::Valid),
|
||||
];
|
||||
|
||||
// 0 should be present in the chain.
|
||||
assert!(rig.harness.chain.get_block(&roots[0]).unwrap().is_some());
|
||||
// 1 should *not* be present in the chain.
|
||||
assert_eq!(rig.harness.chain.get_block(&roots[1]).unwrap(), None);
|
||||
// 2 should be the head.
|
||||
let head = rig.harness.chain.head_info().unwrap();
|
||||
assert_eq!(head.block_root, roots[2]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_after_optimistic_sync() {
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
|
||||
let mut roots = vec![
|
||||
rig.import_block(Payload::Syncing),
|
||||
rig.import_block(Payload::Syncing),
|
||||
rig.import_block(Payload::Syncing),
|
||||
];
|
||||
|
||||
for root in &roots {
|
||||
assert!(rig.harness.chain.get_block(root).unwrap().is_some());
|
||||
}
|
||||
|
||||
// 2 should be the head.
|
||||
let head = rig.harness.chain.head_info().unwrap();
|
||||
assert_eq!(head.block_root, roots[2]);
|
||||
|
||||
roots.push(rig.import_block(Payload::Invalid {
|
||||
latest_valid_hash: Some(rig.block_hash(roots[1])),
|
||||
}));
|
||||
|
||||
// Running fork choice is necessary since a block has been invalidated.
|
||||
rig.fork_choice();
|
||||
|
||||
// 1 should be the head, since 2 was invalidated.
|
||||
let head = rig.harness.chain.head_info().unwrap();
|
||||
assert_eq!(head.block_root, roots[1]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn payload_preparation() {
|
||||
let mut rig = InvalidPayloadRig::new();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid);
|
||||
|
||||
let el = rig.execution_layer();
|
||||
let head = rig.harness.chain.head().unwrap();
|
||||
let current_slot = rig.harness.chain.slot().unwrap();
|
||||
assert_eq!(head.beacon_state.slot(), 1);
|
||||
assert_eq!(current_slot, 1);
|
||||
|
||||
let next_slot = current_slot + 1;
|
||||
let proposer = head
|
||||
.beacon_state
|
||||
.get_beacon_proposer_index(next_slot, &rig.harness.chain.spec)
|
||||
.unwrap();
|
||||
|
||||
let fee_recipient = Address::repeat_byte(99);
|
||||
|
||||
// Provide preparation data to the EL for `proposer`.
|
||||
el.update_proposer_preparation_blocking(
|
||||
Epoch::new(1),
|
||||
&[ProposerPreparationData {
|
||||
validator_index: proposer as u64,
|
||||
fee_recipient,
|
||||
}],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
rig.harness
|
||||
.chain
|
||||
.prepare_beacon_proposer_blocking()
|
||||
.unwrap();
|
||||
|
||||
let payload_attributes = PayloadAttributes {
|
||||
timestamp: rig
|
||||
.harness
|
||||
.chain
|
||||
.slot_clock
|
||||
.start_of(next_slot)
|
||||
.unwrap()
|
||||
.as_secs(),
|
||||
prev_randao: *head
|
||||
.beacon_state
|
||||
.get_randao_mix(head.beacon_state.current_epoch())
|
||||
.unwrap(),
|
||||
suggested_fee_recipient: fee_recipient,
|
||||
};
|
||||
assert_eq!(rig.previous_payload_attributes(), payload_attributes);
|
||||
}
|
||||
@@ -1,160 +0,0 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use beacon_chain::{
|
||||
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
|
||||
BeaconChain, BeaconChainTypes,
|
||||
};
|
||||
use sloggers::{null::NullLoggerBuilder, Build};
|
||||
use std::sync::Arc;
|
||||
use store::{HotColdDB, LevelDB, StoreConfig};
|
||||
use tempfile::{tempdir, TempDir};
|
||||
use types::{EthSpec, Keypair, MinimalEthSpec};
|
||||
|
||||
type E = MinimalEthSpec;
|
||||
|
||||
// Should ideally be divisible by 3.
|
||||
pub const VALIDATOR_COUNT: usize = 24;
|
||||
|
||||
lazy_static! {
|
||||
/// A cached set of keys.
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
|
||||
let spec = E::default_spec();
|
||||
let hot_path = db_path.path().join("hot_db");
|
||||
let cold_path = db_path.path().join("cold_db");
|
||||
let config = StoreConfig::default();
|
||||
let log = NullLoggerBuilder.build().expect("logger should build");
|
||||
Arc::new(
|
||||
HotColdDB::open(&hot_path, &cold_path, config, spec, log)
|
||||
.expect("disk store should initialize"),
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn finalizes_after_resuming_from_db() {
|
||||
let validator_count = 16;
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8;
|
||||
let first_half = num_blocks_produced / 2;
|
||||
|
||||
let db_path = tempdir().unwrap();
|
||||
let store = get_store(&db_path);
|
||||
|
||||
let harness = BeaconChainHarness::new_with_disk_store(
|
||||
MinimalEthSpec,
|
||||
store.clone(),
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
);
|
||||
|
||||
harness.advance_slot();
|
||||
|
||||
harness.extend_chain(
|
||||
first_half as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
assert!(
|
||||
harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should read head")
|
||||
.beacon_state
|
||||
.finalized_checkpoint
|
||||
.epoch
|
||||
> 0,
|
||||
"the chain should have already finalized"
|
||||
);
|
||||
|
||||
let latest_slot = harness.chain.slot().expect("should have a slot");
|
||||
|
||||
harness
|
||||
.chain
|
||||
.persist_head_and_fork_choice()
|
||||
.expect("should persist the head and fork choice");
|
||||
harness
|
||||
.chain
|
||||
.persist_op_pool()
|
||||
.expect("should persist the op pool");
|
||||
harness
|
||||
.chain
|
||||
.persist_eth1_cache()
|
||||
.expect("should persist the eth1 cache");
|
||||
|
||||
let data_dir = harness.data_dir;
|
||||
let original_chain = harness.chain;
|
||||
|
||||
let resumed_harness = BeaconChainHarness::resume_from_disk_store(
|
||||
MinimalEthSpec,
|
||||
store,
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
data_dir,
|
||||
);
|
||||
|
||||
assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain);
|
||||
|
||||
// Set the slot clock of the resumed harness to be in the slot following the previous harness.
|
||||
//
|
||||
// This allows us to produce the block at the next slot.
|
||||
resumed_harness
|
||||
.chain
|
||||
.slot_clock
|
||||
.set_slot(latest_slot.as_u64() + 1);
|
||||
|
||||
resumed_harness.extend_chain(
|
||||
(num_blocks_produced - first_half) as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
|
||||
let state = &resumed_harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should read head")
|
||||
.beacon_state;
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
"head should be at the current slot"
|
||||
);
|
||||
assert_eq!(
|
||||
state.current_epoch(),
|
||||
num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
|
||||
"head should be at the expected epoch"
|
||||
);
|
||||
assert_eq!(
|
||||
state.current_justified_checkpoint.epoch,
|
||||
state.current_epoch() - 1,
|
||||
"the head should be justified one behind the current epoch"
|
||||
);
|
||||
assert_eq!(
|
||||
state.finalized_checkpoint.epoch,
|
||||
state.current_epoch() - 2,
|
||||
"the head should be finalized two behind the current epoch"
|
||||
);
|
||||
}
|
||||
|
||||
/// Checks that two chains are the same, for the purpose of this tests.
|
||||
///
|
||||
/// Several fields that are hard/impossible to check are ignored (e.g., the store).
|
||||
fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b: &BeaconChain<T>) {
|
||||
assert_eq!(a.spec, b.spec, "spec should be equal");
|
||||
assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal");
|
||||
assert_eq!(
|
||||
a.head().unwrap(),
|
||||
b.head().unwrap(),
|
||||
"head() should be equal"
|
||||
);
|
||||
assert_eq!(a.heads(), b.heads(), "heads() should be equal");
|
||||
assert_eq!(
|
||||
a.genesis_block_root, b.genesis_block_root,
|
||||
"genesis_block_root should be equal"
|
||||
);
|
||||
assert!(
|
||||
*a.fork_choice.read() == *b.fork_choice.read(),
|
||||
"fork_choice should be equal"
|
||||
);
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
667
beacon_node/beacon_chain/tests/sync_committee_verification.rs
Normal file
667
beacon_node/beacon_chain/tests/sync_committee_verification.rs
Normal file
@@ -0,0 +1,667 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
use beacon_chain::sync_committee_verification::Error as SyncCommitteeError;
|
||||
use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee};
|
||||
use int_to_bytes::int_to_bytes32;
|
||||
use lazy_static::lazy_static;
|
||||
use safe_arith::SafeArith;
|
||||
use store::{SignedContributionAndProof, SyncCommitteeMessage};
|
||||
use tree_hash::TreeHash;
|
||||
use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
|
||||
use types::{
|
||||
AggregateSignature, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, Slot,
|
||||
SyncSelectionProof, SyncSubnetId, Unsigned,
|
||||
};
|
||||
|
||||
pub type E = MainnetEthSpec;
|
||||
|
||||
pub const VALIDATOR_COUNT: usize = 256;
|
||||
|
||||
lazy_static! {
|
||||
/// A cached set of keys.
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
/// Returns a beacon chain harness.
|
||||
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
|
||||
let mut spec = E::default_spec();
|
||||
spec.altair_fork_epoch = Some(Epoch::new(0));
|
||||
let harness = BeaconChainHarness::builder(MainnetEthSpec)
|
||||
.spec(spec)
|
||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||
.fresh_ephemeral_store()
|
||||
.mock_execution_layer()
|
||||
.build();
|
||||
|
||||
harness.advance_slot();
|
||||
|
||||
harness
|
||||
}
|
||||
|
||||
/// Returns a sync message that is valid for some slot in the given `chain`.
|
||||
///
|
||||
/// Also returns some info about who created it.
|
||||
fn get_valid_sync_committee_message(
|
||||
harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
|
||||
slot: Slot,
|
||||
relative_sync_committee: RelativeSyncCommittee,
|
||||
) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) {
|
||||
let head_state = harness
|
||||
.chain
|
||||
.head_beacon_state()
|
||||
.expect("should get head state");
|
||||
let head_block_root = harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head state")
|
||||
.beacon_block_root;
|
||||
let (signature, _) = harness
|
||||
.make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee)
|
||||
.get(0)
|
||||
.expect("sync messages should exist")
|
||||
.get(0)
|
||||
.expect("first sync message should exist")
|
||||
.clone();
|
||||
|
||||
(
|
||||
signature.clone(),
|
||||
signature.validator_index as usize,
|
||||
harness.validator_keypairs[signature.validator_index as usize]
|
||||
.sk
|
||||
.clone(),
|
||||
SyncSubnetId::new(0),
|
||||
)
|
||||
}
|
||||
|
||||
fn get_valid_sync_contribution(
|
||||
harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
|
||||
relative_sync_committee: RelativeSyncCommittee,
|
||||
) -> (SignedContributionAndProof<E>, usize, SecretKey) {
|
||||
let head_state = harness
|
||||
.chain
|
||||
.head_beacon_state()
|
||||
.expect("should get head state");
|
||||
|
||||
let head_block_root = harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head state")
|
||||
.beacon_block_root;
|
||||
let sync_contributions = harness.make_sync_contributions(
|
||||
&head_state,
|
||||
head_block_root,
|
||||
head_state.slot(),
|
||||
relative_sync_committee,
|
||||
);
|
||||
|
||||
let (_, contribution_opt) = sync_contributions
|
||||
.get(0)
|
||||
.expect("sync contributions should exist");
|
||||
let contribution = contribution_opt
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.expect("signed contribution and proof should exist");
|
||||
|
||||
let aggregator_index = contribution.message.aggregator_index as usize;
|
||||
|
||||
(
|
||||
contribution,
|
||||
aggregator_index,
|
||||
harness.validator_keypairs[aggregator_index].sk.clone(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns a proof and index for a validator that is **not** an aggregator for the current sync period.
|
||||
fn get_non_aggregator(
|
||||
harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
|
||||
slot: Slot,
|
||||
) -> (usize, SecretKey) {
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
let sync_subcommittee_size = E::sync_committee_size()
|
||||
.safe_div(SYNC_COMMITTEE_SUBNET_COUNT as usize)
|
||||
.expect("should determine sync subcommittee size");
|
||||
let sync_committee = state
|
||||
.current_sync_committee()
|
||||
.expect("should use altair state")
|
||||
.clone();
|
||||
let non_aggregator_index = sync_committee
|
||||
.pubkeys
|
||||
.chunks(sync_subcommittee_size)
|
||||
.enumerate()
|
||||
.find_map(|(subcommittee_index, subcommittee)| {
|
||||
subcommittee.iter().find_map(|pubkey| {
|
||||
let validator_index = harness
|
||||
.chain
|
||||
.validator_index(&pubkey)
|
||||
.expect("should get validator index")
|
||||
.expect("pubkey should exist in beacon chain");
|
||||
|
||||
let selection_proof = SyncSelectionProof::new::<E>(
|
||||
slot,
|
||||
subcommittee_index as u64,
|
||||
&harness.validator_keypairs[validator_index].sk,
|
||||
&state.fork(),
|
||||
state.genesis_validators_root(),
|
||||
&harness.spec,
|
||||
);
|
||||
|
||||
if !selection_proof
|
||||
.is_aggregator::<E>()
|
||||
.expect("should determine aggregator")
|
||||
{
|
||||
Some(validator_index)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
})
|
||||
.expect("should find at least one non-aggregator");
|
||||
|
||||
let aggregator_sk = harness.validator_keypairs[non_aggregator_index].sk.clone();
|
||||
(non_aggregator_index, aggregator_sk)
|
||||
}
|
||||
|
||||
/// Tests verification of `SignedContributionAndProof` from the gossip network.
|
||||
#[test]
|
||||
fn aggregated_gossip_verification() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let state = harness.get_current_state();
|
||||
|
||||
harness.add_attested_blocks_at_slots(
|
||||
state,
|
||||
Hash256::zero(),
|
||||
&[Slot::new(1), Slot::new(2)],
|
||||
(0..VALIDATOR_COUNT).collect::<Vec<_>>().as_slice(),
|
||||
);
|
||||
|
||||
let current_slot = harness.chain.slot().expect("should get slot");
|
||||
|
||||
let (valid_aggregate, aggregator_index, aggregator_sk) =
|
||||
get_valid_sync_contribution(&harness, RelativeSyncCommittee::Current);
|
||||
|
||||
macro_rules! assert_invalid {
|
||||
($desc: tt, $attn_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => {
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_sync_contribution_for_gossip($attn_getter)
|
||||
.err()
|
||||
.expect(&format!(
|
||||
"{} should error during verify_sync_contribution_for_gossip",
|
||||
$desc
|
||||
)),
|
||||
$( $error ) |+ $( if $guard )?
|
||||
),
|
||||
"case: {}",
|
||||
$desc,
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
/*
|
||||
* The following two tests ensure:
|
||||
*
|
||||
* The contribution's slot is for the current slot, i.e. contribution.slot == current_slot
|
||||
* (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
|
||||
*/
|
||||
|
||||
let future_slot = current_slot + 1;
|
||||
assert_invalid!(
|
||||
"aggregate from future slot",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.contribution.slot = future_slot;
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::FutureSlot { message_slot, latest_permissible_slot }
|
||||
if message_slot == future_slot && latest_permissible_slot == current_slot
|
||||
);
|
||||
|
||||
let early_slot = current_slot
|
||||
.as_u64()
|
||||
// Subtract an additional slot since the harness will be exactly on the start of the
|
||||
// slot and the propagation tolerance will allow an extra slot.
|
||||
.checked_sub(2)
|
||||
.expect("chain is not sufficiently deep for test")
|
||||
.into();
|
||||
assert_invalid!(
|
||||
"aggregate from past slot",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.contribution.slot = early_slot;
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::PastSlot {
|
||||
message_slot,
|
||||
|
||||
earliest_permissible_slot
|
||||
}
|
||||
if message_slot == early_slot
|
||||
&& earliest_permissible_slot == current_slot - 1
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* The subcommittee index is in the allowed range,
|
||||
* i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"subcommittee index out of range",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.contribution.subcommittee_index = SYNC_COMMITTEE_SUBNET_COUNT;
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::InvalidSubcommittee {
|
||||
subcommittee_index,
|
||||
subcommittee_size,
|
||||
}
|
||||
if subcommittee_index == SYNC_COMMITTEE_SUBNET_COUNT && subcommittee_size == SYNC_COMMITTEE_SUBNET_COUNT
|
||||
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* The sync contribution has participants.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate with no participants",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
let aggregation_bits = &mut a.message.contribution.aggregation_bits;
|
||||
aggregation_bits.difference_inplace(&aggregation_bits.clone());
|
||||
assert!(aggregation_bits.is_zero());
|
||||
a.message.contribution.signature = AggregateSignature::infinity();
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::EmptyAggregationBitfield
|
||||
);
|
||||
|
||||
/*
|
||||
* This test ensures:
|
||||
*
|
||||
* The aggregator signature, signed_contribution_and_proof.signature, is valid.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate with bad signature",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
|
||||
a.signature = aggregator_sk.sign(Hash256::from_low_u64_be(42));
|
||||
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::InvalidSignature
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* The contribution_and_proof.selection_proof is a valid signature of the `SyncAggregatorSelectionData`
|
||||
* derived from the contribution by the validator with index `contribution_and_proof.aggregator_index`.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate with bad selection proof signature",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
|
||||
// Generate some random signature until happens to be a valid selection proof. We need
|
||||
// this in order to reach the signature verification code.
|
||||
//
|
||||
// Could run for ever, but that seems _really_ improbable.
|
||||
let mut i: u64 = 0;
|
||||
a.message.selection_proof = loop {
|
||||
i += 1;
|
||||
let proof: SyncSelectionProof = aggregator_sk
|
||||
.sign(Hash256::from_slice(&int_to_bytes32(i)))
|
||||
.into();
|
||||
if proof
|
||||
.is_aggregator::<E>()
|
||||
.expect("should determine aggregator")
|
||||
{
|
||||
break proof.into();
|
||||
}
|
||||
};
|
||||
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::InvalidSignature
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* The aggregate signature is valid for the message `beacon_block_root` and aggregate pubkey
|
||||
* derived from the participation info in `aggregation_bits` for the subcommittee specified by
|
||||
* the `contribution.subcommittee_index`.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate with bad aggregate signature",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
|
||||
let mut agg_sig = AggregateSignature::infinity();
|
||||
agg_sig.add_assign(&aggregator_sk.sign(Hash256::from_low_u64_be(42)));
|
||||
a.message.contribution.signature = agg_sig;
|
||||
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::InvalidSignature
|
||||
);
|
||||
|
||||
let too_high_index = <E as EthSpec>::ValidatorRegistryLimit::to_u64() + 1;
|
||||
assert_invalid!(
|
||||
"aggregate with too-high aggregator index",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.aggregator_index = too_high_index;
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::UnknownValidatorIndex(index)
|
||||
if index == too_high_index as usize
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* The aggregator's validator index is in the declared subcommittee of the current sync
|
||||
* committee -- i.e. state.validators[contribution_and_proof.aggregator_index].pubkey in
|
||||
* get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index).
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate with unknown aggregator index",
|
||||
{
|
||||
let mut a = valid_aggregate.clone();
|
||||
a.message.contribution.subcommittee_index +=1;
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::AggregatorNotInCommittee {
|
||||
aggregator_index
|
||||
}
|
||||
if aggregator_index == valid_aggregate.message.aggregator_index as u64
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* `contribution_and_proof.selection_proof` selects the validator as an aggregator for the
|
||||
* slot -- i.e. is_sync_committee_aggregator(contribution_and_proof.selection_proof) returns True.
|
||||
*/
|
||||
|
||||
let (non_aggregator_index, non_aggregator_sk) = get_non_aggregator(&harness, current_slot);
|
||||
assert_invalid!(
|
||||
"aggregate from non-aggregator",
|
||||
{
|
||||
SignedContributionAndProof::from_aggregate(
|
||||
non_aggregator_index as u64,
|
||||
valid_aggregate.message.contribution.clone(),
|
||||
None,
|
||||
&non_aggregator_sk,
|
||||
&harness.chain.head_info().expect("should get head info").fork,
|
||||
harness.chain.genesis_validators_root,
|
||||
&harness.chain.spec,
|
||||
)
|
||||
},
|
||||
SyncCommitteeError::InvalidSelectionProof {
|
||||
aggregator_index: index
|
||||
}
|
||||
if index == non_aggregator_index as u64
|
||||
);
|
||||
|
||||
// NOTE: from here on, the tests are stateful, and rely on the valid sync contribution having been
|
||||
// seen. A refactor to give each test case its own state might be nice at some point
|
||||
harness
|
||||
.chain
|
||||
.verify_sync_contribution_for_gossip(valid_aggregate.clone())
|
||||
.expect("should verify sync contribution");
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* The sync committee contribution is the first valid contribution received for the aggregator
|
||||
* with index contribution_and_proof.aggregator_index for the slot contribution.slot and
|
||||
* subcommittee index contribution.subcommittee_index.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate that has already been seen",
|
||||
valid_aggregate.clone(),
|
||||
SyncCommitteeError::SyncContributionAlreadyKnown(hash)
|
||||
if hash == valid_aggregate.message.contribution.tree_hash_root()
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* The sync committee contribution is the first valid contribution received for the aggregator
|
||||
* with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and
|
||||
* subcommittee index `contribution.subcommittee_index`.
|
||||
*/
|
||||
|
||||
assert_invalid!(
|
||||
"aggregate from aggregator and subcommittee that has already been seen",
|
||||
{
|
||||
let mut a = valid_aggregate;
|
||||
a.message.contribution.beacon_block_root = Hash256::from_low_u64_le(42);
|
||||
a
|
||||
},
|
||||
SyncCommitteeError::AggregatorAlreadyKnown(index)
|
||||
if index == aggregator_index as u64
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures that:
|
||||
*
|
||||
* A sync committee contribution for the slot before the sync committee period boundary is verified
|
||||
* using the `head_state.next_sync_committee`.
|
||||
*/
|
||||
|
||||
// Advance to the slot before the 3rd sync committee period because `current_sync_committee = next_sync_committee`
|
||||
// at genesis.
|
||||
let state = harness.get_current_state();
|
||||
let target_slot = Slot::new(
|
||||
(2 * harness.spec.epochs_per_sync_committee_period.as_u64() * E::slots_per_epoch()) - 1,
|
||||
);
|
||||
|
||||
harness
|
||||
.add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[])
|
||||
.expect("should add block");
|
||||
|
||||
// **Incorrectly** create a sync contribution using the current sync committee
|
||||
let (next_valid_contribution, _, _) =
|
||||
get_valid_sync_contribution(&harness, RelativeSyncCommittee::Current);
|
||||
|
||||
assert_invalid!(
|
||||
"sync contribution created with incorrect sync committee",
|
||||
next_valid_contribution.clone(),
|
||||
SyncCommitteeError::InvalidSignature | SyncCommitteeError::AggregatorNotInCommittee { .. }
|
||||
);
|
||||
}
|
||||
|
||||
/// Tests the verification conditions for sync committee messages on the gossip network.
|
||||
#[test]
|
||||
fn unaggregated_gossip_verification() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
let state = harness.get_current_state();
|
||||
|
||||
harness.add_attested_blocks_at_slots(
|
||||
state,
|
||||
Hash256::zero(),
|
||||
&[Slot::new(1), Slot::new(2)],
|
||||
(0..VALIDATOR_COUNT).collect::<Vec<_>>().as_slice(),
|
||||
);
|
||||
|
||||
let current_slot = harness.chain.slot().expect("should get slot");
|
||||
|
||||
let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) =
|
||||
get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current);
|
||||
|
||||
macro_rules! assert_invalid {
|
||||
($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => {
|
||||
assert!(
|
||||
matches!(
|
||||
harness
|
||||
.chain
|
||||
.verify_sync_committee_message_for_gossip($attn_getter, $subnet_getter)
|
||||
.err()
|
||||
.expect(&format!(
|
||||
"{} should error during verify_sync_committee_message_for_gossip",
|
||||
$desc
|
||||
)),
|
||||
$( $error ) |+ $( if $guard )?
|
||||
),
|
||||
"case: {}",
|
||||
$desc,
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
/*
|
||||
* The following test ensures:
|
||||
*
|
||||
* The subnet_id is valid for the given validator, i.e. subnet_id in
|
||||
* compute_subnets_for_sync_committee(state, sync_committee_message.validator_index).
|
||||
*/
|
||||
let id: u64 = subnet_id.into();
|
||||
let invalid_subnet_id = SyncSubnetId::new(id + 1);
|
||||
assert_invalid!(
|
||||
"invalid subnet id",
|
||||
{
|
||||
valid_sync_committee_message.clone()
|
||||
},
|
||||
invalid_subnet_id,
|
||||
SyncCommitteeError::InvalidSubnetId {
|
||||
received,
|
||||
expected,
|
||||
}
|
||||
if received == invalid_subnet_id && expected.contains(&subnet_id)
|
||||
);
|
||||
|
||||
/*
|
||||
* The following two tests ensure:
|
||||
*
|
||||
* This signature is within a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance from the current slot.
|
||||
*/
|
||||
|
||||
let future_slot = current_slot + 1;
|
||||
assert_invalid!(
|
||||
"sync message from future slot",
|
||||
{
|
||||
let mut signature = valid_sync_committee_message.clone();
|
||||
signature.slot = future_slot;
|
||||
signature
|
||||
},
|
||||
subnet_id,
|
||||
SyncCommitteeError::FutureSlot {
|
||||
message_slot,
|
||||
latest_permissible_slot,
|
||||
}
|
||||
if message_slot == future_slot && latest_permissible_slot == current_slot
|
||||
);
|
||||
|
||||
// Subtract an additional slot since the harness will be exactly on the start of the
|
||||
// slot and the propagation tolerance will allow an extra slot.
|
||||
let early_slot = current_slot
|
||||
.as_u64()
|
||||
.checked_sub(2)
|
||||
.expect("chain is not sufficiently deep for test")
|
||||
.into();
|
||||
assert_invalid!(
|
||||
"sync message from past slot",
|
||||
{
|
||||
let mut signature = valid_sync_committee_message.clone();
|
||||
signature.slot = early_slot;
|
||||
signature
|
||||
},
|
||||
subnet_id,
|
||||
SyncCommitteeError::PastSlot {
|
||||
message_slot,
|
||||
|
||||
earliest_permissible_slot,
|
||||
}
|
||||
if message_slot == early_slot && earliest_permissible_slot == current_slot - 1
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures that:
|
||||
*
|
||||
* The signature is valid for the message beacon_block_root for the validator referenced by
|
||||
* validator_index.
|
||||
*/
|
||||
assert_invalid!(
|
||||
"sync message with bad signature",
|
||||
{
|
||||
let mut sync_message = valid_sync_committee_message.clone();
|
||||
|
||||
sync_message.signature = validator_sk.sign(Hash256::from_low_u64_le(424242));
|
||||
|
||||
sync_message
|
||||
},
|
||||
subnet_id,
|
||||
SyncCommitteeError::InvalidSignature
|
||||
);
|
||||
|
||||
harness
|
||||
.chain
|
||||
.verify_sync_committee_message_for_gossip(valid_sync_committee_message.clone(), subnet_id)
|
||||
.expect("valid sync message should be verified");
|
||||
|
||||
/*
|
||||
* The following test ensures that:
|
||||
*
|
||||
* There has been no other valid sync committee message for the declared slot for the
|
||||
* validator referenced by sync_committee_message.validator_index.
|
||||
*/
|
||||
assert_invalid!(
|
||||
"sync message that has already been seen",
|
||||
valid_sync_committee_message,
|
||||
subnet_id,
|
||||
SyncCommitteeError::PriorSyncCommitteeMessageKnown {
|
||||
validator_index,
|
||||
slot,
|
||||
}
|
||||
if validator_index == expected_validator_index as u64 && slot == current_slot
|
||||
);
|
||||
|
||||
/*
|
||||
* The following test ensures that:
|
||||
*
|
||||
* A sync committee message for the slot before the sync committee period boundary is verified
|
||||
* using the `head_state.next_sync_committee`.
|
||||
*/
|
||||
|
||||
// Advance to the slot before the 3rd sync committee period because `current_sync_committee = next_sync_committee`
|
||||
// at genesis.
|
||||
let state = harness.get_current_state();
|
||||
let target_slot = Slot::new(
|
||||
(2 * harness.spec.epochs_per_sync_committee_period.as_u64() * E::slots_per_epoch()) - 1,
|
||||
);
|
||||
|
||||
harness
|
||||
.add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[])
|
||||
.expect("should add block");
|
||||
|
||||
// **Incorrectly** create a sync message using the current sync committee
|
||||
let (next_valid_sync_committee_message, _, _, next_subnet_id) =
|
||||
get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current);
|
||||
|
||||
assert_invalid!(
|
||||
"sync message on incorrect subnet",
|
||||
next_valid_sync_committee_message.clone(),
|
||||
next_subnet_id,
|
||||
SyncCommitteeError::InvalidSubnetId {
|
||||
received,
|
||||
expected,
|
||||
}
|
||||
if received == subnet_id && !expected.contains(&subnet_id)
|
||||
);
|
||||
}
|
||||
@@ -1,19 +1,18 @@
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use beacon_chain::{
|
||||
attestation_verification::Error as AttnError,
|
||||
test_utils::{
|
||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType, OP_POOL_DB_KEY,
|
||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
|
||||
OP_POOL_DB_KEY,
|
||||
},
|
||||
StateSkipConfig, WhenSlotSkipped,
|
||||
};
|
||||
use lazy_static::lazy_static;
|
||||
use operation_pool::PersistedOperationPool;
|
||||
use state_processing::{
|
||||
per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError,
|
||||
};
|
||||
use store::config::StoreConfig;
|
||||
use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot};
|
||||
|
||||
// Should ideally be divisible by 3.
|
||||
@@ -24,12 +23,13 @@ lazy_static! {
|
||||
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
||||
}
|
||||
|
||||
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<MinimalEthSpec>> {
|
||||
let harness = BeaconChainHarness::new(
|
||||
MinimalEthSpec,
|
||||
KEYPAIRS[0..validator_count].to_vec(),
|
||||
StoreConfig::default(),
|
||||
);
|
||||
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
|
||||
let harness = BeaconChainHarness::builder(MinimalEthSpec)
|
||||
.default_spec()
|
||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||
.fresh_ephemeral_store()
|
||||
.mock_execution_layer()
|
||||
.build();
|
||||
|
||||
harness.advance_slot();
|
||||
|
||||
@@ -39,7 +39,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<Minimal
|
||||
#[test]
|
||||
fn massive_skips() {
|
||||
let harness = get_harness(8);
|
||||
let spec = &MinimalEthSpec::default_spec();
|
||||
let spec = &harness.chain.spec;
|
||||
let mut state = harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
// Run per_slot_processing until it returns an error.
|
||||
@@ -50,7 +50,7 @@ fn massive_skips() {
|
||||
}
|
||||
};
|
||||
|
||||
assert!(state.slot > 1, "the state should skip at least one slot");
|
||||
assert!(state.slot() > 1, "the state should skip at least one slot");
|
||||
assert_eq!(
|
||||
error,
|
||||
SlotProcessingError::EpochProcessingError(EpochProcessingError::BeaconStateError(
|
||||
@@ -75,13 +75,13 @@ fn iterators() {
|
||||
|
||||
let block_roots: Vec<(Hash256, Slot)> = harness
|
||||
.chain
|
||||
.rev_iter_block_roots()
|
||||
.forwards_iter_block_roots(Slot::new(0))
|
||||
.expect("should get iter")
|
||||
.map(Result::unwrap)
|
||||
.collect();
|
||||
let state_roots: Vec<(Hash256, Slot)> = harness
|
||||
.chain
|
||||
.rev_iter_state_roots()
|
||||
.forwards_iter_state_roots(Slot::new(0))
|
||||
.expect("should get iter")
|
||||
.map(Result::unwrap)
|
||||
.collect();
|
||||
@@ -110,30 +110,95 @@ fn iterators() {
|
||||
block_roots.windows(2).for_each(|x| {
|
||||
assert_eq!(
|
||||
x[1].1,
|
||||
x[0].1 - 1,
|
||||
"block root slots should be decreasing by one"
|
||||
x[0].1 + 1,
|
||||
"block root slots should be increasing by one"
|
||||
)
|
||||
});
|
||||
state_roots.windows(2).for_each(|x| {
|
||||
assert_eq!(
|
||||
x[1].1,
|
||||
x[0].1 - 1,
|
||||
"state root slots should be decreasing by one"
|
||||
x[0].1 + 1,
|
||||
"state root slots should be increasing by one"
|
||||
)
|
||||
});
|
||||
|
||||
let head = &harness.chain.head().expect("should get head");
|
||||
|
||||
assert_eq!(
|
||||
*block_roots.first().expect("should have some block roots"),
|
||||
*block_roots.last().expect("should have some block roots"),
|
||||
(head.beacon_block_root, head.beacon_block.slot()),
|
||||
"first block root and slot should be for the head block"
|
||||
"last block root and slot should be for the head block"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
*state_roots.first().expect("should have some state roots"),
|
||||
(head.beacon_state_root, head.beacon_state.slot),
|
||||
"first state root and slot should be for the head state"
|
||||
*state_roots.last().expect("should have some state roots"),
|
||||
(head.beacon_state_root(), head.beacon_state.slot()),
|
||||
"last state root and slot should be for the head state"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn find_reorgs() {
|
||||
let num_blocks_produced = MinimalEthSpec::slots_per_historical_root() + 1;
|
||||
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
harness.extend_chain(
|
||||
num_blocks_produced as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
// No need to produce attestations for this test.
|
||||
AttestationStrategy::SomeValidators(vec![]),
|
||||
);
|
||||
|
||||
let head_state = harness.chain.head_beacon_state().unwrap();
|
||||
let head_slot = head_state.slot();
|
||||
let genesis_state = harness
|
||||
.chain
|
||||
.state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots)
|
||||
.unwrap();
|
||||
|
||||
// because genesis is more than `SLOTS_PER_HISTORICAL_ROOT` away, this should return with the
|
||||
// finalized slot.
|
||||
assert_eq!(
|
||||
harness
|
||||
.chain
|
||||
.find_reorg_slot(&genesis_state, harness.chain.genesis_block_root)
|
||||
.unwrap(),
|
||||
head_state
|
||||
.finalized_checkpoint()
|
||||
.epoch
|
||||
.start_slot(MinimalEthSpec::slots_per_epoch())
|
||||
);
|
||||
|
||||
// test head
|
||||
assert_eq!(
|
||||
harness
|
||||
.chain
|
||||
.find_reorg_slot(
|
||||
&head_state,
|
||||
harness.chain.head_beacon_block().unwrap().canonical_root()
|
||||
)
|
||||
.unwrap(),
|
||||
head_slot
|
||||
);
|
||||
|
||||
// Re-org back to the slot prior to the head.
|
||||
let prev_slot = head_slot - Slot::new(1);
|
||||
let prev_state = harness
|
||||
.chain
|
||||
.state_at_slot(prev_slot, StateSkipConfig::WithStateRoots)
|
||||
.unwrap();
|
||||
let prev_block_root = harness
|
||||
.chain
|
||||
.block_root_at_slot(prev_slot, WhenSlotSkipped::None)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
harness
|
||||
.chain
|
||||
.find_reorg_slot(&prev_state, prev_block_root)
|
||||
.unwrap(),
|
||||
prev_slot
|
||||
);
|
||||
}
|
||||
|
||||
@@ -170,7 +235,7 @@ fn chooses_fork() {
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot,
|
||||
state.slot(),
|
||||
Slot::from(initial_blocks + honest_fork_blocks),
|
||||
"head should be at the current slot"
|
||||
);
|
||||
@@ -201,7 +266,8 @@ fn finalizes_with_full_participation() {
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
state.slot(),
|
||||
num_blocks_produced,
|
||||
"head should be at the current slot"
|
||||
);
|
||||
assert_eq!(
|
||||
@@ -210,12 +276,12 @@ fn finalizes_with_full_participation() {
|
||||
"head should be at the expected epoch"
|
||||
);
|
||||
assert_eq!(
|
||||
state.current_justified_checkpoint.epoch,
|
||||
state.current_justified_checkpoint().epoch,
|
||||
state.current_epoch() - 1,
|
||||
"the head should be justified one behind the current epoch"
|
||||
);
|
||||
assert_eq!(
|
||||
state.finalized_checkpoint.epoch,
|
||||
state.finalized_checkpoint().epoch,
|
||||
state.current_epoch() - 2,
|
||||
"the head should be finalized two behind the current epoch"
|
||||
);
|
||||
@@ -239,7 +305,8 @@ fn finalizes_with_two_thirds_participation() {
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
state.slot(),
|
||||
num_blocks_produced,
|
||||
"head should be at the current slot"
|
||||
);
|
||||
assert_eq!(
|
||||
@@ -253,12 +320,12 @@ fn finalizes_with_two_thirds_participation() {
|
||||
// included in blocks during that epoch.
|
||||
|
||||
assert_eq!(
|
||||
state.current_justified_checkpoint.epoch,
|
||||
state.current_justified_checkpoint().epoch,
|
||||
state.current_epoch() - 2,
|
||||
"the head should be justified two behind the current epoch"
|
||||
);
|
||||
assert_eq!(
|
||||
state.finalized_checkpoint.epoch,
|
||||
state.finalized_checkpoint().epoch,
|
||||
state.current_epoch() - 4,
|
||||
"the head should be finalized three behind the current epoch"
|
||||
);
|
||||
@@ -283,7 +350,8 @@ fn does_not_finalize_with_less_than_two_thirds_participation() {
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
state.slot(),
|
||||
num_blocks_produced,
|
||||
"head should be at the current slot"
|
||||
);
|
||||
assert_eq!(
|
||||
@@ -292,11 +360,13 @@ fn does_not_finalize_with_less_than_two_thirds_participation() {
|
||||
"head should be at the expected epoch"
|
||||
);
|
||||
assert_eq!(
|
||||
state.current_justified_checkpoint.epoch, 0,
|
||||
state.current_justified_checkpoint().epoch,
|
||||
0,
|
||||
"no epoch should have been justified"
|
||||
);
|
||||
assert_eq!(
|
||||
state.finalized_checkpoint.epoch, 0,
|
||||
state.finalized_checkpoint().epoch,
|
||||
0,
|
||||
"no epoch should have been finalized"
|
||||
);
|
||||
}
|
||||
@@ -316,7 +386,8 @@ fn does_not_finalize_without_attestation() {
|
||||
let state = &harness.chain.head().expect("should get head").beacon_state;
|
||||
|
||||
assert_eq!(
|
||||
state.slot, num_blocks_produced,
|
||||
state.slot(),
|
||||
num_blocks_produced,
|
||||
"head should be at the current slot"
|
||||
);
|
||||
assert_eq!(
|
||||
@@ -325,11 +396,13 @@ fn does_not_finalize_without_attestation() {
|
||||
"head should be at the expected epoch"
|
||||
);
|
||||
assert_eq!(
|
||||
state.current_justified_checkpoint.epoch, 0,
|
||||
state.current_justified_checkpoint().epoch,
|
||||
0,
|
||||
"no epoch should have been justified"
|
||||
);
|
||||
assert_eq!(
|
||||
state.finalized_checkpoint.epoch, 0,
|
||||
state.finalized_checkpoint().epoch,
|
||||
0,
|
||||
"no epoch should have been finalized"
|
||||
);
|
||||
}
|
||||
@@ -354,14 +427,14 @@ fn roundtrip_operation_pool() {
|
||||
.persist_op_pool()
|
||||
.expect("should persist op pool");
|
||||
|
||||
let key = Hash256::from_slice(&OP_POOL_DB_KEY);
|
||||
let restored_op_pool = harness
|
||||
.chain
|
||||
.store
|
||||
.get_item::<PersistedOperationPool<MinimalEthSpec>>(&key)
|
||||
.get_item::<PersistedOperationPool<MinimalEthSpec>>(&OP_POOL_DB_KEY)
|
||||
.expect("should read db")
|
||||
.expect("should find op pool")
|
||||
.into_operation_pool();
|
||||
.into_operation_pool()
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(harness.chain.op_pool, restored_op_pool);
|
||||
}
|
||||
@@ -436,23 +509,16 @@ fn attestations_with_increasing_slots() {
|
||||
AttestationStrategy::SomeValidators(vec![]),
|
||||
);
|
||||
|
||||
attestations.append(
|
||||
&mut harness.get_unaggregated_attestations(
|
||||
&AttestationStrategy::AllValidators,
|
||||
&harness.chain.head().expect("should get head").beacon_state,
|
||||
harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block_root,
|
||||
harness
|
||||
.chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_block
|
||||
.slot(),
|
||||
),
|
||||
);
|
||||
let head = harness.chain.head().unwrap();
|
||||
let head_state_root = head.beacon_state_root();
|
||||
|
||||
attestations.extend(harness.get_unaggregated_attestations(
|
||||
&AttestationStrategy::AllValidators,
|
||||
&head.beacon_state,
|
||||
head_state_root,
|
||||
head.beacon_block_root,
|
||||
head.beacon_block.slot(),
|
||||
));
|
||||
|
||||
harness.advance_slot();
|
||||
}
|
||||
@@ -460,7 +526,7 @@ fn attestations_with_increasing_slots() {
|
||||
for (attestation, subnet_id) in attestations.into_iter().flatten() {
|
||||
let res = harness
|
||||
.chain
|
||||
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id);
|
||||
.verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id));
|
||||
|
||||
let current_slot = harness.chain.slot().expect("should get slot");
|
||||
let expected_attestation_slot = attestation.data.slot;
|
||||
@@ -616,3 +682,154 @@ fn produces_and_processes_with_genesis_skip_slots() {
|
||||
run_skip_slot_test(i)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn block_roots_skip_slot_behaviour() {
|
||||
let harness = get_harness(VALIDATOR_COUNT);
|
||||
|
||||
// Test should be longer than the block roots to ensure a DB lookup is triggered.
|
||||
let chain_length = harness
|
||||
.chain
|
||||
.head()
|
||||
.unwrap()
|
||||
.beacon_state
|
||||
.block_roots()
|
||||
.len() as u64
|
||||
* 3;
|
||||
|
||||
let skipped_slots = [1, 6, 7, 10, chain_length];
|
||||
|
||||
// Build a chain with some skip slots.
|
||||
for i in 1..=chain_length {
|
||||
if i > 1 {
|
||||
harness.advance_slot();
|
||||
}
|
||||
|
||||
let slot = harness.chain.slot().unwrap().as_u64();
|
||||
|
||||
if !skipped_slots.contains(&slot) {
|
||||
harness.extend_chain(
|
||||
1,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let mut prev_unskipped_root = None;
|
||||
|
||||
for target_slot in 0..=chain_length {
|
||||
if skipped_slots.contains(&target_slot) {
|
||||
/*
|
||||
* A skip slot
|
||||
*/
|
||||
assert!(
|
||||
harness
|
||||
.chain
|
||||
.block_root_at_slot(target_slot.into(), WhenSlotSkipped::None)
|
||||
.unwrap()
|
||||
.is_none(),
|
||||
"WhenSlotSkipped::None should return None on a skip slot"
|
||||
);
|
||||
|
||||
let skipped_root = harness
|
||||
.chain
|
||||
.block_root_at_slot(target_slot.into(), WhenSlotSkipped::Prev)
|
||||
.unwrap()
|
||||
.expect("WhenSlotSkipped::Prev should always return Some");
|
||||
|
||||
assert_eq!(
|
||||
skipped_root,
|
||||
prev_unskipped_root.expect("test is badly formed"),
|
||||
"WhenSlotSkipped::Prev should accurately return the prior skipped block"
|
||||
);
|
||||
|
||||
let expected_block = harness.chain.get_block(&skipped_root).unwrap().unwrap();
|
||||
|
||||
assert_eq!(
|
||||
harness
|
||||
.chain
|
||||
.block_at_slot(target_slot.into(), WhenSlotSkipped::Prev)
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
expected_block,
|
||||
);
|
||||
|
||||
assert!(
|
||||
harness
|
||||
.chain
|
||||
.block_at_slot(target_slot.into(), WhenSlotSkipped::None)
|
||||
.unwrap()
|
||||
.is_none(),
|
||||
"WhenSlotSkipped::None should return None on a skip slot"
|
||||
);
|
||||
} else {
|
||||
/*
|
||||
* Not a skip slot
|
||||
*/
|
||||
let skips_none = harness
|
||||
.chain
|
||||
.block_root_at_slot(target_slot.into(), WhenSlotSkipped::None)
|
||||
.unwrap()
|
||||
.expect("WhenSlotSkipped::None should return Some for non-skipped block");
|
||||
let skips_prev = harness
|
||||
.chain
|
||||
.block_root_at_slot(target_slot.into(), WhenSlotSkipped::Prev)
|
||||
.unwrap()
|
||||
.expect("WhenSlotSkipped::Prev should always return Some");
|
||||
assert_eq!(
|
||||
skips_none, skips_prev,
|
||||
"WhenSlotSkipped::None and WhenSlotSkipped::Prev should be equal on non-skipped slot"
|
||||
);
|
||||
|
||||
let expected_block = harness.chain.get_block(&skips_prev).unwrap().unwrap();
|
||||
|
||||
assert_eq!(
|
||||
harness
|
||||
.chain
|
||||
.block_at_slot(target_slot.into(), WhenSlotSkipped::Prev)
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
expected_block
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
harness
|
||||
.chain
|
||||
.block_at_slot(target_slot.into(), WhenSlotSkipped::None)
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
expected_block
|
||||
);
|
||||
|
||||
prev_unskipped_root = Some(skips_prev);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* A future, non-existent slot.
|
||||
*/
|
||||
|
||||
let future_slot = harness.chain.slot().unwrap() + 1;
|
||||
assert_eq!(
|
||||
harness.chain.head().unwrap().beacon_block.slot(),
|
||||
future_slot - 2,
|
||||
"test precondition"
|
||||
);
|
||||
assert!(
|
||||
harness
|
||||
.chain
|
||||
.block_root_at_slot(future_slot, WhenSlotSkipped::None)
|
||||
.unwrap()
|
||||
.is_none(),
|
||||
"WhenSlotSkipped::None should return None on a future slot"
|
||||
);
|
||||
assert!(
|
||||
harness
|
||||
.chain
|
||||
.block_root_at_slot(future_slot, WhenSlotSkipped::Prev)
|
||||
.unwrap()
|
||||
.is_none(),
|
||||
"WhenSlotSkipped::Prev should return None on a future slot"
|
||||
);
|
||||
}
|
||||
|
||||
@@ -2,10 +2,9 @@
|
||||
name = "client"
|
||||
version = "0.2.0"
|
||||
authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
|
||||
[dev-dependencies]
|
||||
sloggers = "1.0.0"
|
||||
toml = "0.5.6"
|
||||
|
||||
[dependencies]
|
||||
@@ -13,31 +12,30 @@ beacon_chain = { path = "../beacon_chain" }
|
||||
store = { path = "../store" }
|
||||
network = { path = "../network" }
|
||||
timer = { path = "../timer" }
|
||||
eth2_libp2p = { path = "../eth2_libp2p" }
|
||||
rest_api = { path = "../rest_api" }
|
||||
lighthouse_network = { path = "../lighthouse_network" }
|
||||
parking_lot = "0.11.0"
|
||||
websocket_server = { path = "../websocket_server" }
|
||||
prometheus = "0.9.0"
|
||||
types = { path = "../../consensus/types" }
|
||||
tree_hash = "0.1.0"
|
||||
eth2_config = { path = "../../common/eth2_config" }
|
||||
slot_clock = { path = "../../common/slot_clock" }
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.110"
|
||||
error-chain = "0.12.2"
|
||||
serde_yaml = "0.8.11"
|
||||
serde = "1.0.116"
|
||||
serde_derive = "1.0.116"
|
||||
error-chain = "0.12.4"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
slog-async = "2.5.0"
|
||||
tokio = "0.2.21"
|
||||
dirs = "2.0.2"
|
||||
futures = "0.3.5"
|
||||
reqwest = "0.10.4"
|
||||
url = "2.1.1"
|
||||
tokio = "1.14.0"
|
||||
dirs = "3.0.1"
|
||||
eth1 = { path = "../eth1" }
|
||||
eth2 = { path = "../../common/eth2" }
|
||||
sensitive_url = { path = "../../common/sensitive_url" }
|
||||
genesis = { path = "../genesis" }
|
||||
task_executor = { path = "../../common/task_executor" }
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
eth2_ssz = "0.1.2"
|
||||
lazy_static = "1.4.0"
|
||||
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
|
||||
time = "0.2.16"
|
||||
bus = "2.2.3"
|
||||
time = "0.3.5"
|
||||
directory = {path = "../../common/directory"}
|
||||
http_api = { path = "../http_api" }
|
||||
http_metrics = { path = "../http_metrics" }
|
||||
slasher = { path = "../../slasher" }
|
||||
slasher_service = { path = "../../slasher/service" }
|
||||
monitoring_api = {path = "../../common/monitoring_api"}
|
||||
execution_layer = { path = "../execution_layer" }
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,19 +1,16 @@
|
||||
use directory::DEFAULT_ROOT_DIR;
|
||||
use network::NetworkConfig;
|
||||
use sensitive_url::SensitiveUrl;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use types::Graffiti;
|
||||
|
||||
pub const DEFAULT_DATADIR: &str = ".lighthouse";
|
||||
|
||||
/// The number initial validators when starting the `Minimal`.
|
||||
const TESTNET_SPEC_CONSTANTS: &str = "minimal";
|
||||
use types::{Graffiti, PublicKeyBytes};
|
||||
|
||||
/// Default directory name for the freezer database under the top-level data dir.
|
||||
const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db";
|
||||
|
||||
/// Defines how the client should initialize the `BeaconChain` and other components.
|
||||
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum ClientGenesis {
|
||||
/// Creates a genesis state as per the 2019 Canada interop specifications.
|
||||
Interop {
|
||||
@@ -30,6 +27,15 @@ pub enum ClientGenesis {
|
||||
/// We include the bytes instead of the `BeaconState<E>` because the `EthSpec` type
|
||||
/// parameter would be very annoying.
|
||||
SszBytes { genesis_state_bytes: Vec<u8> },
|
||||
WeakSubjSszBytes {
|
||||
genesis_state_bytes: Vec<u8>,
|
||||
anchor_state_bytes: Vec<u8>,
|
||||
anchor_block_bytes: Vec<u8>,
|
||||
},
|
||||
CheckpointSyncUrl {
|
||||
genesis_state_bytes: Vec<u8>,
|
||||
url: SensitiveUrl,
|
||||
},
|
||||
}
|
||||
|
||||
impl Default for ClientGenesis {
|
||||
@@ -47,103 +53,137 @@ pub struct Config {
|
||||
/// Path where the freezer database will be located.
|
||||
pub freezer_db_path: Option<PathBuf>,
|
||||
pub log_file: PathBuf,
|
||||
pub spec_constants: String,
|
||||
/// If true, the node will use co-ordinated junk for eth1 values.
|
||||
///
|
||||
/// This is the method used for the 2019 client interop in Canada.
|
||||
pub dummy_eth1_backend: bool,
|
||||
pub sync_eth1_chain: bool,
|
||||
/// A list of hard-coded forks that will be disabled.
|
||||
pub disabled_forks: Vec<String>,
|
||||
/// Graffiti to be inserted everytime we create a block.
|
||||
pub graffiti: Graffiti,
|
||||
/// When true, automatically monitor validators using the HTTP API.
|
||||
pub validator_monitor_auto: bool,
|
||||
/// A list of validator pubkeys to monitor.
|
||||
pub validator_monitor_pubkeys: Vec<PublicKeyBytes>,
|
||||
#[serde(skip)]
|
||||
/// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined
|
||||
/// via the CLI at runtime, instead of from a configuration file saved to disk.
|
||||
pub genesis: ClientGenesis,
|
||||
pub store: store::StoreConfig,
|
||||
pub network: network::NetworkConfig,
|
||||
pub rest_api: rest_api::Config,
|
||||
pub websocket_server: websocket_server::Config,
|
||||
pub chain: beacon_chain::ChainConfig,
|
||||
pub eth1: eth1::Config,
|
||||
pub execution_layer: Option<execution_layer::Config>,
|
||||
pub http_api: http_api::Config,
|
||||
pub http_metrics: http_metrics::Config,
|
||||
pub monitoring_api: Option<monitoring_api::Config>,
|
||||
pub slasher: Option<slasher::Config>,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
data_dir: PathBuf::from(DEFAULT_DATADIR),
|
||||
data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
|
||||
db_name: "chain_db".to_string(),
|
||||
freezer_db_path: None,
|
||||
log_file: PathBuf::from(""),
|
||||
genesis: <_>::default(),
|
||||
store: <_>::default(),
|
||||
network: NetworkConfig::default(),
|
||||
rest_api: <_>::default(),
|
||||
websocket_server: <_>::default(),
|
||||
spec_constants: TESTNET_SPEC_CONSTANTS.into(),
|
||||
chain: <_>::default(),
|
||||
dummy_eth1_backend: false,
|
||||
sync_eth1_chain: false,
|
||||
eth1: <_>::default(),
|
||||
disabled_forks: Vec::new(),
|
||||
execution_layer: None,
|
||||
graffiti: Graffiti::default(),
|
||||
http_api: <_>::default(),
|
||||
http_metrics: <_>::default(),
|
||||
monitoring_api: None,
|
||||
slasher: None,
|
||||
validator_monitor_auto: false,
|
||||
validator_monitor_pubkeys: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Config {
|
||||
/// Get the database path without initialising it.
|
||||
pub fn get_db_path(&self) -> Option<PathBuf> {
|
||||
self.get_data_dir()
|
||||
.map(|data_dir| data_dir.join(&self.db_name))
|
||||
pub fn get_db_path(&self) -> PathBuf {
|
||||
self.get_data_dir().join(&self.db_name)
|
||||
}
|
||||
|
||||
/// Get the database path, creating it if necessary.
|
||||
pub fn create_db_path(&self) -> Result<PathBuf, String> {
|
||||
let db_path = self
|
||||
.get_db_path()
|
||||
.ok_or_else(|| "Unable to locate user home directory")?;
|
||||
ensure_dir_exists(db_path)
|
||||
ensure_dir_exists(self.get_db_path())
|
||||
}
|
||||
|
||||
/// Fetch default path to use for the freezer database.
|
||||
fn default_freezer_db_path(&self) -> Option<PathBuf> {
|
||||
self.get_data_dir()
|
||||
.map(|data_dir| data_dir.join(DEFAULT_FREEZER_DB_DIR))
|
||||
fn default_freezer_db_path(&self) -> PathBuf {
|
||||
self.get_data_dir().join(DEFAULT_FREEZER_DB_DIR)
|
||||
}
|
||||
|
||||
/// Returns the path to which the client may initialize the on-disk freezer database.
|
||||
///
|
||||
/// Will attempt to use the user-supplied path from e.g. the CLI, or will default
|
||||
/// to a directory in the data_dir if no path is provided.
|
||||
pub fn get_freezer_db_path(&self) -> Option<PathBuf> {
|
||||
pub fn get_freezer_db_path(&self) -> PathBuf {
|
||||
self.freezer_db_path
|
||||
.clone()
|
||||
.or_else(|| self.default_freezer_db_path())
|
||||
.unwrap_or_else(|| self.default_freezer_db_path())
|
||||
}
|
||||
|
||||
/// Get the freezer DB path, creating it if necessary.
|
||||
pub fn create_freezer_db_path(&self) -> Result<PathBuf, String> {
|
||||
let freezer_db_path = self
|
||||
.get_freezer_db_path()
|
||||
.ok_or_else(|| "Unable to locate user home directory")?;
|
||||
ensure_dir_exists(freezer_db_path)
|
||||
ensure_dir_exists(self.get_freezer_db_path())
|
||||
}
|
||||
|
||||
/// Returns the "modern" path to the data_dir.
|
||||
///
|
||||
/// See `Self::get_data_dir` documentation for more info.
|
||||
fn get_modern_data_dir(&self) -> PathBuf {
|
||||
self.data_dir.clone()
|
||||
}
|
||||
|
||||
/// Returns the "legacy" path to the data_dir.
|
||||
///
|
||||
/// See `Self::get_data_dir` documentation for more info.
|
||||
pub fn get_existing_legacy_data_dir(&self) -> Option<PathBuf> {
|
||||
dirs::home_dir()
|
||||
.map(|home_dir| home_dir.join(&self.data_dir))
|
||||
// Return `None` if the directory does not exists.
|
||||
.filter(|dir| dir.exists())
|
||||
// Return `None` if the legacy directory is identical to the modern.
|
||||
.filter(|dir| *dir != self.get_modern_data_dir())
|
||||
}
|
||||
|
||||
/// Returns the core path for the client.
|
||||
///
|
||||
/// Will not create any directories.
|
||||
pub fn get_data_dir(&self) -> Option<PathBuf> {
|
||||
dirs::home_dir().map(|home_dir| home_dir.join(&self.data_dir))
|
||||
///
|
||||
/// ## Legacy Info
|
||||
///
|
||||
/// Legacy versions of Lighthouse did not properly handle relative paths for `--datadir`.
|
||||
///
|
||||
/// For backwards compatibility, we still compute the legacy path and check if it exists. If
|
||||
/// it does exist, we use that directory rather than the modern path.
|
||||
///
|
||||
/// For more information, see:
|
||||
///
|
||||
/// https://github.com/sigp/lighthouse/pull/2843
|
||||
fn get_data_dir(&self) -> PathBuf {
|
||||
let existing_legacy_dir = self.get_existing_legacy_data_dir();
|
||||
|
||||
if let Some(legacy_dir) = existing_legacy_dir {
|
||||
legacy_dir
|
||||
} else {
|
||||
self.get_modern_data_dir()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the core path for the client.
|
||||
///
|
||||
/// Creates the directory if it does not exist.
|
||||
pub fn create_data_dir(&self) -> Result<PathBuf, String> {
|
||||
let path = self
|
||||
.get_data_dir()
|
||||
.ok_or_else(|| "Unable to locate user home directory".to_string())?;
|
||||
ensure_dir_exists(path)
|
||||
ensure_dir_exists(self.get_data_dir())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -156,7 +196,6 @@ fn ensure_dir_exists(path: PathBuf) -> Result<PathBuf, String> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use toml;
|
||||
|
||||
#[test]
|
||||
fn serde() {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user