Compare commits

...

5 Commits

Author SHA1 Message Date
Paul Hauner
967700c1ff Bump version to v0.2.8 (#1572)
## Issue Addressed

NA

## Proposed Changes

- Bump versions
- Run `cargo update`

## Additional Info

NA
2020-08-27 07:04:12 +00:00
Adam Szkoda
d9f4819fe0 Alternative (to BeaconChainHarness) BeaconChain testing API (#1380)
The PR:

* Adds the ability to generate a crucial test scenario that isn't possible with `BeaconChainHarness` (i.e. two blocks occupying the same slot; previously forks necessitated skipping slots):

![image](https://user-images.githubusercontent.com/165678/88195404-4bce3580-cc40-11ea-8c08-b48d2e1d5959.png)

* New testing API: Instead of repeatedly calling add_block(), you generate a sorted `Vec<Slot>` and leave it up to the framework to generate blocks at those slots.
* Jumping backwards to an earlier epoch is a hard error, so that tests necessarily generate blocks in an epoch-by-epoch manner.
* Configures the test logger so that output is printed on the console in case a test fails.  The logger also plays well with `--nocapture`, contrary to the existing testing framework
* Rewrites existing fork pruning tests to use the new API
* Adds a test that triggers finalization at a non epoch boundary slot
* Renamed `BeaconChainYoke` to `BeaconChainTestingRig` because the former has been too confusing
* Fixed multiple tests (e.g. `block_production_different_shuffling_long`, `delete_blocks_and_states`, `shuffling_compatible_simple_fork`) that relied on a weird (and accidental) feature of the old `BeaconChainHarness` that attestations aren't produced for epochs earlier than the current one, thus masking potential bugs in test cases.

Co-authored-by: Michael Sproul <michael@sigmaprime.io>
2020-08-26 09:24:55 +00:00
Michael Sproul
30bb7aecfb Check Cargo.lock freshness on CI (#1565)
Check that `Cargo.lock` is up-to-date on CI so we're not having to push messy lockfile fix ups after releases.
2020-08-26 00:01:08 +00:00
Michael Sproul
4763f03dcc Fix bug in database pruning (#1564)
## Issue Addressed

Closes #1488

## Proposed Changes

* Prevent the pruning algorithm from over-eagerly deleting states at skipped slots when they are shared with the canonical chain.
* Add `debug` logging to the pruning algorithm so we have a better chance of debugging future issues from logs.
* Modify the handling of the "finalized state" in the beacon chain, so that it's always the state at the first slot of the finalized epoch (previously it was the state at the finalized block). This gives database pruning a clearer and cleaner view of things, and will marginally impact the pruning of the op pool, observed proposers, etc (in ways that are safe as far as I can tell).
* Remove duplicated `RevertedFinalizedEpoch` check from `after_finalization`
* Delete useless and unused `max_finality_distance`
* Add tests that exercise pruning with shared states at skip slots
* Delete unnecessary `block_strategy` argument from `add_blocks` and friends in the test harness (will likely conflict with #1380 slightly, sorry @adaszko -- but we can fix that)
* Bonus: add a `BeaconChain::with_head` method. I didn't end up needing it, but it turned out quite nice, so I figured we could keep it?

## Additional Info

Any users who have experienced pruning errors on Medalla will need to resync after upgrading to a release including this change. This should end unbounded `chain_db` growth! 🎉
2020-08-26 00:01:06 +00:00
Pawan Dhananjay
175471a64b Fix order of testnet config load (#1558)
## Issue Addressed

Fixes #1552 

## Proposed Changes

Earlier, we were always loading the hardcoded default testnet config which is a mainnet spec. So running lighthouse with `--spec` option anything other than mainnet gave errors because we tried loading a mainnet genesis spec with `minimal`/`interop` flags.

This PR fixes the order of loading such that we load the hardcoded default spec only if neither the `--testnet` nor `--testnet-dir` flag is present.
2020-08-25 06:01:42 +00:00
27 changed files with 1858 additions and 1125 deletions

View File

@@ -123,6 +123,8 @@ jobs:
- uses: actions/checkout@v1
- name: Lint code for quality and style with Clippy
run: make lint
- name: Certify Cargo.lock freshness
run: git diff --exit-code Cargo.lock
arbitrary-check:
name: arbitrary-check
runs-on: ubuntu-latest

174
Cargo.lock generated
View File

@@ -2,7 +2,7 @@
# It is not intended for manual editing.
[[package]]
name = "account_manager"
version = "0.2.6"
version = "0.2.8"
dependencies = [
"account_utils",
"bls",
@@ -182,9 +182,9 @@ checksum = "6b602bfe940d21c130f3895acd65221e8a61270debe89d628b9cb4e3ccb8569b"
[[package]]
name = "arbitrary"
version = "0.4.5"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7cb544f1057eaaff4b34f8c4dcf56fc3cd04debd291998405d135017a7c3c0f4"
checksum = "0922a3e746b5a44e111e5603feb6704e5cc959116f66737f50bb5cbd264e9d87"
dependencies = [
"derive_arbitrary",
]
@@ -346,11 +346,13 @@ dependencies = [
"lighthouse_metrics",
"log 0.4.11",
"lru",
"maplit",
"merkle_proof",
"operation_pool",
"parking_lot 0.11.0",
"proto_array",
"rand 0.7.3",
"rand_core 0.5.1",
"rayon",
"safe_arith",
"serde",
@@ -373,7 +375,7 @@ dependencies = [
[[package]]
name = "beacon_node"
version = "0.2.6"
version = "0.2.8"
dependencies = [
"beacon_chain",
"clap",
@@ -460,13 +462,28 @@ dependencies = [
"constant_time_eq",
]
[[package]]
name = "blake3"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce4f9586c9a3151c4b49b19e82ba163dd073614dd057e53c969e1a4db5b52720"
dependencies = [
"arrayref",
"arrayvec",
"cc",
"cfg-if",
"constant_time_eq",
"crypto-mac 0.8.0",
"digest 0.9.0",
]
[[package]]
name = "block-buffer"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b"
dependencies = [
"block-padding",
"block-padding 0.1.5",
"byte-tools",
"byteorder",
"generic-array 0.12.3",
@@ -478,6 +495,7 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
dependencies = [
"block-padding 0.2.1",
"generic-array 0.14.4",
]
@@ -499,6 +517,12 @@ dependencies = [
"byte-tools",
]
[[package]]
name = "block-padding"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae"
[[package]]
name = "bls"
version = "0.2.0"
@@ -530,7 +554,7 @@ dependencies = [
[[package]]
name = "boot_node"
version = "0.2.6"
version = "0.2.8"
dependencies = [
"beacon_node",
"clap",
@@ -692,7 +716,7 @@ checksum = "942f72db697d8767c22d46a598e01f2d3b475501ea43d0db4f16d90259182d0b"
dependencies = [
"num-integer",
"num-traits",
"time 0.1.43",
"time 0.1.44",
]
[[package]]
@@ -1142,9 +1166,9 @@ dependencies = [
[[package]]
name = "derive_arbitrary"
version = "0.4.5"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b43185d3e7ce7dcd44a23ca761ec026359753ebf480283a571e6463853d2ef"
checksum = "d0f7c6c81276b6b8702074defbdb1938933ddf98c7f7e0dca8d9e9214dd6c730"
dependencies = [
"proc-macro2",
"quote",
@@ -1304,9 +1328,9 @@ checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f"
[[package]]
name = "encoding_rs"
version = "0.8.23"
version = "0.8.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171"
checksum = "a51b8cf747471cb9499b6d59e59b0444f4c90eba8968c4e44874e92b5b64ace2"
dependencies = [
"cfg-if",
]
@@ -1526,7 +1550,7 @@ dependencies = [
"tokio-io-timeout",
"tokio-util",
"types",
"unsigned-varint 0.3.3 (git+https://github.com/sigp/unsigned-varint?branch=latest-codecs)",
"unsigned-varint 0.3.3",
"void",
]
@@ -1992,7 +2016,7 @@ checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
dependencies = [
"cfg-if",
"libc",
"wasi",
"wasi 0.9.0+wasi-snapshot-preview1",
]
[[package]]
@@ -2270,7 +2294,7 @@ dependencies = [
"log 0.3.9",
"mime 0.2.6",
"num_cpus",
"time 0.1.43",
"time 0.1.44",
"traitobject",
"typeable",
"unicase 1.4.2",
@@ -2295,7 +2319,7 @@ dependencies = [
"log 0.4.11",
"net2",
"rustc_version",
"time 0.1.43",
"time 0.1.44",
"tokio 0.1.22",
"tokio-buf",
"tokio-executor",
@@ -2324,7 +2348,7 @@ dependencies = [
"itoa",
"pin-project",
"socket2",
"time 0.1.43",
"time 0.1.44",
"tokio 0.2.22",
"tower-service",
"tracing",
@@ -2537,7 +2561,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "lcli"
version = "0.2.6"
version = "0.2.8"
dependencies = [
"bls",
"clap",
@@ -2894,7 +2918,7 @@ dependencies = [
[[package]]
name = "lighthouse"
version = "0.2.6"
version = "0.2.8"
dependencies = [
"account_manager",
"account_utils",
@@ -3112,9 +3136,9 @@ dependencies = [
[[package]]
name = "miniz_oxide"
version = "0.4.0"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f"
checksum = "4d7559a8a40d0f97e1edea3220f698f78b1c5ab67532e49f68fde3910323b722"
dependencies = [
"adler",
]
@@ -3197,24 +3221,25 @@ dependencies = [
[[package]]
name = "multihash"
version = "0.11.2"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f75db05d738947aa5389863aadafbcf2e509d7ba099dc2ddcdf4fc66bf7a9e03"
checksum = "51cc1552a982658478dbc22eefb72bb1d4fd1161eb9818f7bbf4347443f07569"
dependencies = [
"blake2b_simd",
"blake2s_simd",
"digest 0.8.1",
"sha-1",
"sha2 0.8.2",
"blake3",
"digest 0.9.0",
"sha-1 0.9.1",
"sha2 0.9.1",
"sha3",
"unsigned-varint 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"unsigned-varint 0.5.0",
]
[[package]]
name = "multimap"
version = "0.8.1"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce"
checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333"
[[package]]
name = "multistream-select"
@@ -3704,7 +3729,7 @@ checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d"
dependencies = [
"maplit",
"pest",
"sha-1",
"sha-1 0.8.2",
]
[[package]]
@@ -4121,9 +4146,9 @@ dependencies = [
[[package]]
name = "rayon"
version = "1.3.1"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080"
checksum = "cfd016f0c045ad38b5251be2c9c0ab806917f82da4d36b2a327e5166adad9270"
dependencies = [
"autocfg 1.0.1",
"crossbeam-deque",
@@ -4133,12 +4158,12 @@ dependencies = [
[[package]]
name = "rayon-core"
version = "1.7.1"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280"
checksum = "91739a34c4355b5434ce54c9086c5895604a9c278586d1f1aa95e04f66b525a0"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-queue",
"crossbeam-utils",
"lazy_static",
"num_cpus",
@@ -4161,9 +4186,9 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
[[package]]
name = "redox_users"
version = "0.3.4"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431"
checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d"
dependencies = [
"getrandom",
"redox_syscall",
@@ -4226,9 +4251,9 @@ dependencies = [
[[package]]
name = "reqwest"
version = "0.10.7"
version = "0.10.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12427a5577082c24419c9c417db35cfeb65962efc7675bb6b0d5f1f9d315bfe6"
checksum = "e9eaa17ac5d7b838b7503d118fa16ad88f440498bf9ffe5424e621f93190d61e"
dependencies = [
"base64 0.12.3",
"bytes 0.5.6",
@@ -4380,16 +4405,16 @@ dependencies = [
"lru-cache",
"memchr",
"smallvec 1.4.2",
"time 0.1.43",
"time 0.1.44",
]
[[package]]
name = "rust-argon2"
version = "0.7.0"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017"
checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19"
dependencies = [
"base64 0.11.0",
"base64 0.12.3",
"blake2b_simd",
"constant_time_eq",
"crossbeam-utils",
@@ -4687,6 +4712,19 @@ dependencies = [
"opaque-debug 0.2.3",
]
[[package]]
name = "sha-1"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "170a36ea86c864a3f16dd2687712dd6646f7019f301e57537c7f4dc9f5916770"
dependencies = [
"block-buffer 0.9.0",
"cfg-if",
"cpuid-bool",
"digest 0.9.0",
"opaque-debug 0.3.0",
]
[[package]]
name = "sha1"
version = "0.6.0"
@@ -4720,15 +4758,14 @@ dependencies = [
[[package]]
name = "sha3"
version = "0.8.2"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf"
checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809"
dependencies = [
"block-buffer 0.7.3",
"byte-tools",
"digest 0.8.1",
"block-buffer 0.9.0",
"digest 0.9.0",
"keccak",
"opaque-debug 0.2.3",
"opaque-debug 0.3.0",
]
[[package]]
@@ -4992,7 +5029,7 @@ dependencies = [
"httparse",
"log 0.4.11",
"rand 0.7.3",
"sha-1",
"sha-1 0.8.2",
]
[[package]]
@@ -5003,9 +5040,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
[[package]]
name = "standback"
version = "0.2.9"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246"
checksum = "33a71ea1ea5f8747d1af1979bfb7e65c3a025a70609f04ceb78425bc5adad8e6"
dependencies = [
"version_check 0.9.2",
]
@@ -5174,9 +5211,9 @@ dependencies = [
[[package]]
name = "syn"
version = "1.0.38"
version = "1.0.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4"
checksum = "891d8d6567fe7c7f8835a3a98af4208f3846fba258c1bc3c31d6e506239f11f9"
dependencies = [
"proc-macro2",
"quote",
@@ -5307,11 +5344,12 @@ dependencies = [
[[package]]
name = "time"
version = "0.1.43"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438"
checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
dependencies = [
"libc",
"wasi 0.10.0+wasi-snapshot-preview1",
"winapi 0.3.9",
]
@@ -5765,9 +5803,9 @@ dependencies = [
[[package]]
name = "tracing-core"
version = "0.1.14"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db63662723c316b43ca36d833707cc93dff82a02ba3d7e354f342682cc8b3545"
checksum = "4f0e00789804e99b20f12bc7003ca416309d28a6f495d6af58d1e2c2842461b5"
dependencies = [
"lazy_static",
]
@@ -5980,12 +6018,6 @@ dependencies = [
"tokio-util",
]
[[package]]
name = "unsigned-varint"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f67332660eb59a6f1eb24ff1220c9e8d01738a8503c6002e30bcfe4bd9f2b4a9"
[[package]]
name = "unsigned-varint"
version = "0.4.0"
@@ -5996,6 +6028,12 @@ dependencies = [
"futures_codec",
]
[[package]]
name = "unsigned-varint"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a98e44fc6af1e18c3a06666d829b4fd8d2714fb2dbffe8ab99d5dc7ea6baa628"
[[package]]
name = "untrusted"
version = "0.7.1"
@@ -6036,7 +6074,7 @@ dependencies = [
[[package]]
name = "validator_client"
version = "0.2.6"
version = "0.2.8"
dependencies = [
"account_utils",
"bls",
@@ -6159,6 +6197,12 @@ version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "wasi"
version = "0.10.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
[[package]]
name = "wasm-bindgen"
version = "0.2.67"
@@ -6449,7 +6493,7 @@ dependencies = [
"mio",
"mio-extras",
"rand 0.7.3",
"sha-1",
"sha-1 0.8.2",
"slab 0.4.2",
"url 2.1.1",
]

View File

@@ -1,6 +1,6 @@
[package]
name = "account_manager"
version = "0.2.7"
version = "0.2.8"
authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
edition = "2018"

View File

@@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "0.2.7"
version = "0.2.8"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
edition = "2018"

View File

@@ -11,6 +11,7 @@ participation_metrics = [] # Exposes validator participation metrics to Prometh
[dev-dependencies]
int_to_bytes = { path = "../../consensus/int_to_bytes" }
maplit = "1.0.2"
[dependencies]
eth2_config = { path = "../../common/eth2_config" }
@@ -45,6 +46,7 @@ futures = "0.3.5"
genesis = { path = "../genesis" }
integer-sqrt = "0.1.3"
rand = "0.7.3"
rand_core = "0.5.1"
proto_array = { path = "../../consensus/proto_array" }
lru = "0.5.1"
tempfile = "3.1.0"

View File

@@ -426,6 +426,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(iter)
}
/// As for `rev_iter_state_roots` but starting from an arbitrary `BeaconState`.
pub fn rev_iter_state_roots_from<'a>(
&self,
state_root: Hash256,
state: &'a BeaconState<T::EthSpec>,
) -> impl Iterator<Item = Result<(Hash256, Slot), Error>> + 'a {
std::iter::once(Ok((state_root, state.slot)))
.chain(StateRootsIterator::new(self.store.clone(), state))
.map(|result| result.map_err(Into::into))
}
/// Returns the block at the given slot, if any. Only returns blocks in the canonical chain.
///
/// ## Errors
@@ -479,30 +490,36 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// is the state as it was when the head block was received, which could be some slots prior to
/// now.
pub fn head(&self) -> Result<BeaconSnapshot<T::EthSpec>, Error> {
self.canonical_head
self.with_head(|head| Ok(head.clone_with_only_committee_caches()))
}
/// Apply a function to the canonical head without cloning it.
pub fn with_head<U>(
&self,
f: impl FnOnce(&BeaconSnapshot<T::EthSpec>) -> Result<U, Error>,
) -> Result<U, Error> {
let head_lock = self
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| Error::CanonicalHeadLockTimeout)
.map(|v| v.clone_with_only_committee_caches())
.ok_or_else(|| Error::CanonicalHeadLockTimeout)?;
f(&head_lock)
}
/// Returns info representing the head block and state.
///
/// A summarized version of `Self::head` that involves less cloning.
pub fn head_info(&self) -> Result<HeadInfo, Error> {
let head = self
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| Error::CanonicalHeadLockTimeout)?;
Ok(HeadInfo {
slot: head.beacon_block.slot(),
block_root: head.beacon_block_root,
state_root: head.beacon_state_root,
current_justified_checkpoint: head.beacon_state.current_justified_checkpoint,
finalized_checkpoint: head.beacon_state.finalized_checkpoint,
fork: head.beacon_state.fork,
genesis_time: head.beacon_state.genesis_time,
genesis_validators_root: head.beacon_state.genesis_validators_root,
self.with_head(|head| {
Ok(HeadInfo {
slot: head.beacon_block.slot(),
block_root: head.beacon_block_root,
state_root: head.beacon_state_root,
current_justified_checkpoint: head.beacon_state.current_justified_checkpoint,
finalized_checkpoint: head.beacon_state.finalized_checkpoint,
fork: head.beacon_state.fork,
genesis_time: head.beacon_state.genesis_time,
genesis_validators_root: head.beacon_state.genesis_validators_root,
})
})
}
@@ -1746,7 +1763,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let beacon_block_root = self.fork_choice.write().get_head(self.slot()?)?;
let current_head = self.head_info()?;
let old_finalized_root = current_head.finalized_checkpoint.root;
let old_finalized_checkpoint = current_head.finalized_checkpoint;
if beacon_block_root == current_head.block_root {
return Ok(());
@@ -1826,15 +1843,32 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
);
};
let old_finalized_epoch = current_head.finalized_checkpoint.epoch;
let new_finalized_epoch = new_head.beacon_state.finalized_checkpoint.epoch;
let finalized_root = new_head.beacon_state.finalized_checkpoint.root;
let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint;
// State root of the finalized state on the epoch boundary, NOT the state
// of the finalized block. We need to use an iterator in case the state is beyond
// the reach of the new head's `state_roots` array.
let new_finalized_slot = new_finalized_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch());
let new_finalized_state_root = process_results(
StateRootsIterator::new(self.store.clone(), &new_head.beacon_state),
|mut iter| {
iter.find_map(|(state_root, slot)| {
if slot == new_finalized_slot {
Some(state_root)
} else {
None
}
})
},
)?
.ok_or_else(|| Error::MissingFinalizedStateRoot(new_finalized_slot))?;
// It is an error to try to update to a head with a lesser finalized epoch.
if new_finalized_epoch < old_finalized_epoch {
if new_finalized_checkpoint.epoch < old_finalized_checkpoint.epoch {
return Err(Error::RevertedFinalizedEpoch {
previous_epoch: old_finalized_epoch,
new_epoch: new_finalized_epoch,
previous_epoch: old_finalized_checkpoint.epoch,
new_epoch: new_finalized_checkpoint.epoch,
});
}
@@ -1873,11 +1907,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
);
});
if new_finalized_epoch != old_finalized_epoch {
if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch {
self.after_finalization(
old_finalized_epoch,
finalized_root,
old_finalized_root.into(),
old_finalized_checkpoint,
new_finalized_checkpoint,
new_finalized_state_root,
)?;
}
@@ -1905,68 +1939,53 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Performs pruning and finality-based optimizations.
fn after_finalization(
&self,
old_finalized_epoch: Epoch,
finalized_block_root: Hash256,
old_finalized_root: SignedBeaconBlockHash,
old_finalized_checkpoint: Checkpoint,
new_finalized_checkpoint: Checkpoint,
new_finalized_state_root: Hash256,
) -> Result<(), Error> {
let finalized_block = self
.store
.get_block(&finalized_block_root)?
.ok_or_else(|| Error::MissingBeaconBlock(finalized_block_root))?
.message;
self.fork_choice.write().prune()?;
let new_finalized_epoch = finalized_block.slot.epoch(T::EthSpec::slots_per_epoch());
self.observed_block_producers.prune(
new_finalized_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch()),
);
if new_finalized_epoch < old_finalized_epoch {
Err(Error::RevertedFinalizedEpoch {
previous_epoch: old_finalized_epoch,
new_epoch: new_finalized_epoch,
self.snapshot_cache
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
.map(|mut snapshot_cache| {
snapshot_cache.prune(new_finalized_checkpoint.epoch);
})
} else {
self.fork_choice.write().prune()?;
self.observed_block_producers
.prune(new_finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()));
self.snapshot_cache
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
.map(|mut snapshot_cache| {
snapshot_cache.prune(new_finalized_epoch);
})
.unwrap_or_else(|| {
error!(
self.log,
"Failed to obtain cache write lock";
"lock" => "snapshot_cache",
"task" => "prune"
);
});
let finalized_state = self
.get_state(&finalized_block.state_root, Some(finalized_block.slot))?
.ok_or_else(|| Error::MissingBeaconState(finalized_block.state_root))?;
self.op_pool
.prune_all(&finalized_state, self.head_info()?.fork);
// TODO: configurable max finality distance
let max_finality_distance = 0;
self.store_migrator.process_finalization(
finalized_block.state_root,
finalized_state,
max_finality_distance,
Arc::clone(&self.head_tracker),
old_finalized_root,
finalized_block_root.into(),
);
let _ = self.event_handler.register(EventKind::BeaconFinalization {
epoch: new_finalized_epoch,
root: finalized_block_root,
.unwrap_or_else(|| {
error!(
self.log,
"Failed to obtain cache write lock";
"lock" => "snapshot_cache",
"task" => "prune"
);
});
Ok(())
}
let finalized_state = self
.get_state(&new_finalized_state_root, None)?
.ok_or_else(|| Error::MissingBeaconState(new_finalized_state_root))?;
self.op_pool
.prune_all(&finalized_state, self.head_info()?.fork);
self.store_migrator.process_finalization(
new_finalized_state_root.into(),
finalized_state,
self.head_tracker.clone(),
old_finalized_checkpoint,
new_finalized_checkpoint,
)?;
let _ = self.event_handler.register(EventKind::BeaconFinalization {
epoch: new_finalized_checkpoint.epoch,
root: new_finalized_checkpoint.root,
});
Ok(())
}
/// Returns `true` if the given block root has not been processed.
@@ -2051,10 +2070,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.beacon_block_root;
let mut visited: HashSet<Hash256> = HashSet::new();
let mut finalized_blocks: HashSet<Hash256> = HashSet::new();
let mut justified_blocks: HashSet<Hash256> = HashSet::new();
let genesis_block_hash = Hash256::zero();
writeln!(output, "digraph beacon {{").unwrap();
writeln!(output, "\t_{:?}[label=\"genesis\"];", genesis_block_hash).unwrap();
writeln!(output, "\t_{:?}[label=\"zero\"];", genesis_block_hash).unwrap();
// Canonical head needs to be processed first as otherwise finalized blocks aren't detected
// properly.
@@ -2085,6 +2105,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.unwrap()
.unwrap();
finalized_blocks.insert(state.finalized_checkpoint.root);
justified_blocks.insert(state.current_justified_checkpoint.root);
justified_blocks.insert(state.previous_justified_checkpoint.root);
}
if block_hash == canonical_head_hash {
@@ -2105,6 +2127,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
signed_beacon_block.slot()
)
.unwrap();
} else if justified_blocks.contains(&block_hash) {
writeln!(
output,
"\t_{:?}[label=\"{} ({})\" shape=cds];",
block_hash,
block_hash,
signed_beacon_block.slot()
)
.unwrap();
} else {
writeln!(
output,
@@ -2134,6 +2165,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut file = std::fs::File::create(file_name).unwrap();
self.dump_as_dot(&mut file);
}
// Should be used in tests only
pub fn set_graffiti(&mut self, graffiti: Graffiti) {
self.graffiti = graffiti;
}
}
impl<T: BeaconChainTypes> Drop for BeaconChain<T> {

View File

@@ -1,5 +1,6 @@
use crate::beacon_chain::ForkChoiceError;
use crate::eth1_chain::Error as Eth1ChainError;
use crate::migrate::PruningError;
use crate::naive_aggregation_pool::Error as NaiveAggregationError;
use crate::observed_attestations::Error as ObservedAttestationsError;
use crate::observed_attesters::Error as ObservedAttestersError;
@@ -61,6 +62,7 @@ pub enum BeaconChainError {
requested_slot: Slot,
max_task_runtime: Duration,
},
MissingFinalizedStateRoot(Slot),
/// Returned when an internal check fails, indicating corrupt data.
InvariantViolated(String),
SszTypesError(SszTypesError),
@@ -79,6 +81,7 @@ pub enum BeaconChainError {
ObservedAttestationsError(ObservedAttestationsError),
ObservedAttestersError(ObservedAttestersError),
ObservedBlockProducersError(ObservedBlockProducersError),
PruningError(PruningError),
ArithError(ArithError),
}
@@ -94,6 +97,7 @@ easy_from_to!(ObservedAttestationsError, BeaconChainError);
easy_from_to!(ObservedAttestersError, BeaconChainError);
easy_from_to!(ObservedBlockProducersError, BeaconChainError);
easy_from_to!(BlockSignatureVerifierError, BeaconChainError);
easy_from_to!(PruningError, BeaconChainError);
easy_from_to!(ArithError, BeaconChainError);
#[derive(Debug)]

View File

@@ -2,6 +2,10 @@
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate slog;
extern crate slog_term;
pub mod attestation_verification;
mod beacon_chain;
mod beacon_fork_choice_store;

View File

@@ -7,12 +7,28 @@ use std::mem;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use store::hot_cold_store::{process_finalization, HotColdDBError};
use store::iter::{ParentRootBlockIterator, RootsIterator};
use store::hot_cold_store::{migrate_database, HotColdDBError};
use store::iter::RootsIterator;
use store::{Error, ItemStore, StoreOp};
pub use store::{HotColdDB, MemoryStore};
use types::*;
use types::{BeaconState, EthSpec, Hash256, Slot};
use types::{
BeaconState, BeaconStateError, BeaconStateHash, Checkpoint, EthSpec, Hash256,
SignedBeaconBlockHash, Slot,
};
/// Logic errors that can occur during pruning, none of these should ever happen.
#[derive(Debug)]
pub enum PruningError {
IncorrectFinalizedState {
state_slot: Slot,
new_finalized_slot: Slot,
},
MissingInfoForCanonicalChain {
slot: Slot,
},
UnexpectedEqualStateRoots,
UnexpectedUnequalStateRoots,
}
/// Trait for migration processes that update the database upon finalization.
pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
@@ -22,17 +38,17 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
fn process_finalization(
&self,
_state_root: Hash256,
_finalized_state_root: BeaconStateHash,
_new_finalized_state: BeaconState<E>,
_max_finality_distance: u64,
_head_tracker: Arc<HeadTracker>,
_old_finalized_block_hash: SignedBeaconBlockHash,
_new_finalized_block_hash: SignedBeaconBlockHash,
) {
_old_finalized_checkpoint: Checkpoint,
_new_finalized_checkpoint: Checkpoint,
) -> Result<(), BeaconChainError> {
Ok(())
}
/// Traverses live heads and prunes blocks and states of chains that we know can't be built
/// upon because finalization would prohibit it. This is an optimisation intended to save disk
/// upon because finalization would prohibit it. This is an optimisation intended to save disk
/// space.
///
/// Assumptions:
@@ -40,37 +56,63 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
fn prune_abandoned_forks(
store: Arc<HotColdDB<E, Hot, Cold>>,
head_tracker: Arc<HeadTracker>,
old_finalized_block_hash: SignedBeaconBlockHash,
new_finalized_block_hash: SignedBeaconBlockHash,
new_finalized_slot: Slot,
new_finalized_state_hash: BeaconStateHash,
new_finalized_state: &BeaconState<E>,
old_finalized_checkpoint: Checkpoint,
new_finalized_checkpoint: Checkpoint,
log: &Logger,
) -> Result<(), BeaconChainError> {
// There will never be any blocks to prune if there is only a single head in the chain.
if head_tracker.heads().len() == 1 {
return Ok(());
}
let old_finalized_slot = store
.get_block(&old_finalized_block_hash.into())?
.ok_or_else(|| BeaconChainError::MissingBeaconBlock(old_finalized_block_hash.into()))?
.slot();
let old_finalized_slot = old_finalized_checkpoint
.epoch
.start_slot(E::slots_per_epoch());
let new_finalized_slot = new_finalized_checkpoint
.epoch
.start_slot(E::slots_per_epoch());
let new_finalized_block_hash = new_finalized_checkpoint.root.into();
// Collect hashes from new_finalized_block back to old_finalized_block (inclusive)
let mut found_block = false; // hack for `take_until`
let newly_finalized_blocks: HashMap<SignedBeaconBlockHash, Slot> =
ParentRootBlockIterator::new(&*store, new_finalized_block_hash.into())
.take_while(|result| match result {
Ok((block_hash, _)) => {
if found_block {
false
} else {
found_block |= *block_hash == old_finalized_block_hash.into();
true
}
}
Err(_) => true,
})
.map(|result| result.map(|(block_hash, block)| (block_hash.into(), block.slot())))
.collect::<Result<_, _>>()?;
// The finalized state must be for the epoch boundary slot, not the slot of the finalized
// block.
if new_finalized_state.slot != new_finalized_slot {
return Err(PruningError::IncorrectFinalizedState {
state_slot: new_finalized_state.slot,
new_finalized_slot,
}
.into());
}
debug!(
log,
"Starting database pruning";
"old_finalized_epoch" => old_finalized_checkpoint.epoch,
"old_finalized_root" => format!("{:?}", old_finalized_checkpoint.root),
"new_finalized_epoch" => new_finalized_checkpoint.epoch,
"new_finalized_root" => format!("{:?}", new_finalized_checkpoint.root),
);
// For each slot between the new finalized checkpoint and the old finalized checkpoint,
// collect the beacon block root and state root of the canonical chain.
let newly_finalized_chain: HashMap<Slot, (SignedBeaconBlockHash, BeaconStateHash)> =
std::iter::once(Ok((
new_finalized_slot,
(new_finalized_block_hash, new_finalized_state_hash),
)))
.chain(
RootsIterator::new(store.clone(), new_finalized_state).map(|res| {
res.map(|(block_root, state_root, slot)| {
(slot, (block_root.into(), state_root.into()))
})
}),
)
.take_while(|res| {
res.as_ref()
.map_or(true, |(slot, _)| *slot >= old_finalized_slot)
})
.collect::<Result<_, _>>()?;
// We don't know which blocks are shared among abandoned chains, so we buffer and delete
// everything in one fell swoop.
@@ -79,75 +121,110 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
let mut abandoned_heads: HashSet<Hash256> = HashSet::new();
for (head_hash, head_slot) in head_tracker.heads() {
let mut potentially_abandoned_head: Option<Hash256> = Some(head_hash);
let mut potentially_abandoned_blocks: Vec<(
Slot,
Option<SignedBeaconBlockHash>,
Option<BeaconStateHash>,
)> = Vec::new();
let mut potentially_abandoned_head = Some(head_hash);
let mut potentially_abandoned_blocks = vec![];
let head_state_hash = store
.get_block(&head_hash)?
.ok_or_else(|| BeaconStateError::MissingBeaconBlock(head_hash.into()))?
.state_root();
// Iterate backwards from this head, staging blocks and states for deletion.
let iter = std::iter::once(Ok((head_hash, head_state_hash, head_slot)))
.chain(RootsIterator::from_block(Arc::clone(&store), head_hash)?);
.chain(RootsIterator::from_block(store.clone(), head_hash)?);
for maybe_tuple in iter {
let (block_hash, state_hash, slot) = maybe_tuple?;
if slot < old_finalized_slot {
// We must assume here any candidate chains include old_finalized_block_hash,
// i.e. there aren't any forks starting at a block that is a strict ancestor of
// old_finalized_block_hash.
break;
}
match newly_finalized_blocks.get(&block_hash.into()).copied() {
// Block is not finalized, mark it and its state for deletion
let (block_root, state_root, slot) = maybe_tuple?;
let block_root = SignedBeaconBlockHash::from(block_root);
let state_root = BeaconStateHash::from(state_root);
match newly_finalized_chain.get(&slot) {
// If there's no information about a slot on the finalized chain, then
// it should be because it's ahead of the new finalized slot. Stage
// the fork's block and state for possible deletion.
None => {
potentially_abandoned_blocks.push((
slot,
Some(block_hash.into()),
Some(state_hash.into()),
));
if slot > new_finalized_slot {
potentially_abandoned_blocks.push((
slot,
Some(block_root),
Some(state_root),
));
} else if slot >= old_finalized_slot {
return Err(PruningError::MissingInfoForCanonicalChain { slot }.into());
} else {
// We must assume here any candidate chains include the old finalized
// checkpoint, i.e. there aren't any forks starting at a block that is a
// strict ancestor of old_finalized_checkpoint.
warn!(
log,
"Found a chain that should already have been pruned";
"head_block_root" => format!("{:?}", head_hash),
"head_slot" => head_slot,
);
break;
}
}
Some(finalized_slot) => {
// Block root is finalized, and we have reached the slot it was finalized
// at: we've hit a shared part of the chain.
if finalized_slot == slot {
// The first finalized block of a candidate chain lies after (in terms
// of slots order) the newly finalized block. It's not a candidate for
// prunning.
if finalized_slot == new_finalized_slot {
Some((finalized_block_root, finalized_state_root)) => {
// This fork descends from a newly finalized block, we can stop.
if block_root == *finalized_block_root {
// Sanity check: if the slot and block root match, then the
// state roots should match too.
if state_root != *finalized_state_root {
return Err(PruningError::UnexpectedUnequalStateRoots.into());
}
// If the fork descends from the whole finalized chain,
// do not prune it. Otherwise continue to delete all
// of the blocks and states that have been staged for
// deletion so far.
if slot == new_finalized_slot {
potentially_abandoned_blocks.clear();
potentially_abandoned_head.take();
}
// If there are skipped slots on the fork to be pruned, then
// we will have just staged the common block for deletion.
// Unstage it.
else {
for (_, block_root, _) in
potentially_abandoned_blocks.iter_mut().rev()
{
if block_root.as_ref() == Some(finalized_block_root) {
*block_root = None;
} else {
break;
}
}
}
break;
}
// Block root is finalized, but we're at a skip slot: delete the state only.
else {
} else {
if state_root == *finalized_state_root {
return Err(PruningError::UnexpectedEqualStateRoots.into());
}
potentially_abandoned_blocks.push((
slot,
None,
Some(state_hash.into()),
Some(block_root),
Some(state_root),
));
}
}
}
}
abandoned_heads.extend(potentially_abandoned_head.into_iter());
if !potentially_abandoned_blocks.is_empty() {
if let Some(abandoned_head) = potentially_abandoned_head {
debug!(
log,
"Pruning head";
"head_block_root" => format!("{:?}", abandoned_head),
"head_slot" => head_slot,
);
abandoned_heads.insert(abandoned_head);
abandoned_blocks.extend(
potentially_abandoned_blocks
.iter()
.filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash),
);
abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map(
|(slot, _, maybe_state_hash)| match maybe_state_hash {
None => None,
Some(state_hash) => Some((*slot, *state_hash)),
},
|(slot, _, maybe_state_hash)| maybe_state_hash.map(|sr| (*slot, sr)),
));
}
}
@@ -161,11 +238,14 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
.map(|(slot, state_hash)| StoreOp::DeleteState(state_hash, slot)),
)
.collect();
store.do_atomically(batch)?;
for head_hash in abandoned_heads.into_iter() {
head_tracker.remove_head(head_hash);
}
debug!(log, "Database pruning complete");
Ok(())
}
}
@@ -174,6 +254,17 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
pub struct NullMigrator;
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold> for NullMigrator {
fn process_finalization(
&self,
_finalized_state_root: BeaconStateHash,
_new_finalized_state: BeaconState<E>,
_head_tracker: Arc<HeadTracker>,
_old_finalized_checkpoint: Checkpoint,
_new_finalized_checkpoint: Checkpoint,
) -> Result<(), BeaconChainError> {
Ok(())
}
fn new(_: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
NullMigrator
}
@@ -184,48 +275,59 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold> fo
/// Mostly useful for tests.
pub struct BlockingMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
db: Arc<HotColdDB<E, Hot, Cold>>,
log: Logger,
}
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
for BlockingMigrator<E, Hot, Cold>
{
fn new(db: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
BlockingMigrator { db }
fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self {
BlockingMigrator { db, log }
}
fn process_finalization(
&self,
state_root: Hash256,
finalized_state_root: BeaconStateHash,
new_finalized_state: BeaconState<E>,
_max_finality_distance: u64,
head_tracker: Arc<HeadTracker>,
old_finalized_block_hash: SignedBeaconBlockHash,
new_finalized_block_hash: SignedBeaconBlockHash,
) {
if let Err(e) = Self::prune_abandoned_forks(
old_finalized_checkpoint: Checkpoint,
new_finalized_checkpoint: Checkpoint,
) -> Result<(), BeaconChainError> {
Self::prune_abandoned_forks(
self.db.clone(),
head_tracker,
old_finalized_block_hash,
new_finalized_block_hash,
new_finalized_state.slot,
) {
eprintln!("Pruning error: {:?}", e);
}
finalized_state_root,
&new_finalized_state,
old_finalized_checkpoint,
new_finalized_checkpoint,
&self.log,
)?;
if let Err(e) = process_finalization(self.db.clone(), state_root, &new_finalized_state) {
// This migrator is only used for testing, so we just log to stderr without a logger.
eprintln!("Migration error: {:?}", e);
match migrate_database(
self.db.clone(),
finalized_state_root.into(),
&new_finalized_state,
) {
Ok(()) => Ok(()),
Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
debug!(
self.log,
"Database migration postponed, unaligned finalized block";
"slot" => slot.as_u64()
);
Ok(())
}
Err(e) => Err(e.into()),
}
}
}
type MpscSender<E> = mpsc::Sender<(
Hash256,
BeaconStateHash,
BeaconState<E>,
Arc<HeadTracker>,
SignedBeaconBlockHash,
SignedBeaconBlockHash,
Slot,
Checkpoint,
Checkpoint,
)>;
/// Migrator that runs a background thread to migrate state from the hot to the cold database.
@@ -243,34 +345,26 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
Self { db, tx_thread, log }
}
/// Perform the freezing operation on the database,
fn process_finalization(
&self,
finalized_state_root: Hash256,
finalized_state_root: BeaconStateHash,
new_finalized_state: BeaconState<E>,
max_finality_distance: u64,
head_tracker: Arc<HeadTracker>,
old_finalized_block_hash: SignedBeaconBlockHash,
new_finalized_block_hash: SignedBeaconBlockHash,
) {
if !self.needs_migration(new_finalized_state.slot, max_finality_distance) {
return;
}
old_finalized_checkpoint: Checkpoint,
new_finalized_checkpoint: Checkpoint,
) -> Result<(), BeaconChainError> {
let (ref mut tx, ref mut thread) = *self.tx_thread.lock();
let new_finalized_slot = new_finalized_state.slot;
if let Err(tx_err) = tx.send((
finalized_state_root,
new_finalized_state,
head_tracker,
old_finalized_block_hash,
new_finalized_block_hash,
new_finalized_slot,
old_finalized_checkpoint,
new_finalized_checkpoint,
)) {
let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone());
drop(mem::replace(tx, new_tx));
*tx = new_tx;
let old_thread = mem::replace(thread, new_thread);
// Join the old thread, which will probably have panicked, or may have
@@ -286,57 +380,43 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
// Retry at most once, we could recurse but that would risk overflowing the stack.
let _ = tx.send(tx_err.0);
}
Ok(())
}
}
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Hot, Cold> {
/// Return true if a migration needs to be performed, given a new `finalized_slot`.
fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool {
let finality_distance = finalized_slot - self.db.get_split_slot();
finality_distance > max_finality_distance
}
#[allow(clippy::type_complexity)]
/// Spawn a new child thread to run the migration process.
///
/// Return a channel handle for sending new finalized states to the thread.
fn spawn_thread(
db: Arc<HotColdDB<E, Hot, Cold>>,
log: Logger,
) -> (
mpsc::Sender<(
Hash256,
BeaconState<E>,
Arc<HeadTracker>,
SignedBeaconBlockHash,
SignedBeaconBlockHash,
Slot,
)>,
thread::JoinHandle<()>,
) {
) -> (MpscSender<E>, thread::JoinHandle<()>) {
let (tx, rx) = mpsc::channel();
let thread = thread::spawn(move || {
while let Ok((
state_root,
state,
head_tracker,
old_finalized_block_hash,
new_finalized_block_hash,
new_finalized_slot,
old_finalized_checkpoint,
new_finalized_checkpoint,
)) = rx.recv()
{
match Self::prune_abandoned_forks(
db.clone(),
head_tracker,
old_finalized_block_hash,
new_finalized_block_hash,
new_finalized_slot,
state_root,
&state,
old_finalized_checkpoint,
new_finalized_checkpoint,
&log,
) {
Ok(()) => {}
Err(e) => warn!(log, "Block pruning failed: {:?}", e),
}
match process_finalization(db.clone(), state_root, &state) {
match migrate_database(db.clone(), state_root.into(), &state) {
Ok(()) => {}
Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
debug!(

File diff suppressed because it is too large Load Diff

View File

@@ -26,7 +26,7 @@ lazy_static! {
fn produces_attestations() {
let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4;
let harness = BeaconChainHarness::new(
let mut harness = BeaconChainHarness::new_with_store_config(
MainnetEthSpec,
KEYPAIRS[..].to_vec(),
StoreConfig::default(),

View File

@@ -5,7 +5,9 @@ extern crate lazy_static;
use beacon_chain::{
attestation_verification::Error as AttnError,
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
},
BeaconChain, BeaconChainTypes,
};
use int_to_bytes::int_to_bytes32;
@@ -30,7 +32,7 @@ lazy_static! {
}
/// Returns a beacon chain harness.
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
fn get_harness(validator_count: usize) -> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
let harness = BeaconChainHarness::new_with_target_aggregators(
MainnetEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
@@ -184,8 +186,7 @@ fn get_non_aggregator<T: BeaconChainTypes>(
/// Tests verification of `SignedAggregateAndProof` from the gossip network.
#[test]
fn aggregated_gossip_verification() {
let harness = get_harness(VALIDATOR_COUNT);
let chain = &harness.chain;
let mut harness = get_harness(VALIDATOR_COUNT);
// Extend the chain out a few epochs so we have some chain depth to play with.
harness.extend_chain(
@@ -197,7 +198,7 @@ fn aggregated_gossip_verification() {
// Advance into a slot where there have not been blocks or attestations produced.
harness.advance_slot();
let current_slot = chain.slot().expect("should get slot");
let current_slot = harness.chain.slot().expect("should get slot");
assert_eq!(
current_slot % E::slots_per_epoch(),
@@ -532,8 +533,7 @@ fn aggregated_gossip_verification() {
/// Tests the verification conditions for an unaggregated attestation on the gossip network.
#[test]
fn unaggregated_gossip_verification() {
let harness = get_harness(VALIDATOR_COUNT);
let chain = &harness.chain;
let mut harness = get_harness(VALIDATOR_COUNT);
// Extend the chain out a few epochs so we have some chain depth to play with.
harness.extend_chain(
@@ -545,8 +545,8 @@ fn unaggregated_gossip_verification() {
// Advance into a slot where there have not been blocks or attestations produced.
harness.advance_slot();
let current_slot = chain.slot().expect("should get slot");
let current_epoch = chain.epoch().expect("should get epoch");
let current_slot = harness.chain.slot().expect("should get slot");
let current_epoch = harness.chain.epoch().expect("should get epoch");
assert_eq!(
current_slot % E::slots_per_epoch(),
@@ -772,8 +772,7 @@ fn unaggregated_gossip_verification() {
/// This also checks that we can do a state lookup if we don't get a hit from the shuffling cache.
#[test]
fn attestation_that_skips_epochs() {
let harness = get_harness(VALIDATOR_COUNT);
let chain = &harness.chain;
let mut harness = get_harness(VALIDATOR_COUNT);
// Extend the chain out a few epochs so we have some chain depth to play with.
harness.extend_chain(
@@ -782,16 +781,18 @@ fn attestation_that_skips_epochs() {
AttestationStrategy::SomeValidators(vec![]),
);
let current_slot = chain.slot().expect("should get slot");
let current_epoch = chain.epoch().expect("should get epoch");
let current_slot = harness.chain.slot().expect("should get slot");
let current_epoch = harness.chain.epoch().expect("should get epoch");
let earlier_slot = (current_epoch - 2).start_slot(MainnetEthSpec::slots_per_epoch());
let earlier_block = chain
let earlier_block = harness
.chain
.block_at_slot(earlier_slot)
.expect("should not error getting block at slot")
.expect("should find block at slot");
let mut state = chain
let mut state = harness
.chain
.get_state(&earlier_block.state_root(), Some(earlier_slot))
.expect("should not error getting state")
.expect("should find state");

View File

@@ -4,7 +4,9 @@
extern crate lazy_static;
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
},
BeaconSnapshot, BlockError,
};
use store::config::StoreConfig;
@@ -31,7 +33,7 @@ lazy_static! {
}
fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
CHAIN_SEGMENT_LENGTH,
@@ -48,8 +50,8 @@ fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
.collect()
}
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
let harness = BeaconChainHarness::new(
fn get_harness(validator_count: usize) -> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
let harness = BeaconChainHarness::new_with_store_config(
MainnetEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
@@ -81,7 +83,7 @@ fn junk_aggregate_signature() -> AggregateSignature {
fn update_proposal_signatures(
snapshots: &mut [BeaconSnapshot<E>],
harness: &BeaconChainHarness<HarnessType<E>>,
harness: &BeaconChainHarness<NullMigratorEphemeralHarnessType<E>>,
) {
for snapshot in snapshots {
let spec = &harness.chain.spec;
@@ -91,7 +93,7 @@ fn update_proposal_signatures(
.get_beacon_proposer_index(slot, spec)
.expect("should find proposer index");
let keypair = harness
.keypairs
.validators_keypairs
.get(proposer_index)
.expect("proposer keypair should be available");
@@ -274,7 +276,7 @@ fn chain_segment_non_linear_slots() {
}
fn assert_invalid_signature(
harness: &BeaconChainHarness<HarnessType<E>>,
harness: &BeaconChainHarness<NullMigratorEphemeralHarnessType<E>>,
block_index: usize,
snapshots: &[BeaconSnapshot<E>],
item: &str,
@@ -325,7 +327,7 @@ fn assert_invalid_signature(
// slot) tuple.
}
fn get_invalid_sigs_harness() -> BeaconChainHarness<HarnessType<E>> {
fn get_invalid_sigs_harness() -> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
let harness = get_harness(VALIDATOR_COUNT);
harness
.chain

View File

@@ -7,7 +7,7 @@ extern crate lazy_static;
use beacon_chain::observed_operations::ObservationOutcome;
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
AttestationStrategy, BeaconChainHarness, BlockStrategy, BlockingMigratorDiskHarnessType,
};
use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc;
@@ -28,7 +28,7 @@ lazy_static! {
}
type E = MinimalEthSpec;
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
type TestHarness = BeaconChainHarness<BlockingMigratorDiskHarnessType<E>>;
type HotColdDB = store::HotColdDB<E, LevelDB<E>, LevelDB<E>>;
fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
@@ -57,8 +57,8 @@ fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {
fn voluntary_exit() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;
let mut harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec.clone();
harness.extend_chain(
(E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize,

View File

@@ -44,7 +44,7 @@ fn finalizes_after_resuming_from_db() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = BeaconChainHarness::new_with_disk_store(
let mut harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
store.clone(),
KEYPAIRS[0..validator_count].to_vec(),
@@ -88,7 +88,7 @@ fn finalizes_after_resuming_from_db() {
let data_dir = harness.data_dir;
let original_chain = harness.chain;
let resumed_harness = BeaconChainHarness::resume_from_disk_store(
let mut resumed_harness = BeaconChainHarness::resume_from_disk_store(
MinimalEthSpec,
store,
KEYPAIRS[0..validator_count].to_vec(),

File diff suppressed because it is too large Load Diff

View File

@@ -6,7 +6,8 @@ extern crate lazy_static;
use beacon_chain::{
attestation_verification::Error as AttnError,
test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType, OP_POOL_DB_KEY,
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
OP_POOL_DB_KEY,
},
};
use operation_pool::PersistedOperationPool;
@@ -24,8 +25,10 @@ lazy_static! {
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<MinimalEthSpec>> {
let harness = BeaconChainHarness::new(
fn get_harness(
validator_count: usize,
) -> BeaconChainHarness<NullMigratorEphemeralHarnessType<MinimalEthSpec>> {
let harness = BeaconChainHarness::new_with_store_config(
MinimalEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
@@ -64,7 +67,7 @@ fn massive_skips() {
fn iterators() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@@ -139,7 +142,7 @@ fn iterators() {
#[test]
fn chooses_fork() {
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
let delay = MinimalEthSpec::default_spec().min_attestation_inclusion_delay as usize;
@@ -190,7 +193,7 @@ fn chooses_fork() {
fn finalizes_with_full_participation() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@@ -225,7 +228,7 @@ fn finalizes_with_full_participation() {
fn finalizes_with_two_thirds_participation() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
let attesters = (0..two_thirds).collect();
@@ -268,7 +271,7 @@ fn finalizes_with_two_thirds_participation() {
fn does_not_finalize_with_less_than_two_thirds_participation() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
let less_than_two_thirds = two_thirds - 1;
@@ -305,7 +308,7 @@ fn does_not_finalize_with_less_than_two_thirds_participation() {
fn does_not_finalize_without_attestation() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@@ -338,7 +341,7 @@ fn does_not_finalize_without_attestation() {
fn roundtrip_operation_pool() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
// Add some attestations
harness.extend_chain(
@@ -370,7 +373,7 @@ fn roundtrip_operation_pool() {
fn unaggregated_attestations_added_to_fork_choice_some_none() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@@ -424,7 +427,7 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() {
fn attestations_with_increasing_slots() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
let mut attestations = vec![];
@@ -486,7 +489,7 @@ fn attestations_with_increasing_slots() {
fn unaggregated_attestations_added_to_fork_choice_all_updated() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@@ -541,7 +544,7 @@ fn unaggregated_attestations_added_to_fork_choice_all_updated() {
fn run_skip_slot_test(skip_slots: u64) {
let num_validators = 8;
let harness_a = get_harness(num_validators);
let mut harness_a = get_harness(num_validators);
let harness_b = get_harness(num_validators);
for _ in 0..skip_slots {

View File

@@ -23,7 +23,7 @@ mod tests {
let log = get_logger();
let beacon_chain = Arc::new(
BeaconChainHarness::new(
BeaconChainHarness::new_with_store_config(
MinimalEthSpec,
generate_deterministic_keypairs(8),
StoreConfig::default(),

View File

@@ -14,7 +14,7 @@ use crate::{
};
use lru::LruCache;
use parking_lot::{Mutex, RwLock};
use slog::{debug, error, trace, warn, Logger};
use slog::{debug, error, info, trace, warn, Logger};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use state_processing::{
@@ -147,6 +147,12 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
// Load the previous split slot from the database (if any). This ensures we can
// stop and restart correctly.
if let Some(split) = db.load_split()? {
info!(
db.log,
"Hot-Cold DB initialized";
"split_slot" => split.slot,
"split_state" => format!("{:?}", split.state_root)
);
*db.split.write() = split;
}
Ok(db)
@@ -819,7 +825,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}
/// Advance the split point of the store, moving new finalized states to the freezer.
pub fn process_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: Arc<HotColdDB<E, Hot, Cold>>,
frozen_head_root: Hash256,
frozen_head: &BeaconState<E>,

View File

@@ -27,7 +27,7 @@ pub mod iter;
use std::borrow::Cow;
pub use self::config::StoreConfig;
pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split};
pub use self::hot_cold_store::{BlockReplay, HotColdDB, HotStateSummary, Split};
pub use self::leveldb_store::LevelDB;
pub use self::memory_store::MemoryStore;
pub use self::partial_beacon_state::PartialBeaconState;

View File

@@ -1,6 +1,6 @@
[package]
name = "boot_node"
version = "0.2.7"
version = "0.2.8"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"

View File

@@ -10,7 +10,7 @@ use target_info::Target;
/// `Lighthouse/v0.2.0-1419501f2+`
pub const VERSION: &str = git_version!(
args = ["--always", "--dirty=+"],
prefix = "Lighthouse/v0.2.7-",
prefix = "Lighthouse/v0.2.8-",
fallback = "unknown"
);

View File

@@ -1,8 +1,10 @@
#![cfg(not(debug_assertions))]
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
BeaconChain, BeaconChainError, BeaconForkChoiceStore, ForkChoiceError,
test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
},
BeaconChain, BeaconChainError, BeaconForkChoiceStore, ForkChoiceError, StateSkipConfig,
};
use fork_choice::{
ForkChoiceStore, InvalidAttestation, InvalidBlock, QueuedAttestation,
@@ -18,7 +20,7 @@ use types::{BeaconBlock, BeaconState, Hash256, SignedBeaconBlock};
pub type E = MainnetEthSpec;
pub const VALIDATOR_COUNT: usize = 16;
pub const VALIDATOR_COUNT: usize = 32;
/// Defines some delay between when an attestation is created and when it is mutated.
pub enum MutationDelay {
@@ -30,7 +32,7 @@ pub enum MutationDelay {
/// A helper struct to make testing fork choice more ergonomic and less repetitive.
struct ForkChoiceTest {
harness: BeaconChainHarness<HarnessType<E>>,
harness: BeaconChainHarness<NullMigratorEphemeralHarnessType<E>>,
}
impl ForkChoiceTest {
@@ -115,22 +117,31 @@ impl ForkChoiceTest {
}
/// Build the chain whilst `predicate` returns `true`.
pub fn apply_blocks_while<F>(self, mut predicate: F) -> Self
pub fn apply_blocks_while<F>(mut self, mut predicate: F) -> Self
where
F: FnMut(&BeaconBlock<E>, &BeaconState<E>) -> bool,
{
self.harness.advance_slot();
self.harness.extend_chain_while(
|block, state| predicate(&block.message, state),
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
let mut state = self.harness.get_current_state();
let validators = self.harness.get_all_validators();
loop {
let slot = self.harness.get_current_slot();
let (block, state_) = self.harness.make_block(state, slot);
state = state_;
if !predicate(&block.message, &state) {
break;
}
let block_hash = self.harness.process_block(slot, block.clone());
self.harness
.attest_block(&state, block_hash, &block, &validators);
self.harness.advance_slot();
}
self
}
/// Apply `count` blocks to the chain (with attestations).
pub fn apply_blocks(self, count: usize) -> Self {
pub fn apply_blocks(mut self, count: usize) -> Self {
self.harness.advance_slot();
self.harness.extend_chain(
count,
@@ -142,7 +153,7 @@ impl ForkChoiceTest {
}
/// Apply `count` blocks to the chain (without attestations).
pub fn apply_blocks_without_new_attestations(self, count: usize) -> Self {
pub fn apply_blocks_without_new_attestations(mut self, count: usize) -> Self {
self.harness.advance_slot();
self.harness.extend_chain(
count,
@@ -181,13 +192,22 @@ impl ForkChoiceTest {
/// Applies a block directly to fork choice, bypassing the beacon chain.
///
/// Asserts the block was applied successfully.
pub fn apply_block_directly_to_fork_choice<F>(self, mut func: F) -> Self
pub fn apply_block_directly_to_fork_choice<F>(mut self, mut func: F) -> Self
where
F: FnMut(&mut BeaconBlock<E>, &mut BeaconState<E>),
{
let (mut block, mut state) = self.harness.get_block();
let state = self
.harness
.chain
.state_at_slot(
self.harness.get_current_slot() - 1,
StateSkipConfig::WithStateRoots,
)
.unwrap();
let slot = self.harness.get_current_slot();
let (mut block, mut state) = self.harness.make_block(state, slot);
func(&mut block.message, &mut state);
let current_slot = self.harness.chain.slot().unwrap();
let current_slot = self.harness.get_current_slot();
self.harness
.chain
.fork_choice
@@ -201,7 +221,7 @@ impl ForkChoiceTest {
///
/// Asserts that an error occurred and allows inspecting it via `comparison_func`.
pub fn apply_invalid_block_directly_to_fork_choice<F, G>(
self,
mut self,
mut mutation_func: F,
mut comparison_func: G,
) -> Self
@@ -209,9 +229,18 @@ impl ForkChoiceTest {
F: FnMut(&mut BeaconBlock<E>, &mut BeaconState<E>),
G: FnMut(ForkChoiceError),
{
let (mut block, mut state) = self.harness.get_block();
let state = self
.harness
.chain
.state_at_slot(
self.harness.get_current_slot() - 1,
StateSkipConfig::WithStateRoots,
)
.unwrap();
let slot = self.harness.get_current_slot();
let (mut block, mut state) = self.harness.make_block(state, slot);
mutation_func(&mut block.message, &mut state);
let current_slot = self.harness.chain.slot().unwrap();
let current_slot = self.harness.get_current_slot();
let err = self
.harness
.chain
@@ -267,20 +296,21 @@ impl ForkChoiceTest {
///
/// Also returns some info about who created it.
fn apply_attestation_to_chain<F, G>(
self,
mut self,
delay: MutationDelay,
mut mutation_func: F,
mut comparison_func: G,
) -> Self
where
F: FnMut(&mut IndexedAttestation<E>, &BeaconChain<HarnessType<E>>),
F: FnMut(&mut IndexedAttestation<E>, &BeaconChain<NullMigratorEphemeralHarnessType<E>>),
G: FnMut(Result<(), BeaconChainError>),
{
let chain = &self.harness.chain;
let head = chain.head().expect("should get head");
let current_slot = chain.slot().expect("should get slot");
let head = self.harness.chain.head().expect("should get head");
let current_slot = self.harness.chain.slot().expect("should get slot");
let mut attestation = chain
let mut attestation = self
.harness
.chain
.produce_unaggregated_attestation(current_slot, 0)
.expect("should not error while producing attestation");
@@ -298,9 +328,13 @@ impl ForkChoiceTest {
.get_committee_count_at_slot(current_slot)
.expect("should not error while getting committee count");
let subnet_id =
SubnetId::compute_subnet::<E>(current_slot, 0, committee_count, &chain.spec)
.expect("should compute subnet id");
let subnet_id = SubnetId::compute_subnet::<E>(
current_slot,
0,
committee_count,
&self.harness.chain.spec,
)
.expect("should compute subnet id");
let validator_sk = generate_deterministic_keypair(validator_index).sk;
@@ -309,12 +343,14 @@ impl ForkChoiceTest {
&validator_sk,
validator_committee_index,
&head.beacon_state.fork,
chain.genesis_validators_root,
&chain.spec,
self.harness.chain.genesis_validators_root,
&self.harness.chain.spec,
)
.expect("should sign attestation");
let mut verified_attestation = chain
let mut verified_attestation = self
.harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
.expect("precondition: should gossip verify attestation");
@@ -327,9 +363,15 @@ impl ForkChoiceTest {
);
}
mutation_func(verified_attestation.__indexed_attestation_mut(), chain);
mutation_func(
verified_attestation.__indexed_attestation_mut(),
&self.harness.chain,
);
let result = chain.apply_attestation_to_fork_choice(&verified_attestation);
let result = self
.harness
.chain
.apply_attestation_to_fork_choice(&verified_attestation);
comparison_func(result);

View File

@@ -1,7 +1,7 @@
[package]
name = "lcli"
description = "Lighthouse CLI (modeled after zcli)"
version = "0.2.7"
version = "0.2.8"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

View File

@@ -1,6 +1,6 @@
[package]
name = "lighthouse"
version = "0.2.7"
version = "0.2.8"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"

View File

@@ -189,13 +189,16 @@ fn run<E: EthSpec>(
// Parse testnet config from the `testnet` and `testnet-dir` flag in that order
// else, use the default
let mut optional_testnet_config = Eth2TestnetConfig::hard_coded_default()?;
let mut optional_testnet_config = None;
if matches.is_present("testnet") {
optional_testnet_config = clap_utils::parse_hardcoded_network(matches, "testnet")?;
};
if matches.is_present("testnet-dir") {
optional_testnet_config = clap_utils::parse_testnet_dir(matches, "testnet-dir")?;
};
if optional_testnet_config.is_none() {
optional_testnet_config = Eth2TestnetConfig::hard_coded_default()?;
}
let builder = if let Some(log_path) = matches.value_of("logfile") {
let path = log_path

View File

@@ -1,6 +1,6 @@
[package]
name = "validator_client"
version = "0.2.7"
version = "0.2.8"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>", "Luke Anderson <luke@lukeanderson.com.au>"]
edition = "2018"