Compare commits

...

8 Commits

Author SHA1 Message Date
Michael Sproul
7aceff4d13 Add safe_sum and use it in state_processing (#1620)
## Issue Addressed

Closes #1098

## Proposed Changes

Add a `SafeArithIter` trait with a `safe_sum` method, and use it in `state_processing`. This seems to be the only place in `consensus` where it is relevant -- i.e. where we were using `sum` and the integer_arith lint is enabled.

## Additional Info

This PR doesn't include any Clippy linting to prevent `sum` from being called. It seems there is no existing Clippy lint that suits our purpose, but I'm going to look into that and maybe schedule writing one as a lower-priority task.

This theoretically _is_ a consensus breaking change, but it shouldn't impact Medalla (or any other testnet) because `slashings` shouldn't overflow!
2020-09-22 05:40:04 +00:00
Michael Sproul
4fca306397 Update BLST, add force-adx support (#1595)
## Issue Addressed

Closes #1504
Closes https://github.com/sigp/lighthouse/issues/1505

## Proposed Changes

* Update `blst` to the latest version, which is more portable and includes finer-grained compilation controls (see below).
* Detect the case where a binary has been explicitly compiled with ADX support but it's missing at runtime, and report a nicer error than `SIGILL`.

## Known Issues

* None. The previous issue with `make build-aarch64` (https://github.com/supranational/blst/issues/27), has been resolved.

## Additional Info

I think we should tweak our release process and our Docker builds so that we provide two options:

Binaries:

* `lighthouse`: compiled with `modern`/`force-adx`, for CPUs 2013 and newer
* `lighthouse-portable`: compiled with `portable` for older CPUs

Docker images:

* `sigp/lighthouse:latest`: multi-arch image with `modern` x86_64 and vanilla aarch64 binary
* `sigp/lighthouse:latest-portable`: multi-arch image with `portable` builds for x86_64 and aarch64

And relevant Docker images for the releases (as per https://github.com/sigp/lighthouse/pull/1574#issuecomment-687766141), tagged `v0.x.y` and `v0.x.y-portable`
2020-09-22 05:40:02 +00:00
Paul Hauner
d85d5a435e Bump to v0.2.11 (#1645)
## Issue Addressed

NA

## Proposed Changes

- Bump version to v0.2.11
- Run `cargo update`.


## Additional Info

NA
2020-09-22 04:45:15 +00:00
Paul Hauner
bd39cc8e26 Apply hotfix for inconsistent head (#1639)
## Issue Addressed

- Resolves #1616

## Proposed Changes

If we look at the function which persists fork choice and the canonical head to disk:

1db8daae0c/beacon_node/beacon_chain/src/beacon_chain.rs (L234-L280)

There is a race-condition which might cause the canonical head and fork choice values to be out-of-sync.

I believe this is the cause of #1616. I managed to recreate the issue and produce a database that was unable to sync under the `master` branch but able to sync with this branch.

These new changes solve the issue by ignoring the persisted `canonical_head_block_root` value and instead getting fork choice to generate it. This ensures that the canonical head is in-sync with fork choice.

## Additional Info

This is a hotfix method that leaves some crusty code hanging around. Once this PR is merged (to satisfy the v0.2.x users) we should later update and merge #1638 so we can have a clean fix for the v0.3.x versions.
2020-09-22 02:06:10 +00:00
Pawan Dhananjay
14ff38539c Add trusted peers (#1640)
## Issue Addressed

Closes #1581 

## Proposed Changes

Adds a new CLI option for trusted peers who always have the maximum possible score.
2020-09-22 01:12:36 +00:00
Michael Sproul
5d17eb899f Update LevelDB to v0.8.6, removing patch (#1636)
Removes our dependency on a fork of LevelDB now that https://github.com/skade/leveldb-sys/pull/17 is merged
2020-09-21 11:53:53 +00:00
Age Manning
1db8daae0c Shift metadata to the global network variables (#1631)
## Issue Addressed

N/A

## Proposed Changes

Shifts the local `metadata` to `network_globals` making it accessible to the HTTP API and other areas of lighthouse.

## Additional Info

N/A
2020-09-21 02:00:38 +00:00
Pawan Dhananjay
7b97c4ad30 Snappy additional sanity checks (#1625)
## Issue Addressed

N/A

## Proposed Changes

Adds the following check from the spec

> A reader SHOULD NOT read more than max_encoded_len(n) bytes after reading the SSZ length-prefix n from the header.
2020-09-21 01:06:25 +00:00
34 changed files with 631 additions and 256 deletions

82
Cargo.lock generated
View File

@@ -2,7 +2,7 @@
# It is not intended for manual editing.
[[package]]
name = "account_manager"
version = "0.2.10"
version = "0.2.11"
dependencies = [
"account_utils",
"bls",
@@ -370,7 +370,7 @@ dependencies = [
[[package]]
name = "beacon_node"
version = "0.2.10"
version = "0.2.11"
dependencies = [
"beacon_chain",
"clap",
@@ -386,7 +386,7 @@ dependencies = [
"exit-future",
"futures 0.3.5",
"genesis",
"hyper 0.13.7",
"hyper 0.13.8",
"lighthouse_version",
"logging",
"node_test_rig",
@@ -525,7 +525,7 @@ dependencies = [
[[package]]
name = "blst"
version = "0.1.1"
source = "git+https://github.com/sigp/blst.git?rev=284f7059642851c760a09fb1708bcb59c7ca323c#284f7059642851c760a09fb1708bcb59c7ca323c"
source = "git+https://github.com/supranational/blst.git?rev=a8398ed284b0d78858302ad1ceb25a80e7bbe535#a8398ed284b0d78858302ad1ceb25a80e7bbe535"
dependencies = [
"cc",
"glob",
@@ -534,7 +534,7 @@ dependencies = [
[[package]]
name = "boot_node"
version = "0.2.10"
version = "0.2.11"
dependencies = [
"beacon_node",
"clap",
@@ -777,7 +777,7 @@ dependencies = [
"sloggers",
"slot_clock",
"store",
"time 0.2.20",
"time 0.2.21",
"timer",
"tokio 0.2.22",
"toml",
@@ -1292,9 +1292,9 @@ dependencies = [
[[package]]
name = "ed25519-dalek"
version = "1.0.0"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d2e93f837d749c16d118e7ddf7a4dfd0ac8f452cf51e46e9348824e5ef6851"
checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d"
dependencies = [
"curve25519-dalek 3.0.0",
"ed25519",
@@ -2146,9 +2146,9 @@ dependencies = [
[[package]]
name = "hermit-abi"
version = "0.1.15"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9"
checksum = "4c30f6d0bc6b00693347368a67d41b58f2fb851215ff1da49e90fe2c5c667151"
dependencies = [
"libc",
]
@@ -2262,6 +2262,12 @@ version = "1.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9"
[[package]]
name = "httpdate"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47"
[[package]]
name = "humantime"
version = "1.3.0"
@@ -2322,9 +2328,9 @@ dependencies = [
[[package]]
name = "hyper"
version = "0.13.7"
version = "0.13.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb"
checksum = "2f3afcfae8af5ad0576a31e768415edb627824129e8e5a29b8bfccb2f234e835"
dependencies = [
"bytes 0.5.6",
"futures-channel",
@@ -2334,10 +2340,10 @@ dependencies = [
"http 0.2.1",
"http-body 0.3.1",
"httparse",
"httpdate",
"itoa",
"pin-project",
"socket2",
"time 0.1.44",
"tokio 0.2.22",
"tower-service",
"tracing",
@@ -2364,7 +2370,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed"
dependencies = [
"bytes 0.5.6",
"hyper 0.13.7",
"hyper 0.13.8",
"native-tls",
"tokio 0.2.22",
"tokio-tls 0.3.1",
@@ -2431,9 +2437,12 @@ dependencies = [
[[package]]
name = "instant"
version = "0.1.6"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b141fdc7836c525d4d594027d318c84161ca17aaf8113ab1f81ab93ae897485"
checksum = "63312a18f7ea8760cdd0a7c5aac1a619752a246b833545e3e36d1f81f7cd9e66"
dependencies = [
"cfg-if",
]
[[package]]
name = "int_to_bytes"
@@ -2553,7 +2562,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "lcli"
version = "0.2.10"
version = "0.2.11"
dependencies = [
"bls",
"clap",
@@ -2585,9 +2594,9 @@ dependencies = [
[[package]]
name = "leveldb"
version = "0.8.5"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5a2fa830c44ac4762564389a7efe22688a469c8d7b71dd11da2e35c33ae96c2"
checksum = "32651baaaa5596b3a6e0bee625e73fd0334c167db0ea5ac68750ef9a629a2d6a"
dependencies = [
"db-key",
"leveldb-sys",
@@ -2596,8 +2605,9 @@ dependencies = [
[[package]]
name = "leveldb-sys"
version = "2.0.6"
source = "git+https://github.com/michaelsproul/leveldb-sys?branch=v2.0.6-cmake#e784dba085921bad187ff74b3418c539914a02ff"
version = "2.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76c44b9b785ca705d58190ebd432a4e7edb900eadf236ff966d7d1307e482e87"
dependencies = [
"cmake",
"libc",
@@ -2911,7 +2921,7 @@ dependencies = [
[[package]]
name = "lighthouse"
version = "0.2.10"
version = "0.2.11"
dependencies = [
"account_manager",
"account_utils",
@@ -3071,9 +3081,9 @@ checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
[[package]]
name = "memoffset"
version = "0.5.5"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f"
checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
dependencies = [
"autocfg 1.0.1",
]
@@ -4108,9 +4118,9 @@ dependencies = [
[[package]]
name = "rayon-core"
version = "1.8.0"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91739a34c4355b5434ce54c9086c5895604a9c278586d1f1aa95e04f66b525a0"
checksum = "e8c4fec834fb6e6d2dd5eece3c7b432a52f0ba887cf40e595190c4107edc08bf"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
@@ -4212,7 +4222,7 @@ dependencies = [
"futures-util",
"http 0.2.1",
"http-body 0.3.1",
"hyper 0.13.7",
"hyper 0.13.8",
"hyper-tls 0.4.3",
"ipnet",
"js-sys",
@@ -4251,7 +4261,7 @@ dependencies = [
"futures 0.3.5",
"hex 0.4.2",
"http 0.2.1",
"hyper 0.13.7",
"hyper 0.13.8",
"itertools 0.9.0",
"lazy_static",
"lighthouse_metrics",
@@ -4288,7 +4298,7 @@ dependencies = [
"eth2_hashing",
"eth2_ssz",
"eth2_ssz_derive",
"hyper 0.13.7",
"hyper 0.13.8",
"procinfo",
"psutil",
"rayon",
@@ -4915,9 +4925,9 @@ checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252"
[[package]]
name = "snafu"
version = "0.6.8"
version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7f5aed652511f5c9123cf2afbe9c244c29db6effa2abb05c866e965c82405ce"
checksum = "9c4e6046e4691afe918fd1b603fd6e515bcda5388a1092a9edbada307d159f09"
dependencies = [
"doc-comment",
"snafu-derive",
@@ -4925,9 +4935,9 @@ dependencies = [
[[package]]
name = "snafu-derive"
version = "0.6.8"
version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebf8f7d5720104a9df0f7076a8682024e958bba0fe9848767bb44f251f3648e9"
checksum = "7073448732a89f2f3e6581989106067f403d378faeafb4a50812eb814170d3e5"
dependencies = [
"proc-macro2",
"quote",
@@ -5309,9 +5319,9 @@ dependencies = [
[[package]]
name = "time"
version = "0.2.20"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d4953c513c9bf1b97e9cdd83f11d60c4b0a83462880a360d80d96953a953fee"
checksum = "2c2e31fb28e2a9f01f5ed6901b066c1ba2333c04b64dc61254142bafcb3feb2c"
dependencies = [
"const_fn",
"libc",
@@ -6026,7 +6036,7 @@ dependencies = [
[[package]]
name = "validator_client"
version = "0.2.10"
version = "0.2.11"
dependencies = [
"account_utils",
"bls",

View File

@@ -79,4 +79,3 @@ eth2_ssz = { path = "consensus/ssz" }
eth2_ssz_derive = { path = "consensus/ssz_derive" }
eth2_ssz_types = { path = "consensus/ssz_types" }
eth2_hashing = { path = "crypto/eth2_hashing" }
leveldb-sys = { git = "https://github.com/michaelsproul/leveldb-sys", branch = "v2.0.6-cmake" }

View File

@@ -1,6 +1,6 @@
[package]
name = "account_manager"
version = "0.2.10"
version = "0.2.11"
authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
edition = "2018"

View File

@@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "0.2.10"
version = "0.2.11"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
edition = "2018"

View File

@@ -97,11 +97,12 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
#[allow(clippy::type_complexity)]
store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
store_migrator: Option<T::StoreMigrator>,
canonical_head: Option<BeaconSnapshot<T::EthSpec>>,
/// The finalized checkpoint to anchor the chain. May be genesis or a higher
/// checkpoint.
pub finalized_snapshot: Option<BeaconSnapshot<T::EthSpec>>,
pub genesis_time: Option<u64>,
genesis_block_root: Option<Hash256>,
#[allow(clippy::type_complexity)]
fork_choice: Option<
ForkChoice<BeaconForkChoiceStore<T::EthSpec, T::HotStore, T::ColdStore>, T::EthSpec>,
>,
op_pool: Option<OperationPool<T::EthSpec>>,
eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
event_handler: Option<T::EventHandler>,
@@ -146,9 +147,9 @@ where
Self {
store: None,
store_migrator: None,
canonical_head: None,
finalized_snapshot: None,
genesis_time: None,
genesis_block_root: None,
fork_choice: None,
op_pool: None,
eth1_chain: None,
event_handler: None,
@@ -278,22 +279,31 @@ where
.to_string()
})?;
self.genesis_block_root = Some(chain.genesis_block_root);
self.head_tracker = Some(
HeadTracker::from_ssz_container(&chain.ssz_head_tracker)
.map_err(|e| format!("Failed to decode head tracker for database: {:?}", e))?,
);
let persisted_fork_choice = store
.get_item::<PersistedForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
.map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?
.ok_or_else(|| "No persisted fork choice present in database.".to_string())?;
let head_block_root = chain.canonical_head_block_root;
let head_block = store
.get_item::<SignedBeaconBlock<TEthSpec>>(&head_block_root)
.map_err(|e| format!("DB error when reading head block: {:?}", e))?
.ok_or_else(|| "Head block not found in store".to_string())?;
let head_state_root = head_block.state_root();
let head_state = store
.get_state(&head_state_root, Some(head_block.slot()))
.map_err(|e| format!("DB error when reading head state: {:?}", e))?
.ok_or_else(|| "Head state not found in store".to_string())?;
let fc_store = BeaconForkChoiceStore::from_persisted(
persisted_fork_choice.fork_choice_store,
store.clone(),
)
.map_err(|e| format!("Unable to load ForkChoiceStore: {:?}", e))?;
let fork_choice =
ForkChoice::from_persisted(persisted_fork_choice.fork_choice, fc_store)
.map_err(|e| format!("Unable to parse persisted fork choice from disk: {:?}", e))?;
let genesis_block = store
.get_item::<SignedBeaconBlock<TEthSpec>>(&chain.genesis_block_root)
.map_err(|e| format!("DB error when reading genesis block: {:?}", e))?
.ok_or_else(|| "Genesis block not found in store".to_string())?;
let genesis_state = store
.get_state(&genesis_block.state_root(), Some(genesis_block.slot()))
.map_err(|e| format!("DB error when reading genesis state: {:?}", e))?
.ok_or_else(|| "Genesis block not found in store".to_string())?;
self.genesis_time = Some(genesis_state.genesis_time);
self.op_pool = Some(
store
@@ -303,35 +313,16 @@ where
.unwrap_or_else(OperationPool::new),
);
let finalized_block_root = head_state.finalized_checkpoint.root;
let finalized_block = store
.get_item::<SignedBeaconBlock<TEthSpec>>(&finalized_block_root)
.map_err(|e| format!("DB error when reading finalized block: {:?}", e))?
.ok_or_else(|| "Finalized block not found in store".to_string())?;
let finalized_state_root = finalized_block.state_root();
let finalized_state = store
.get_state(&finalized_state_root, Some(finalized_block.slot()))
.map_err(|e| format!("DB error when reading finalized state: {:?}", e))?
.ok_or_else(|| "Finalized state not found in store".to_string())?;
self.finalized_snapshot = Some(BeaconSnapshot {
beacon_block_root: finalized_block_root,
beacon_block: finalized_block,
beacon_state_root: finalized_state_root,
beacon_state: finalized_state,
});
self.canonical_head = Some(BeaconSnapshot {
beacon_block_root: head_block_root,
beacon_block: head_block,
beacon_state_root: head_state_root,
beacon_state: head_state,
});
let pubkey_cache = ValidatorPubkeyCache::load_from_file(pubkey_cache_path)
.map_err(|e| format!("Unable to open persisted pubkey cache: {:?}", e))?;
self.genesis_block_root = Some(chain.genesis_block_root);
self.head_tracker = Some(
HeadTracker::from_ssz_container(&chain.ssz_head_tracker)
.map_err(|e| format!("Failed to decode head tracker for database: {:?}", e))?,
);
self.validator_pubkey_cache = Some(pubkey_cache);
self.fork_choice = Some(fork_choice);
Ok(self)
}
@@ -374,12 +365,20 @@ where
)
})?;
self.finalized_snapshot = Some(BeaconSnapshot {
let genesis = BeaconSnapshot {
beacon_block_root,
beacon_block,
beacon_state_root,
beacon_state,
});
};
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis);
let fork_choice = ForkChoice::from_genesis(fc_store, &genesis.beacon_block.message)
.map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?;
self.fork_choice = Some(fork_choice);
self.genesis_time = Some(genesis.beacon_state.genesis_time);
Ok(self.empty_op_pool())
}
@@ -457,23 +456,73 @@ where
.store
.clone()
.ok_or_else(|| "Cannot build without a store.".to_string())?;
let mut fork_choice = self
.fork_choice
.ok_or_else(|| "Cannot build without fork choice.".to_string())?;
let genesis_block_root = self
.genesis_block_root
.ok_or_else(|| "Cannot build without a genesis block root".to_string())?;
// If this beacon chain is being loaded from disk, use the stored head. Otherwise, just use
// the finalized checkpoint (which is probably genesis).
let mut canonical_head = if let Some(head) = self.canonical_head {
head
let current_slot = if slot_clock
.is_prior_to_genesis()
.ok_or_else(|| "Unable to read slot clock".to_string())?
{
self.spec.genesis_slot
} else {
self.finalized_snapshot
.ok_or_else(|| "Cannot build without a state".to_string())?
slot_clock
.now()
.ok_or_else(|| "Unable to read slot".to_string())?
};
let head_block_root = fork_choice
.get_head(current_slot)
.map_err(|e| format!("Unable to get fork choice head: {:?}", e))?;
let head_block = store
.get_item::<SignedBeaconBlock<TEthSpec>>(&head_block_root)
.map_err(|e| format!("DB error when reading head block: {:?}", e))?
.ok_or_else(|| "Head block not found in store".to_string())?;
let head_state_root = head_block.state_root();
let head_state = store
.get_state(&head_state_root, Some(head_block.slot()))
.map_err(|e| format!("DB error when reading head state: {:?}", e))?
.ok_or_else(|| "Head state not found in store".to_string())?;
let mut canonical_head = BeaconSnapshot {
beacon_block_root: head_block_root,
beacon_block: head_block,
beacon_state_root: head_state_root,
beacon_state: head_state,
};
if canonical_head.beacon_block.state_root() != canonical_head.beacon_state_root {
return Err("beacon_block.state_root != beacon_state".to_string());
}
canonical_head
.beacon_state
.build_all_caches(&self.spec)
.map_err(|e| format!("Failed to build state caches: {:?}", e))?;
if canonical_head.beacon_block.state_root() != canonical_head.beacon_state_root {
return Err("beacon_block.state_root != beacon_state".to_string());
// Perform a check to ensure that the finalization points of the head and fork choice are
// consistent.
//
// This is a sanity check to detect database corruption.
let fc_finalized = fork_choice.finalized_checkpoint();
let head_finalized = canonical_head.beacon_state.finalized_checkpoint;
if fc_finalized != head_finalized {
if head_finalized.root == Hash256::zero()
&& head_finalized.epoch == fc_finalized.epoch
&& fc_finalized.root == genesis_block_root
{
// This is a legal edge-case encountered during genesis.
} else {
return Err(format!(
"Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \
{:?}",
fc_finalized, head_finalized
));
}
}
let pubkey_cache_path = self
@@ -485,26 +534,6 @@ where
.map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e))
})?;
let persisted_fork_choice = store
.get_item::<PersistedForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
.map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?;
let fork_choice = if let Some(persisted) = persisted_fork_choice {
let fc_store =
BeaconForkChoiceStore::from_persisted(persisted.fork_choice_store, store.clone())
.map_err(|e| format!("Unable to load ForkChoiceStore: {:?}", e))?;
ForkChoice::from_persisted(persisted.fork_choice, fc_store)
.map_err(|e| format!("Unable to parse persisted fork choice from disk: {:?}", e))?
} else {
let genesis = &canonical_head;
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), genesis);
ForkChoice::from_genesis(fc_store, &genesis.beacon_block.message)
.map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?
};
let beacon_chain = BeaconChain {
spec: self.spec,
config: self.chain_config,
@@ -533,9 +562,7 @@ where
eth1_chain: self.eth1_chain,
genesis_validators_root: canonical_head.beacon_state.genesis_validators_root,
canonical_head: TimeoutRwLock::new(canonical_head.clone()),
genesis_block_root: self
.genesis_block_root
.ok_or_else(|| "Cannot build without a genesis block root".to_string())?,
genesis_block_root,
fork_choice: RwLock::new(fork_choice),
event_handler: self
.event_handler
@@ -634,11 +661,8 @@ where
/// Requires the state to be initialized.
pub fn testing_slot_clock(self, slot_duration: Duration) -> Result<Self, String> {
let genesis_time = self
.finalized_snapshot
.as_ref()
.ok_or_else(|| "testing_slot_clock requires an initialized state")?
.beacon_state
.genesis_time;
.genesis_time
.ok_or_else(|| "testing_slot_clock requires an initialized state")?;
let slot_clock = TestingSlotClock::new(
Slot::new(0),

View File

@@ -6,6 +6,13 @@ use types::Hash256;
#[derive(Clone, Encode, Decode)]
pub struct PersistedBeaconChain {
/// This value is ignored to resolve the issue described here:
///
/// https://github.com/sigp/lighthouse/pull/1639
///
/// The following PR will clean-up and remove this field:
///
/// https://github.com/sigp/lighthouse/pull/1638
pub canonical_head_block_root: Hash256,
pub genesis_block_root: Hash256,
pub ssz_head_tracker: SszHeadTracker,

View File

@@ -153,8 +153,11 @@ fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b
a.genesis_block_root, b.genesis_block_root,
"genesis_block_root should be equal"
);
let slot = a.slot().unwrap();
assert!(
*a.fork_choice.read() == *b.fork_choice.read(),
"fork_choice should be equal"
a.fork_choice.write().get_head(slot).unwrap()
== b.fork_choice.write().get_head(slot).unwrap(),
"fork_choice heads should be equal"
);
}

View File

@@ -713,11 +713,8 @@ where
.ok_or_else(|| "system_time_slot_clock requires a beacon_chain_builder")?;
let genesis_time = beacon_chain_builder
.finalized_snapshot
.as_ref()
.ok_or_else(|| "system_time_slot_clock requires an initialized beacon state")?
.beacon_state
.genesis_time;
.genesis_time
.ok_or_else(|| "system_time_slot_clock requires an initialized beacon state")?;
let spec = self
.chain_spec

View File

@@ -1,6 +1,7 @@
use crate::peer_manager::{score::PeerAction, PeerManager, PeerManagerEvent};
use crate::rpc::*;
use crate::types::{EnrBitfield, GossipEncoding, GossipKind, GossipTopic, SubnetDiscovery};
use crate::service::METADATA_FILENAME;
use crate::types::{GossipEncoding, GossipKind, GossipTopic, SubnetDiscovery};
use crate::Eth2Enr;
use crate::{error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash};
use futures::prelude::*;
@@ -23,9 +24,9 @@ use libp2p::{
PeerId,
};
use slog::{crit, debug, o, trace, warn};
use ssz::{Decode, Encode};
use ssz::Encode;
use std::fs::File;
use std::io::{Read, Write};
use std::io::Write;
use std::path::PathBuf;
use std::{
collections::VecDeque,
@@ -38,7 +39,6 @@ use types::{EnrForkId, EthSpec, SignedBeaconBlock, SubnetId};
mod handler;
const MAX_IDENTIFY_ADDRESSES: usize = 10;
const METADATA_FILENAME: &str = "metadata";
/// Builds the network behaviour that manages the core protocols of eth2.
/// This core behaviour is managed by `Behaviour` which adds peer management to all core
@@ -58,8 +58,6 @@ pub struct Behaviour<TSpec: EthSpec> {
events: VecDeque<BehaviourEvent<TSpec>>,
/// Queue of peers to disconnect and an optional reason for the disconnection.
peers_to_dc: VecDeque<(PeerId, Option<GoodbyeReason>)>,
/// The current meta data of the node, so respond to pings and get metadata
meta_data: MetaData<TSpec>,
/// A collections of variables accessible outside the network service.
network_globals: Arc<NetworkGlobals<TSpec>>,
/// Keeps track of the current EnrForkId for upgrading gossipsub topics.
@@ -95,8 +93,6 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
.eth2()
.expect("Local ENR must have a fork id");
let meta_data = load_or_build_metadata(&net_conf.network_dir, &log);
let gossipsub = Gossipsub::new(MessageAuthenticity::Anonymous, net_conf.gs_config.clone())
.map_err(|e| format!("Could not construct gossipsub: {:?}", e))?;
@@ -115,7 +111,6 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
.await?,
events: VecDeque::new(),
peers_to_dc: VecDeque::new(),
meta_data,
network_globals,
enr_fork_id,
waker: None,
@@ -407,21 +402,31 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
/// Updates the current meta data of the node to match the local ENR.
fn update_metadata(&mut self) {
self.meta_data.seq_number += 1;
self.meta_data.attnets = self
let local_attnets = self
.peer_manager
.discovery()
.local_enr()
.bitfield::<TSpec>()
.expect("Local discovery must have bitfield");
{
// write lock scope
let mut meta_data = self.network_globals.local_metadata.write();
meta_data.seq_number += 1;
meta_data.attnets = local_attnets;
}
// Save the updated metadata to disk
save_metadata_to_disk(&self.network_dir, self.meta_data.clone(), &self.log);
save_metadata_to_disk(
&self.network_dir,
self.network_globals.local_metadata.read().clone(),
&self.log,
);
}
/// Sends a Ping request to the peer.
fn ping(&mut self, id: RequestId, peer_id: PeerId) {
let ping = crate::rpc::Ping {
data: self.meta_data.seq_number,
data: self.network_globals.local_metadata.read().seq_number,
};
trace!(self.log, "Sending Ping"; "request_id" => id, "peer_id" => peer_id.to_string());
@@ -432,7 +437,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
/// Sends a Pong response to the peer.
fn pong(&mut self, id: PeerRequestId, peer_id: PeerId) {
let ping = crate::rpc::Ping {
data: self.meta_data.seq_number,
data: self.network_globals.local_metadata.read().seq_number,
};
trace!(self.log, "Sending Pong"; "request_id" => id.1, "peer_id" => peer_id.to_string());
let event = RPCCodedResponse::Success(RPCResponse::Pong(ping));
@@ -448,7 +453,9 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
/// Sends a METADATA response to a peer.
fn send_meta_data_response(&mut self, id: PeerRequestId, peer_id: PeerId) {
let event = RPCCodedResponse::Success(RPCResponse::MetaData(self.meta_data.clone()));
let event = RPCCodedResponse::Success(RPCResponse::MetaData(
self.network_globals.local_metadata.read().clone(),
));
self.eth2_rpc.send_response(peer_id, id, event);
}
@@ -1107,45 +1114,8 @@ pub enum BehaviourEvent<TSpec: EthSpec> {
StatusPeer(PeerId),
}
/// Load metadata from persisted file. Return default metadata if loading fails.
fn load_or_build_metadata<E: EthSpec>(network_dir: &PathBuf, log: &slog::Logger) -> MetaData<E> {
// Default metadata
let mut meta_data = MetaData {
seq_number: 0,
attnets: EnrBitfield::<E>::default(),
};
// Read metadata from persisted file if available
let metadata_path = network_dir.join(METADATA_FILENAME);
if let Ok(mut metadata_file) = File::open(metadata_path) {
let mut metadata_ssz = Vec::new();
if metadata_file.read_to_end(&mut metadata_ssz).is_ok() {
match MetaData::<E>::from_ssz_bytes(&metadata_ssz) {
Ok(persisted_metadata) => {
meta_data.seq_number = persisted_metadata.seq_number;
// Increment seq number if persisted attnet is not default
if persisted_metadata.attnets != meta_data.attnets {
meta_data.seq_number += 1;
}
debug!(log, "Loaded metadata from disk");
}
Err(e) => {
debug!(
log,
"Metadata from file could not be decoded";
"error" => format!("{:?}", e),
);
}
}
}
};
debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number);
save_metadata_to_disk(network_dir, meta_data.clone(), &log);
meta_data
}
/// Persist metadata to disk
fn save_metadata_to_disk<E: EthSpec>(dir: &PathBuf, metadata: MetaData<E>, log: &slog::Logger) {
pub fn save_metadata_to_disk<E: EthSpec>(dir: &PathBuf, metadata: MetaData<E>, log: &slog::Logger) {
let _ = std::fs::create_dir_all(&dir);
match File::create(dir.join(METADATA_FILENAME))
.and_then(|mut f| f.write_all(&metadata.as_ssz_bytes()))

View File

@@ -1,5 +1,5 @@
use crate::types::GossipKind;
use crate::Enr;
use crate::{Enr, PeerIdSerialized};
use discv5::{Discv5Config, Discv5ConfigBuilder};
use libp2p::gossipsub::{
GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId, ValidationMode,
@@ -58,6 +58,9 @@ pub struct Config {
/// List of libp2p nodes to initially connect to.
pub libp2p_nodes: Vec<Multiaddr>,
/// List of trusted libp2p nodes which are not scored.
pub trusted_peers: Vec<PeerIdSerialized>,
/// Client version
pub client_version: String,
@@ -139,6 +142,7 @@ impl Default for Config {
boot_nodes_enr: vec![],
boot_nodes_multiaddr: vec![],
libp2p_nodes: vec![],
trusted_peers: vec![],
client_version: lighthouse_version::version_with_platform(),
disable_discovery: false,
topics,

View File

@@ -14,6 +14,50 @@ pub mod rpc;
mod service;
pub mod types;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
use std::str::FromStr;
/// Wrapper over a libp2p `PeerId` which implements `Serialize` and `Deserialize`
#[derive(Clone, Debug)]
pub struct PeerIdSerialized(libp2p::PeerId);
impl From<PeerIdSerialized> for PeerId {
fn from(peer_id: PeerIdSerialized) -> Self {
peer_id.0
}
}
impl FromStr for PeerIdSerialized {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(
PeerId::from_str(s).map_err(|e| format!("Invalid peer id: {}", e))?,
))
}
}
impl Serialize for PeerIdSerialized {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.0.to_string())
}
}
impl<'de> Deserialize<'de> for PeerIdSerialized {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s: String = Deserialize::deserialize(deserializer)?;
Ok(Self(PeerId::from_str(&s).map_err(|e| {
de::Error::custom(format!("Failed to deserialise peer id: {:?}", e))
})?))
}
}
pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage, SubnetDiscovery};
pub use behaviour::{BehaviourEvent, PeerRequestId, Request, Response};
pub use config::Config as NetworkConfig;

View File

@@ -133,9 +133,9 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason) {
// get the peer info
if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
debug!(self.log, "Sending goodbye to peer"; "peer_id" => peer_id.to_string(), "reason" => reason.to_string(), "score" => info.score.to_string());
debug!(self.log, "Sending goodbye to peer"; "peer_id" => peer_id.to_string(), "reason" => reason.to_string(), "score" => info.score().to_string());
// Goodbye's are fatal
info.score.apply_peer_action(PeerAction::Fatal);
info.apply_peer_action_to_score(PeerAction::Fatal);
if info.connection_status.is_connected_or_dialing() {
self.events
.push(PeerManagerEvent::DisconnectPeer(peer_id.clone(), reason));
@@ -155,12 +155,12 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
let mut unban_peer = None;
if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
let previous_state = info.score.state();
info.score.apply_peer_action(action);
if previous_state != info.score.state() {
match info.score.state() {
let previous_state = info.score_state();
info.apply_peer_action_to_score(action);
if previous_state != info.score_state() {
match info.score_state() {
ScoreState::Banned => {
debug!(self.log, "Peer has been banned"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string());
debug!(self.log, "Peer has been banned"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string());
ban_peer = Some(peer_id.clone());
if info.connection_status.is_connected_or_dialing() {
self.events.push(PeerManagerEvent::DisconnectPeer(
@@ -170,7 +170,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
}
}
ScoreState::Disconnected => {
debug!(self.log, "Peer transitioned to disconnect state"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string(), "past_state" => previous_state.to_string());
debug!(self.log, "Peer transitioned to disconnect state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
// disconnect the peer if it's currently connected or dialing
unban_peer = Some(peer_id.clone());
if info.connection_status.is_connected_or_dialing() {
@@ -182,13 +182,13 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
// TODO: Update the peer manager to inform that the peer is disconnecting.
}
ScoreState::Healthy => {
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string(), "past_state" => previous_state.to_string());
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
// unban the peer if it was previously banned.
unban_peer = Some(peer_id.clone());
}
}
} else {
debug!(self.log, "Peer score adjusted"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string());
debug!(self.log, "Peer score adjusted"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string());
}
}
@@ -689,9 +689,9 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
let mut to_unban_peers = Vec::new();
for (peer_id, info) in pdb.peers_mut() {
let previous_state = info.score.state();
let previous_state = info.score_state();
// Update scores
info.score.update();
info.score_update();
/* TODO: Implement logic about connection lifetimes
match info.connection_status {
@@ -746,10 +746,10 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
*/
// handle score transitions
if previous_state != info.score.state() {
match info.score.state() {
if previous_state != info.score_state() {
match info.score_state() {
ScoreState::Banned => {
debug!(self.log, "Peer has been banned"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string());
debug!(self.log, "Peer has been banned"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string());
to_ban_peers.push(peer_id.clone());
if info.connection_status.is_connected_or_dialing() {
self.events.push(PeerManagerEvent::DisconnectPeer(
@@ -759,7 +759,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
}
}
ScoreState::Disconnected => {
debug!(self.log, "Peer transitioned to disconnect state"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string(), "past_state" => previous_state.to_string());
debug!(self.log, "Peer transitioned to disconnect state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
// disconnect the peer if it's currently connected or dialing
to_unban_peers.push(peer_id.clone());
if info.connection_status.is_connected_or_dialing() {
@@ -771,7 +771,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
// TODO: Update peer manager to report that it's disconnecting.
}
ScoreState::Healthy => {
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score.to_string(), "past_state" => previous_state.to_string());
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
// unban the peer if it was previously banned.
to_unban_peers.push(peer_id.clone());
}
@@ -821,7 +821,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
.take(connected_peer_count - self.target_peers)
//we only need to disconnect peers with healthy scores, since the others got already
//disconnected in update_peer_scores
.filter(|(_, info)| info.score.state() == ScoreState::Healthy)
.filter(|(_, info)| info.score_state() == ScoreState::Healthy)
{
self.events.push(PeerManagerEvent::DisconnectPeer(
(*peer_id).clone(),

View File

@@ -1,5 +1,5 @@
use super::client::Client;
use super::score::Score;
use super::score::{PeerAction, Score, ScoreState};
use super::PeerSyncStatus;
use crate::rpc::MetaData;
use crate::Multiaddr;
@@ -19,7 +19,7 @@ pub struct PeerInfo<T: EthSpec> {
/// The connection status of the peer
_status: PeerStatus,
/// The peers reputation
pub score: Score,
score: Score,
/// Client managing this peer
pub client: Client,
/// Connection status of this peer
@@ -36,6 +36,8 @@ pub struct PeerInfo<T: EthSpec> {
/// necessary.
#[serde(skip)]
pub min_ttl: Option<Instant>,
/// Is the peer a trusted peer.
pub is_trusted: bool,
}
impl<TSpec: EthSpec> Default for PeerInfo<TSpec> {
@@ -49,11 +51,21 @@ impl<TSpec: EthSpec> Default for PeerInfo<TSpec> {
sync_status: PeerSyncStatus::Unknown,
meta_data: None,
min_ttl: None,
is_trusted: false,
}
}
}
impl<T: EthSpec> PeerInfo<T> {
    /// Return a PeerInfo struct for a trusted peer.
    ///
    /// Trusted peers start at `Score::max_score()` and have `is_trusted` set, which
    /// exempts them from score decay and peer-action penalties elsewhere in this impl.
    pub fn trusted_peer_info() -> Self {
        PeerInfo {
            score: Score::max_score(),
            is_trusted: true,
            ..Default::default()
        }
    }
/// Returns if the peer is subscribed to a given `SubnetId`
pub fn on_subnet(&self, subnet_id: SubnetId) -> bool {
if let Some(meta_data) = &self.meta_data {
@@ -69,6 +81,38 @@ impl<T: EthSpec> PeerInfo<T> {
pub fn has_future_duty(&self) -> bool {
self.min_ttl.map_or(false, |i| i >= Instant::now())
}
    /// Returns score of the peer.
    pub fn score(&self) -> Score {
        self.score
    }

    /// Returns the state of the peer based on the score.
    pub(crate) fn score_state(&self) -> ScoreState {
        self.score.state()
    }

    /// Applies decay rates to a non-trusted peer's score.
    ///
    /// No-op for trusted peers, so their score stays pinned at the value set in
    /// `trusted_peer_info`.
    pub fn score_update(&mut self) {
        if !self.is_trusted {
            self.score.update()
        }
    }

    /// Apply peer action to a non-trusted peer's score.
    ///
    /// Trusted peers are exempt: even fatal actions leave their score untouched.
    pub fn apply_peer_action_to_score(&mut self, peer_action: PeerAction) {
        if !self.is_trusted {
            self.score.apply_peer_action(peer_action)
        }
    }

    #[cfg(test)]
    /// Add an f64 to a non-trusted peer's score abiding by the limits.
    pub fn add_to_score(&mut self, score: f64) {
        if !self.is_trusted {
            self.score.add(score)
        }
    }
}
#[derive(Clone, Debug, Serialize)]

View File

@@ -86,12 +86,17 @@ impl BannedPeersCount {
}
impl<TSpec: EthSpec> PeerDB<TSpec> {
pub fn new(log: &slog::Logger) -> Self {
pub fn new(trusted_peers: Vec<PeerId>, log: &slog::Logger) -> Self {
// Initialize the peers hashmap with trusted peers
let peers = trusted_peers
.into_iter()
.map(|peer_id| (peer_id, PeerInfo::trusted_peer_info()))
.collect();
Self {
log: log.clone(),
disconnected_peers: 0,
banned_peers_count: BannedPeersCount::new(),
peers: HashMap::new(),
peers,
}
}
@@ -101,7 +106,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
pub fn score(&self, peer_id: &PeerId) -> Score {
self.peers
.get(peer_id)
.map_or(Score::default(), |info| info.score)
.map_or(Score::default(), |info| info.score())
}
/// Returns an iterator over all peers in the db.
@@ -159,7 +164,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
/// Returns true if the Peer is banned.
pub fn is_banned(&self, peer_id: &PeerId) -> bool {
if let Some(peer) = self.peers.get(peer_id) {
match peer.score.state() {
match peer.score().state() {
ScoreState::Banned => true,
_ => self.ip_is_banned(peer),
}
@@ -181,7 +186,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
/// Returns true if the Peer is either banned or in the disconnected state.
pub fn is_banned_or_disconnected(&self, peer_id: &PeerId) -> bool {
if let Some(peer) = self.peers.get(peer_id) {
match peer.score.state() {
match peer.score().state() {
ScoreState::Banned | ScoreState::Disconnected => true,
_ => self.ip_is_banned(peer),
}
@@ -264,7 +269,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
.collect::<Vec<_>>();
connected.shuffle(&mut rand::thread_rng());
connected.sort_by_key(|(_, info)| info.score);
connected.sort_by_key(|(_, info)| info.score());
connected
}
@@ -279,7 +284,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
.iter()
.filter(|(_, info)| is_status(&info.connection_status))
.collect::<Vec<_>>();
by_status.sort_by_key(|(_, info)| info.score);
by_status.sort_by_key(|(_, info)| info.score());
by_status.into_iter().rev().collect()
}
@@ -291,7 +296,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
self.peers
.iter()
.filter(|(_, info)| is_status(&info.connection_status))
.max_by_key(|(_, info)| info.score)
.max_by_key(|(_, info)| info.score())
.map(|(id, _)| id)
}
@@ -455,8 +460,8 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
.filter(|(_, info)| info.connection_status.is_banned())
.min_by(|(_, info_a), (_, info_b)| {
info_a
.score
.partial_cmp(&info_b.score)
.score()
.partial_cmp(&info_b.score())
.unwrap_or(std::cmp::Ordering::Equal)
}) {
self.banned_peers_count
@@ -485,8 +490,8 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
.filter(|(_, info)| info.connection_status.is_disconnected())
.min_by(|(_, info_a), (_, info_b)| {
info_a
.score
.partial_cmp(&info_b.score)
.score()
.partial_cmp(&info_b.score())
.unwrap_or(std::cmp::Ordering::Equal)
})
.map(|(id, _)| id.clone())
@@ -543,13 +548,13 @@ mod tests {
fn add_score<TSpec: EthSpec>(db: &mut PeerDB<TSpec>, peer_id: &PeerId, score: f64) {
if let Some(info) = db.peer_info_mut(peer_id) {
info.score.add(score);
info.add_to_score(score);
}
}
fn get_db() -> PeerDB<M> {
let log = build_log(slog::Level::Debug, false);
PeerDB::new(&log)
PeerDB::new(vec![], &log)
}
#[test]
@@ -938,4 +943,28 @@ mod tests {
assert!(pdb.is_banned(&p1));
assert!(!pdb.is_banned(&p2));
}
#[test]
fn test_trusted_peers_score() {
let trusted_peer = PeerId::random();
let log = build_log(slog::Level::Debug, false);
let mut pdb: PeerDB<M> = PeerDB::new(vec![trusted_peer.clone()], &log);
pdb.connect_ingoing(&trusted_peer);
// Check trusted status and score
assert!(pdb.peer_info(&trusted_peer).unwrap().is_trusted);
assert_eq!(
pdb.peer_info(&trusted_peer).unwrap().score().score(),
Score::max_score().score()
);
// Adding/Subtracting score should have no effect on a trusted peer
add_score(&mut pdb, &trusted_peer, -50.0);
assert_eq!(
pdb.peer_info(&trusted_peer).unwrap().score().score(),
Score::max_score().score()
);
}
}

View File

@@ -145,6 +145,13 @@ impl std::fmt::Display for ScoreState {
}
impl Score {
    /// Return max possible score.
    ///
    /// Used to pin trusted peers at the top of the scoring range; `last_updated`
    /// is set to now so decay (if applied) starts from the current instant.
    pub fn max_score() -> Self {
        Score {
            score: MAX_SCORE,
            last_updated: Instant::now(),
        }
    }
/// Access to the underlying score.
pub fn score(&self) -> f64 {
self.score

View File

@@ -99,6 +99,7 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if self.len.is_none() {
// Decode the length of the uncompressed bytes from an unsigned varint
// Note: length-prefix of > 10 bytes(uint64) would be a decoding error
match self.inner.decode(src).map_err(RPCError::from)? {
Some(length) => {
self.len = Some(length);
@@ -116,7 +117,7 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
let mut reader = FrameDecoder::new(Cursor::new(&src));
let mut decoded_buffer = vec![0; length];
match reader.read_exact(&mut decoded_buffer) {
match read_exact(&mut reader, &mut decoded_buffer, length) {
Ok(()) => {
// `n` is how many bytes the reader read in the compressed stream
let n = reader.get_ref().position();
@@ -194,8 +195,7 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
}
}
Err(e) => match e.kind() {
// Haven't received enough bytes to decode yet
// TODO: check if this is the only Error variant where we return `Ok(None)`
// Haven't received enough bytes to decode yet, wait for more
ErrorKind::UnexpectedEof => Ok(None),
_ => Err(e).map_err(RPCError::from),
},
@@ -277,6 +277,7 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if self.len.is_none() {
// Decode the length of the uncompressed bytes from an unsigned varint
// Note: length-prefix of > 10 bytes(uint64) would be a decoding error
match self.inner.decode(src).map_err(RPCError::from)? {
Some(length) => {
self.len = Some(length as usize);
@@ -293,7 +294,7 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
}
let mut reader = FrameDecoder::new(Cursor::new(&src));
let mut decoded_buffer = vec![0; length];
match reader.read_exact(&mut decoded_buffer) {
match read_exact(&mut reader, &mut decoded_buffer, length) {
Ok(()) => {
// `n` is how many bytes the reader read in the compressed stream
let n = reader.get_ref().position();
@@ -364,8 +365,7 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
}
}
Err(e) => match e.kind() {
// Haven't received enough bytes to decode yet
// TODO: check if this is the only Error variant where we return `Ok(None)`
// Haven't received enough bytes to decode yet, wait for more
ErrorKind::UnexpectedEof => Ok(None),
_ => Err(e).map_err(RPCError::from),
},
@@ -398,7 +398,7 @@ impl<TSpec: EthSpec> OutboundCodec<RPCRequest<TSpec>> for SSZSnappyOutboundCodec
}
let mut reader = FrameDecoder::new(Cursor::new(&src));
let mut decoded_buffer = vec![0; length];
match reader.read_exact(&mut decoded_buffer) {
match read_exact(&mut reader, &mut decoded_buffer, length) {
Ok(()) => {
// `n` is how many bytes the reader read in the compressed stream
let n = reader.get_ref().position();
@@ -409,11 +409,67 @@ impl<TSpec: EthSpec> OutboundCodec<RPCRequest<TSpec>> for SSZSnappyOutboundCodec
)?)))
}
Err(e) => match e.kind() {
// Haven't received enough bytes to decode yet
// TODO: check if this is the only Error variant where we return `Ok(None)`
// Haven't received enough bytes to decode yet, wait for more
ErrorKind::UnexpectedEof => Ok(None),
_ => Err(e).map_err(RPCError::from),
},
}
}
}
/// Wrapper over `read` implementation of `FrameDecoder`.
///
/// Works like the standard `read_exact` implementation, except that it returns an error if the
/// length of compressed bytes read from the underlying reader is greater than the worst case
/// compression length for snappy, given `uncompressed_length`. This bounds how much compressed
/// input a peer can make us consume for a declared payload size.
fn read_exact<T: std::convert::AsRef<[u8]>>(
    reader: &mut FrameDecoder<Cursor<T>>,
    mut buf: &mut [u8],
    uncompressed_length: usize,
) -> Result<(), std::io::Error> {
    // Calculate worst case compression length for given uncompressed length
    let max_compressed_len = snap::raw::max_compress_len(uncompressed_length) as u64;
    // Initialize the position of the reader
    let mut pos = reader.get_ref().position();
    // Total compressed bytes consumed from the underlying cursor so far.
    let mut count = 0;
    while !buf.is_empty() {
        match reader.read(buf) {
            // Decompressed stream ended; the post-loop check reports EOF if `buf` wasn't filled.
            Ok(0) => break,
            Ok(n) => {
                // Advance the destination window past the `n` bytes just written.
                let tmp = buf;
                buf = &mut tmp[n..];
            }
            Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
            Err(e) => return Err(e),
        }
        // Get current position of reader
        let curr_pos = reader.get_ref().position();
        // Note: reader should always advance forward. However, this behaviour
        // depends on the implementation of `snap::FrameDecoder`, so it is better
        // to check to avoid underflow panic.
        // NOTE(review): this strict check also fires if a read (including an
        // `Interrupted` retry) returns without moving the cursor — presumably
        // `FrameDecoder` over a `Cursor` never does that, but worth confirming.
        if curr_pos > pos {
            count += reader.get_ref().position() - pos;
            pos = curr_pos;
        } else {
            return Err(std::io::Error::new(
                ErrorKind::InvalidData,
                "snappy: reader is not advanced forward while reading",
            ));
        }
        if count > max_compressed_len {
            return Err(std::io::Error::new(
                ErrorKind::InvalidData,
                "snappy: compressed data is > max_compressed_len",
            ));
        }
    }
    if !buf.is_empty() {
        // Stream ended before filling the whole buffer.
        Err(std::io::Error::new(
            ErrorKind::UnexpectedEof,
            "failed to fill whole buffer",
        ))
    } else {
        Ok(())
    }
}

View File

@@ -1,8 +1,10 @@
use crate::behaviour::{Behaviour, BehaviourEvent, PeerRequestId, Request, Response};
use crate::behaviour::{
save_metadata_to_disk, Behaviour, BehaviourEvent, PeerRequestId, Request, Response,
};
use crate::discovery::enr;
use crate::multiaddr::Protocol;
use crate::rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId};
use crate::types::{error, GossipKind};
use crate::rpc::{GoodbyeReason, MetaData, RPCResponseErrorCode, RequestId};
use crate::types::{error, EnrBitfield, GossipKind};
use crate::EnrExt;
use crate::{NetworkConfig, NetworkGlobals, PeerAction};
use futures::prelude::*;
@@ -15,6 +17,7 @@ use libp2p::{
PeerId, Swarm, Transport,
};
use slog::{crit, debug, info, o, trace, warn};
use ssz::Decode;
use std::fs::File;
use std::io::prelude::*;
use std::io::{Error, ErrorKind};
@@ -26,6 +29,8 @@ use types::{EnrForkId, EthSpec};
pub const NETWORK_KEY_FILENAME: &str = "key";
/// The maximum simultaneous libp2p connections per peer.
const MAX_CONNECTIONS_PER_PEER: usize = 1;
/// The filename to store our local metadata.
pub const METADATA_FILENAME: &str = "metadata";
/// The types of events than can be obtained from polling the libp2p service.
///
@@ -70,11 +75,20 @@ impl<TSpec: EthSpec> Service<TSpec> {
enr::build_or_load_enr::<TSpec>(local_keypair.clone(), config, enr_fork_id, &log)?;
let local_peer_id = enr.peer_id();
let meta_data = load_or_build_metadata(&config.network_dir, &log);
// set up a collection of variables accessible outside of the network crate
let network_globals = Arc::new(NetworkGlobals::new(
enr.clone(),
config.libp2p_port,
config.discovery_port,
meta_data,
config
.trusted_peers
.iter()
.map(|x| PeerId::from(x.clone()))
.collect(),
&log,
));
@@ -420,3 +434,43 @@ fn strip_peer_id(addr: &mut Multiaddr) {
_ => {}
}
}
/// Load metadata from persisted file. Return default metadata if loading fails.
///
/// On a successful load, the persisted sequence number is reused (and bumped by one
/// if the persisted attnets differ from the default). The resulting metadata is
/// always written back to disk before being returned.
fn load_or_build_metadata<E: EthSpec>(
    network_dir: &std::path::PathBuf,
    log: &slog::Logger,
) -> MetaData<E> {
    // Start from default metadata; fields are overwritten below if a valid
    // persisted copy exists.
    let mut meta_data = MetaData {
        seq_number: 0,
        attnets: EnrBitfield::<E>::default(),
    };

    // Read metadata from persisted file if available
    let metadata_path = network_dir.join(METADATA_FILENAME);
    if let Ok(metadata_ssz) = std::fs::read(&metadata_path) {
        match MetaData::<E>::from_ssz_bytes(&metadata_ssz) {
            Ok(persisted_metadata) => {
                meta_data.seq_number = persisted_metadata.seq_number;
                // Increment seq number if persisted attnet is not default
                if persisted_metadata.attnets != meta_data.attnets {
                    meta_data.seq_number += 1;
                }
                debug!(log, "Loaded metadata from disk");
            }
            Err(e) => {
                debug!(
                    log,
                    "Metadata from file could not be decoded";
                    "error" => format!("{:?}", e),
                );
            }
        }
    }

    debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number);
    save_metadata_to_disk(network_dir, meta_data.clone(), &log);
    meta_data
}

View File

@@ -1,5 +1,6 @@
//! A collection of variables that are accessible outside of the network thread itself.
use crate::peer_manager::PeerDB;
use crate::rpc::MetaData;
use crate::types::SyncState;
use crate::Client;
use crate::EnrExt;
@@ -22,6 +23,8 @@ pub struct NetworkGlobals<TSpec: EthSpec> {
pub listen_port_udp: AtomicU16,
/// The collection of known peers.
pub peers: RwLock<PeerDB<TSpec>>,
// The local meta data of our node.
pub local_metadata: RwLock<MetaData<TSpec>>,
/// The current gossipsub topic subscriptions.
pub gossipsub_subscriptions: RwLock<HashSet<GossipTopic>>,
/// The current sync status of the node.
@@ -29,14 +32,22 @@ pub struct NetworkGlobals<TSpec: EthSpec> {
}
impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
pub fn new(enr: Enr, tcp_port: u16, udp_port: u16, log: &slog::Logger) -> Self {
pub fn new(
enr: Enr,
tcp_port: u16,
udp_port: u16,
local_metadata: MetaData<TSpec>,
trusted_peers: Vec<PeerId>,
log: &slog::Logger,
) -> Self {
NetworkGlobals {
local_enr: RwLock::new(enr.clone()),
peer_id: RwLock::new(enr.peer_id()),
listen_multiaddrs: RwLock::new(Vec::new()),
listen_port_tcp: AtomicU16::new(tcp_port),
listen_port_udp: AtomicU16::new(udp_port),
peers: RwLock::new(PeerDB::new(log)),
local_metadata: RwLock::new(local_metadata),
peers: RwLock::new(PeerDB::new(trusted_peers, log)),
gossipsub_subscriptions: RwLock::new(HashSet::new()),
sync_state: RwLock::new(SyncState::Stalled),
}

View File

@@ -8,6 +8,8 @@ mod tests {
migrate::NullMigrator,
};
use eth2_libp2p::discovery::{build_enr, Keypair};
use eth2_libp2p::rpc::methods::MetaData;
use eth2_libp2p::types::EnrBitfield;
use eth2_libp2p::{
discovery::CombinedKey, CombinedKeyExt, NetworkConfig, NetworkGlobals, SubnetDiscovery,
};
@@ -102,7 +104,14 @@ mod tests {
let enr_key = CombinedKey::from_libp2p(&Keypair::generate_secp256k1()).unwrap();
let enr = build_enr::<MinimalEthSpec>(&enr_key, &config, EnrForkId::default()).unwrap();
let network_globals: NetworkGlobals<MinimalEthSpec> = NetworkGlobals::new(enr, 0, 0, &log);
// Default metadata
let meta_data = MetaData {
seq_number: 0,
attnets: EnrBitfield::<MinimalEthSpec>::default(),
};
let network_globals: NetworkGlobals<MinimalEthSpec> =
NetworkGlobals::new(enr, 0, 0, meta_data, vec![], &log);
AttestationService::new(beacon_chain, Arc::new(network_globals), &log)
}

View File

@@ -131,7 +131,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.help("Disables the discv5 discovery protocol. The node will not search for new peers or participate in the discovery protocol.")
.takes_value(false),
)
.arg(
Arg::with_name("trusted-peers")
.long("trusted-peers")
.value_name("TRUSTED_PEERS")
.help("One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system.")
.takes_value(true),
)
/* REST API related arguments */
.arg(
Arg::with_name("http")

View File

@@ -2,7 +2,7 @@ use beacon_chain::builder::PUBKEY_CACHE_FILENAME;
use clap::ArgMatches;
use clap_utils::BAD_TESTNET_DIR_MESSAGE;
use client::{config::DEFAULT_DATADIR, ClientConfig, ClientGenesis};
use eth2_libp2p::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig};
use eth2_libp2p::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized};
use eth2_testnet_config::Eth2TestnetConfig;
use slog::{crit, info, Logger};
use ssz::Encode;
@@ -343,6 +343,17 @@ pub fn set_network_config(
.collect::<Result<Vec<Multiaddr>, _>>()?;
}
if let Some(trusted_peers_str) = cli_args.value_of("trusted-peers") {
config.trusted_peers = trusted_peers_str
.split(',')
.map(|peer_id| {
peer_id
.parse()
.map_err(|_| format!("Invalid trusted peer id: {}", peer_id))
})
.collect::<Result<Vec<PeerIdSerialized>, _>>()?;
}
if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") {
config.enr_udp_port = Some(
enr_udp_port_str

View File

@@ -15,7 +15,7 @@ rayon = "1.3.0"
[dependencies]
db-key = "0.0.5"
leveldb = "0.8.5"
leveldb = "0.8.6"
parking_lot = "0.11.0"
itertools = "0.9.0"
eth2_ssz = "0.1.2"

View File

@@ -18,17 +18,15 @@ project.
The `Makefile` in the project contains four targets for cross-compiling:
- `build-x86_64`: builds an optimized version for x86_64 processors (suitable
for most users).
- `build-x86_64-portable`: builds a version x86_64 processors which avoids
using some modern CPU instructions that might cause an "illegal
instruction" error on older CPUs.
- `build-aarch64`: builds an optimized version for 64bit ARM processors
- `build-x86_64`: builds an optimized version for x86_64 processors (suitable for most users).
Supports Intel Broadwell (2014) and newer, and AMD Ryzen (2017) and newer.
- `build-x86_64-portable`: builds a version for x86_64 processors which avoids using some modern CPU
instructions that are incompatible with older CPUs. Suitable for pre-Broadwell/Ryzen CPUs.
- `build-aarch64`: builds an optimized version for 64-bit ARM processors
(suitable for Raspberry Pi 4).
- `build-aarch64-portable`: builds a version 64 bit ARM processors which avoids
using some modern CPU instructions that might cause an "illegal
instruction" error on older CPUs.
- `build-aarch64-portable`: builds a version for 64-bit ARM processors which avoids using some
modern CPU instructions. In practice, very few ARM processors lack the instructions necessary to
run the faster non-portable build.
### Example

View File

@@ -1,6 +1,6 @@
[package]
name = "boot_node"
version = "0.2.10"
version = "0.2.11"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"

View File

@@ -10,7 +10,7 @@ use target_info::Target;
/// `Lighthouse/v0.2.0-1419501f2+`
pub const VERSION: &str = git_version!(
args = ["--always", "--dirty=+"],
prefix = "Lighthouse/v0.2.10-",
prefix = "Lighthouse/v0.2.11-",
fallback = "unknown"
);

View File

@@ -3,7 +3,8 @@ use std::marker::PhantomData;
use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice};
use ssz_derive::{Decode, Encode};
use types::{
BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, IndexedAttestation, Slot,
BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256,
IndexedAttestation, Slot,
};
use crate::ForkChoiceStore;
@@ -758,6 +759,11 @@ where
.is_descendant(self.fc_store.finalized_checkpoint().root, block_root)
}
/// Return the current finalized checkpoint.
pub fn finalized_checkpoint(&self) -> Checkpoint {
*self.fc_store.finalized_checkpoint()
}
/// Returns the latest message for a given validator, if any.
///
/// Returns `(block_root, block_slot)`.

View File

@@ -0,0 +1,70 @@
use crate::{Result, SafeArith};
/// Extension trait for iterators, providing a safe replacement for `sum`.
pub trait SafeArithIter<T> {
    /// Sum all items of the iterator, returning `Err(ArithError::Overflow)`
    /// instead of wrapping or panicking when the running total overflows.
    fn safe_sum(self) -> Result<T>;
}
impl<I, T> SafeArithIter<T> for I
where
    I: Iterator<Item = T> + Sized,
    T: SafeArith,
{
    /// Accumulate every item with `safe_add`, starting from `T::ZERO` and
    /// short-circuiting on the first overflow.
    fn safe_sum(self) -> Result<T> {
        let mut total = T::ZERO;
        for item in self {
            total = total.safe_add(item)?;
        }
        Ok(total)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::ArithError;

    // An empty iterator sums to the additive identity, not an error.
    #[test]
    fn empty_sum() {
        let v: Vec<u64> = vec![];
        assert_eq!(v.into_iter().safe_sum(), Ok(0));
    }

    // For in-range values, `safe_sum` agrees with the standard `sum`.
    #[test]
    fn unsigned_sum_small() {
        let v = vec![400u64, 401, 402, 403, 404, 405, 406];
        assert_eq!(
            v.iter().copied().safe_sum().unwrap(),
            v.iter().copied().sum()
        );
    }

    // Unsigned overflow is surfaced as an error rather than wrapping/panicking.
    #[test]
    fn unsigned_sum_overflow() {
        let v = vec![u64::MAX, 1];
        assert_eq!(v.into_iter().safe_sum(), Err(ArithError::Overflow));
    }

    // Signed values that cancel out sum to zero.
    #[test]
    fn signed_sum_small() {
        let v = vec![-1i64, -2i64, -3i64, 3, 2, 1];
        assert_eq!(v.into_iter().safe_sum(), Ok(0));
    }

    // Overflow above `i16::MAX` is detected mid-stream, even though the later
    // items are small.
    #[test]
    fn signed_sum_overflow_above() {
        let v = vec![1, 2, 3, 4, i16::MAX, 0, 1, 2, 3];
        assert_eq!(v.into_iter().safe_sum(), Err(ArithError::Overflow));
    }

    // Overflow below `i16::MIN` is detected as well.
    #[test]
    fn signed_sum_overflow_below() {
        let v = vec![i16::MIN, -1];
        assert_eq!(v.into_iter().safe_sum(), Err(ArithError::Overflow));
    }

    // Left-to-right partial sums touch both i64 extremes (MIN ... -1 ... MAX)
    // without any single step overflowing, so this must succeed and match `sum`.
    #[test]
    fn signed_sum_almost_overflow() {
        let v = vec![i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1];
        assert_eq!(
            v.iter().copied().safe_sum().unwrap(),
            v.iter().copied().sum()
        );
    }
}

View File

@@ -1,4 +1,7 @@
//! Library for safe arithmetic on integers, avoiding overflow and division by zero.
mod iter;
pub use iter::SafeArithIter;
/// Error representing the failure of an arithmetic operation.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
@@ -7,7 +10,7 @@ pub enum ArithError {
DivisionByZero,
}
type Result<T> = std::result::Result<T, ArithError>;
pub type Result<T> = std::result::Result<T, ArithError>;
macro_rules! assign_method {
($name:ident, $op:ident, $doc_op:expr) => {

View File

@@ -1,4 +1,4 @@
use safe_arith::SafeArith;
use safe_arith::{SafeArith, SafeArithIter};
use types::{BeaconStateError as Error, *};
/// Process slashings.
@@ -10,7 +10,7 @@ pub fn process_slashings<T: EthSpec>(
spec: &ChainSpec,
) -> Result<(), Error> {
let epoch = state.current_epoch();
let sum_slashings = state.get_all_slashings().iter().sum::<u64>();
let sum_slashings = state.get_all_slashings().iter().copied().safe_sum()?;
for (index, validator) in state.validators.iter().enumerate() {
if validator.slashed

View File

@@ -17,7 +17,7 @@ eth2_hashing = "0.1.0"
ethereum-types = "0.9.1"
arbitrary = { version = "0.4.4", features = ["derive"], optional = true }
zeroize = { version = "1.0.0", features = ["zeroize_derive"] }
blst = { git = "https://github.com/sigp/blst.git", rev = "284f7059642851c760a09fb1708bcb59c7ca323c" }
blst = { git = "https://github.com/supranational/blst.git", rev = "a8398ed284b0d78858302ad1ceb25a80e7bbe535" }
[features]
default = ["supranational"]
@@ -25,3 +25,4 @@ fake_crypto = []
milagro = []
supranational = []
supranational-portable = ["supranational", "blst/portable"]
supranational-force-adx = ["supranational", "blst/force-adx"]

View File

@@ -1,7 +1,7 @@
[package]
name = "lcli"
description = "Lighthouse CLI (modeled after zcli)"
version = "0.2.10"
version = "0.2.11"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

View File

@@ -1,6 +1,6 @@
[package]
name = "lighthouse"
version = "0.2.10"
version = "0.2.11"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"
@@ -9,6 +9,9 @@ edition = "2018"
write_ssz_files = ["beacon_node/write_ssz_files"]
# Compiles the BLS crypto code so that the binary is portable across machines.
portable = ["bls/supranational-portable"]
# Compiles BLST so that it always uses ADX instructions.
# Compatible with processors from 2013 onwards.
modern = ["bls/supranational-force-adx"]
# Uses the slower Milagro BLS library, which is written in native Rust.
milagro = ["bls/milagro"]

View File

@@ -16,6 +16,8 @@ pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";
fn bls_library_name() -> &'static str {
if cfg!(feature = "portable") {
"blst-portable"
} else if cfg!(feature = "modern") {
"blst-modern"
} else if cfg!(feature = "milagro") {
"milagro"
} else {
@@ -181,6 +183,13 @@ fn run<E: EthSpec>(
));
}
#[cfg(all(feature = "modern", target_arch = "x86_64"))]
if !std::is_x86_feature_detected!("adx") {
return Err(format!(
"CPU incompatible with optimized binary, please try Lighthouse portable build"
));
}
let debug_level = matches
.value_of("debug-level")
.ok_or_else(|| "Expected --debug-level flag".to_string())?;

View File

@@ -1,6 +1,6 @@
[package]
name = "validator_client"
version = "0.2.10"
version = "0.2.11"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>", "Luke Anderson <luke@lukeanderson.com.au>"]
edition = "2018"