mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-04 01:01:41 +00:00
Compare commits
27 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
46a06069c6 | ||
|
|
c00e6c2c6f | ||
|
|
8772c02fa0 | ||
|
|
c7ac967d5a | ||
|
|
cb26c15eb6 | ||
|
|
fcb4893f72 | ||
|
|
11076912d9 | ||
|
|
7404f1ce54 | ||
|
|
f0c9339153 | ||
|
|
9ee71d6fec | ||
|
|
5ed4c1daca | ||
|
|
f8da151b0b | ||
|
|
556190ff46 | ||
|
|
b711cfe2bb | ||
|
|
2f9999752e | ||
|
|
b3fc48e887 | ||
|
|
b0e9e3dcef | ||
|
|
63fe5542e7 | ||
|
|
3574bad6cd | ||
|
|
78744cd07a | ||
|
|
492ce07ed3 | ||
|
|
e004b98eab | ||
|
|
e2ae5010a6 | ||
|
|
4c4dad9fb5 | ||
|
|
157e31027a | ||
|
|
7e7fad5734 | ||
|
|
0a0f4daf9d |
711
Cargo.lock
generated
711
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -63,10 +63,15 @@ members = [
|
||||
"lighthouse",
|
||||
"lighthouse/environment",
|
||||
|
||||
"testing/simulator",
|
||||
"remote_signer",
|
||||
"remote_signer/backend",
|
||||
"remote_signer/client",
|
||||
|
||||
"testing/ef_tests",
|
||||
"testing/eth1_test_rig",
|
||||
"testing/node_test_rig",
|
||||
"testing/remote_signer_test",
|
||||
"testing/simulator",
|
||||
"testing/state_transition_vectors",
|
||||
|
||||
"validator_client",
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM rust:1.45.1 AS builder
|
||||
FROM rust:1.47.0 AS builder
|
||||
RUN apt-get update && apt-get install -y cmake
|
||||
COPY . lighthouse
|
||||
ARG PORTABLE
|
||||
|
||||
1
Makefile
1
Makefile
@@ -74,7 +74,6 @@ build-release-tarballs:
|
||||
$(MAKE) build-aarch64-portable
|
||||
$(call tarball_release_binary,$(BUILD_PATH_AARCH64),$(AARCH64_TAG),"-portable")
|
||||
|
||||
|
||||
# Runs the full workspace tests in **release**, without downloading any additional
|
||||
# test vectors.
|
||||
test-release:
|
||||
|
||||
20
README.md
20
README.md
@@ -9,19 +9,23 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim
|
||||
[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da
|
||||
[Chat Link]: https://discord.gg/cyAszAh
|
||||
[Book Status]:https://img.shields.io/badge/user--docs-master-informational
|
||||
[Book Link]: http://lighthouse-book.sigmaprime.io/
|
||||
[Book Link]: https://lighthouse-book.sigmaprime.io
|
||||
|
||||
[Documentation](http://lighthouse-book.sigmaprime.io/)
|
||||
[Documentation](https://lighthouse-book.sigmaprime.io)
|
||||
|
||||

|
||||
|
||||
**🚨🚨🚨 Note: Lighthouse is not *yet* ready to produce mainnet deposits. The developers will require some
|
||||
time to test against the mainnet deposit contract, once it is released. DO NOT SUBMIT VALIDATOR
|
||||
DEPOSITS WITH LIGHTHOUSE. 🚨🚨🚨**
|
||||
|
||||
## Overview
|
||||
|
||||
Lighthouse is:
|
||||
|
||||
- Fully open-source, licensed under Apache 2.0.
|
||||
- Security-focused. Fuzzing has begun and security reviews are underway.
|
||||
- Built in [Rust](https://www.rust-lang.org/), a modern language providing unique safety guarantees and
|
||||
- Built in [Rust](https://www.rust-lang.org), a modern language providing unique safety guarantees and
|
||||
excellent performance (comparable to C++).
|
||||
- Funded by various organisations, including Sigma Prime, the
|
||||
Ethereum Foundation, ConsenSys and private individuals.
|
||||
@@ -30,6 +34,12 @@ Lighthouse is:
|
||||
|
||||
Like all Ethereum 2.0 clients, Lighthouse is a work-in-progress.
|
||||
|
||||
## Eth2 Deposit Contract
|
||||
|
||||
The Lighthouse team acknowledges
|
||||
[`0x00000000219ab540356cBB839Cbe05303d7705Fa`](https://etherscan.io/address/0x00000000219ab540356cbb839cbe05303d7705fa)
|
||||
as the canonical Eth2 deposit contract address.
|
||||
|
||||
## Development Status
|
||||
|
||||
Current development overview:
|
||||
@@ -54,7 +64,7 @@ Current development overview:
|
||||
|
||||
## Documentation
|
||||
|
||||
The [Lighthouse Book](http://lighthouse-book.sigmaprime.io/) contains information
|
||||
The [Lighthouse Book](https://lighthouse-book.sigmaprime.io) contains information
|
||||
for testnet users and developers.
|
||||
|
||||
If you'd like some background on Sigma Prime, please see the [Lighthouse Update
|
||||
@@ -66,7 +76,7 @@ If you'd like some background on Sigma Prime, please see the [Lighthouse Update
|
||||
Lighthouse welcomes contributors.
|
||||
|
||||
If you are looking to contribute, please head to the
|
||||
[Contributing](http://lighthouse-book.sigmaprime.io/contributing.html) section
|
||||
[Contributing](https://lighthouse-book.sigmaprime.io/contributing.html) section
|
||||
of the Lighthouse book.
|
||||
|
||||
## Contact
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "account_manager"
|
||||
version = "0.3.3"
|
||||
version = "0.3.4"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
|
||||
@@ -25,6 +25,8 @@ pub const STORE_WITHDRAW_FLAG: &str = "store-withdrawal-keystore";
|
||||
pub const COUNT_FLAG: &str = "count";
|
||||
pub const AT_MOST_FLAG: &str = "at-most";
|
||||
pub const WALLET_PASSWORD_PROMPT: &str = "Enter your wallet's password:";
|
||||
pub const MAINNET_WARNING: &str = "These are *not* mainnet validators! Submitting a mainnet \
|
||||
deposit for this validator will result in lost ETH.";
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
App::new(CMD)
|
||||
@@ -229,6 +231,8 @@ pub fn cli_run<T: EthSpec>(
|
||||
.map_err(|e| format!("Unable to build validator directory: {:?}", e))?;
|
||||
|
||||
println!("{}/{}\t{}", i + 1, n, voting_pubkey.to_hex_string());
|
||||
|
||||
println!("{}", MAINNET_WARNING);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "beacon_node"
|
||||
version = "0.3.3"
|
||||
version = "0.3.4"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
|
||||
edition = "2018"
|
||||
|
||||
|
||||
@@ -984,9 +984,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
///
|
||||
/// - `VerifiedUnaggregatedAttestation`
|
||||
/// - `VerifiedAggregatedAttestation`
|
||||
pub fn apply_attestation_to_fork_choice<'a>(
|
||||
pub fn apply_attestation_to_fork_choice(
|
||||
&self,
|
||||
verified: &'a impl SignatureVerifiedAttestation<T>,
|
||||
verified: &impl SignatureVerifiedAttestation<T>,
|
||||
) -> Result<(), Error> {
|
||||
let _timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ use crate::errors::BeaconChainError;
|
||||
use crate::head_tracker::{HeadTracker, SszHeadTracker};
|
||||
use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT};
|
||||
use parking_lot::Mutex;
|
||||
use slog::{debug, warn, Logger};
|
||||
use slog::{debug, error, info, warn, Logger};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::mem;
|
||||
use std::sync::mpsc;
|
||||
@@ -29,7 +29,6 @@ pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
|
||||
thread::JoinHandle<()>,
|
||||
)>,
|
||||
>,
|
||||
latest_checkpoint: Arc<Mutex<Checkpoint>>,
|
||||
/// Genesis block root, for persisting the `PersistedBeaconChain`.
|
||||
genesis_block_root: Hash256,
|
||||
log: Logger,
|
||||
@@ -74,7 +73,6 @@ pub struct MigrationNotification<E: EthSpec> {
|
||||
finalized_state: BeaconState<E>,
|
||||
finalized_checkpoint: Checkpoint,
|
||||
head_tracker: Arc<HeadTracker>,
|
||||
latest_checkpoint: Arc<Mutex<Checkpoint>>,
|
||||
genesis_block_root: Hash256,
|
||||
}
|
||||
|
||||
@@ -91,14 +89,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
} else {
|
||||
Some(Mutex::new(Self::spawn_thread(db.clone(), log.clone())))
|
||||
};
|
||||
let latest_checkpoint = Arc::new(Mutex::new(Checkpoint {
|
||||
root: Hash256::zero(),
|
||||
epoch: Epoch::new(0),
|
||||
}));
|
||||
Self {
|
||||
db,
|
||||
tx_thread,
|
||||
latest_checkpoint,
|
||||
genesis_block_root,
|
||||
log,
|
||||
}
|
||||
@@ -121,7 +114,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
finalized_state,
|
||||
finalized_checkpoint,
|
||||
head_tracker,
|
||||
latest_checkpoint: self.latest_checkpoint.clone(),
|
||||
genesis_block_root: self.genesis_block_root,
|
||||
};
|
||||
|
||||
@@ -164,7 +156,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
notif: MigrationNotification<E>,
|
||||
log: &Logger,
|
||||
) {
|
||||
let mut latest_checkpoint = notif.latest_checkpoint.lock();
|
||||
let finalized_state_root = notif.finalized_state_root;
|
||||
let finalized_state = notif.finalized_state;
|
||||
|
||||
@@ -173,11 +164,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
notif.head_tracker,
|
||||
finalized_state_root,
|
||||
&finalized_state,
|
||||
*latest_checkpoint,
|
||||
notif.finalized_checkpoint,
|
||||
notif.genesis_block_root,
|
||||
log,
|
||||
) {
|
||||
Ok(PruningOutcome::Successful) => {}
|
||||
Ok(PruningOutcome::DeferredConcurrentMutation) => {
|
||||
warn!(
|
||||
log,
|
||||
@@ -186,18 +177,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
);
|
||||
return;
|
||||
}
|
||||
Ok(PruningOutcome::Successful) => {
|
||||
// Update the migrator's idea of the latest checkpoint only if the
|
||||
// pruning process was successful.
|
||||
*latest_checkpoint = notif.finalized_checkpoint;
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(log, "Block pruning failed"; "error" => format!("{:?}", e));
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
match migrate_database(db, finalized_state_root.into(), &finalized_state) {
|
||||
match migrate_database(db.clone(), finalized_state_root.into(), &finalized_state) {
|
||||
Ok(()) => {}
|
||||
Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
|
||||
debug!(
|
||||
@@ -212,8 +198,20 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
"Database migration failed";
|
||||
"error" => format!("{:?}", e)
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// Finally, compact the database so that new free space is properly reclaimed.
|
||||
debug!(log, "Starting database compaction");
|
||||
if let Err(e) = db.compact() {
|
||||
error!(
|
||||
log,
|
||||
"Database compaction failed";
|
||||
"error" => format!("{:?}", e)
|
||||
);
|
||||
}
|
||||
debug!(log, "Database compaction complete");
|
||||
}
|
||||
|
||||
/// Spawn a new child thread to run the migration process.
|
||||
@@ -244,11 +242,18 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
head_tracker: Arc<HeadTracker>,
|
||||
new_finalized_state_hash: BeaconStateHash,
|
||||
new_finalized_state: &BeaconState<E>,
|
||||
old_finalized_checkpoint: Checkpoint,
|
||||
new_finalized_checkpoint: Checkpoint,
|
||||
genesis_block_root: Hash256,
|
||||
log: &Logger,
|
||||
) -> Result<PruningOutcome, BeaconChainError> {
|
||||
let old_finalized_checkpoint =
|
||||
store
|
||||
.load_pruning_checkpoint()?
|
||||
.unwrap_or_else(|| Checkpoint {
|
||||
epoch: Epoch::new(0),
|
||||
root: Hash256::zero(),
|
||||
});
|
||||
|
||||
let old_finalized_slot = old_finalized_checkpoint
|
||||
.epoch
|
||||
.start_slot(E::slots_per_epoch());
|
||||
@@ -267,15 +272,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
.into());
|
||||
}
|
||||
|
||||
debug!(
|
||||
info!(
|
||||
log,
|
||||
"Starting database pruning";
|
||||
"old_finalized_epoch" => old_finalized_checkpoint.epoch,
|
||||
"old_finalized_root" => format!("{:?}", old_finalized_checkpoint.root),
|
||||
"new_finalized_epoch" => new_finalized_checkpoint.epoch,
|
||||
"new_finalized_root" => format!("{:?}", new_finalized_checkpoint.root),
|
||||
);
|
||||
|
||||
// For each slot between the new finalized checkpoint and the old finalized checkpoint,
|
||||
// collect the beacon block root and state root of the canonical chain.
|
||||
let newly_finalized_chain: HashMap<Slot, (SignedBeaconBlockHash, BeaconStateHash)> =
|
||||
@@ -303,7 +305,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
let mut abandoned_heads: HashSet<Hash256> = HashSet::new();
|
||||
|
||||
let heads = head_tracker.heads();
|
||||
debug!(log, "Pruning {} heads", heads.len());
|
||||
debug!(
|
||||
log,
|
||||
"Extra pruning information";
|
||||
"old_finalized_root" => format!("{:?}", old_finalized_checkpoint.root),
|
||||
"new_finalized_root" => format!("{:?}", new_finalized_checkpoint.root),
|
||||
"head_count" => heads.len(),
|
||||
);
|
||||
|
||||
for (head_hash, head_slot) in heads {
|
||||
let mut potentially_abandoned_head = Some(head_hash);
|
||||
@@ -457,8 +465,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
||||
drop(head_tracker_lock);
|
||||
kv_batch.push(persisted_head.as_kv_store_op(BEACON_CHAIN_DB_KEY));
|
||||
|
||||
// Persist the new finalized checkpoint as the pruning checkpoint.
|
||||
kv_batch.push(store.pruning_checkpoint_store_op(new_finalized_checkpoint));
|
||||
|
||||
store.hot_db.do_atomically(kv_batch)?;
|
||||
debug!(log, "Database pruning complete");
|
||||
info!(log, "Database pruning complete");
|
||||
|
||||
Ok(PruningOutcome::Successful)
|
||||
}
|
||||
|
||||
@@ -16,14 +16,15 @@ use eth2_libp2p::NetworkGlobals;
|
||||
use genesis::{interop_genesis_state, Eth1GenesisService};
|
||||
use network::{NetworkConfig, NetworkMessage, NetworkService};
|
||||
use parking_lot::Mutex;
|
||||
use slog::{debug, info};
|
||||
use slog::{debug, info, warn};
|
||||
use ssz::Decode;
|
||||
use std::net::SocketAddr;
|
||||
use std::net::TcpListener;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use timer::spawn_timer;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio::sync::{mpsc::UnboundedSender, oneshot};
|
||||
use types::{
|
||||
test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec,
|
||||
SignedBeaconBlockHash,
|
||||
@@ -202,6 +203,53 @@ where
|
||||
context.eth2_config().spec.clone(),
|
||||
);
|
||||
|
||||
// If the HTTP API server is enabled, start an instance of it where it only
|
||||
// contains a reference to the eth1 service (all non-eth1 endpoints will fail
|
||||
// gracefully).
|
||||
//
|
||||
// Later in this function we will shutdown this temporary "waiting for genesis"
|
||||
// server so the real one can be started later.
|
||||
let (exit_tx, exit_rx) = oneshot::channel::<()>();
|
||||
let http_listen_opt = if self.http_api_config.enabled {
|
||||
#[allow(clippy::type_complexity)]
|
||||
let ctx: Arc<
|
||||
http_api::Context<
|
||||
Witness<
|
||||
TSlotClock,
|
||||
TEth1Backend,
|
||||
TEthSpec,
|
||||
TEventHandler,
|
||||
THotStore,
|
||||
TColdStore,
|
||||
>,
|
||||
>,
|
||||
> = Arc::new(http_api::Context {
|
||||
config: self.http_api_config.clone(),
|
||||
chain: None,
|
||||
network_tx: None,
|
||||
network_globals: None,
|
||||
eth1_service: Some(genesis_service.eth1_service.clone()),
|
||||
log: context.log().clone(),
|
||||
});
|
||||
|
||||
// Discard the error from the oneshot.
|
||||
let exit_future = async {
|
||||
let _ = exit_rx.await;
|
||||
};
|
||||
|
||||
let (listen_addr, server) = http_api::serve(ctx, exit_future)
|
||||
.map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;
|
||||
|
||||
context
|
||||
.clone()
|
||||
.executor
|
||||
.spawn_without_exit(async move { server.await }, "http-api");
|
||||
|
||||
Some(listen_addr)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let genesis_state = genesis_service
|
||||
.wait_for_genesis_state(
|
||||
Duration::from_millis(ETH1_GENESIS_UPDATE_INTERVAL_MILLIS),
|
||||
@@ -209,6 +257,22 @@ where
|
||||
)
|
||||
.await?;
|
||||
|
||||
let _ = exit_tx.send(());
|
||||
|
||||
if let Some(http_listen) = http_listen_opt {
|
||||
// This is a bit of a hack to ensure that the HTTP server has indeed shutdown.
|
||||
//
|
||||
// We will restart it again after we've finished setting up for genesis.
|
||||
while TcpListener::bind(http_listen).is_err() {
|
||||
warn!(
|
||||
context.log(),
|
||||
"Waiting for HTTP server port to open";
|
||||
"port" => http_listen
|
||||
);
|
||||
tokio::time::delay_for(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
|
||||
builder
|
||||
.genesis_state(genesis_state)
|
||||
.map(|v| (v, Some(genesis_service.into_core_service())))?
|
||||
|
||||
@@ -42,9 +42,9 @@ regex = "1.3.9"
|
||||
[dependencies.libp2p]
|
||||
#version = "0.23.0"
|
||||
git = "https://github.com/sigp/rust-libp2p"
|
||||
rev = "8c6ce6eb1228de568568f6cd72fb134dea5f9669"
|
||||
rev = "b6278e1ba7b6bcfad1eef300f72148705da5d8d2"
|
||||
default-features = false
|
||||
features = ["websocket", "identify", "mplex", "noise", "gossipsub", "dns", "tcp-tokio"]
|
||||
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"]
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "0.2.22", features = ["full"] }
|
||||
|
||||
@@ -0,0 +1,346 @@
|
||||
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
|
||||
use crate::{error, TopicHash};
|
||||
use libp2p::gossipsub::{
|
||||
GenericGossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds,
|
||||
TopicScoreParams,
|
||||
};
|
||||
use std::cmp::max;
|
||||
use std::collections::HashMap;
|
||||
use std::marker::PhantomData;
|
||||
use std::time::Duration;
|
||||
use types::{ChainSpec, EnrForkId, EthSpec, Slot, SubnetId};
|
||||
|
||||
const MAX_IN_MESH_SCORE: f64 = 10.0;
|
||||
const MAX_FIRST_MESSAGE_DELIVERIES_SCORE: f64 = 40.0;
|
||||
const BEACON_BLOCK_WEIGHT: f64 = 0.5;
|
||||
const BEACON_AGGREGATE_PROOF_WEIGHT: f64 = 0.5;
|
||||
const VOLUNTARY_EXIT_WEIGHT: f64 = 0.05;
|
||||
const PROPOSER_SLASHING_WEIGHT: f64 = 0.05;
|
||||
const ATTESTER_SLASHING_WEIGHT: f64 = 0.05;
|
||||
|
||||
pub struct PeerScoreSettings<TSpec: EthSpec> {
|
||||
slot: Duration,
|
||||
epoch: Duration,
|
||||
|
||||
beacon_attestation_subnet_weight: f64,
|
||||
max_positive_score: f64,
|
||||
|
||||
decay_interval: Duration,
|
||||
decay_to_zero: f64,
|
||||
|
||||
mesh_n: usize,
|
||||
max_committees_per_slot: usize,
|
||||
target_committee_size: usize,
|
||||
target_aggregators_per_committee: usize,
|
||||
attestation_subnet_count: u64,
|
||||
phantom: PhantomData<TSpec>,
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
|
||||
pub fn new<T>(
|
||||
chain_spec: &ChainSpec,
|
||||
gs_config: &GenericGossipsubConfig<T>,
|
||||
) -> PeerScoreSettings<TSpec> {
|
||||
let slot = Duration::from_millis(chain_spec.milliseconds_per_slot);
|
||||
let beacon_attestation_subnet_weight = 1.0 / chain_spec.attestation_subnet_count as f64;
|
||||
let max_positive_score = (MAX_IN_MESH_SCORE + MAX_FIRST_MESSAGE_DELIVERIES_SCORE)
|
||||
* (BEACON_BLOCK_WEIGHT
|
||||
+ BEACON_AGGREGATE_PROOF_WEIGHT
|
||||
+ beacon_attestation_subnet_weight * chain_spec.attestation_subnet_count as f64
|
||||
+ VOLUNTARY_EXIT_WEIGHT
|
||||
+ PROPOSER_SLASHING_WEIGHT
|
||||
+ ATTESTER_SLASHING_WEIGHT);
|
||||
|
||||
PeerScoreSettings {
|
||||
slot,
|
||||
epoch: slot * TSpec::slots_per_epoch() as u32,
|
||||
beacon_attestation_subnet_weight,
|
||||
max_positive_score,
|
||||
decay_interval: slot,
|
||||
decay_to_zero: 0.01,
|
||||
mesh_n: gs_config.mesh_n(),
|
||||
max_committees_per_slot: chain_spec.max_committees_per_slot,
|
||||
target_committee_size: chain_spec.target_committee_size,
|
||||
target_aggregators_per_committee: chain_spec.target_aggregators_per_committee as usize,
|
||||
attestation_subnet_count: chain_spec.attestation_subnet_count,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_peer_score_params(
|
||||
&self,
|
||||
active_validators: usize,
|
||||
thresholds: &PeerScoreThresholds,
|
||||
enr_fork_id: &EnrForkId,
|
||||
current_slot: Slot,
|
||||
) -> error::Result<PeerScoreParams> {
|
||||
let mut params = PeerScoreParams::default();
|
||||
|
||||
params.decay_interval = self.decay_interval;
|
||||
params.decay_to_zero = self.decay_to_zero;
|
||||
params.retain_score = self.epoch * 100;
|
||||
params.app_specific_weight = 1.0;
|
||||
params.ip_colocation_factor_threshold = 3.0;
|
||||
params.behaviour_penalty_threshold = 6.0;
|
||||
|
||||
params.behaviour_penalty_decay = self.score_parameter_decay(self.epoch * 10);
|
||||
|
||||
let target_value = Self::decay_convergence(
|
||||
params.behaviour_penalty_decay,
|
||||
10.0 / TSpec::slots_per_epoch() as f64,
|
||||
) - params.behaviour_penalty_threshold;
|
||||
params.behaviour_penalty_weight = thresholds.gossip_threshold / target_value.powi(2);
|
||||
|
||||
params.topic_score_cap = self.max_positive_score * 0.5;
|
||||
params.ip_colocation_factor_weight = -params.topic_score_cap;
|
||||
|
||||
params.topics = HashMap::new();
|
||||
|
||||
let get_hash = |kind: GossipKind| -> TopicHash {
|
||||
let topic: Topic =
|
||||
GossipTopic::new(kind, GossipEncoding::default(), enr_fork_id.fork_digest).into();
|
||||
topic.hash()
|
||||
};
|
||||
|
||||
//first all fixed topics
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::VoluntaryExit),
|
||||
Self::get_topic_params(
|
||||
self,
|
||||
VOLUNTARY_EXIT_WEIGHT,
|
||||
4.0 / TSpec::slots_per_epoch() as f64,
|
||||
self.epoch * 100,
|
||||
None,
|
||||
),
|
||||
);
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::AttesterSlashing),
|
||||
Self::get_topic_params(
|
||||
self,
|
||||
ATTESTER_SLASHING_WEIGHT,
|
||||
1.0 / 5.0 / TSpec::slots_per_epoch() as f64,
|
||||
self.epoch * 100,
|
||||
None,
|
||||
),
|
||||
);
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::ProposerSlashing),
|
||||
Self::get_topic_params(
|
||||
self,
|
||||
PROPOSER_SLASHING_WEIGHT,
|
||||
1.0 / 5.0 / TSpec::slots_per_epoch() as f64,
|
||||
self.epoch * 100,
|
||||
None,
|
||||
),
|
||||
);
|
||||
|
||||
//dynamic topics
|
||||
let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) =
|
||||
self.get_dynamic_topic_params(active_validators, current_slot)?;
|
||||
|
||||
params
|
||||
.topics
|
||||
.insert(get_hash(GossipKind::BeaconBlock), beacon_block_params);
|
||||
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::BeaconAggregateAndProof),
|
||||
beacon_aggregate_proof_params,
|
||||
);
|
||||
|
||||
for i in 0..self.attestation_subnet_count {
|
||||
params.topics.insert(
|
||||
get_hash(GossipKind::Attestation(SubnetId::new(i))),
|
||||
beacon_attestation_subnet_params.clone(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(params)
|
||||
}
|
||||
|
||||
pub fn get_dynamic_topic_params(
|
||||
&self,
|
||||
active_validators: usize,
|
||||
current_slot: Slot,
|
||||
) -> error::Result<(TopicScoreParams, TopicScoreParams, TopicScoreParams)> {
|
||||
let (aggregators_per_slot, committees_per_slot) =
|
||||
self.expected_aggregator_count_per_slot(active_validators)?;
|
||||
let multiple_bursts_per_subnet_per_epoch = committees_per_slot as u64
|
||||
>= 2 * self.attestation_subnet_count / TSpec::slots_per_epoch();
|
||||
|
||||
let beacon_block_params = Self::get_topic_params(
|
||||
self,
|
||||
BEACON_BLOCK_WEIGHT,
|
||||
1.0,
|
||||
self.epoch * 20,
|
||||
Some((TSpec::slots_per_epoch() * 5, 3.0, self.epoch, current_slot)),
|
||||
);
|
||||
|
||||
let beacon_aggregate_proof_params = Self::get_topic_params(
|
||||
self,
|
||||
BEACON_AGGREGATE_PROOF_WEIGHT,
|
||||
aggregators_per_slot,
|
||||
self.epoch,
|
||||
Some((TSpec::slots_per_epoch() * 2, 4.0, self.epoch, current_slot)),
|
||||
);
|
||||
let beacon_attestation_subnet_params = Self::get_topic_params(
|
||||
self,
|
||||
self.beacon_attestation_subnet_weight,
|
||||
active_validators as f64
|
||||
/ self.attestation_subnet_count as f64
|
||||
/ TSpec::slots_per_epoch() as f64,
|
||||
self.epoch
|
||||
* (if multiple_bursts_per_subnet_per_epoch {
|
||||
1
|
||||
} else {
|
||||
4
|
||||
}),
|
||||
Some((
|
||||
TSpec::slots_per_epoch()
|
||||
* (if multiple_bursts_per_subnet_per_epoch {
|
||||
4
|
||||
} else {
|
||||
16
|
||||
}),
|
||||
16.0,
|
||||
if multiple_bursts_per_subnet_per_epoch {
|
||||
self.slot * (TSpec::slots_per_epoch() as u32 / 2 + 1)
|
||||
} else {
|
||||
self.epoch * 3
|
||||
},
|
||||
current_slot,
|
||||
)),
|
||||
);
|
||||
|
||||
Ok((
|
||||
beacon_block_params,
|
||||
beacon_aggregate_proof_params,
|
||||
beacon_attestation_subnet_params,
|
||||
))
|
||||
}
|
||||
|
||||
pub fn attestation_subnet_count(&self) -> u64 {
|
||||
self.attestation_subnet_count
|
||||
}
|
||||
|
||||
fn score_parameter_decay_with_base(
|
||||
decay_time: Duration,
|
||||
decay_interval: Duration,
|
||||
decay_to_zero: f64,
|
||||
) -> f64 {
|
||||
let ticks = decay_time.as_secs_f64() / decay_interval.as_secs_f64();
|
||||
decay_to_zero.powf(1.0 / ticks)
|
||||
}
|
||||
|
||||
fn decay_convergence(decay: f64, rate: f64) -> f64 {
|
||||
rate / (1.0 - decay)
|
||||
}
|
||||
|
||||
fn threshold(decay: f64, rate: f64) -> f64 {
|
||||
Self::decay_convergence(decay, rate) * decay
|
||||
}
|
||||
|
||||
fn expected_aggregator_count_per_slot(
|
||||
&self,
|
||||
active_validators: usize,
|
||||
) -> error::Result<(f64, usize)> {
|
||||
let committees_per_slot = TSpec::get_committee_count_per_slot_with(
|
||||
active_validators,
|
||||
self.max_committees_per_slot,
|
||||
self.target_committee_size,
|
||||
)
|
||||
.map_err(|e| format!("Could not get committee count from spec: {:?}", e))?;
|
||||
|
||||
let committees = committees_per_slot * TSpec::slots_per_epoch() as usize;
|
||||
|
||||
let smaller_committee_size = active_validators / committees;
|
||||
let num_larger_committees = active_validators - smaller_committee_size * committees;
|
||||
|
||||
let modulo_smaller = max(
|
||||
1,
|
||||
smaller_committee_size / self.target_aggregators_per_committee as usize,
|
||||
);
|
||||
let modulo_larger = max(
|
||||
1,
|
||||
(smaller_committee_size + 1) / self.target_aggregators_per_committee as usize,
|
||||
);
|
||||
|
||||
Ok((
|
||||
(((committees - num_larger_committees) * smaller_committee_size) as f64
|
||||
/ modulo_smaller as f64
|
||||
+ (num_larger_committees * (smaller_committee_size + 1)) as f64
|
||||
/ modulo_larger as f64)
|
||||
/ TSpec::slots_per_epoch() as f64,
|
||||
committees_per_slot,
|
||||
))
|
||||
}
|
||||
|
||||
fn score_parameter_decay(&self, decay_time: Duration) -> f64 {
|
||||
Self::score_parameter_decay_with_base(decay_time, self.decay_interval, self.decay_to_zero)
|
||||
}
|
||||
|
||||
fn get_topic_params(
|
||||
&self,
|
||||
topic_weight: f64,
|
||||
expected_message_rate: f64,
|
||||
first_message_decay_time: Duration,
|
||||
// decay slots (decay time in slots), cap factor, activation window, current slot
|
||||
mesh_message_info: Option<(u64, f64, Duration, Slot)>,
|
||||
) -> TopicScoreParams {
|
||||
let mut t_params = TopicScoreParams::default();
|
||||
|
||||
t_params.topic_weight = topic_weight;
|
||||
|
||||
t_params.time_in_mesh_quantum = self.slot;
|
||||
t_params.time_in_mesh_cap = 3600.0 / t_params.time_in_mesh_quantum.as_secs_f64();
|
||||
t_params.time_in_mesh_weight = 10.0 / t_params.time_in_mesh_cap;
|
||||
|
||||
t_params.first_message_deliveries_decay =
|
||||
self.score_parameter_decay(first_message_decay_time);
|
||||
t_params.first_message_deliveries_cap = Self::decay_convergence(
|
||||
t_params.first_message_deliveries_decay,
|
||||
2.0 * expected_message_rate / self.mesh_n as f64,
|
||||
);
|
||||
t_params.first_message_deliveries_weight = 40.0 / t_params.first_message_deliveries_cap;
|
||||
|
||||
if let Some((decay_slots, cap_factor, activation_window, current_slot)) = mesh_message_info
|
||||
{
|
||||
let decay_time = self.slot * decay_slots as u32;
|
||||
t_params.mesh_message_deliveries_decay = self.score_parameter_decay(decay_time);
|
||||
t_params.mesh_message_deliveries_threshold = Self::threshold(
|
||||
t_params.mesh_message_deliveries_decay,
|
||||
expected_message_rate / 50.0,
|
||||
);
|
||||
t_params.mesh_message_deliveries_cap =
|
||||
if cap_factor * t_params.mesh_message_deliveries_threshold < 2.0 {
|
||||
2.0
|
||||
} else {
|
||||
cap_factor * t_params.mesh_message_deliveries_threshold
|
||||
};
|
||||
t_params.mesh_message_deliveries_activation = activation_window;
|
||||
t_params.mesh_message_deliveries_window = Duration::from_secs(2);
|
||||
t_params.mesh_failure_penalty_decay = t_params.mesh_message_deliveries_decay;
|
||||
t_params.mesh_message_deliveries_weight = -self.max_positive_score
|
||||
/ (t_params.topic_weight * t_params.mesh_message_deliveries_threshold.powi(2));
|
||||
t_params.mesh_failure_penalty_weight = t_params.mesh_message_deliveries_weight;
|
||||
if decay_slots >= current_slot.as_u64() {
|
||||
t_params.mesh_message_deliveries_threshold = 0.0;
|
||||
t_params.mesh_message_deliveries_weight = 0.0;
|
||||
}
|
||||
} else {
|
||||
t_params.mesh_message_deliveries_weight = 0.0;
|
||||
t_params.mesh_message_deliveries_threshold = 0.0;
|
||||
t_params.mesh_message_deliveries_decay = 0.0;
|
||||
t_params.mesh_message_deliveries_cap = 0.0;
|
||||
t_params.mesh_message_deliveries_window = Duration::from_secs(0);
|
||||
t_params.mesh_message_deliveries_activation = Duration::from_secs(0);
|
||||
t_params.mesh_failure_penalty_decay = 0.0;
|
||||
t_params.mesh_failure_penalty_weight = 0.0;
|
||||
}
|
||||
|
||||
t_params.invalid_message_deliveries_weight =
|
||||
-self.max_positive_score / t_params.topic_weight;
|
||||
t_params.invalid_message_deliveries_decay = self.score_parameter_decay(self.epoch * 50);
|
||||
|
||||
t_params
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,4 @@
|
||||
use crate::behaviour::gossipsub_scoring_parameters::PeerScoreSettings;
|
||||
use crate::peer_manager::{score::PeerAction, PeerManager, PeerManagerEvent};
|
||||
use crate::rpc::*;
|
||||
use crate::service::METADATA_FILENAME;
|
||||
@@ -9,6 +10,7 @@ use handler::{BehaviourHandler, BehaviourHandlerIn, DelegateIn, DelegateOut};
|
||||
use libp2p::gossipsub::subscription_filter::{
|
||||
MaxCountSubscriptionFilter, WhitelistSubscriptionFilter,
|
||||
};
|
||||
use libp2p::gossipsub::PeerScoreThresholds;
|
||||
use libp2p::{
|
||||
core::{
|
||||
connection::{ConnectedPoint, ConnectionId, ListenerId},
|
||||
@@ -38,11 +40,13 @@ use std::{
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
use types::{EnrForkId, EthSpec, SignedBeaconBlock, SubnetId};
|
||||
use types::{ChainSpec, EnrForkId, EthSpec, SignedBeaconBlock, Slot, SubnetId};
|
||||
|
||||
mod gossipsub_scoring_parameters;
|
||||
mod handler;
|
||||
|
||||
const MAX_IDENTIFY_ADDRESSES: usize = 10;
|
||||
pub const GOSSIPSUB_GREYLIST_THRESHOLD: f64 = -16000.0;
|
||||
|
||||
/// Identifier of requests sent by a peer.
|
||||
pub type PeerRequestId = (ConnectionId, SubstreamId);
|
||||
@@ -131,6 +135,11 @@ pub struct Behaviour<TSpec: EthSpec> {
|
||||
network_dir: PathBuf,
|
||||
/// Logger for behaviour actions.
|
||||
log: slog::Logger,
|
||||
|
||||
score_settings: PeerScoreSettings<TSpec>,
|
||||
|
||||
/// The interval for updating gossipsub scores
|
||||
update_gossipsub_scores: tokio::time::Interval,
|
||||
}
|
||||
|
||||
/// Implements the combined behaviour for the libp2p service.
|
||||
@@ -140,6 +149,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
|
||||
net_conf: &NetworkConfig,
|
||||
network_globals: Arc<NetworkGlobals<TSpec>>,
|
||||
log: &slog::Logger,
|
||||
chain_spec: &ChainSpec,
|
||||
) -> error::Result<Self> {
|
||||
let behaviour_log = log.new(o!());
|
||||
|
||||
@@ -161,19 +171,42 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
|
||||
max_subscriptions_per_request: 100, //this is according to the current go implementation
|
||||
};
|
||||
|
||||
let gossipsub = Gossipsub::new_with_subscription_filter(
|
||||
let mut gossipsub = Gossipsub::new_with_subscription_filter(
|
||||
MessageAuthenticity::Anonymous,
|
||||
net_conf.gs_config.clone(),
|
||||
filter,
|
||||
)
|
||||
.map_err(|e| format!("Could not construct gossipsub: {:?}", e))?;
|
||||
|
||||
// Temporarily disable scoring until parameters are tested.
|
||||
/*
|
||||
//we don't know the number of active validators and the current slot yet
|
||||
let active_validators = TSpec::minimum_validator_count();
|
||||
let current_slot = Slot::new(0);
|
||||
|
||||
let thresholds = PeerScoreThresholds {
|
||||
gossip_threshold: -4000.0,
|
||||
publish_threshold: -8000.0,
|
||||
graylist_threshold: GOSSIPSUB_GREYLIST_THRESHOLD,
|
||||
accept_px_threshold: 100.0,
|
||||
opportunistic_graft_threshold: 5.0,
|
||||
};
|
||||
|
||||
let score_settings = PeerScoreSettings::new(chain_spec, &net_conf.gs_config);
|
||||
|
||||
//Prepare scoring parameters
|
||||
let params = score_settings.get_peer_score_params(
|
||||
active_validators,
|
||||
&thresholds,
|
||||
&enr_fork_id,
|
||||
current_slot,
|
||||
)?;
|
||||
|
||||
trace!(behaviour_log, "Using peer score params"; "params" => format!("{:?}", params));
|
||||
|
||||
let update_gossipsub_scores = tokio::time::interval(params.decay_interval);
|
||||
|
||||
gossipsub
|
||||
.with_peer_score(PeerScoreParams::default(), PeerScoreThresholds::default())
|
||||
.with_peer_score(params.clone(), thresholds)
|
||||
.expect("Valid score params and thresholds");
|
||||
*/
|
||||
|
||||
Ok(Behaviour {
|
||||
eth2_rpc: RPC::new(log.clone()),
|
||||
@@ -188,9 +221,51 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
|
||||
waker: None,
|
||||
network_dir: net_conf.network_dir.clone(),
|
||||
log: behaviour_log,
|
||||
score_settings,
|
||||
update_gossipsub_scores,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn update_gossipsub_parameters(
|
||||
&mut self,
|
||||
active_validators: usize,
|
||||
current_slot: Slot,
|
||||
) -> error::Result<()> {
|
||||
let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) =
|
||||
self.score_settings
|
||||
.get_dynamic_topic_params(active_validators, current_slot)?;
|
||||
|
||||
let fork_digest = self.enr_fork_id.fork_digest;
|
||||
let get_topic = |kind: GossipKind| -> Topic {
|
||||
GossipTopic::new(kind, GossipEncoding::default(), fork_digest).into()
|
||||
};
|
||||
|
||||
debug!(self.log, "Updating gossipsub score parameters";
|
||||
"active_validators" => active_validators);
|
||||
trace!(self.log, "Updated gossipsub score parameters";
|
||||
"beacon_block_params" => format!("{:?}", beacon_block_params),
|
||||
"beacon_aggregate_proof_params" => format!("{:?}", beacon_aggregate_proof_params),
|
||||
"beacon_attestation_subnet_params" => format!("{:?}", beacon_attestation_subnet_params),
|
||||
);
|
||||
|
||||
self.gossipsub
|
||||
.set_topic_params(get_topic(GossipKind::BeaconBlock), beacon_block_params)?;
|
||||
|
||||
self.gossipsub.set_topic_params(
|
||||
get_topic(GossipKind::BeaconAggregateAndProof),
|
||||
beacon_aggregate_proof_params,
|
||||
)?;
|
||||
|
||||
for i in 0..self.score_settings.attestation_subnet_count() {
|
||||
self.gossipsub.set_topic_params(
|
||||
get_topic(GossipKind::Attestation(SubnetId::new(i))),
|
||||
beacon_attestation_subnet_params.clone(),
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Attempts to connect to a libp2p peer.
|
||||
///
|
||||
/// This MUST be used over Swarm::dial() as this keeps track of the peer in the peer manager.
|
||||
@@ -308,7 +383,8 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
|
||||
match message.encode(GossipEncoding::default()) {
|
||||
Ok(message_data) => {
|
||||
if let Err(e) = self.gossipsub.publish(topic.clone().into(), message_data) {
|
||||
slog::warn!(self.log, "Could not publish message"; "error" => format!("{:?}", e));
|
||||
slog::warn!(self.log, "Could not publish message";
|
||||
"error" => format!("{:?}", e));
|
||||
|
||||
// add to metrics
|
||||
match topic.kind() {
|
||||
@@ -589,6 +665,17 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
|
||||
|
||||
fn on_rpc_event(&mut self, message: RPCMessage<TSpec>) {
|
||||
let peer_id = message.peer_id;
|
||||
|
||||
if !self.peer_manager.is_connected(&peer_id) {
|
||||
//ignore this event
|
||||
debug!(
|
||||
self.log,
|
||||
"Ignoring rpc message of disconnected peer";
|
||||
"peer" => peer_id.to_string()
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let handler_id = message.conn_id;
|
||||
// The METADATA and PING RPC responses are handled within the behaviour and not propagated
|
||||
match message.event {
|
||||
@@ -761,6 +848,11 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
|
||||
return Poll::Ready(NBAction::GenerateEvent(event));
|
||||
}
|
||||
|
||||
// perform gossipsub score updates when necessary
|
||||
while let Poll::Ready(Some(_)) = self.update_gossipsub_scores.poll_next_unpin(cx) {
|
||||
self.peer_manager.update_gossipsub_scores(&self.gossipsub);
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::time::Duration;
|
||||
pub const GOSSIP_MAX_SIZE: usize = 1_048_576;
|
||||
const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0];
|
||||
const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0];
|
||||
pub const MESH_N_LOW: usize = 6;
|
||||
|
||||
pub type GossipsubConfig = GenericGossipsubConfig<MessageData>;
|
||||
pub type GossipsubConfigBuilder = GenericGossipsubConfigBuilder<MessageData>;
|
||||
@@ -80,6 +81,9 @@ pub struct Config {
|
||||
/// Attempt to construct external port mappings with UPnP.
|
||||
pub upnp_enabled: bool,
|
||||
|
||||
/// Subscribe to all subnets for the duration of the runtime.
|
||||
pub subscribe_all_subnets: bool,
|
||||
|
||||
/// List of extra topics to initially subscribe to as strings.
|
||||
pub topics: Vec<GossipKind>,
|
||||
}
|
||||
@@ -87,7 +91,7 @@ pub struct Config {
|
||||
impl Default for Config {
|
||||
/// Generate a default network configuration.
|
||||
fn default() -> Self {
|
||||
// WARNING: this directory default should be always overrided with parameters
|
||||
// WARNING: this directory default should be always overwritten with parameters
|
||||
// from cli for specific networks.
|
||||
let network_dir = dirs::home_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
@@ -130,7 +134,7 @@ impl Default for Config {
|
||||
.max_transmit_size(GOSSIP_MAX_SIZE)
|
||||
.heartbeat_interval(Duration::from_millis(700))
|
||||
.mesh_n(8)
|
||||
.mesh_n_low(6)
|
||||
.mesh_n_low(MESH_N_LOW)
|
||||
.mesh_n_high(12)
|
||||
.gossip_lazy(6)
|
||||
.fanout_ttl(Duration::from_secs(60))
|
||||
@@ -142,6 +146,7 @@ impl Default for Config {
|
||||
.duplicate_cache_time(Duration::from_secs(385))
|
||||
.message_id_fn(gossip_message_id)
|
||||
.fast_message_id_fn(fast_gossip_message_id)
|
||||
.allow_self_origin(true)
|
||||
.build()
|
||||
.expect("valid gossipsub configuration");
|
||||
|
||||
@@ -149,13 +154,13 @@ impl Default for Config {
|
||||
let discv5_config = Discv5ConfigBuilder::new()
|
||||
.enable_packet_filter()
|
||||
.session_cache_capacity(1000)
|
||||
.request_timeout(Duration::from_secs(4))
|
||||
.request_timeout(Duration::from_secs(1))
|
||||
.query_peer_timeout(Duration::from_secs(2))
|
||||
.query_timeout(Duration::from_secs(30))
|
||||
.request_retries(1)
|
||||
.enr_peer_update_min(10)
|
||||
.query_parallelism(5)
|
||||
.disable_report_discovered_peers()
|
||||
.query_timeout(Duration::from_secs(30))
|
||||
.query_peer_timeout(Duration::from_secs(2))
|
||||
.ip_limit() // limits /24 IP's in buckets.
|
||||
.ping_interval(Duration::from_secs(300))
|
||||
.build();
|
||||
@@ -179,6 +184,7 @@ impl Default for Config {
|
||||
client_version: lighthouse_version::version_with_platform(),
|
||||
disable_discovery: false,
|
||||
upnp_enabled: true,
|
||||
subscribe_all_subnets: false,
|
||||
topics: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ pub use enr::{build_enr, create_enr_builder_from_config, use_or_load_enr, Combin
|
||||
pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt};
|
||||
pub use libp2p::core::identity::{Keypair, PublicKey};
|
||||
|
||||
use crate::metrics;
|
||||
use crate::{config, metrics};
|
||||
use crate::{error, Enr, NetworkConfig, NetworkGlobals, SubnetDiscovery};
|
||||
use discv5::{enr::NodeId, Discv5, Discv5Event};
|
||||
use enr::{BITFIELD_ENR_KEY, ETH2_ENR_KEY};
|
||||
@@ -36,7 +36,7 @@ pub use subnet_predicate::subnet_predicate;
|
||||
/// Local ENR storage filename.
|
||||
pub const ENR_FILENAME: &str = "enr.dat";
|
||||
/// Target number of peers we'd like to have connected to a given long-lived subnet.
|
||||
pub const TARGET_SUBNET_PEERS: usize = 3;
|
||||
pub const TARGET_SUBNET_PEERS: usize = config::MESH_N_LOW;
|
||||
/// Target number of peers to search for given a grouped subnet query.
|
||||
const TARGET_PEERS_FOR_GROUPED_QUERY: usize = 6;
|
||||
/// Number of times to attempt a discovery request.
|
||||
@@ -632,7 +632,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
|
||||
.network_globals
|
||||
.peers
|
||||
.read()
|
||||
.peers_on_subnet(subnet_query.subnet_id)
|
||||
.good_peers_on_subnet(subnet_query.subnet_id)
|
||||
.count();
|
||||
|
||||
if peers_on_subnet >= TARGET_SUBNET_PEERS {
|
||||
|
||||
@@ -4,14 +4,14 @@ pub use self::peerdb::*;
|
||||
use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS};
|
||||
use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode};
|
||||
use crate::types::SyncState;
|
||||
use crate::{error, metrics};
|
||||
use crate::{error, metrics, Gossipsub};
|
||||
use crate::{EnrExt, NetworkConfig, NetworkGlobals, PeerId, SubnetDiscovery};
|
||||
use futures::prelude::*;
|
||||
use futures::Stream;
|
||||
use hashset_delay::HashSetDelay;
|
||||
use libp2p::core::multiaddr::Protocol as MProtocol;
|
||||
use libp2p::identify::IdentifyInfo;
|
||||
use slog::{crit, debug, error};
|
||||
use slog::{crit, debug, error, warn};
|
||||
use smallvec::SmallVec;
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
@@ -33,12 +33,18 @@ pub(crate) mod score;
|
||||
pub use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerConnectionStatus::*, PeerInfo};
|
||||
pub use peer_sync_status::{PeerSyncStatus, SyncInfo};
|
||||
use score::{PeerAction, ScoreState};
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// The time in seconds between re-status's peers.
|
||||
const STATUS_INTERVAL: u64 = 300;
|
||||
/// The time in seconds between PING events. We do not send a ping if the other peer has PING'd us
|
||||
/// within this time frame (Seconds)
|
||||
const PING_INTERVAL: u64 = 30;
|
||||
/// This is asymmetric to avoid simultaneous pings.
|
||||
/// The interval for outbound connections.
|
||||
const PING_INTERVAL_OUTBOUND: u64 = 30;
|
||||
/// The interval for inbound connections.
|
||||
const PING_INTERVAL_INBOUND: u64 = 35;
|
||||
|
||||
/// The heartbeat performs regular updates such as updating reputations and performing discovery
|
||||
/// requests. This defines the interval in seconds.
|
||||
@@ -49,14 +55,20 @@ const HEARTBEAT_INTERVAL: u64 = 30;
|
||||
/// PEER_EXCESS_FACTOR = 0.1 we allow 10% more nodes, i.e 55.
|
||||
const PEER_EXCESS_FACTOR: f32 = 0.1;
|
||||
|
||||
/// Relative factor of peers that are allowed to have a negative gossipsub score without penalizing
|
||||
/// them in lighthouse.
|
||||
const ALLOWED_NEGATIVE_GOSSIPSUB_FACTOR: f32 = 0.1;
|
||||
|
||||
/// The main struct that handles peer's reputation and connection status.
|
||||
pub struct PeerManager<TSpec: EthSpec> {
|
||||
/// Storage of network globals to access the `PeerDB`.
|
||||
network_globals: Arc<NetworkGlobals<TSpec>>,
|
||||
/// A queue of events that the `PeerManager` is waiting to produce.
|
||||
events: SmallVec<[PeerManagerEvent; 16]>,
|
||||
/// A collection of peers awaiting to be Ping'd.
|
||||
ping_peers: HashSetDelay<PeerId>,
|
||||
/// A collection of inbound-connected peers awaiting to be Ping'd.
|
||||
inbound_ping_peers: HashSetDelay<PeerId>,
|
||||
/// A collection of outbound-connected peers awaiting to be Ping'd.
|
||||
outbound_ping_peers: HashSetDelay<PeerId>,
|
||||
/// A collection of peers awaiting to be Status'd.
|
||||
status_peers: HashSetDelay<PeerId>,
|
||||
/// The target number of peers we would like to connect to.
|
||||
@@ -106,7 +118,8 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
Ok(PeerManager {
|
||||
network_globals,
|
||||
events: SmallVec::new(),
|
||||
ping_peers: HashSetDelay::new(Duration::from_secs(PING_INTERVAL)),
|
||||
inbound_ping_peers: HashSetDelay::new(Duration::from_secs(PING_INTERVAL_INBOUND)),
|
||||
outbound_ping_peers: HashSetDelay::new(Duration::from_secs(PING_INTERVAL_OUTBOUND)),
|
||||
status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)),
|
||||
target_peers: config.target_peers,
|
||||
max_peers: (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as usize,
|
||||
@@ -157,55 +170,30 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
/// If the peer doesn't exist, log a warning and insert defaults.
|
||||
pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction) {
|
||||
// Helper function to avoid any potential deadlocks.
|
||||
let mut ban_peer = None;
|
||||
let mut unban_peer = None;
|
||||
let mut to_ban_peers = Vec::with_capacity(1);
|
||||
let mut to_unban_peers = Vec::with_capacity(1);
|
||||
|
||||
{
|
||||
let mut peer_db = self.network_globals.peers.write();
|
||||
if let Some(info) = peer_db.peer_info_mut(peer_id) {
|
||||
let previous_state = info.score_state();
|
||||
info.apply_peer_action_to_score(action);
|
||||
Self::handle_score_transitions(
|
||||
previous_state,
|
||||
peer_id,
|
||||
info,
|
||||
&mut to_ban_peers,
|
||||
&mut to_unban_peers,
|
||||
&mut self.events,
|
||||
&self.log,
|
||||
);
|
||||
if previous_state != info.score_state() {
|
||||
match info.score_state() {
|
||||
ScoreState::Banned => {
|
||||
debug!(self.log, "Peer has been banned"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string());
|
||||
ban_peer = Some(peer_id);
|
||||
}
|
||||
ScoreState::Disconnected => {
|
||||
debug!(self.log, "Peer transitioned to disconnect state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
|
||||
// disconnect the peer if it's currently connected or dialing
|
||||
if info.is_connected_or_dialing() {
|
||||
self.events.push(PeerManagerEvent::DisconnectPeer(
|
||||
peer_id.clone(),
|
||||
GoodbyeReason::BadScore,
|
||||
));
|
||||
peer_db.notify_disconnecting(peer_id);
|
||||
} else if info.is_banned() {
|
||||
unban_peer = Some(peer_id);
|
||||
}
|
||||
}
|
||||
ScoreState::Healthy => {
|
||||
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
|
||||
// unban the peer if it was previously banned.
|
||||
if info.is_banned() {
|
||||
unban_peer = Some(peer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
debug!(self.log, "Peer score adjusted"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string());
|
||||
}
|
||||
}
|
||||
} // end write lock
|
||||
|
||||
if let Some(peer_id) = ban_peer {
|
||||
self.ban_peer(peer_id);
|
||||
}
|
||||
if let Some(peer_id) = unban_peer {
|
||||
if let Err(e) = self.unban_peer(peer_id) {
|
||||
error!(self.log, "{}", e; "peer_id" => %peer_id);
|
||||
}
|
||||
}
|
||||
self.ban_and_unban_peers(to_ban_peers, to_unban_peers);
|
||||
}
|
||||
|
||||
/* Discovery Requests */
|
||||
@@ -222,6 +210,11 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
|
||||
/// A request to find peers on a given subnet.
|
||||
pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec<SubnetDiscovery>) {
|
||||
// If discovery is not started or disabled, ignore the request
|
||||
if !self.discovery.started {
|
||||
return;
|
||||
}
|
||||
|
||||
let filtered: Vec<SubnetDiscovery> = subnets_to_discover
|
||||
.into_iter()
|
||||
.filter(|s| {
|
||||
@@ -237,7 +230,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
.network_globals
|
||||
.peers
|
||||
.read()
|
||||
.peers_on_subnet(s.subnet_id)
|
||||
.good_peers_on_subnet(s.subnet_id)
|
||||
.count();
|
||||
if peers_on_subnet >= TARGET_SUBNET_PEERS {
|
||||
debug!(
|
||||
@@ -282,7 +275,8 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
.notify_disconnect(peer_id);
|
||||
|
||||
// remove the ping and status timer for the peer
|
||||
self.ping_peers.remove(peer_id);
|
||||
self.inbound_ping_peers.remove(peer_id);
|
||||
self.outbound_ping_peers.remove(peer_id);
|
||||
self.status_peers.remove(peer_id);
|
||||
}
|
||||
|
||||
@@ -316,6 +310,10 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
self.network_globals.peers.read().is_banned(peer_id)
|
||||
}
|
||||
|
||||
pub fn is_connected(&self, peer_id: &PeerId) -> bool {
|
||||
self.network_globals.peers.read().is_connected(peer_id)
|
||||
}
|
||||
|
||||
/// Reports whether the peer limit is reached in which case we stop allowing new incoming
|
||||
/// connections.
|
||||
pub fn peer_limit_reached(&self) -> bool {
|
||||
@@ -425,7 +423,17 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
// received a ping
|
||||
// reset the to-ping timer for this peer
|
||||
debug!(self.log, "Received a ping request"; "peer_id" => peer_id.to_string(), "seq_no" => seq);
|
||||
self.ping_peers.insert(peer_id.clone());
|
||||
match peer_info.connection_direction {
|
||||
Some(ConnectionDirection::Incoming) => {
|
||||
self.inbound_ping_peers.insert(peer_id.clone());
|
||||
}
|
||||
Some(ConnectionDirection::Outgoing) => {
|
||||
self.outbound_ping_peers.insert(peer_id.clone());
|
||||
}
|
||||
None => {
|
||||
warn!(self.log, "Received a ping from a peer with an unknown connection direction"; "peer_id" => %peer_id);
|
||||
}
|
||||
}
|
||||
|
||||
// if the sequence number is unknown send an update the meta data of the peer.
|
||||
if let Some(meta_data) = &peer_info.meta_data {
|
||||
@@ -517,6 +525,59 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn update_gossipsub_scores(&mut self, gossipsub: &Gossipsub) {
|
||||
let mut to_ban_peers = Vec::new();
|
||||
let mut to_unban_peers = Vec::new();
|
||||
|
||||
{
|
||||
//collect peers with scores
|
||||
let mut guard = self.network_globals.peers.write();
|
||||
let mut peers: Vec<_> = guard
|
||||
.peers_mut()
|
||||
.filter_map(|(peer_id, info)| {
|
||||
gossipsub
|
||||
.peer_score(peer_id)
|
||||
.map(|score| (peer_id, info, score))
|
||||
})
|
||||
.collect();
|
||||
|
||||
// sort descending by score
|
||||
peers.sort_unstable_by(|(.., s1), (.., s2)| {
|
||||
s2.partial_cmp(s1).unwrap_or(Ordering::Equal)
|
||||
});
|
||||
|
||||
let mut to_ignore_negative_peers =
|
||||
(self.target_peers as f32 * ALLOWED_NEGATIVE_GOSSIPSUB_FACTOR).ceil() as usize;
|
||||
|
||||
for (peer_id, info, score) in peers {
|
||||
let previous_state = info.score_state();
|
||||
info.update_gossipsub_score(
|
||||
score,
|
||||
if score < 0.0 && to_ignore_negative_peers > 0 {
|
||||
to_ignore_negative_peers -= 1;
|
||||
// We ignore the negative score for the best negative peers so that their
|
||||
// gossipsub score can recover without getting disconnected.
|
||||
true
|
||||
} else {
|
||||
false
|
||||
},
|
||||
);
|
||||
|
||||
Self::handle_score_transitions(
|
||||
previous_state,
|
||||
peer_id,
|
||||
info,
|
||||
&mut to_ban_peers,
|
||||
&mut to_unban_peers,
|
||||
&mut self.events,
|
||||
&self.log,
|
||||
);
|
||||
}
|
||||
} // end write lock
|
||||
|
||||
self.ban_and_unban_peers(to_ban_peers, to_unban_peers);
|
||||
}
|
||||
|
||||
/* Internal functions */
|
||||
|
||||
// The underlying discovery server has updated our external IP address. We send this up to
|
||||
@@ -618,16 +679,19 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
return true;
|
||||
}
|
||||
ConnectingType::IngoingConnected { multiaddr } => {
|
||||
peerdb.connect_outgoing(peer_id, multiaddr, enr)
|
||||
peerdb.connect_ingoing(peer_id, multiaddr, enr);
|
||||
// start a timer to ping inbound peers.
|
||||
self.inbound_ping_peers.insert(peer_id.clone());
|
||||
}
|
||||
ConnectingType::OutgoingConnected { multiaddr } => {
|
||||
peerdb.connect_ingoing(peer_id, multiaddr, enr)
|
||||
peerdb.connect_outgoing(peer_id, multiaddr, enr);
|
||||
// start a timer for to ping outbound peers.
|
||||
self.outbound_ping_peers.insert(peer_id.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// start a ping and status timer for the peer
|
||||
self.ping_peers.insert(peer_id.clone());
|
||||
self.status_peers.insert(peer_id.clone());
|
||||
|
||||
// increment prometheus metrics
|
||||
@@ -655,6 +719,59 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
true
|
||||
}
|
||||
|
||||
fn handle_score_transitions(
|
||||
previous_state: ScoreState,
|
||||
peer_id: &PeerId,
|
||||
info: &mut PeerInfo<TSpec>,
|
||||
to_ban_peers: &mut Vec<PeerId>,
|
||||
to_unban_peers: &mut Vec<PeerId>,
|
||||
events: &mut SmallVec<[PeerManagerEvent; 16]>,
|
||||
log: &slog::Logger,
|
||||
) {
|
||||
if previous_state != info.score_state() {
|
||||
match info.score_state() {
|
||||
ScoreState::Banned => {
|
||||
debug!(log, "Peer has been banned"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string());
|
||||
to_ban_peers.push(peer_id.clone());
|
||||
}
|
||||
ScoreState::Disconnected => {
|
||||
debug!(log, "Peer transitioned to disconnect state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
|
||||
// disconnect the peer if it's currently connected or dialing
|
||||
if info.is_connected_or_dialing() {
|
||||
// Change the state to inform that we are disconnecting the peer.
|
||||
info.disconnecting(false);
|
||||
events.push(PeerManagerEvent::DisconnectPeer(
|
||||
peer_id.clone(),
|
||||
GoodbyeReason::BadScore,
|
||||
));
|
||||
} else if info.is_banned() {
|
||||
to_unban_peers.push(peer_id.clone());
|
||||
}
|
||||
}
|
||||
ScoreState::Healthy => {
|
||||
debug!(log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
|
||||
// unban the peer if it was previously banned.
|
||||
if info.is_banned() {
|
||||
to_unban_peers.push(peer_id.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn ban_and_unban_peers(&mut self, to_ban_peers: Vec<PeerId>, to_unban_peers: Vec<PeerId>) {
|
||||
// process banning peers
|
||||
for peer_id in to_ban_peers {
|
||||
self.ban_peer(&peer_id);
|
||||
}
|
||||
// process unbanning peers
|
||||
for peer_id in to_unban_peers {
|
||||
if let Err(e) = self.unban_peer(&peer_id) {
|
||||
error!(self.log, "{}", e; "peer_id" => %peer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Updates the scores of known peers according to their connection
|
||||
/// status and the time that has passed.
|
||||
/// NOTE: This is experimental and will likely be adjusted
|
||||
@@ -669,47 +786,17 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
// Update scores
|
||||
info.score_update();
|
||||
|
||||
// handle score transitions
|
||||
if previous_state != info.score_state() {
|
||||
match info.score_state() {
|
||||
ScoreState::Banned => {
|
||||
debug!(self.log, "Peer has been banned"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string());
|
||||
to_ban_peers.push(peer_id.clone());
|
||||
}
|
||||
ScoreState::Disconnected => {
|
||||
debug!(self.log, "Peer transitioned to disconnect state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
|
||||
// disconnect the peer if it's currently connected or dialing
|
||||
if info.is_connected_or_dialing() {
|
||||
// Change the state to inform that we are disconnecting the peer.
|
||||
info.disconnecting(false);
|
||||
self.events.push(PeerManagerEvent::DisconnectPeer(
|
||||
peer_id.clone(),
|
||||
GoodbyeReason::BadScore,
|
||||
));
|
||||
} else if info.is_banned() {
|
||||
to_unban_peers.push(peer_id.clone());
|
||||
}
|
||||
}
|
||||
ScoreState::Healthy => {
|
||||
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
|
||||
// unban the peer if it was previously banned.
|
||||
if info.is_banned() {
|
||||
to_unban_peers.push(peer_id.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// process banning peers
|
||||
for peer_id in to_ban_peers {
|
||||
self.ban_peer(&peer_id);
|
||||
}
|
||||
// process unbanning peers
|
||||
for peer_id in to_unban_peers {
|
||||
if let Err(e) = self.unban_peer(&peer_id) {
|
||||
error!(self.log, "{}", e; "peer_id" => %peer_id);
|
||||
}
|
||||
Self::handle_score_transitions(
|
||||
previous_state,
|
||||
peer_id,
|
||||
info,
|
||||
&mut to_ban_peers,
|
||||
&mut to_unban_peers,
|
||||
&mut self.events,
|
||||
&self.log,
|
||||
);
|
||||
}
|
||||
self.ban_and_unban_peers(to_ban_peers, to_unban_peers);
|
||||
}
|
||||
|
||||
/// Bans a peer.
|
||||
@@ -737,9 +824,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
.peer_info(peer_id)
|
||||
.map(|info| {
|
||||
info.seen_addresses()
|
||||
.iter()
|
||||
.filter(|ip| peer_db.is_ip_banned(ip))
|
||||
.cloned()
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
@@ -757,7 +842,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
|
||||
let seen_ip_addresses = peer_db
|
||||
.peer_info(peer_id)
|
||||
.map(|info| info.seen_addresses().iter().cloned().collect::<Vec<_>>())
|
||||
.map(|info| info.seen_addresses().collect::<Vec<_>>())
|
||||
.unwrap_or_default();
|
||||
|
||||
self.discovery.unban_peer(&peer_id, seen_ip_addresses);
|
||||
@@ -774,8 +859,10 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
let peer_count = self.network_globals.connected_or_dialing_peers();
|
||||
if peer_count < self.target_peers {
|
||||
// If we need more peers, queue a discovery lookup.
|
||||
debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers);
|
||||
self.discovery.discover_peers();
|
||||
if self.discovery.started {
|
||||
debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers);
|
||||
self.discovery.discover_peers();
|
||||
}
|
||||
}
|
||||
|
||||
// Updates peer's scores.
|
||||
@@ -833,13 +920,26 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
|
||||
|
||||
// poll the timeouts for pings and status'
|
||||
loop {
|
||||
match self.ping_peers.poll_next_unpin(cx) {
|
||||
match self.inbound_ping_peers.poll_next_unpin(cx) {
|
||||
Poll::Ready(Some(Ok(peer_id))) => {
|
||||
self.ping_peers.insert(peer_id.clone());
|
||||
self.inbound_ping_peers.insert(peer_id.clone());
|
||||
self.events.push(PeerManagerEvent::Ping(peer_id));
|
||||
}
|
||||
Poll::Ready(Some(Err(e))) => {
|
||||
error!(self.log, "Failed to check for peers to ping"; "error" => e.to_string())
|
||||
error!(self.log, "Failed to check for inbound peers to ping"; "error" => e.to_string())
|
||||
}
|
||||
Poll::Ready(None) | Poll::Pending => break,
|
||||
}
|
||||
}
|
||||
|
||||
loop {
|
||||
match self.outbound_ping_peers.poll_next_unpin(cx) {
|
||||
Poll::Ready(Some(Ok(peer_id))) => {
|
||||
self.outbound_ping_peers.insert(peer_id.clone());
|
||||
self.events.push(PeerManagerEvent::Ping(peer_id));
|
||||
}
|
||||
Poll::Ready(Some(Err(e))) => {
|
||||
error!(self.log, "Failed to check for outbound peers to ping"; "error" => e.to_string())
|
||||
}
|
||||
Poll::Ready(None) | Poll::Pending => break,
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ use serde::{
|
||||
Serialize,
|
||||
};
|
||||
use std::collections::HashSet;
|
||||
use std::net::IpAddr;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::time::Instant;
|
||||
use types::{EthSpec, SubnetId};
|
||||
use PeerConnectionStatus::*;
|
||||
@@ -31,7 +31,7 @@ pub struct PeerInfo<T: EthSpec> {
|
||||
pub listening_addresses: Vec<Multiaddr>,
|
||||
/// This is addresses we have physically seen and this is what we use for banning/un-banning
|
||||
/// peers.
|
||||
seen_addresses: HashSet<IpAddr>,
|
||||
pub seen_addresses: HashSet<SocketAddr>,
|
||||
/// The current syncing state of the peer. The state may be determined after it's initial
|
||||
/// connection.
|
||||
pub sync_status: PeerSyncStatus,
|
||||
@@ -91,9 +91,11 @@ impl<T: EthSpec> PeerInfo<T> {
|
||||
false
|
||||
}
|
||||
|
||||
/// Returns the seen addresses of the peer.
|
||||
pub fn seen_addresses(&self) -> &HashSet<IpAddr> {
|
||||
&self.seen_addresses
|
||||
/// Returns the seen IP addresses of the peer.
|
||||
pub fn seen_addresses<'a>(&'a self) -> impl Iterator<Item = IpAddr> + 'a {
|
||||
self.seen_addresses
|
||||
.iter()
|
||||
.map(|socket_addr| socket_addr.ip())
|
||||
}
|
||||
|
||||
/// Returns the connection status of the peer.
|
||||
@@ -107,8 +109,8 @@ impl<T: EthSpec> PeerInfo<T> {
|
||||
}
|
||||
|
||||
/// Returns score of the peer.
|
||||
pub fn score(&self) -> Score {
|
||||
self.score
|
||||
pub fn score(&self) -> &Score {
|
||||
&self.score
|
||||
}
|
||||
|
||||
/// Returns the state of the peer based on the score.
|
||||
@@ -130,6 +132,14 @@ impl<T: EthSpec> PeerInfo<T> {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn update_gossipsub_score(&mut self, new_score: f64, ignore: bool) {
|
||||
self.score.update_gossipsub_score(new_score, ignore);
|
||||
}
|
||||
|
||||
pub fn is_good_gossipsub_peer(&self) -> bool {
|
||||
self.score.is_good_gossipsub_peer()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
/// Resets the peers score.
|
||||
pub fn reset_score(&mut self) {
|
||||
@@ -243,7 +253,7 @@ impl<T: EthSpec> PeerInfo<T> {
|
||||
|
||||
/// Modifies the status to Connected and increases the number of ingoing
|
||||
/// connections by one
|
||||
pub(crate) fn connect_ingoing(&mut self, seen_address: Option<IpAddr>) {
|
||||
pub(crate) fn connect_ingoing(&mut self, seen_address: Option<SocketAddr>) {
|
||||
match &mut self.connection_status {
|
||||
Connected { n_in, .. } => *n_in += 1,
|
||||
Disconnected { .. }
|
||||
@@ -256,14 +266,14 @@ impl<T: EthSpec> PeerInfo<T> {
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ip_addr) = seen_address {
|
||||
self.seen_addresses.insert(ip_addr);
|
||||
if let Some(socket_addr) = seen_address {
|
||||
self.seen_addresses.insert(socket_addr);
|
||||
}
|
||||
}
|
||||
|
||||
/// Modifies the status to Connected and increases the number of outgoing
|
||||
/// connections by one
|
||||
pub(crate) fn connect_outgoing(&mut self, seen_address: Option<IpAddr>) {
|
||||
pub(crate) fn connect_outgoing(&mut self, seen_address: Option<SocketAddr>) {
|
||||
match &mut self.connection_status {
|
||||
Connected { n_out, .. } => *n_out += 1,
|
||||
Disconnected { .. }
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use super::peer_info::{PeerConnectionStatus, PeerInfo};
|
||||
use super::peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo};
|
||||
use super::peer_sync_status::PeerSyncStatus;
|
||||
use super::score::{Score, ScoreState};
|
||||
use crate::multiaddr::{Multiaddr, Protocol};
|
||||
@@ -7,8 +7,8 @@ use crate::Enr;
|
||||
use crate::PeerId;
|
||||
use rand::seq::SliceRandom;
|
||||
use slog::{crit, debug, error, trace, warn};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::net::IpAddr;
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::time::Instant;
|
||||
use types::{EthSpec, SubnetId};
|
||||
|
||||
@@ -16,8 +16,7 @@ use types::{EthSpec, SubnetId};
|
||||
const MAX_DC_PEERS: usize = 500;
|
||||
/// The maximum number of banned nodes to remember.
|
||||
const MAX_BANNED_PEERS: usize = 1000;
|
||||
/// If there are more than `BANNED_PEERS_PER_IP_THRESHOLD` many banned peers with the same IP we ban
|
||||
/// the IP.
|
||||
/// We ban an IP if there are more than `BANNED_PEERS_PER_IP_THRESHOLD` banned peers with this IP.
|
||||
const BANNED_PEERS_PER_IP_THRESHOLD: usize = 5;
|
||||
|
||||
/// Storage of known peers, their reputation and information
|
||||
@@ -42,19 +41,19 @@ pub struct BannedPeersCount {
|
||||
impl BannedPeersCount {
|
||||
/// Removes the peer from the counts if it is banned. Returns true if the peer was banned and
|
||||
/// false otherwise.
|
||||
pub fn remove_banned_peer(&mut self, ip_addresses: &HashSet<IpAddr>) {
|
||||
pub fn remove_banned_peer(&mut self, ip_addresses: impl Iterator<Item = IpAddr>) {
|
||||
self.banned_peers = self.banned_peers.saturating_sub(1);
|
||||
for address in ip_addresses {
|
||||
if let Some(count) = self.banned_peers_per_ip.get_mut(address) {
|
||||
if let Some(count) = self.banned_peers_per_ip.get_mut(&address) {
|
||||
*count = count.saturating_sub(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_banned_peer(&mut self, ip_addresses: &HashSet<IpAddr>) {
|
||||
pub fn add_banned_peer(&mut self, ip_addresses: impl Iterator<Item = IpAddr>) {
|
||||
self.banned_peers = self.banned_peers.saturating_add(1);
|
||||
for address in ip_addresses {
|
||||
*self.banned_peers_per_ip.entry(*address).or_insert(0) += 1;
|
||||
*self.banned_peers_per_ip.entry(address).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -96,10 +95,11 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
/* Getters */
|
||||
|
||||
/// Gives the score of a peer, or default score if it is unknown.
|
||||
pub fn score(&self, peer_id: &PeerId) -> Score {
|
||||
pub fn score(&self, peer_id: &PeerId) -> f64 {
|
||||
self.peers
|
||||
.get(peer_id)
|
||||
.map_or(Score::default(), |info| info.score())
|
||||
.map_or(&Score::default(), |info| info.score())
|
||||
.score()
|
||||
}
|
||||
|
||||
/// Returns an iterator over all peers in the db.
|
||||
@@ -163,7 +163,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
/// This is used to determine if we should accept incoming connections or not.
|
||||
pub fn is_banned(&self, peer_id: &PeerId) -> bool {
|
||||
if let Some(peer) = self.peers.get(peer_id) {
|
||||
match peer.score().state() {
|
||||
match peer.score_state() {
|
||||
ScoreState::Banned => true,
|
||||
_ => self.ip_is_banned(peer),
|
||||
}
|
||||
@@ -174,8 +174,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
|
||||
fn ip_is_banned(&self, peer: &PeerInfo<TSpec>) -> bool {
|
||||
peer.seen_addresses()
|
||||
.iter()
|
||||
.any(|addr| self.banned_peers_count.ip_is_banned(addr))
|
||||
.any(|ip| self.banned_peers_count.ip_is_banned(&ip))
|
||||
}
|
||||
|
||||
/// Returns true if the IP is banned.
|
||||
@@ -186,7 +185,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
/// Returns true if the Peer is either banned or in the disconnected state.
|
||||
pub fn is_banned_or_disconnected(&self, peer_id: &PeerId) -> bool {
|
||||
if let Some(peer) = self.peers.get(peer_id) {
|
||||
match peer.score().state() {
|
||||
match peer.score_state() {
|
||||
ScoreState::Banned | ScoreState::Disconnected => true,
|
||||
_ => self.ip_is_banned(peer),
|
||||
}
|
||||
@@ -243,10 +242,12 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
}
|
||||
|
||||
/// Gives an iterator of all peers on a given subnet.
|
||||
pub fn peers_on_subnet(&self, subnet_id: SubnetId) -> impl Iterator<Item = &PeerId> {
|
||||
pub fn good_peers_on_subnet(&self, subnet_id: SubnetId) -> impl Iterator<Item = &PeerId> {
|
||||
self.peers
|
||||
.iter()
|
||||
.filter(move |(_, info)| info.is_connected() && info.on_subnet(subnet_id))
|
||||
.filter(move |(_, info)| {
|
||||
info.is_connected() && info.on_subnet(subnet_id) && info.is_good_gossipsub_peer()
|
||||
})
|
||||
.map(|(peer_id, _)| peer_id)
|
||||
}
|
||||
|
||||
@@ -370,8 +371,13 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
});
|
||||
}
|
||||
|
||||
/// Sets a peer as connected with an ingoing connection.
|
||||
pub fn connect_ingoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr, enr: Option<Enr>) {
|
||||
fn connect(
|
||||
&mut self,
|
||||
peer_id: &PeerId,
|
||||
multiaddr: Multiaddr,
|
||||
enr: Option<Enr>,
|
||||
direction: ConnectionDirection,
|
||||
) {
|
||||
let info = self.peers.entry(peer_id.clone()).or_default();
|
||||
info.enr = enr;
|
||||
|
||||
@@ -385,39 +391,37 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
.remove_banned_peer(info.seen_addresses());
|
||||
}
|
||||
|
||||
// Add the seen ip address to the peer's info
|
||||
let ip_addr = multiaddr.iter().find_map(|p| match p {
|
||||
Protocol::Ip4(ip) => Some(ip.into()),
|
||||
Protocol::Ip6(ip) => Some(ip.into()),
|
||||
// Add the seen ip address and port to the peer's info
|
||||
let socket_addr = match multiaddr.iter().fold(
|
||||
(None, None),
|
||||
|(found_ip, found_port), protocol| match protocol {
|
||||
Protocol::Ip4(ip) => (Some(ip.into()), found_port),
|
||||
Protocol::Ip6(ip) => (Some(ip.into()), found_port),
|
||||
Protocol::Tcp(port) => (found_ip, Some(port)),
|
||||
_ => (found_ip, found_port),
|
||||
},
|
||||
) {
|
||||
(Some(ip), Some(port)) => Some(SocketAddr::new(ip, port)),
|
||||
(Some(_ip), None) => {
|
||||
crit!(self.log, "Connected peer has an IP but no TCP port"; "peer_id" => %peer_id);
|
||||
None
|
||||
}
|
||||
_ => None,
|
||||
});
|
||||
};
|
||||
|
||||
info.connect_ingoing(ip_addr);
|
||||
match direction {
|
||||
ConnectionDirection::Incoming => info.connect_ingoing(socket_addr),
|
||||
ConnectionDirection::Outgoing => info.connect_outgoing(socket_addr),
|
||||
}
|
||||
}
|
||||
/// Sets a peer as connected with an ingoing connection.
|
||||
pub fn connect_ingoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr, enr: Option<Enr>) {
|
||||
self.connect(peer_id, multiaddr, enr, ConnectionDirection::Incoming)
|
||||
}
|
||||
|
||||
/// Sets a peer as connected with an outgoing connection.
|
||||
pub fn connect_outgoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr, enr: Option<Enr>) {
|
||||
let info = self.peers.entry(peer_id.clone()).or_default();
|
||||
info.enr = enr;
|
||||
|
||||
if info.is_disconnected() {
|
||||
self.disconnected_peers = self.disconnected_peers.saturating_sub(1);
|
||||
}
|
||||
|
||||
if info.is_banned() {
|
||||
error!(self.log, "Connected to a banned peer"; "peer_id" => %peer_id);
|
||||
self.banned_peers_count
|
||||
.remove_banned_peer(info.seen_addresses());
|
||||
}
|
||||
|
||||
// Add the seen ip address to the peer's info
|
||||
let ip_addr = multiaddr.iter().find_map(|p| match p {
|
||||
Protocol::Ip4(ip) => Some(ip.into()),
|
||||
Protocol::Ip6(ip) => Some(ip.into()),
|
||||
_ => None,
|
||||
});
|
||||
|
||||
info.connect_outgoing(ip_addr);
|
||||
self.connect(peer_id, multiaddr, enr, ConnectionDirection::Outgoing)
|
||||
}
|
||||
|
||||
/// Sets the peer as disconnected. A banned peer remains banned
|
||||
@@ -458,7 +462,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
});
|
||||
|
||||
// Ban the peer if the score is not already low enough.
|
||||
match info.score().state() {
|
||||
match info.score_state() {
|
||||
ScoreState::Banned => {}
|
||||
_ => {
|
||||
// If score isn't low enough to ban, this function has been called incorrectly.
|
||||
@@ -518,7 +522,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
return Err("Unbanning peer that is not banned");
|
||||
}
|
||||
|
||||
if let ScoreState::Banned = info.score().state() {
|
||||
if let ScoreState::Banned = info.score_state() {
|
||||
return Err("Attempted to unban (connection status) a banned peer");
|
||||
}
|
||||
|
||||
@@ -537,16 +541,15 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
pub fn shrink_to_fit(&mut self) {
|
||||
// Remove excess banned peers
|
||||
while self.banned_peers_count.banned_peers() > MAX_BANNED_PEERS {
|
||||
if let Some(to_drop) = if let Some((id, info)) = self
|
||||
if let Some(to_drop) = if let Some((id, info, _)) = self
|
||||
.peers
|
||||
.iter()
|
||||
.filter(|(_, info)| info.is_banned())
|
||||
.min_by(|(_, info_a), (_, info_b)| {
|
||||
info_a
|
||||
.score()
|
||||
.partial_cmp(&info_b.score())
|
||||
.unwrap_or(std::cmp::Ordering::Equal)
|
||||
}) {
|
||||
.filter_map(|(id, info)| match info.connection_status() {
|
||||
PeerConnectionStatus::Banned { since } => Some((id, info, since)),
|
||||
_ => None,
|
||||
})
|
||||
.min_by_key(|(_, _, since)| *since)
|
||||
{
|
||||
self.banned_peers_count
|
||||
.remove_banned_peer(info.seen_addresses());
|
||||
Some(id.clone())
|
||||
@@ -571,12 +574,11 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
||||
.peers
|
||||
.iter()
|
||||
.filter(|(_, info)| info.is_disconnected())
|
||||
.min_by(|(_, info_a), (_, info_b)| {
|
||||
info_a
|
||||
.score()
|
||||
.partial_cmp(&info_b.score())
|
||||
.unwrap_or(std::cmp::Ordering::Equal)
|
||||
.filter_map(|(id, info)| match info.connection_status() {
|
||||
PeerConnectionStatus::Disconnected { since } => Some((id, since)),
|
||||
_ => None,
|
||||
})
|
||||
.min_by_key(|(_, since)| *since)
|
||||
.map(|(id, _)| id.clone())
|
||||
{
|
||||
debug!(self.log, "Removing old disconnected peer"; "peer_id" => to_drop.to_string());
|
||||
@@ -665,7 +667,7 @@ mod tests {
|
||||
// this is the only peer
|
||||
assert_eq!(pdb.peers().count(), 1);
|
||||
// the peer has the default reputation
|
||||
assert_eq!(pdb.score(&random_peer).score(), Score::default().score());
|
||||
assert_eq!(pdb.score(&random_peer), Score::default().score());
|
||||
// it should be connected, and therefore not counted as disconnected
|
||||
assert_eq!(pdb.disconnected_peers, 0);
|
||||
assert!(peer_info.unwrap().is_connected());
|
||||
@@ -784,27 +786,30 @@ mod tests {
|
||||
#[test]
|
||||
fn test_disconnected_ban_consistency() {
|
||||
let mut pdb = get_db();
|
||||
let mut multiaddr = Multiaddr::empty();
|
||||
multiaddr.push(Protocol::Tcp(9000));
|
||||
multiaddr.push(Protocol::Ip4("0.0.0.0".parse().unwrap()));
|
||||
|
||||
let random_peer = PeerId::random();
|
||||
let random_peer1 = PeerId::random();
|
||||
let random_peer2 = PeerId::random();
|
||||
let random_peer3 = PeerId::random();
|
||||
|
||||
pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap(), None);
|
||||
pdb.connect_ingoing(&random_peer1, "/ip4/0.0.0.0".parse().unwrap(), None);
|
||||
pdb.connect_ingoing(&random_peer2, "/ip4/0.0.0.0".parse().unwrap(), None);
|
||||
pdb.connect_ingoing(&random_peer3, "/ip4/0.0.0.0".parse().unwrap(), None);
|
||||
pdb.connect_ingoing(&random_peer, multiaddr.clone(), None);
|
||||
pdb.connect_ingoing(&random_peer1, multiaddr.clone(), None);
|
||||
pdb.connect_ingoing(&random_peer2, multiaddr.clone(), None);
|
||||
pdb.connect_ingoing(&random_peer3, multiaddr.clone(), None);
|
||||
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
|
||||
assert_eq!(
|
||||
pdb.banned_peers_count.banned_peers(),
|
||||
pdb.banned_peers().count()
|
||||
);
|
||||
|
||||
pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap(), None);
|
||||
pdb.connect_ingoing(&random_peer, multiaddr.clone(), None);
|
||||
pdb.notify_disconnect(&random_peer1);
|
||||
pdb.disconnect_and_ban(&random_peer2);
|
||||
pdb.notify_disconnect(&random_peer2);
|
||||
pdb.connect_ingoing(&random_peer3, "/ip4/0.0.0.0".parse().unwrap(), None);
|
||||
pdb.connect_ingoing(&random_peer3, multiaddr.clone(), None);
|
||||
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
|
||||
assert_eq!(
|
||||
pdb.banned_peers_count.banned_peers(),
|
||||
@@ -818,7 +823,7 @@ mod tests {
|
||||
pdb.banned_peers().count()
|
||||
);
|
||||
|
||||
pdb.connect_outgoing(&random_peer2, "/ip4/0.0.0.0".parse().unwrap(), None);
|
||||
pdb.connect_outgoing(&random_peer2, multiaddr.clone(), None);
|
||||
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
|
||||
assert_eq!(
|
||||
pdb.banned_peers_count.banned_peers(),
|
||||
@@ -834,11 +839,11 @@ mod tests {
|
||||
|
||||
pdb.disconnect_and_ban(&random_peer3);
|
||||
pdb.notify_disconnect(&random_peer3);
|
||||
pdb.connect_ingoing(&random_peer1, "/ip4/0.0.0.0".parse().unwrap(), None);
|
||||
pdb.connect_ingoing(&random_peer1, multiaddr.clone(), None);
|
||||
pdb.notify_disconnect(&random_peer2);
|
||||
pdb.disconnect_and_ban(&random_peer3);
|
||||
pdb.notify_disconnect(&random_peer3);
|
||||
pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap(), None);
|
||||
pdb.connect_ingoing(&random_peer, multiaddr.clone(), None);
|
||||
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
|
||||
assert_eq!(
|
||||
pdb.banned_peers_count.banned_peers(),
|
||||
@@ -868,6 +873,7 @@ mod tests {
|
||||
for ip in ips {
|
||||
let mut addr = Multiaddr::empty();
|
||||
addr.push(Protocol::from(ip));
|
||||
addr.push(Protocol::Tcp(9000));
|
||||
pdb.connect_ingoing(&p, addr, None);
|
||||
}
|
||||
p
|
||||
@@ -981,8 +987,10 @@ mod tests {
|
||||
assert!(!pdb.is_banned(&p2));
|
||||
|
||||
// add ip2 to all peers and ban them.
|
||||
let mut socker_addr = Multiaddr::from(ip2);
|
||||
socker_addr.push(Protocol::Tcp(8080));
|
||||
for p in &peers {
|
||||
pdb.connect_ingoing(&p, ip2.into(), None);
|
||||
pdb.connect_ingoing(&p, socker_addr.clone(), None);
|
||||
pdb.disconnect_and_ban(p);
|
||||
pdb.notify_disconnect(p);
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
//! As the logic develops this documentation will advance.
|
||||
//!
|
||||
//! The scoring algorithms are currently experimental.
|
||||
use crate::behaviour::GOSSIPSUB_GREYLIST_THRESHOLD;
|
||||
use serde::Serialize;
|
||||
use std::time::Instant;
|
||||
use tokio::time::Duration;
|
||||
@@ -19,6 +20,9 @@ pub(crate) const DEFAULT_SCORE: f64 = 0.0;
|
||||
const MIN_SCORE_BEFORE_DISCONNECT: f64 = -20.0;
|
||||
/// The minimum reputation before a peer is banned.
|
||||
const MIN_SCORE_BEFORE_BAN: f64 = -50.0;
|
||||
/// If a peer has a lighthouse score below this constant all other score parts will get ignored and
|
||||
/// the peer will get banned regardless of the other parts.
|
||||
const MIN_LIGHTHOUSE_SCORE_BEFORE_BAN: f64 = -60.0;
|
||||
/// The maximum score a peer can obtain.
|
||||
const MAX_SCORE: f64 = 100.0;
|
||||
/// The minimum score a peer can obtain.
|
||||
@@ -28,6 +32,12 @@ const SCORE_HALFLIFE: f64 = 600.0;
|
||||
/// The number of seconds we ban a peer for before their score begins to decay.
|
||||
const BANNED_BEFORE_DECAY: Duration = Duration::from_secs(1800);
|
||||
|
||||
/// We weight negative gossipsub scores in such a way that they never result in a disconnect by
|
||||
/// themselves. This "solves" the problem of non-decaying gossipsub scores for disconnected peers.
|
||||
const GOSSIPSUB_NEGATIVE_SCORE_WEIGHT: f64 =
|
||||
(MIN_SCORE_BEFORE_DISCONNECT + 1.0) / GOSSIPSUB_GREYLIST_THRESHOLD;
|
||||
const GOSSIPSUB_POSITIVE_SCORE_WEIGHT: f64 = GOSSIPSUB_NEGATIVE_SCORE_WEIGHT;
|
||||
|
||||
/// A collection of actions a peer can perform which will adjust its score.
|
||||
/// Each variant has an associated score change.
|
||||
// To easily assess the behaviour of scores changes the number of variants should stay low, and
|
||||
@@ -55,74 +65,6 @@ pub enum PeerAction {
|
||||
_ValidMessage,
|
||||
}
|
||||
|
||||
/// The expected state of the peer given the peer's score.
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub(crate) enum ScoreState {
|
||||
/// We are content with the peers performance. We permit connections and messages.
|
||||
Healthy,
|
||||
/// The peer should be disconnected. We allow re-connections if the peer is persistent.
|
||||
Disconnected,
|
||||
/// The peer is banned. We disallow new connections until it's score has decayed into a
|
||||
/// tolerable threshold.
|
||||
Banned,
|
||||
}
|
||||
|
||||
/// A peer's score (perceived potential usefulness).
|
||||
///
|
||||
/// This simplistic version consists of a global score per peer which decays to 0 over time. The
|
||||
/// decay rate applies equally to positive and negative scores.
|
||||
#[derive(Copy, PartialEq, Clone, Debug, Serialize)]
|
||||
pub struct Score {
|
||||
/// The global score.
|
||||
// NOTE: In the future we may separate this into sub-scores involving the RPC, Gossipsub and
|
||||
// lighthouse.
|
||||
score: f64,
|
||||
/// The time the score was last updated to perform time-based adjustments such as score-decay.
|
||||
#[serde(skip)]
|
||||
last_updated: Instant,
|
||||
}
|
||||
|
||||
impl Default for Score {
|
||||
fn default() -> Self {
|
||||
Score {
|
||||
score: DEFAULT_SCORE,
|
||||
last_updated: Instant::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for Score {}
|
||||
|
||||
impl PartialOrd for Score {
|
||||
fn partial_cmp(&self, other: &Score) -> Option<std::cmp::Ordering> {
|
||||
self.score
|
||||
.partial_cmp(&other.score)
|
||||
.or_else(|| self.last_updated.partial_cmp(&other.last_updated))
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for Score {
|
||||
fn cmp(&self, other: &Score) -> std::cmp::Ordering {
|
||||
self.partial_cmp(other)
|
||||
.unwrap_or_else(|| std::cmp::Ordering::Equal)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<f64> for Score {
|
||||
fn from(f: f64) -> Self {
|
||||
Score {
|
||||
score: f,
|
||||
last_updated: Instant::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Score {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{:.2}", self.score)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PeerAction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
@@ -135,6 +77,18 @@ impl std::fmt::Display for PeerAction {
|
||||
}
|
||||
}
|
||||
|
||||
/// The expected state of the peer given the peer's score.
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
pub(crate) enum ScoreState {
|
||||
/// We are content with the peers performance. We permit connections and messages.
|
||||
Healthy,
|
||||
/// The peer should be disconnected. We allow re-connections if the peer is persistent.
|
||||
Disconnected,
|
||||
/// The peer is banned. We disallow new connections until it's score has decayed into a
|
||||
/// tolerable threshold.
|
||||
Banned,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ScoreState {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
@@ -145,23 +99,59 @@ impl std::fmt::Display for ScoreState {
|
||||
}
|
||||
}
|
||||
|
||||
impl Score {
|
||||
/// Return max possible score.
|
||||
pub fn max_score() -> Self {
|
||||
Score {
|
||||
score: MAX_SCORE,
|
||||
/// A peer's score (perceived potential usefulness).
|
||||
///
|
||||
/// This simplistic version consists of a global score per peer which decays to 0 over time. The
|
||||
/// decay rate applies equally to positive and negative scores.
|
||||
#[derive(PartialEq, Clone, Debug, Serialize)]
|
||||
pub struct RealScore {
|
||||
/// The global score.
|
||||
// NOTE: In the future we may separate this into sub-scores involving the RPC, Gossipsub and
|
||||
// lighthouse.
|
||||
lighthouse_score: f64,
|
||||
gossipsub_score: f64,
|
||||
/// We ignore the negative gossipsub scores of some peers to allow decaying without
|
||||
/// disconnecting.
|
||||
ignore_negative_gossipsub_score: bool,
|
||||
score: f64,
|
||||
/// The time the score was last updated to perform time-based adjustments such as score-decay.
|
||||
#[serde(skip)]
|
||||
last_updated: Instant,
|
||||
}
|
||||
|
||||
impl Default for RealScore {
|
||||
fn default() -> Self {
|
||||
RealScore {
|
||||
lighthouse_score: DEFAULT_SCORE,
|
||||
gossipsub_score: DEFAULT_SCORE,
|
||||
score: DEFAULT_SCORE,
|
||||
last_updated: Instant::now(),
|
||||
ignore_negative_gossipsub_score: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RealScore {
|
||||
/// Access to the underlying score.
|
||||
pub fn score(&self) -> f64 {
|
||||
fn recompute_score(&mut self) {
|
||||
self.score = self.lighthouse_score;
|
||||
if self.lighthouse_score <= MIN_LIGHTHOUSE_SCORE_BEFORE_BAN {
|
||||
//ignore all other scores, i.e. do nothing here
|
||||
} else if self.gossipsub_score >= 0.0 {
|
||||
self.score += self.gossipsub_score * GOSSIPSUB_POSITIVE_SCORE_WEIGHT;
|
||||
} else if !self.ignore_negative_gossipsub_score {
|
||||
self.score += self.gossipsub_score * GOSSIPSUB_NEGATIVE_SCORE_WEIGHT;
|
||||
}
|
||||
}
|
||||
|
||||
fn score(&self) -> f64 {
|
||||
self.score
|
||||
}
|
||||
|
||||
/// Modifies the score based on a peer's action.
|
||||
pub fn apply_peer_action(&mut self, peer_action: PeerAction) {
|
||||
match peer_action {
|
||||
PeerAction::Fatal => self.score = MIN_SCORE, // The worst possible score
|
||||
PeerAction::Fatal => self.set_lighthouse_score(MIN_SCORE), // The worst possible score
|
||||
PeerAction::LowToleranceError => self.add(-10.0),
|
||||
PeerAction::MidToleranceError => self.add(-5.0),
|
||||
PeerAction::HighToleranceError => self.add(-1.0),
|
||||
@@ -169,18 +159,14 @@ impl Score {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the expected state of the peer given it's score.
|
||||
pub(crate) fn state(&self) -> ScoreState {
|
||||
match self.score {
|
||||
x if x <= MIN_SCORE_BEFORE_BAN => ScoreState::Banned,
|
||||
x if x <= MIN_SCORE_BEFORE_DISCONNECT => ScoreState::Disconnected,
|
||||
_ => ScoreState::Healthy,
|
||||
}
|
||||
fn set_lighthouse_score(&mut self, new_score: f64) {
|
||||
self.lighthouse_score = new_score;
|
||||
self.update_state();
|
||||
}
|
||||
|
||||
/// Add an f64 to the score abiding by the limits.
|
||||
fn add(&mut self, score: f64) {
|
||||
let mut new_score = self.score + score;
|
||||
let mut new_score = self.lighthouse_score + score;
|
||||
if new_score > MAX_SCORE {
|
||||
new_score = MAX_SCORE;
|
||||
}
|
||||
@@ -188,32 +174,28 @@ impl Score {
|
||||
new_score = MIN_SCORE;
|
||||
}
|
||||
|
||||
if self.score > MIN_SCORE_BEFORE_BAN && new_score <= MIN_SCORE_BEFORE_BAN {
|
||||
self.set_lighthouse_score(new_score);
|
||||
}
|
||||
|
||||
fn update_state(&mut self) {
|
||||
let was_not_banned = self.score > MIN_SCORE_BEFORE_BAN;
|
||||
self.recompute_score();
|
||||
if was_not_banned && self.score <= MIN_SCORE_BEFORE_BAN {
|
||||
//we ban this peer for at least BANNED_BEFORE_DECAY seconds
|
||||
self.last_updated += BANNED_BEFORE_DECAY;
|
||||
}
|
||||
|
||||
self.score = new_score;
|
||||
}
|
||||
|
||||
/// Add an f64 to the score abiding by the limits.
|
||||
#[cfg(test)]
|
||||
pub fn test_add(&mut self, score: f64) {
|
||||
let mut new_score = self.score + score;
|
||||
if new_score > MAX_SCORE {
|
||||
new_score = MAX_SCORE;
|
||||
}
|
||||
if new_score < MIN_SCORE {
|
||||
new_score = MIN_SCORE;
|
||||
}
|
||||
|
||||
self.score = new_score;
|
||||
self.add(score);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
// reset the score
|
||||
pub fn test_reset(&mut self) {
|
||||
self.score = 0f64;
|
||||
self.set_lighthouse_score(0f64);
|
||||
}
|
||||
|
||||
/// Applies time-based logic such as decay rates to the score.
|
||||
@@ -237,10 +219,109 @@ impl Score {
|
||||
{
|
||||
// e^(-ln(2)/HL*t)
|
||||
let decay_factor = (*HALFLIFE_DECAY * secs_since_update as f64).exp();
|
||||
self.score *= decay_factor;
|
||||
self.lighthouse_score *= decay_factor;
|
||||
self.last_updated = now;
|
||||
self.update_state();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_gossipsub_score(&mut self, new_score: f64, ignore: bool) {
|
||||
// we only update gossipsub if last_updated is in the past which means either the peer is
|
||||
// not banned or the BANNED_BEFORE_DECAY time is over.
|
||||
if self.last_updated <= Instant::now() {
|
||||
self.gossipsub_score = new_score;
|
||||
self.ignore_negative_gossipsub_score = ignore;
|
||||
self.update_state();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_good_gossipsub_peer(&self) -> bool {
|
||||
self.gossipsub_score >= 0.0
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Clone, Debug, Serialize)]
|
||||
pub enum Score {
|
||||
Max,
|
||||
Real(RealScore),
|
||||
}
|
||||
|
||||
impl Default for Score {
|
||||
fn default() -> Self {
|
||||
Self::Real(RealScore::default())
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! apply {
|
||||
( $method:ident $(, $param_name: ident: $param_type: ty)*) => {
|
||||
impl Score {
|
||||
pub fn $method(
|
||||
&mut self, $($param_name: $param_type, )*
|
||||
) {
|
||||
if let Self::Real(score) = self {
|
||||
score.$method($($param_name, )*);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
apply!(apply_peer_action, peer_action: PeerAction);
|
||||
apply!(update);
|
||||
apply!(update_gossipsub_score, new_score: f64, ignore: bool);
|
||||
#[cfg(test)]
|
||||
apply!(test_add, score: f64);
|
||||
#[cfg(test)]
|
||||
apply!(test_reset);
|
||||
|
||||
impl Score {
|
||||
pub fn score(&self) -> f64 {
|
||||
match self {
|
||||
Self::Max => f64::INFINITY,
|
||||
Self::Real(score) => score.score(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn max_score() -> Self {
|
||||
Self::Max
|
||||
}
|
||||
|
||||
/// Returns the expected state of the peer given it's score.
|
||||
pub(crate) fn state(&self) -> ScoreState {
|
||||
match self.score() {
|
||||
x if x <= MIN_SCORE_BEFORE_BAN => ScoreState::Banned,
|
||||
x if x <= MIN_SCORE_BEFORE_DISCONNECT => ScoreState::Disconnected,
|
||||
_ => ScoreState::Healthy,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_good_gossipsub_peer(&self) -> bool {
|
||||
match self {
|
||||
Self::Max => true,
|
||||
Self::Real(score) => score.is_good_gossipsub_peer(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for Score {}
|
||||
|
||||
impl PartialOrd for Score {
|
||||
fn partial_cmp(&self, other: &Score) -> Option<std::cmp::Ordering> {
|
||||
self.score().partial_cmp(&other.score())
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for Score {
|
||||
fn cmp(&self, other: &Score) -> std::cmp::Ordering {
|
||||
self.partial_cmp(other)
|
||||
.unwrap_or_else(|| std::cmp::Ordering::Equal)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Score {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{:.2}", self.score())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -254,44 +335,60 @@ mod tests {
|
||||
// 0 change does not change de reputation
|
||||
//
|
||||
let change = 0.0;
|
||||
score.add(change);
|
||||
score.test_add(change);
|
||||
assert_eq!(score.score(), DEFAULT_SCORE);
|
||||
|
||||
// underflowing change is capped
|
||||
let mut score = Score::default();
|
||||
let change = MIN_SCORE - 50.0;
|
||||
score.add(change);
|
||||
score.test_add(change);
|
||||
assert_eq!(score.score(), MIN_SCORE);
|
||||
|
||||
// overflowing change is capped
|
||||
let mut score = Score::default();
|
||||
let change = MAX_SCORE + 50.0;
|
||||
score.add(change);
|
||||
score.test_add(change);
|
||||
assert_eq!(score.score(), MAX_SCORE);
|
||||
|
||||
// Score adjusts
|
||||
let mut score = Score::default();
|
||||
let change = 1.32;
|
||||
score.add(change);
|
||||
score.test_add(change);
|
||||
assert_eq!(score.score(), DEFAULT_SCORE + change);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ban_time() {
|
||||
let mut score = Score::default();
|
||||
let mut score = RealScore::default();
|
||||
let now = Instant::now();
|
||||
|
||||
let change = MIN_SCORE_BEFORE_BAN;
|
||||
score.add(change);
|
||||
score.test_add(change);
|
||||
assert_eq!(score.score(), MIN_SCORE_BEFORE_BAN);
|
||||
assert_eq!(score.state(), ScoreState::Banned);
|
||||
|
||||
score.update_at(now + BANNED_BEFORE_DECAY);
|
||||
assert_eq!(score.score(), MIN_SCORE_BEFORE_BAN);
|
||||
assert_eq!(score.state(), ScoreState::Banned);
|
||||
|
||||
score.update_at(now + BANNED_BEFORE_DECAY + Duration::from_secs(1));
|
||||
assert!(score.score() > MIN_SCORE_BEFORE_BAN);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_very_negative_gossipsub_score() {
|
||||
let mut score = Score::default();
|
||||
score.update_gossipsub_score(GOSSIPSUB_GREYLIST_THRESHOLD, false);
|
||||
assert!(!score.is_good_gossipsub_peer());
|
||||
assert!(score.score() < 0.0);
|
||||
assert_eq!(score.state(), ScoreState::Healthy);
|
||||
score.test_add(-1.0001);
|
||||
assert_eq!(score.state(), ScoreState::Disconnected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ignored_gossipsub_score() {
|
||||
let mut score = Score::default();
|
||||
score.update_gossipsub_score(GOSSIPSUB_GREYLIST_THRESHOLD, true);
|
||||
assert!(!score.is_good_gossipsub_peer());
|
||||
assert_eq!(score.score(), 0.0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,31 +176,6 @@ enum InboundState<TSpec: EthSpec> {
|
||||
Poisoned,
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> InboundState<TSpec> {
|
||||
/// Sends the given items over the underlying substream, if the state allows it, and returns the
|
||||
/// final state.
|
||||
fn send_items(
|
||||
self,
|
||||
pending_items: &mut Vec<RPCCodedResponse<TSpec>>,
|
||||
remaining_chunks: u64,
|
||||
) -> Self {
|
||||
if let InboundState::Idle(substream) = self {
|
||||
// only send on Idle
|
||||
if !pending_items.is_empty() {
|
||||
// take the items that we need to send
|
||||
let to_send = std::mem::replace(pending_items, vec![]);
|
||||
let fut = process_inbound_substream(substream, remaining_chunks, to_send).boxed();
|
||||
InboundState::Busy(Box::pin(fut))
|
||||
} else {
|
||||
// nothing to do, keep waiting for responses
|
||||
InboundState::Idle(substream)
|
||||
}
|
||||
} else {
|
||||
self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// State of an outbound substream. Either waiting for a response, or in the process of sending.
|
||||
pub enum OutboundSubstreamState<TSpec: EthSpec> {
|
||||
/// A request has been sent, and we are awaiting a response. This future is driven in the
|
||||
@@ -308,8 +283,8 @@ where
|
||||
let inbound_info = if let Some(info) = self.inbound_substreams.get_mut(&inbound_id) {
|
||||
info
|
||||
} else {
|
||||
warn!(self.log, "Stream has expired. Response not sent";
|
||||
"response" => response.to_string(), "id" => inbound_id);
|
||||
warn!(self.log, "Inbound stream has expired, response not sent";
|
||||
"response" => response.to_string(), "id" => inbound_id, "msg" => "Likely too many resources, reduce peer count");
|
||||
return;
|
||||
};
|
||||
|
||||
@@ -626,69 +601,99 @@ where
|
||||
// drive inbound streams that need to be processed
|
||||
let mut substreams_to_remove = Vec::new(); // Closed substreams that need to be removed
|
||||
for (id, info) in self.inbound_substreams.iter_mut() {
|
||||
match std::mem::replace(&mut info.state, InboundState::Poisoned) {
|
||||
state @ InboundState::Idle(..) if !deactivated => {
|
||||
info.state = state.send_items(&mut info.pending_items, info.remaining_chunks);
|
||||
}
|
||||
InboundState::Idle(mut substream) => {
|
||||
// handler is deactivated, close the stream and mark it for removal
|
||||
match substream.close().poll_unpin(cx) {
|
||||
// if we can't close right now, put the substream back and try again later
|
||||
Poll::Pending => info.state = InboundState::Idle(substream),
|
||||
Poll::Ready(res) => {
|
||||
substreams_to_remove.push(*id);
|
||||
if let Some(ref delay_key) = info.delay_key {
|
||||
self.inbound_substreams_delay.remove(delay_key);
|
||||
}
|
||||
if let Err(error) = res {
|
||||
self.pending_errors.push(HandlerErr::Inbound {
|
||||
id: *id,
|
||||
error,
|
||||
proto: info.protocol,
|
||||
});
|
||||
}
|
||||
if info.pending_items.last().map(|l| l.close_after()) == Some(false) {
|
||||
// if the request was still active, report back to cancel it
|
||||
self.pending_errors.push(HandlerErr::Inbound {
|
||||
id: *id,
|
||||
proto: info.protocol,
|
||||
error: RPCError::HandlerRejected,
|
||||
});
|
||||
}
|
||||
loop {
|
||||
match std::mem::replace(&mut info.state, InboundState::Poisoned) {
|
||||
InboundState::Idle(substream) if !deactivated => {
|
||||
if !info.pending_items.is_empty() {
|
||||
let to_send = std::mem::replace(&mut info.pending_items, vec![]);
|
||||
let fut = process_inbound_substream(
|
||||
substream,
|
||||
info.remaining_chunks,
|
||||
to_send,
|
||||
)
|
||||
.boxed();
|
||||
info.state = InboundState::Busy(Box::pin(fut));
|
||||
} else {
|
||||
info.state = InboundState::Idle(substream);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
InboundState::Busy(mut fut) => {
|
||||
// first check if sending finished
|
||||
let state = match fut.poll_unpin(cx) {
|
||||
Poll::Ready((substream, errors, remove, new_remaining_chunks)) => {
|
||||
info.remaining_chunks = new_remaining_chunks;
|
||||
// report any error
|
||||
for error in errors {
|
||||
self.pending_errors.push(HandlerErr::Inbound {
|
||||
id: *id,
|
||||
error,
|
||||
proto: info.protocol,
|
||||
})
|
||||
}
|
||||
if remove {
|
||||
InboundState::Idle(mut substream) => {
|
||||
// handler is deactivated, close the stream and mark it for removal
|
||||
match substream.close().poll_unpin(cx) {
|
||||
// if we can't close right now, put the substream back and try again later
|
||||
Poll::Pending => info.state = InboundState::Idle(substream),
|
||||
Poll::Ready(res) => {
|
||||
substreams_to_remove.push(*id);
|
||||
if let Some(ref delay_key) = info.delay_key {
|
||||
self.inbound_substreams_delay.remove(delay_key);
|
||||
}
|
||||
if let Err(error) = res {
|
||||
self.pending_errors.push(HandlerErr::Inbound {
|
||||
id: *id,
|
||||
error,
|
||||
proto: info.protocol,
|
||||
});
|
||||
}
|
||||
if info.pending_items.last().map(|l| l.close_after()) == Some(false)
|
||||
{
|
||||
// if the request was still active, report back to cancel it
|
||||
self.pending_errors.push(HandlerErr::Inbound {
|
||||
id: *id,
|
||||
proto: info.protocol,
|
||||
error: RPCError::HandlerRejected,
|
||||
});
|
||||
}
|
||||
}
|
||||
InboundState::Idle(substream)
|
||||
}
|
||||
Poll::Pending => InboundState::Busy(fut),
|
||||
};
|
||||
info.state = if !deactivated {
|
||||
// if the last batch finished, send more.
|
||||
state.send_items(&mut info.pending_items, info.remaining_chunks)
|
||||
} else {
|
||||
state
|
||||
};
|
||||
break;
|
||||
}
|
||||
InboundState::Busy(mut fut) => {
|
||||
// first check if sending finished
|
||||
match fut.poll_unpin(cx) {
|
||||
Poll::Ready((substream, errors, remove, new_remaining_chunks)) => {
|
||||
info.remaining_chunks = new_remaining_chunks;
|
||||
// report any error
|
||||
for error in errors {
|
||||
self.pending_errors.push(HandlerErr::Inbound {
|
||||
id: *id,
|
||||
error,
|
||||
proto: info.protocol,
|
||||
})
|
||||
}
|
||||
if remove {
|
||||
substreams_to_remove.push(*id);
|
||||
if let Some(ref delay_key) = info.delay_key {
|
||||
self.inbound_substreams_delay.remove(delay_key);
|
||||
}
|
||||
}
|
||||
|
||||
// The stream may be currently idle. Attempt to process more
|
||||
// elements
|
||||
|
||||
if !deactivated && !info.pending_items.is_empty() {
|
||||
let to_send =
|
||||
std::mem::replace(&mut info.pending_items, vec![]);
|
||||
let fut = process_inbound_substream(
|
||||
substream,
|
||||
info.remaining_chunks,
|
||||
to_send,
|
||||
)
|
||||
.boxed();
|
||||
info.state = InboundState::Busy(Box::pin(fut));
|
||||
} else {
|
||||
info.state = InboundState::Idle(substream);
|
||||
break;
|
||||
}
|
||||
}
|
||||
Poll::Pending => {
|
||||
info.state = InboundState::Busy(fut);
|
||||
break;
|
||||
}
|
||||
};
|
||||
}
|
||||
InboundState::Poisoned => unreachable!("Poisoned inbound substream"),
|
||||
}
|
||||
InboundState::Poisoned => unreachable!("Poisoned inbound substream"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -912,6 +917,8 @@ async fn process_inbound_substream<TSpec: EthSpec>(
|
||||
substream_closed = true;
|
||||
}
|
||||
}
|
||||
} else if matches!(item, RPCCodedResponse::StreamTermination(_)) {
|
||||
// The sender closed the stream before us, ignore this.
|
||||
} else {
|
||||
// we have more items after a closed substream, report those as errors
|
||||
errors.push(RPCError::InternalError(
|
||||
|
||||
@@ -119,7 +119,7 @@ impl<TSpec: EthSpec> RPC<TSpec> {
|
||||
Duration::from_secs(10),
|
||||
)
|
||||
.build()
|
||||
.unwrap();
|
||||
.expect("Configuration parameters are valid");
|
||||
RPC {
|
||||
limiter,
|
||||
events: Vec::new(),
|
||||
|
||||
@@ -9,22 +9,21 @@ use crate::EnrExt;
|
||||
use crate::{NetworkConfig, NetworkGlobals, PeerAction};
|
||||
use futures::prelude::*;
|
||||
use libp2p::core::{
|
||||
identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::boxed::Boxed,
|
||||
identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed,
|
||||
};
|
||||
use libp2p::{
|
||||
core, noise,
|
||||
swarm::{SwarmBuilder, SwarmEvent},
|
||||
PeerId, Swarm, Transport,
|
||||
};
|
||||
use slog::{crit, debug, info, o, trace, warn};
|
||||
use slog::{crit, debug, info, o, trace, warn, Logger};
|
||||
use ssz::Decode;
|
||||
use std::fs::File;
|
||||
use std::io::prelude::*;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use types::{EnrForkId, EthSpec};
|
||||
use types::{ChainSpec, EnrForkId, EthSpec};
|
||||
|
||||
pub const NETWORK_KEY_FILENAME: &str = "key";
|
||||
/// The maximum simultaneous libp2p connections per peer.
|
||||
@@ -54,7 +53,7 @@ pub struct Service<TSpec: EthSpec> {
|
||||
pub local_peer_id: PeerId,
|
||||
|
||||
/// The libp2p logger handle.
|
||||
pub log: slog::Logger,
|
||||
pub log: Logger,
|
||||
}
|
||||
|
||||
impl<TSpec: EthSpec> Service<TSpec> {
|
||||
@@ -62,7 +61,8 @@ impl<TSpec: EthSpec> Service<TSpec> {
|
||||
executor: task_executor::TaskExecutor,
|
||||
config: &NetworkConfig,
|
||||
enr_fork_id: EnrForkId,
|
||||
log: &slog::Logger,
|
||||
log: &Logger,
|
||||
chain_spec: &ChainSpec,
|
||||
) -> error::Result<(Arc<NetworkGlobals<TSpec>>, Self)> {
|
||||
let log = log.new(o!("service"=> "libp2p"));
|
||||
trace!(log, "Libp2p Service starting");
|
||||
@@ -105,8 +105,14 @@ impl<TSpec: EthSpec> Service<TSpec> {
|
||||
let transport = build_transport(local_keypair.clone())
|
||||
.map_err(|e| format!("Failed to build transport: {:?}", e))?;
|
||||
// Lighthouse network behaviour
|
||||
let behaviour =
|
||||
Behaviour::new(&local_keypair, config, network_globals.clone(), &log).await?;
|
||||
let behaviour = Behaviour::new(
|
||||
&local_keypair,
|
||||
config,
|
||||
network_globals.clone(),
|
||||
&log,
|
||||
chain_spec,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// use the executor for libp2p
|
||||
struct Executor(task_executor::TaskExecutor);
|
||||
@@ -200,6 +206,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
|
||||
}
|
||||
|
||||
let mut subscribed_topics: Vec<GossipKind> = vec![];
|
||||
|
||||
for topic_kind in &config.topics {
|
||||
if swarm.subscribe_kind(topic_kind.clone()) {
|
||||
subscribed_topics.push(topic_kind.clone());
|
||||
@@ -207,6 +214,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
|
||||
warn!(log, "Could not subscribe to topic"; "topic" => format!("{}",topic_kind));
|
||||
}
|
||||
}
|
||||
|
||||
if !subscribed_topics.is_empty() {
|
||||
info!(log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics));
|
||||
}
|
||||
@@ -323,9 +331,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
|
||||
|
||||
/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and
|
||||
/// mplex as the multiplexing layer.
|
||||
fn build_transport(
|
||||
local_private_key: Keypair,
|
||||
) -> Result<Boxed<(PeerId, StreamMuxerBox), Error>, Error> {
|
||||
fn build_transport(local_private_key: Keypair) -> std::io::Result<Boxed<(PeerId, StreamMuxerBox)>> {
|
||||
let transport = libp2p::tcp::TokioTcpConfig::new().nodelay(true);
|
||||
let transport = libp2p::dns::DnsConfig::new(transport)?;
|
||||
#[cfg(feature = "libp2p-websocket")]
|
||||
@@ -333,15 +339,21 @@ fn build_transport(
|
||||
let trans_clone = transport.clone();
|
||||
transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
|
||||
};
|
||||
|
||||
// mplex config
|
||||
let mut mplex_config = libp2p::mplex::MplexConfig::new();
|
||||
mplex_config.max_buffer_len(256);
|
||||
mplex_config.max_buffer_len_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);
|
||||
|
||||
// Authentication
|
||||
Ok(transport
|
||||
.upgrade(core::upgrade::Version::V1)
|
||||
.authenticate(generate_noise_config(&local_private_key))
|
||||
.multiplex(libp2p::mplex::MplexConfig::new())
|
||||
.map(|(peer, muxer), _| (peer, core::muxing::StreamMuxerBox::new(muxer)))
|
||||
.multiplex(core::upgrade::SelectUpgrade::new(
|
||||
libp2p::yamux::Config::default(),
|
||||
mplex_config,
|
||||
))
|
||||
.timeout(Duration::from_secs(10))
|
||||
.timeout(Duration::from_secs(10))
|
||||
.map_err(|err| Error::new(ErrorKind::Other, err))
|
||||
.boxed())
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ use eth2_libp2p::{GossipsubConfigBuilder, Libp2pEvent, NetworkConfig};
|
||||
use slog::{debug, error, o, Drain};
|
||||
use std::net::{TcpListener, UdpSocket};
|
||||
use std::time::Duration;
|
||||
use types::{EnrForkId, MinimalEthSpec};
|
||||
use types::{ChainSpec, EnrForkId, MinimalEthSpec};
|
||||
|
||||
type E = MinimalEthSpec;
|
||||
use tempdir::TempDir;
|
||||
@@ -105,10 +105,16 @@ pub async fn build_libp2p_instance(boot_nodes: Vec<Enr>, log: slog::Logger) -> L
|
||||
shutdown_tx,
|
||||
);
|
||||
Libp2pInstance(
|
||||
LibP2PService::new(executor, &config, EnrForkId::default(), &log)
|
||||
.await
|
||||
.expect("should build libp2p instance")
|
||||
.1,
|
||||
LibP2PService::new(
|
||||
executor,
|
||||
&config,
|
||||
EnrForkId::default(),
|
||||
&log,
|
||||
&ChainSpec::minimal(),
|
||||
)
|
||||
.await
|
||||
.expect("should build libp2p instance")
|
||||
.1,
|
||||
signal,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -89,6 +89,7 @@ impl BeaconProposerCache {
|
||||
|
||||
Ok(ProposerData {
|
||||
pubkey: PublicKeyBytes::from(pubkey),
|
||||
validator_index: i as u64,
|
||||
slot,
|
||||
})
|
||||
})
|
||||
|
||||
@@ -39,7 +39,7 @@ use std::sync::Arc;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use types::{
|
||||
Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec,
|
||||
Hash256, ProposerSlashing, PublicKey, RelativeEpoch, SignedAggregateAndProof,
|
||||
Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, RelativeEpoch, SignedAggregateAndProof,
|
||||
SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig,
|
||||
};
|
||||
use warp::{http::Response, Filter};
|
||||
@@ -390,7 +390,13 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
let beacon_states_path = eth1_v1
|
||||
.and(warp::path("beacon"))
|
||||
.and(warp::path("states"))
|
||||
.and(warp::path::param::<StateId>())
|
||||
.and(warp::path::param::<StateId>().or_else(|_| {
|
||||
blocking_task(|| {
|
||||
Err(warp_utils::reject::custom_bad_request(
|
||||
"Invalid state ID".to_string(),
|
||||
))
|
||||
})
|
||||
}))
|
||||
.and(chain_filter.clone());
|
||||
|
||||
// GET beacon/states/{state_id}/root
|
||||
@@ -435,6 +441,50 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
})
|
||||
});
|
||||
|
||||
// GET beacon/states/{state_id}/validator_balances?id
|
||||
let get_beacon_state_validator_balances = beacon_states_path
|
||||
.clone()
|
||||
.and(warp::path("validator_balances"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::query::<api_types::ValidatorBalancesQuery>())
|
||||
.and_then(
|
||||
|state_id: StateId,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
query: api_types::ValidatorBalancesQuery| {
|
||||
blocking_json_task(move || {
|
||||
state_id
|
||||
.map_state(&chain, |state| {
|
||||
Ok(state
|
||||
.validators
|
||||
.iter()
|
||||
.zip(state.balances.iter())
|
||||
.enumerate()
|
||||
// filter by validator id(s) if provided
|
||||
.filter(|(index, (validator, _))| {
|
||||
query.id.as_ref().map_or(true, |ids| {
|
||||
ids.0.iter().any(|id| match id {
|
||||
ValidatorId::PublicKey(pubkey) => {
|
||||
&validator.pubkey == pubkey
|
||||
}
|
||||
ValidatorId::Index(param_index) => {
|
||||
*param_index == *index as u64
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
.map(|(index, (_, balance))| {
|
||||
Some(api_types::ValidatorBalanceData {
|
||||
index: index as u64,
|
||||
balance: *balance,
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>())
|
||||
})
|
||||
.map(api_types::GenericResponse::from)
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET beacon/states/{state_id}/validators?id,status
|
||||
let get_beacon_state_validators = beacon_states_path
|
||||
.clone()
|
||||
@@ -747,7 +797,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
* beacon/blocks
|
||||
*/
|
||||
|
||||
// POST beacon/blocks/{block_id}
|
||||
// POST beacon/blocks
|
||||
let post_beacon_blocks = eth1_v1
|
||||
.and(warp::path("beacon"))
|
||||
.and(warp::path("blocks"))
|
||||
@@ -1286,10 +1336,16 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
})?;
|
||||
|
||||
if let Some(peer_info) = network_globals.peers.read().peer_info(&peer_id) {
|
||||
//TODO: update this to seen_addresses once #1764 is resolved
|
||||
let address = match peer_info.listening_addresses.get(0) {
|
||||
Some(addr) => addr.to_string(),
|
||||
None => "".to_string(), // this field is non-nullable in the eth2 API spec
|
||||
let address = if let Some(socket_addr) =
|
||||
peer_info.seen_addresses.iter().next()
|
||||
{
|
||||
let mut addr = eth2_libp2p::Multiaddr::from(socket_addr.ip());
|
||||
addr.push(eth2_libp2p::multiaddr::Protocol::Tcp(socket_addr.port()));
|
||||
addr.to_string()
|
||||
} else if let Some(addr) = peer_info.listening_addresses.first() {
|
||||
addr.to_string()
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
// the eth2 API spec implies only peers we have been connected to at some point should be included.
|
||||
@@ -1319,56 +1375,260 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("node"))
|
||||
.and(warp::path("peers"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::query::<api_types::PeersQuery>())
|
||||
.and(network_globals.clone())
|
||||
.and_then(
|
||||
|query: api_types::PeersQuery, network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
||||
blocking_json_task(move || {
|
||||
let mut peers: Vec<api_types::PeerData> = Vec::new();
|
||||
network_globals
|
||||
.peers
|
||||
.read()
|
||||
.peers()
|
||||
.for_each(|(peer_id, peer_info)| {
|
||||
let address =
|
||||
if let Some(socket_addr) = peer_info.seen_addresses.iter().next() {
|
||||
let mut addr = eth2_libp2p::Multiaddr::from(socket_addr.ip());
|
||||
addr.push(eth2_libp2p::multiaddr::Protocol::Tcp(
|
||||
socket_addr.port(),
|
||||
));
|
||||
addr.to_string()
|
||||
} else if let Some(addr) = peer_info.listening_addresses.first() {
|
||||
addr.to_string()
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
// the eth2 API spec implies only peers we have been connected to at some point should be included.
|
||||
if let Some(dir) = peer_info.connection_direction.as_ref() {
|
||||
let direction =
|
||||
api_types::PeerDirection::from_connection_direction(&dir);
|
||||
let state = api_types::PeerState::from_peer_connection_status(
|
||||
&peer_info.connection_status(),
|
||||
);
|
||||
|
||||
let state_matches = query.state.as_ref().map_or(true, |states| {
|
||||
states.0.iter().any(|state_param| *state_param == state)
|
||||
});
|
||||
let direction_matches =
|
||||
query.direction.as_ref().map_or(true, |directions| {
|
||||
directions.0.iter().any(|dir_param| *dir_param == direction)
|
||||
});
|
||||
|
||||
if state_matches && direction_matches {
|
||||
peers.push(api_types::PeerData {
|
||||
peer_id: peer_id.to_string(),
|
||||
enr: peer_info.enr.as_ref().map(|enr| enr.to_base64()),
|
||||
last_seen_p2p_address: address,
|
||||
direction,
|
||||
state,
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
Ok(api_types::PeersData {
|
||||
meta: api_types::PeersMetaData {
|
||||
count: peers.len() as u64,
|
||||
},
|
||||
data: peers,
|
||||
})
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET node/peer_count
|
||||
let get_node_peer_count = eth1_v1
|
||||
.and(warp::path("node"))
|
||||
.and(warp::path("peer_count"))
|
||||
.and(warp::path::end())
|
||||
.and(network_globals.clone())
|
||||
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
||||
blocking_json_task(move || {
|
||||
let mut peers: Vec<api_types::PeerData> = Vec::new();
|
||||
let mut connected: u64 = 0;
|
||||
let mut connecting: u64 = 0;
|
||||
let mut disconnected: u64 = 0;
|
||||
let mut disconnecting: u64 = 0;
|
||||
|
||||
network_globals
|
||||
.peers
|
||||
.read()
|
||||
.peers()
|
||||
// the eth2 API spec implies only peers we have been connected to at some point should be included.
|
||||
.filter(|(_, peer_info)| peer_info.connection_direction.is_some())
|
||||
.for_each(|(peer_id, peer_info)| {
|
||||
//TODO: update this to seen_addresses once #1764 is resolved
|
||||
let address = match peer_info.listening_addresses.get(0) {
|
||||
Some(addr) => addr.to_string(),
|
||||
None => "".to_string(), // this field is non-nullable in the eth2 API spec
|
||||
};
|
||||
if let Some(dir) = peer_info.connection_direction.as_ref() {
|
||||
peers.push(api_types::PeerData {
|
||||
peer_id: peer_id.to_string(),
|
||||
enr: peer_info.enr.as_ref().map(|enr| enr.to_base64()),
|
||||
last_seen_p2p_address: address,
|
||||
direction: api_types::PeerDirection::from_connection_direction(
|
||||
&dir,
|
||||
),
|
||||
state: api_types::PeerState::from_peer_connection_status(
|
||||
&peer_info.connection_status(),
|
||||
),
|
||||
});
|
||||
.for_each(|(_, peer_info)| {
|
||||
let state = api_types::PeerState::from_peer_connection_status(
|
||||
&peer_info.connection_status(),
|
||||
);
|
||||
match state {
|
||||
api_types::PeerState::Connected => connected += 1,
|
||||
api_types::PeerState::Connecting => connecting += 1,
|
||||
api_types::PeerState::Disconnected => disconnected += 1,
|
||||
api_types::PeerState::Disconnecting => disconnecting += 1,
|
||||
}
|
||||
});
|
||||
Ok(api_types::GenericResponse::from(peers))
|
||||
|
||||
Ok(api_types::GenericResponse::from(api_types::PeerCount {
|
||||
disconnecting,
|
||||
connecting,
|
||||
connected,
|
||||
disconnected,
|
||||
}))
|
||||
})
|
||||
});
|
||||
|
||||
/*
|
||||
* validator
|
||||
*/
|
||||
|
||||
// GET validator/duties/attester/{epoch}
|
||||
let get_validator_duties_attester = eth1_v1
|
||||
// GET validator/duties/proposer/{epoch}
|
||||
let get_validator_duties_proposer = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("duties"))
|
||||
.and(warp::path("proposer"))
|
||||
.and(warp::path::param::<Epoch>())
|
||||
.and(warp::path::end())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(beacon_proposer_cache())
|
||||
.and_then(
|
||||
|epoch: Epoch,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>| {
|
||||
blocking_json_task(move || {
|
||||
let current_epoch = chain
|
||||
.epoch()
|
||||
.map_err(warp_utils::reject::beacon_chain_error)?;
|
||||
|
||||
if epoch > current_epoch {
|
||||
return Err(warp_utils::reject::custom_bad_request(format!(
|
||||
"request epoch {} is ahead of the current epoch {}",
|
||||
epoch, current_epoch
|
||||
)));
|
||||
}
|
||||
|
||||
if epoch == current_epoch {
|
||||
beacon_proposer_cache
|
||||
.lock()
|
||||
.get_proposers(&chain, epoch)
|
||||
.map(api_types::GenericResponse::from)
|
||||
} else {
|
||||
let state =
|
||||
StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
|
||||
.state(&chain)?;
|
||||
|
||||
epoch
|
||||
.slot_iter(T::EthSpec::slots_per_epoch())
|
||||
.map(|slot| {
|
||||
state
|
||||
.get_beacon_proposer_index(slot, &chain.spec)
|
||||
.map_err(warp_utils::reject::beacon_state_error)
|
||||
.and_then(|i| {
|
||||
let pubkey =
|
||||
chain.validator_pubkey(i)
|
||||
.map_err(warp_utils::reject::beacon_chain_error)?
|
||||
.ok_or_else(||
|
||||
warp_utils::reject::beacon_chain_error(
|
||||
BeaconChainError::ValidatorPubkeyCacheIncomplete(i)
|
||||
)
|
||||
)?;
|
||||
|
||||
Ok(api_types::ProposerData {
|
||||
pubkey: PublicKeyBytes::from(pubkey),
|
||||
validator_index: i as u64,
|
||||
slot,
|
||||
})
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<api_types::ProposerData>, _>>()
|
||||
.map(api_types::GenericResponse::from)
|
||||
}
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET validator/blocks/{slot}
|
||||
let get_validator_blocks = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("blocks"))
|
||||
.and(warp::path::param::<Slot>())
|
||||
.and(warp::path::end())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(warp::query::<api_types::ValidatorBlocksQuery>())
|
||||
.and(chain_filter.clone())
|
||||
.and_then(
|
||||
|slot: Slot, query: api_types::ValidatorBlocksQuery, chain: Arc<BeaconChain<T>>| {
|
||||
blocking_json_task(move || {
|
||||
let randao_reveal = (&query.randao_reveal).try_into().map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!(
|
||||
"randao reveal is not valid BLS signature: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
chain
|
||||
.produce_block(randao_reveal, slot, query.graffiti.map(Into::into))
|
||||
.map(|block_and_state| block_and_state.0)
|
||||
.map(api_types::GenericResponse::from)
|
||||
.map_err(warp_utils::reject::block_production_error)
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET validator/attestation_data?slot,committee_index
|
||||
let get_validator_attestation_data = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("attestation_data"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::query::<api_types::ValidatorAttestationDataQuery>())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and_then(
|
||||
|query: api_types::ValidatorAttestationDataQuery, chain: Arc<BeaconChain<T>>| {
|
||||
blocking_json_task(move || {
|
||||
chain
|
||||
.produce_unaggregated_attestation(query.slot, query.committee_index)
|
||||
.map(|attestation| attestation.data)
|
||||
.map(api_types::GenericResponse::from)
|
||||
.map_err(warp_utils::reject::beacon_chain_error)
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET validator/aggregate_attestation?attestation_data_root,slot
|
||||
let get_validator_aggregate_attestation = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("aggregate_attestation"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::query::<api_types::ValidatorAggregateAttestationQuery>())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and_then(
|
||||
|query: api_types::ValidatorAggregateAttestationQuery, chain: Arc<BeaconChain<T>>| {
|
||||
blocking_json_task(move || {
|
||||
chain
|
||||
.get_aggregated_attestation_by_slot_and_root(
|
||||
query.slot,
|
||||
&query.attestation_data_root,
|
||||
)
|
||||
.map(api_types::GenericResponse::from)
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(
|
||||
"no matching aggregate found".to_string(),
|
||||
)
|
||||
})
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// POST validator/duties/attester/{epoch}
|
||||
let post_validator_duties_attester = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("duties"))
|
||||
.and(warp::path("attester"))
|
||||
.and(warp::path::param::<Epoch>())
|
||||
.and(warp::path::end())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(warp::query::<api_types::ValidatorDutiesQuery>())
|
||||
.and(warp::body::json())
|
||||
.and(chain_filter.clone())
|
||||
.and_then(
|
||||
|epoch: Epoch, query: api_types::ValidatorDutiesQuery, chain: Arc<BeaconChain<T>>| {
|
||||
|epoch: Epoch, indices: api_types::ValidatorIndexData, chain: Arc<BeaconChain<T>>| {
|
||||
blocking_json_task(move || {
|
||||
let current_epoch = chain
|
||||
.epoch()
|
||||
@@ -1384,30 +1644,22 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
let validator_count = StateId::head()
|
||||
.map_state(&chain, |state| Ok(state.validators.len() as u64))?;
|
||||
|
||||
let indices = query
|
||||
.index
|
||||
.as_ref()
|
||||
.map(|index| index.0.clone())
|
||||
.map(Result::Ok)
|
||||
.unwrap_or_else(|| {
|
||||
Ok::<_, warp::Rejection>((0..validator_count).collect())
|
||||
})?;
|
||||
|
||||
let pubkeys = indices
|
||||
.into_iter()
|
||||
.filter(|i| *i < validator_count as u64)
|
||||
.0
|
||||
.iter()
|
||||
.filter(|i| **i < validator_count as u64)
|
||||
.map(|i| {
|
||||
let pubkey = chain
|
||||
.validator_pubkey(i as usize)
|
||||
.validator_pubkey(*i as usize)
|
||||
.map_err(warp_utils::reject::beacon_chain_error)?
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_bad_request(format!(
|
||||
"unknown validator index {}",
|
||||
i
|
||||
*i
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok((i, pubkey))
|
||||
Ok((*i, pubkey))
|
||||
})
|
||||
.collect::<Result<Vec<_>, warp::Rejection>>()?;
|
||||
|
||||
@@ -1523,103 +1775,6 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
},
|
||||
);
|
||||
|
||||
// GET validator/duties/proposer/{epoch}
|
||||
let get_validator_duties_proposer = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("duties"))
|
||||
.and(warp::path("proposer"))
|
||||
.and(warp::path::param::<Epoch>())
|
||||
.and(warp::path::end())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(beacon_proposer_cache())
|
||||
.and_then(
|
||||
|epoch: Epoch,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>| {
|
||||
blocking_json_task(move || {
|
||||
beacon_proposer_cache
|
||||
.lock()
|
||||
.get_proposers(&chain, epoch)
|
||||
.map(api_types::GenericResponse::from)
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET validator/blocks/{slot}
|
||||
let get_validator_blocks = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("blocks"))
|
||||
.and(warp::path::param::<Slot>())
|
||||
.and(warp::path::end())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(warp::query::<api_types::ValidatorBlocksQuery>())
|
||||
.and(chain_filter.clone())
|
||||
.and_then(
|
||||
|slot: Slot, query: api_types::ValidatorBlocksQuery, chain: Arc<BeaconChain<T>>| {
|
||||
blocking_json_task(move || {
|
||||
let randao_reveal = (&query.randao_reveal).try_into().map_err(|e| {
|
||||
warp_utils::reject::custom_bad_request(format!(
|
||||
"randao reveal is not valid BLS signature: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
chain
|
||||
.produce_block(randao_reveal, slot, query.graffiti.map(Into::into))
|
||||
.map(|block_and_state| block_and_state.0)
|
||||
.map(api_types::GenericResponse::from)
|
||||
.map_err(warp_utils::reject::block_production_error)
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET validator/attestation_data?slot,committee_index
|
||||
let get_validator_attestation_data = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("attestation_data"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::query::<api_types::ValidatorAttestationDataQuery>())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and_then(
|
||||
|query: api_types::ValidatorAttestationDataQuery, chain: Arc<BeaconChain<T>>| {
|
||||
blocking_json_task(move || {
|
||||
chain
|
||||
.produce_unaggregated_attestation(query.slot, query.committee_index)
|
||||
.map(|attestation| attestation.data)
|
||||
.map(api_types::GenericResponse::from)
|
||||
.map_err(warp_utils::reject::beacon_chain_error)
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET validator/aggregate_attestation?attestation_data_root,slot
|
||||
let get_validator_aggregate_attestation = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("aggregate_attestation"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::query::<api_types::ValidatorAggregateAttestationQuery>())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and_then(
|
||||
|query: api_types::ValidatorAggregateAttestationQuery, chain: Arc<BeaconChain<T>>| {
|
||||
blocking_json_task(move || {
|
||||
chain
|
||||
.get_aggregated_attestation_by_slot_and_root(
|
||||
query.slot,
|
||||
&query.attestation_data_root,
|
||||
)
|
||||
.map(api_types::GenericResponse::from)
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(
|
||||
"no matching aggregate found".to_string(),
|
||||
)
|
||||
})
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// POST validator/aggregate_and_proofs
|
||||
let post_validator_aggregate_and_proofs = eth1_v1
|
||||
.and(warp::path("validator"))
|
||||
@@ -1629,53 +1784,81 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(chain_filter.clone())
|
||||
.and(warp::body::json())
|
||||
.and(network_tx_filter.clone())
|
||||
.and(log_filter.clone())
|
||||
.and_then(
|
||||
|chain: Arc<BeaconChain<T>>,
|
||||
aggregate: SignedAggregateAndProof<T::EthSpec>,
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
|
||||
aggregates: Vec<SignedAggregateAndProof<T::EthSpec>>,
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, log: Logger| {
|
||||
blocking_json_task(move || {
|
||||
let aggregate =
|
||||
let mut verified_aggregates = Vec::with_capacity(aggregates.len());
|
||||
let mut messages = Vec::with_capacity(aggregates.len());
|
||||
let mut failures = Vec::new();
|
||||
|
||||
// Verify that all messages in the post are valid before processing further
|
||||
for (index, aggregate) in aggregates.as_slice().iter().enumerate() {
|
||||
match chain.verify_aggregated_attestation_for_gossip(aggregate.clone()) {
|
||||
Ok(aggregate) => aggregate,
|
||||
Ok(verified_aggregate) => {
|
||||
messages.push(PubsubMessage::AggregateAndProofAttestation(Box::new(
|
||||
verified_aggregate.aggregate().clone(),
|
||||
)));
|
||||
verified_aggregates.push((index, verified_aggregate));
|
||||
}
|
||||
// If we already know the attestation, don't broadcast it or attempt to
|
||||
// further verify it. Return success.
|
||||
//
|
||||
// It's reasonably likely that two different validators produce
|
||||
// identical aggregates, especially if they're using the same beacon
|
||||
// node.
|
||||
Err(AttnError::AttestationAlreadyKnown(_)) => return Ok(()),
|
||||
Err(AttnError::AttestationAlreadyKnown(_)) => continue,
|
||||
Err(e) => {
|
||||
return Err(warp_utils::reject::object_invalid(format!(
|
||||
"gossip verification failed: {:?}",
|
||||
e
|
||||
)));
|
||||
}
|
||||
};
|
||||
error!(log,
|
||||
"Failure verifying aggregate and proofs";
|
||||
"error" => format!("{:?}", e),
|
||||
"request_index" => index,
|
||||
"aggregator_index" => aggregate.message.aggregator_index,
|
||||
"attestation_index" => aggregate.message.aggregate.data.index,
|
||||
"attestation_slot" => aggregate.message.aggregate.data.slot,
|
||||
);
|
||||
failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e)));
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
publish_pubsub_message(
|
||||
&network_tx,
|
||||
PubsubMessage::AggregateAndProofAttestation(Box::new(
|
||||
aggregate.aggregate().clone(),
|
||||
)),
|
||||
)?;
|
||||
// Publish aggregate attestations to the libp2p network
|
||||
if !messages.is_empty() {
|
||||
publish_network_message(&network_tx, NetworkMessage::Publish { messages })?;
|
||||
}
|
||||
|
||||
chain
|
||||
.apply_attestation_to_fork_choice(&aggregate)
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::broadcast_without_import(format!(
|
||||
"not applied to fork choice: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
// Import aggregate attestations
|
||||
for (index, verified_aggregate) in verified_aggregates {
|
||||
if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) {
|
||||
error!(log,
|
||||
"Failure applying verified aggregate attestation to fork choice";
|
||||
"error" => format!("{:?}", e),
|
||||
"request_index" => index,
|
||||
"aggregator_index" => verified_aggregate.aggregate().message.aggregator_index,
|
||||
"attestation_index" => verified_aggregate.attestation().data.index,
|
||||
"attestation_slot" => verified_aggregate.attestation().data.slot,
|
||||
);
|
||||
failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e)));
|
||||
}
|
||||
if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) {
|
||||
warn!(log,
|
||||
"Could not add verified aggregate attestation to the inclusion pool";
|
||||
"error" => format!("{:?}", e),
|
||||
"request_index" => index,
|
||||
);
|
||||
failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e)));
|
||||
}
|
||||
}
|
||||
|
||||
chain.add_to_block_inclusion_pool(aggregate).map_err(|e| {
|
||||
warp_utils::reject::broadcast_without_import(format!(
|
||||
"not applied to block inclusion pool: {:?}",
|
||||
e
|
||||
if !failures.is_empty() {
|
||||
Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(),
|
||||
failures
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
})
|
||||
},
|
||||
);
|
||||
@@ -1922,9 +2105,11 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
let routes = warp::get()
|
||||
.and(
|
||||
get_beacon_genesis
|
||||
.boxed()
|
||||
.or(get_beacon_state_root.boxed())
|
||||
.or(get_beacon_state_fork.boxed())
|
||||
.or(get_beacon_state_finality_checkpoints.boxed())
|
||||
.or(get_beacon_state_validator_balances.boxed())
|
||||
.or(get_beacon_state_validators.boxed())
|
||||
.or(get_beacon_state_validators_id.boxed())
|
||||
.or(get_beacon_state_committees.boxed())
|
||||
@@ -1948,7 +2133,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.or(get_node_health.boxed())
|
||||
.or(get_node_peers_by_id.boxed())
|
||||
.or(get_node_peers.boxed())
|
||||
.or(get_validator_duties_attester.boxed())
|
||||
.or(get_node_peer_count.boxed())
|
||||
.or(get_validator_duties_proposer.boxed())
|
||||
.or(get_validator_blocks.boxed())
|
||||
.or(get_validator_attestation_data.boxed())
|
||||
@@ -1963,23 +2148,19 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.or(get_lighthouse_eth1_syncing.boxed())
|
||||
.or(get_lighthouse_eth1_block_cache.boxed())
|
||||
.or(get_lighthouse_eth1_deposit_cache.boxed())
|
||||
.or(get_lighthouse_beacon_states_ssz.boxed())
|
||||
.boxed(),
|
||||
.or(get_lighthouse_beacon_states_ssz.boxed()),
|
||||
)
|
||||
.or(warp::post()
|
||||
.and(
|
||||
post_beacon_blocks
|
||||
.or(post_beacon_pool_attestations.boxed())
|
||||
.or(post_beacon_pool_attester_slashings.boxed())
|
||||
.or(post_beacon_pool_proposer_slashings.boxed())
|
||||
.or(post_beacon_pool_voluntary_exits.boxed())
|
||||
.or(post_validator_aggregate_and_proofs.boxed())
|
||||
.or(post_validator_beacon_committee_subscriptions.boxed())
|
||||
.boxed(),
|
||||
)
|
||||
.boxed())
|
||||
.boxed()
|
||||
// Maps errors into HTTP responses.
|
||||
.or(warp::post().and(
|
||||
post_beacon_blocks
|
||||
.boxed()
|
||||
.or(post_beacon_pool_attestations.boxed())
|
||||
.or(post_beacon_pool_attester_slashings.boxed())
|
||||
.or(post_beacon_pool_proposer_slashings.boxed())
|
||||
.or(post_beacon_pool_voluntary_exits.boxed())
|
||||
.or(post_validator_duties_attester.boxed())
|
||||
.or(post_validator_aggregate_and_proofs.boxed())
|
||||
.or(post_validator_beacon_committee_subscriptions.boxed()),
|
||||
))
|
||||
.recover(warp_utils::reject::handle_rejection)
|
||||
.with(slog_logging(log.clone()))
|
||||
.with(prometheus_metrics())
|
||||
|
||||
@@ -37,7 +37,7 @@ const FINALIZED_EPOCH: u64 = 3;
|
||||
const TCP_PORT: u16 = 42;
|
||||
const UDP_PORT: u16 = 42;
|
||||
const SEQ_NUMBER: u64 = 0;
|
||||
const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0";
|
||||
const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000";
|
||||
|
||||
/// Skipping the slots around the epoch boundary allows us to check that we're obtaining states
|
||||
/// from skipped slots for the finalized and justified checkpoints (instead of the state from the
|
||||
@@ -162,10 +162,6 @@ impl ApiTester {
|
||||
EXTERNAL_ADDR.parse().unwrap(),
|
||||
None,
|
||||
);
|
||||
//TODO: have to update this once #1764 is resolved
|
||||
if let Some(peer_info) = network_globals.peers.write().peer_info_mut(&peer_id) {
|
||||
peer_info.listening_addresses = vec![EXTERNAL_ADDR.parse().unwrap()];
|
||||
}
|
||||
|
||||
*network_globals.sync_state.write() = SyncState::Synced;
|
||||
|
||||
@@ -415,6 +411,73 @@ impl ApiTester {
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn test_beacon_states_validator_balances(self) -> Self {
|
||||
for state_id in self.interesting_state_ids() {
|
||||
for validator_indices in self.interesting_validator_indices() {
|
||||
let state_opt = self.get_state(state_id);
|
||||
let validators: Vec<Validator> = match state_opt.as_ref() {
|
||||
Some(state) => state.validators.clone().into(),
|
||||
None => vec![],
|
||||
};
|
||||
let validator_index_ids = validator_indices
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|i| ValidatorId::Index(i))
|
||||
.collect::<Vec<ValidatorId>>();
|
||||
let validator_pubkey_ids = validator_indices
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|i| {
|
||||
ValidatorId::PublicKey(
|
||||
validators
|
||||
.get(i as usize)
|
||||
.map_or(PublicKeyBytes::empty(), |val| val.pubkey.clone()),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<ValidatorId>>();
|
||||
|
||||
let result_index_ids = self
|
||||
.client
|
||||
.get_beacon_states_validator_balances(
|
||||
state_id,
|
||||
Some(validator_index_ids.as_slice()),
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.map(|res| res.data);
|
||||
let result_pubkey_ids = self
|
||||
.client
|
||||
.get_beacon_states_validator_balances(
|
||||
state_id,
|
||||
Some(validator_pubkey_ids.as_slice()),
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.map(|res| res.data);
|
||||
|
||||
let expected = state_opt.map(|state| {
|
||||
let mut validators = Vec::with_capacity(validator_indices.len());
|
||||
|
||||
for i in validator_indices {
|
||||
if i < state.balances.len() as u64 {
|
||||
validators.push(ValidatorBalanceData {
|
||||
index: i as u64,
|
||||
balance: state.balances[i as usize],
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
validators
|
||||
});
|
||||
|
||||
assert_eq!(result_index_ids, expected, "{:?}", state_id);
|
||||
assert_eq!(result_pubkey_ids, expected, "{:?}", state_id);
|
||||
}
|
||||
}
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn test_beacon_states_validators(self) -> Self {
|
||||
for state_id in self.interesting_state_ids() {
|
||||
for statuses in self.interesting_validator_statuses() {
|
||||
@@ -1126,18 +1189,66 @@ impl ApiTester {
|
||||
}
|
||||
|
||||
pub async fn test_get_node_peers(self) -> Self {
|
||||
let result = self.client.get_node_peers().await.unwrap().data;
|
||||
let peer_states: Vec<Option<&[PeerState]>> = vec![
|
||||
Some(&[PeerState::Connected]),
|
||||
Some(&[PeerState::Connecting]),
|
||||
Some(&[PeerState::Disconnected]),
|
||||
Some(&[PeerState::Disconnecting]),
|
||||
None,
|
||||
Some(&[PeerState::Connected, PeerState::Connecting]),
|
||||
];
|
||||
let peer_dirs: Vec<Option<&[PeerDirection]>> = vec![
|
||||
Some(&[PeerDirection::Outbound]),
|
||||
Some(&[PeerDirection::Inbound]),
|
||||
Some(&[PeerDirection::Inbound, PeerDirection::Outbound]),
|
||||
None,
|
||||
];
|
||||
|
||||
let expected = PeerData {
|
||||
peer_id: self.external_peer_id.to_string(),
|
||||
enr: None,
|
||||
last_seen_p2p_address: EXTERNAL_ADDR.to_string(),
|
||||
state: PeerState::Connected,
|
||||
direction: PeerDirection::Inbound,
|
||||
};
|
||||
for states in peer_states {
|
||||
for dirs in peer_dirs.clone() {
|
||||
let result = self.client.get_node_peers(states, dirs).await.unwrap();
|
||||
let expected_peer = PeerData {
|
||||
peer_id: self.external_peer_id.to_string(),
|
||||
enr: None,
|
||||
last_seen_p2p_address: EXTERNAL_ADDR.to_string(),
|
||||
state: PeerState::Connected,
|
||||
direction: PeerDirection::Inbound,
|
||||
};
|
||||
|
||||
assert_eq!(result, vec![expected]);
|
||||
let state_match =
|
||||
states.map_or(true, |states| states.contains(&PeerState::Connected));
|
||||
let dir_match = dirs.map_or(true, |dirs| dirs.contains(&PeerDirection::Inbound));
|
||||
|
||||
let mut expected_peers = Vec::new();
|
||||
if state_match && dir_match {
|
||||
expected_peers.push(expected_peer);
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
result,
|
||||
PeersData {
|
||||
meta: PeersMetaData {
|
||||
count: expected_peers.len() as u64
|
||||
},
|
||||
data: expected_peers,
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn test_get_node_peer_count(self) -> Self {
|
||||
let result = self.client.get_node_peer_count().await.unwrap().data;
|
||||
assert_eq!(
|
||||
result,
|
||||
PeerCount {
|
||||
connected: 1,
|
||||
connecting: 0,
|
||||
disconnected: 0,
|
||||
disconnecting: 0,
|
||||
}
|
||||
);
|
||||
self
|
||||
}
|
||||
|
||||
@@ -1239,7 +1350,7 @@ impl ApiTester {
|
||||
if epoch > current_epoch + 1 {
|
||||
assert_eq!(
|
||||
self.client
|
||||
.get_validator_duties_attester(epoch, Some(&indices))
|
||||
.post_validator_duties_attester(epoch, indices.as_slice())
|
||||
.await
|
||||
.unwrap_err()
|
||||
.status()
|
||||
@@ -1251,7 +1362,7 @@ impl ApiTester {
|
||||
|
||||
let results = self
|
||||
.client
|
||||
.get_validator_duties_attester(epoch, Some(&indices))
|
||||
.post_validator_duties_attester(epoch, indices.as_slice())
|
||||
.await
|
||||
.unwrap()
|
||||
.data;
|
||||
@@ -1340,7 +1451,11 @@ impl ApiTester {
|
||||
.unwrap();
|
||||
let pubkey = state.validators[index].pubkey.clone().into();
|
||||
|
||||
ProposerData { pubkey, slot }
|
||||
ProposerData {
|
||||
pubkey,
|
||||
validator_index: index as u64,
|
||||
slot,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
@@ -1477,17 +1592,17 @@ impl ApiTester {
|
||||
let fork = head.beacon_state.fork;
|
||||
let genesis_validators_root = self.chain.genesis_validators_root;
|
||||
|
||||
let mut duties = vec![];
|
||||
for i in 0..self.validator_keypairs.len() {
|
||||
duties.push(
|
||||
self.client
|
||||
.get_validator_duties_attester(epoch, Some(&[i as u64]))
|
||||
.await
|
||||
.unwrap()
|
||||
.data[0]
|
||||
.clone(),
|
||||
let duties = self
|
||||
.client
|
||||
.post_validator_duties_attester(
|
||||
epoch,
|
||||
(0..self.validator_keypairs.len() as u64)
|
||||
.collect::<Vec<u64>>()
|
||||
.as_slice(),
|
||||
)
|
||||
}
|
||||
.await
|
||||
.unwrap()
|
||||
.data;
|
||||
|
||||
let (i, kp, duty, proof) = self
|
||||
.validator_keypairs
|
||||
@@ -1558,7 +1673,7 @@ impl ApiTester {
|
||||
let aggregate = self.get_aggregate().await;
|
||||
|
||||
self.client
|
||||
.post_validator_aggregate_and_proof::<E>(&aggregate)
|
||||
.post_validator_aggregate_and_proof::<E>(&[aggregate])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -1573,7 +1688,7 @@ impl ApiTester {
|
||||
aggregate.message.aggregate.data.slot += 1;
|
||||
|
||||
self.client
|
||||
.post_validator_aggregate_and_proof::<E>(&aggregate)
|
||||
.post_validator_aggregate_and_proof::<E>(&[aggregate])
|
||||
.await
|
||||
.unwrap_err();
|
||||
|
||||
@@ -1704,6 +1819,8 @@ async fn beacon_get() {
|
||||
.await
|
||||
.test_beacon_states_validators()
|
||||
.await
|
||||
.test_beacon_states_validator_balances()
|
||||
.await
|
||||
.test_beacon_states_committees()
|
||||
.await
|
||||
.test_beacon_states_validator_id()
|
||||
@@ -1830,6 +1947,8 @@ async fn node_get() {
|
||||
.test_get_node_peers_by_id()
|
||||
.await
|
||||
.test_get_node_peers()
|
||||
.await
|
||||
.test_get_node_peer_count()
|
||||
.await;
|
||||
}
|
||||
|
||||
|
||||
@@ -40,4 +40,4 @@ igd = "0.11.1"
|
||||
itertools = "0.9.0"
|
||||
num_cpus = "1.13.0"
|
||||
lru_cache = { path = "../../common/lru_cache" }
|
||||
get_if_addrs = "0.5.3"
|
||||
if-addrs = "0.6.4"
|
||||
|
||||
@@ -13,7 +13,7 @@ use rand::seq::SliceRandom;
|
||||
use slog::{debug, error, o, trace, warn};
|
||||
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use eth2_libp2p::SubnetDiscovery;
|
||||
use eth2_libp2p::{NetworkConfig, SubnetDiscovery};
|
||||
use hashset_delay::HashSetDelay;
|
||||
use slot_clock::SlotClock;
|
||||
use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription};
|
||||
@@ -89,6 +89,12 @@ pub struct AttestationService<T: BeaconChainTypes> {
|
||||
/// The waker for the current thread.
|
||||
waker: Option<std::task::Waker>,
|
||||
|
||||
/// The discovery mechanism of lighthouse is disabled.
|
||||
discovery_disabled: bool,
|
||||
|
||||
/// We are always subscribed to all subnets.
|
||||
subscribe_all_subnets: bool,
|
||||
|
||||
/// The logger for the attestation service.
|
||||
log: slog::Logger,
|
||||
}
|
||||
@@ -96,7 +102,11 @@ pub struct AttestationService<T: BeaconChainTypes> {
|
||||
impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
/* Public functions */
|
||||
|
||||
pub fn new(beacon_chain: Arc<BeaconChain<T>>, log: &slog::Logger) -> Self {
|
||||
pub fn new(
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
config: &NetworkConfig,
|
||||
log: &slog::Logger,
|
||||
) -> Self {
|
||||
let log = log.new(o!("service" => "attestation_service"));
|
||||
|
||||
// calculate the random subnet duration from the spec constants
|
||||
@@ -124,6 +134,8 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
aggregate_validators_on_subnet: HashSetDelay::new(default_timeout),
|
||||
known_validators: HashSetDelay::new(last_seen_val_timeout),
|
||||
waker: None,
|
||||
subscribe_all_subnets: config.subscribe_all_subnets,
|
||||
discovery_disabled: config.disable_discovery,
|
||||
log,
|
||||
}
|
||||
}
|
||||
@@ -131,7 +143,11 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
/// Return count of all currently subscribed subnets (long-lived **and** short-lived).
|
||||
#[cfg(test)]
|
||||
pub fn subscription_count(&self) -> usize {
|
||||
self.subscriptions.len()
|
||||
if self.subscribe_all_subnets {
|
||||
self.beacon_chain.spec.attestation_subnet_count as usize
|
||||
} else {
|
||||
self.subscriptions.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Processes a list of validator subscriptions.
|
||||
@@ -186,7 +202,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
if subscription.slot > *slot {
|
||||
subnets_to_discover.insert(subnet_id, subscription.slot);
|
||||
}
|
||||
} else {
|
||||
} else if !self.discovery_disabled {
|
||||
subnets_to_discover.insert(subnet_id, subscription.slot);
|
||||
}
|
||||
|
||||
@@ -218,13 +234,17 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = self.discover_peers_request(
|
||||
subnets_to_discover
|
||||
.into_iter()
|
||||
.map(|(subnet_id, slot)| ExactSubnet { subnet_id, slot }),
|
||||
) {
|
||||
warn!(self.log, "Discovery lookup request error"; "error" => e);
|
||||
};
|
||||
// If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the
|
||||
// required subnets.
|
||||
if !self.discovery_disabled {
|
||||
if let Err(e) = self.discover_peers_request(
|
||||
subnets_to_discover
|
||||
.into_iter()
|
||||
.map(|(subnet_id, slot)| ExactSubnet { subnet_id, slot }),
|
||||
) {
|
||||
warn!(self.log, "Discovery lookup request error"; "error" => e);
|
||||
};
|
||||
}
|
||||
|
||||
// pre-emptively wake the thread to check for new events
|
||||
if let Some(waker) = &self.waker {
|
||||
@@ -343,7 +363,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
// in-active. This case is checked on the subscription event (see `handle_subscriptions`).
|
||||
|
||||
// Return if we already have a subscription for this subnet_id and slot
|
||||
if self.unsubscriptions.contains(&exact_subnet) {
|
||||
if self.unsubscriptions.contains(&exact_subnet) || self.subscribe_all_subnets {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -366,7 +386,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
///
|
||||
/// This also updates the ENR to indicate our long-lived subscription to the subnet
|
||||
fn add_known_validator(&mut self, validator_index: u64) {
|
||||
if self.known_validators.get(&validator_index).is_none() {
|
||||
if self.known_validators.get(&validator_index).is_none() && !self.subscribe_all_subnets {
|
||||
// New validator has subscribed
|
||||
// Subscribe to random topics and update the ENR if needed.
|
||||
|
||||
|
||||
@@ -92,10 +92,11 @@ mod tests {
|
||||
|
||||
fn get_attestation_service() -> AttestationService<TestBeaconChainType> {
|
||||
let log = get_logger();
|
||||
let config = NetworkConfig::default();
|
||||
|
||||
let beacon_chain = CHAIN.chain.clone();
|
||||
|
||||
AttestationService::new(beacon_chain, &log)
|
||||
AttestationService::new(beacon_chain, &config, &log)
|
||||
}
|
||||
|
||||
fn get_subscription(
|
||||
|
||||
@@ -36,13 +36,13 @@ lazy_static! {
|
||||
&["subnet"]
|
||||
);
|
||||
|
||||
pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
|
||||
pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_avg_peer_score_per_topic",
|
||||
"Average peer's score per topic",
|
||||
&["topic_hash"]
|
||||
);
|
||||
|
||||
pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
|
||||
pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_avg_peer_score_per_subnet_topic",
|
||||
"Average peer's score per subnet topic",
|
||||
&["subnet"]
|
||||
@@ -53,6 +53,60 @@ lazy_static! {
|
||||
"Failed attestation publishes per subnet",
|
||||
&["subnet"]
|
||||
);
|
||||
|
||||
pub static ref SCORES_BELOW_ZERO_PER_CLIENT: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_scores_below_zero_per_client",
|
||||
"Relative number of scores below zero per client",
|
||||
&["Client"]
|
||||
);
|
||||
pub static ref SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_scores_below_gossip_threshold_per_client",
|
||||
"Relative number of scores below gossip threshold per client",
|
||||
&["Client"]
|
||||
);
|
||||
pub static ref SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_scores_below_publish_threshold_per_client",
|
||||
"Relative number of scores below publish threshold per client",
|
||||
&["Client"]
|
||||
);
|
||||
pub static ref SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_scores_below_greylist_threshold_per_client",
|
||||
"Relative number of scores below greylist threshold per client",
|
||||
&["Client"]
|
||||
);
|
||||
|
||||
pub static ref MIN_SCORES_PER_CLIENT: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_min_scores_per_client",
|
||||
"Minimum scores per client",
|
||||
&["Client"]
|
||||
);
|
||||
pub static ref MEDIAN_SCORES_PER_CLIENT: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_median_scores_per_client",
|
||||
"Median scores per client",
|
||||
&["Client"]
|
||||
);
|
||||
pub static ref MEAN_SCORES_PER_CLIENT: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_mean_scores_per_client",
|
||||
"Mean scores per client",
|
||||
&["Client"]
|
||||
);
|
||||
pub static ref MAX_SCORES_PER_CLIENT: Result<GaugeVec> = try_create_float_gauge_vec(
|
||||
"gossipsub_max_scores_per_client",
|
||||
"Max scores per client",
|
||||
&["Client"]
|
||||
);
|
||||
pub static ref BEACON_BLOCK_MESH_PEERS_PER_CLIENT: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"block_mesh_peers_per_client",
|
||||
"Number of mesh peers for BeaconBlock topic per client",
|
||||
&["Client"]
|
||||
);
|
||||
pub static ref BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec(
|
||||
"beacon_aggregate_and_proof_mesh_peers_per_client",
|
||||
"Number of mesh peers for BeaconAggregateAndProof topic per client",
|
||||
&["Client"]
|
||||
);
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
//! - UPnP
|
||||
|
||||
use crate::{NetworkConfig, NetworkMessage};
|
||||
use get_if_addrs::get_if_addrs;
|
||||
use if_addrs::get_if_addrs;
|
||||
use slog::{debug, info, warn};
|
||||
use std::net::{IpAddr, SocketAddr, SocketAddrV4};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
@@ -20,7 +20,7 @@ use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration};
|
||||
use store::HotColdDB;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::Delay;
|
||||
use types::{EthSpec, ValidatorSubscription};
|
||||
use types::{EthSpec, RelativeEpoch, SubnetId, Unsigned, ValidatorSubscription};
|
||||
|
||||
mod tests;
|
||||
|
||||
@@ -110,8 +110,12 @@ pub struct NetworkService<T: BeaconChainTypes> {
|
||||
discovery_auto_update: bool,
|
||||
/// A delay that expires when a new fork takes place.
|
||||
next_fork_update: Option<Delay>,
|
||||
/// Subscribe to all the subnets once synced.
|
||||
subscribe_all_subnets: bool,
|
||||
/// A timer for updating various network metrics.
|
||||
metrics_update: tokio::time::Interval,
|
||||
/// gossipsub_parameter_update timer
|
||||
gossipsub_parameter_update: tokio::time::Interval,
|
||||
/// The logger for the network service.
|
||||
log: slog::Logger,
|
||||
}
|
||||
@@ -153,8 +157,14 @@ impl<T: BeaconChainTypes> NetworkService<T> {
|
||||
let next_fork_update = next_fork_delay(&beacon_chain);
|
||||
|
||||
// launch libp2p service
|
||||
let (network_globals, mut libp2p) =
|
||||
LibP2PService::new(executor.clone(), config, enr_fork_id, &network_log).await?;
|
||||
let (network_globals, mut libp2p) = LibP2PService::new(
|
||||
executor.clone(),
|
||||
config,
|
||||
enr_fork_id,
|
||||
&network_log,
|
||||
&beacon_chain.spec,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Repopulate the DHT with stored ENR's.
|
||||
let enrs_to_load = load_dht::<T::EthSpec, T::HotStore, T::ColdStore>(store.clone());
|
||||
@@ -178,11 +188,15 @@ impl<T: BeaconChainTypes> NetworkService<T> {
|
||||
)?;
|
||||
|
||||
// attestation service
|
||||
let attestation_service = AttestationService::new(beacon_chain.clone(), &network_log);
|
||||
let attestation_service =
|
||||
AttestationService::new(beacon_chain.clone(), &config, &network_log);
|
||||
|
||||
// create a timer for updating network metrics
|
||||
let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL));
|
||||
|
||||
// create a timer for updating gossipsub parameters
|
||||
let gossipsub_parameter_update = tokio::time::interval(Duration::from_secs(60));
|
||||
|
||||
// create the network service and spawn the task
|
||||
let network_log = network_log.new(o!("service" => "network"));
|
||||
let network_service = NetworkService {
|
||||
@@ -196,7 +210,9 @@ impl<T: BeaconChainTypes> NetworkService<T> {
|
||||
upnp_mappings: (None, None),
|
||||
discovery_auto_update: config.discv5_config.enr_update,
|
||||
next_fork_update,
|
||||
subscribe_all_subnets: config.subscribe_all_subnets,
|
||||
metrics_update,
|
||||
gossipsub_parameter_update,
|
||||
log: network_log,
|
||||
};
|
||||
|
||||
@@ -256,7 +272,51 @@ fn spawn_service<T: BeaconChainTypes>(
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
}
|
||||
update_gossip_metrics::<T::EthSpec>(&service.libp2p.swarm.gs());
|
||||
update_gossip_metrics::<T::EthSpec>(
|
||||
&service.libp2p.swarm.gs(),
|
||||
&service.network_globals,
|
||||
&service.log
|
||||
);
|
||||
}
|
||||
_ = service.gossipsub_parameter_update.next() => {
|
||||
if let Ok(slot) = service.beacon_chain.slot() {
|
||||
if let Some(active_validators) = service.beacon_chain.with_head(|head| {
|
||||
Ok(
|
||||
head
|
||||
.beacon_state
|
||||
.get_cached_active_validator_indices(RelativeEpoch::Current)
|
||||
.map(|indices| indices.len())
|
||||
.ok()
|
||||
.or_else(|| {
|
||||
// if active validator cached was not build we count the
|
||||
// active validators
|
||||
service
|
||||
.beacon_chain
|
||||
.epoch()
|
||||
.ok()
|
||||
.map(|current_epoch| {
|
||||
head
|
||||
.beacon_state
|
||||
.validators
|
||||
.iter()
|
||||
.filter(|validator|
|
||||
validator.is_active_at(current_epoch)
|
||||
)
|
||||
.count()
|
||||
})
|
||||
})
|
||||
)
|
||||
}).unwrap_or(None) {
|
||||
if (*service.libp2p.swarm)
|
||||
.update_gossipsub_parameters(active_validators, slot).is_err() {
|
||||
error!(
|
||||
service.log,
|
||||
"Failed to update gossipsub parameters";
|
||||
"active_validators" => active_validators
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// handle a message sent to the network
|
||||
Some(message) = service.network_recv.recv() => {
|
||||
@@ -296,6 +356,7 @@ fn spawn_service<T: BeaconChainTypes>(
|
||||
trace!(service.log, "Propagating gossipsub message";
|
||||
"propagation_peer" => format!("{:?}", propagation_source),
|
||||
"message_id" => message_id.to_string(),
|
||||
"validation_result" => format!("{:?}", validation_result)
|
||||
);
|
||||
service
|
||||
.libp2p
|
||||
@@ -340,6 +401,22 @@ fn spawn_service<T: BeaconChainTypes>(
|
||||
warn!(service.log, "Could not subscribe to topic"; "topic" => format!("{}",topic_kind));
|
||||
}
|
||||
}
|
||||
|
||||
// if we are to subscribe to all subnets we do it here
|
||||
if service.subscribe_all_subnets {
|
||||
for subnet_id in 0..<<T as BeaconChainTypes>::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() {
|
||||
let subnet_id = SubnetId::new(subnet_id);
|
||||
let topic_kind = eth2_libp2p::types::GossipKind::Attestation(subnet_id);
|
||||
if service.libp2p.swarm.subscribe_kind(topic_kind.clone()) {
|
||||
// Update the ENR bitfield.
|
||||
service.libp2p.swarm.update_enr_subnet(subnet_id, true);
|
||||
subscribed_topics.push(topic_kind.clone());
|
||||
} else {
|
||||
warn!(service.log, "Could not subscribe to topic"; "topic" => format!("{}",topic_kind));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !subscribed_topics.is_empty() {
|
||||
info!(service.log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics));
|
||||
}
|
||||
@@ -537,7 +614,26 @@ fn expose_receive_metrics<T: EthSpec>(message: &PubsubMessage<T>) {
|
||||
}
|
||||
}
|
||||
|
||||
fn update_gossip_metrics<T: EthSpec>(gossipsub: &Gossipsub) {
|
||||
/// A work-around to reduce temporary allocation when updating gossip metrics.
|
||||
pub struct ToStringCache<T>(HashMap<T, String>);
|
||||
|
||||
impl<T: Clone + std::hash::Hash + std::fmt::Display + Eq> ToStringCache<T> {
|
||||
pub fn with_capacity(c: usize) -> Self {
|
||||
Self(HashMap::with_capacity(c))
|
||||
}
|
||||
|
||||
pub fn get(&mut self, item: T) -> &str {
|
||||
self.0
|
||||
.entry(item.clone())
|
||||
.or_insert_with(|| item.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
fn update_gossip_metrics<T: EthSpec>(
|
||||
gossipsub: &Gossipsub,
|
||||
network_globals: &Arc<NetworkGlobals<T>>,
|
||||
logger: &slog::Logger,
|
||||
) {
|
||||
// Clear the metrics
|
||||
let _ = metrics::PEERS_PER_PROTOCOL
|
||||
.as_ref()
|
||||
@@ -555,23 +651,60 @@ fn update_gossip_metrics<T: EthSpec>(gossipsub: &Gossipsub) {
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
|
||||
let _ = metrics::SCORES_BELOW_ZERO_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
let _ = metrics::SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
let _ = metrics::SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
let _ = metrics::SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
let _ = metrics::MIN_SCORES_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
let _ = metrics::MEDIAN_SCORES_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
let _ = metrics::MEAN_SCORES_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
let _ = metrics::MAX_SCORES_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
|
||||
let _ = metrics::BEACON_BLOCK_MESH_PEERS_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
let _ = metrics::BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT
|
||||
.as_ref()
|
||||
.map(|gauge| gauge.reset());
|
||||
|
||||
let mut subnet_ids: ToStringCache<u64> =
|
||||
ToStringCache::with_capacity(T::default_spec().attestation_subnet_count as usize);
|
||||
let mut gossip_kinds: ToStringCache<GossipKind> =
|
||||
ToStringCache::with_capacity(T::default_spec().attestation_subnet_count as usize);
|
||||
|
||||
// reset the mesh peers, showing all subnets
|
||||
for subnet_id in 0..T::default_spec().attestation_subnet_count {
|
||||
let _ = metrics::get_int_gauge(
|
||||
&metrics::MESH_PEERS_PER_SUBNET_TOPIC,
|
||||
&[&subnet_id.to_string()],
|
||||
&[subnet_ids.get(subnet_id)],
|
||||
)
|
||||
.map(|v| v.set(0));
|
||||
|
||||
let _ = metrics::get_int_gauge(
|
||||
&metrics::GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC,
|
||||
&[&subnet_id.to_string()],
|
||||
&[subnet_ids.get(subnet_id)],
|
||||
)
|
||||
.map(|v| v.set(0));
|
||||
|
||||
let _ = metrics::get_int_gauge(
|
||||
&metrics::GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC,
|
||||
&[&subnet_id.to_string()],
|
||||
&[subnet_ids.get(subnet_id)],
|
||||
)
|
||||
.map(|v| v.set(0));
|
||||
}
|
||||
@@ -582,7 +715,7 @@ fn update_gossip_metrics<T: EthSpec>(gossipsub: &Gossipsub) {
|
||||
if let GossipKind::Attestation(subnet_id) = topic.kind() {
|
||||
let _ = metrics::get_int_gauge(
|
||||
&metrics::GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC,
|
||||
&[&subnet_id.to_string()],
|
||||
&[subnet_ids.get(subnet_id.into())],
|
||||
)
|
||||
.map(|v| v.set(1));
|
||||
}
|
||||
@@ -600,29 +733,29 @@ fn update_gossip_metrics<T: EthSpec>(gossipsub: &Gossipsub) {
|
||||
GossipKind::Attestation(subnet_id) => {
|
||||
if let Some(v) = metrics::get_int_gauge(
|
||||
&metrics::GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC,
|
||||
&[&subnet_id.to_string()],
|
||||
&[subnet_ids.get(subnet_id.into())],
|
||||
) {
|
||||
v.inc()
|
||||
};
|
||||
|
||||
// average peer scores
|
||||
if let Some(score) = gossipsub.peer_score(peer_id) {
|
||||
if let Some(v) = metrics::get_int_gauge(
|
||||
if let Some(v) = metrics::get_gauge(
|
||||
&metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC,
|
||||
&[&subnet_id.to_string()],
|
||||
&[subnet_ids.get(subnet_id.into())],
|
||||
) {
|
||||
v.add(score as i64)
|
||||
v.add(score)
|
||||
};
|
||||
}
|
||||
}
|
||||
kind => {
|
||||
// main topics
|
||||
if let Some(score) = gossipsub.peer_score(peer_id) {
|
||||
if let Some(v) = metrics::get_int_gauge(
|
||||
if let Some(v) = metrics::get_gauge(
|
||||
&metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC,
|
||||
&[&format!("{:?}", kind)],
|
||||
&[gossip_kinds.get(kind.clone())],
|
||||
) {
|
||||
v.add(score as i64)
|
||||
v.add(score)
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -636,20 +769,20 @@ fn update_gossip_metrics<T: EthSpec>(gossipsub: &Gossipsub) {
|
||||
match topic.kind() {
|
||||
GossipKind::Attestation(subnet_id) => {
|
||||
// average peer scores
|
||||
if let Some(v) = metrics::get_int_gauge(
|
||||
if let Some(v) = metrics::get_gauge(
|
||||
&metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC,
|
||||
&[&subnet_id.to_string()],
|
||||
&[subnet_ids.get(subnet_id.into())],
|
||||
) {
|
||||
v.set(v.get() / (*peers as i64))
|
||||
v.set(v.get() / (*peers as f64))
|
||||
};
|
||||
}
|
||||
kind => {
|
||||
// main topics
|
||||
if let Some(v) = metrics::get_int_gauge(
|
||||
if let Some(v) = metrics::get_gauge(
|
||||
&metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC,
|
||||
&[&format!("{:?}", kind)],
|
||||
) {
|
||||
v.set(v.get() / (*peers as i64))
|
||||
v.set(v.get() / (*peers as f64))
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -664,7 +797,7 @@ fn update_gossip_metrics<T: EthSpec>(gossipsub: &Gossipsub) {
|
||||
GossipKind::Attestation(subnet_id) => {
|
||||
if let Some(v) = metrics::get_int_gauge(
|
||||
&metrics::MESH_PEERS_PER_SUBNET_TOPIC,
|
||||
&[&subnet_id.to_string()],
|
||||
&[subnet_ids.get(subnet_id.into())],
|
||||
) {
|
||||
v.set(peers as i64)
|
||||
};
|
||||
@@ -673,7 +806,7 @@ fn update_gossip_metrics<T: EthSpec>(gossipsub: &Gossipsub) {
|
||||
// main topics
|
||||
if let Some(v) = metrics::get_int_gauge(
|
||||
&metrics::MESH_PEERS_PER_MAIN_TOPIC,
|
||||
&[&format!("{:?}", kind)],
|
||||
&[gossip_kinds.get(kind.clone())],
|
||||
) {
|
||||
v.set(peers as i64)
|
||||
};
|
||||
@@ -695,4 +828,132 @@ fn update_gossip_metrics<T: EthSpec>(gossipsub: &Gossipsub) {
|
||||
v.set(*peers)
|
||||
};
|
||||
}
|
||||
|
||||
let mut peer_to_client = HashMap::new();
|
||||
let mut scores_per_client: HashMap<String, Vec<f64>> = HashMap::new();
|
||||
{
|
||||
let peers = network_globals.peers.read();
|
||||
for (peer_id, _) in gossipsub.all_peers() {
|
||||
let client = peers
|
||||
.peer_info(peer_id)
|
||||
.map(|peer_info| peer_info.client.kind.to_string())
|
||||
.unwrap_or_else(|| "Unknown".to_string());
|
||||
|
||||
peer_to_client.insert(peer_id, client.clone());
|
||||
let score = gossipsub.peer_score(peer_id).unwrap_or(0.0);
|
||||
if (client == "Prysm" || client == "Lighthouse") && score < 0.0 {
|
||||
trace!(logger, "Peer has negative score"; "peer" => format!("{:?}", peer_id),
|
||||
"client" => &client, "score" => score);
|
||||
}
|
||||
scores_per_client.entry(client).or_default().push(score);
|
||||
}
|
||||
}
|
||||
|
||||
// mesh peers per client
|
||||
for topic_hash in gossipsub.topics() {
|
||||
if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
|
||||
match topic.kind() {
|
||||
GossipKind::BeaconBlock => {
|
||||
for peer in gossipsub.mesh_peers(&topic_hash) {
|
||||
if let Some(client) = peer_to_client.get(peer) {
|
||||
if let Some(v) = metrics::get_int_gauge(
|
||||
&metrics::BEACON_BLOCK_MESH_PEERS_PER_CLIENT,
|
||||
&[client],
|
||||
) {
|
||||
v.inc()
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
GossipKind::BeaconAggregateAndProof => {
|
||||
for peer in gossipsub.mesh_peers(&topic_hash) {
|
||||
if let Some(client) = peer_to_client.get(peer) {
|
||||
if let Some(v) = metrics::get_int_gauge(
|
||||
&metrics::BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT,
|
||||
&[client],
|
||||
) {
|
||||
v.inc()
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (client, scores) in scores_per_client.into_iter() {
|
||||
let c = &[client.as_ref()];
|
||||
let len = scores.len();
|
||||
if len > 0 {
|
||||
let mut below0 = 0;
|
||||
let mut below_gossip_threshold = 0;
|
||||
let mut below_publish_threshold = 0;
|
||||
let mut below_greylist_threshold = 0;
|
||||
let mut min = f64::INFINITY;
|
||||
let mut sum = 0.0;
|
||||
let mut max = f64::NEG_INFINITY;
|
||||
|
||||
let count = scores.len() as f64;
|
||||
|
||||
for &score in &scores {
|
||||
if score < 0.0 {
|
||||
below0 += 1;
|
||||
}
|
||||
if score < -4000.0 {
|
||||
//TODO not hardcode
|
||||
below_gossip_threshold += 1;
|
||||
}
|
||||
if score < -8000.0 {
|
||||
//TODO not hardcode
|
||||
below_publish_threshold += 1;
|
||||
}
|
||||
if score < -16000.0 {
|
||||
//TODO not hardcode
|
||||
below_greylist_threshold += 1;
|
||||
}
|
||||
if score < min {
|
||||
min = score;
|
||||
}
|
||||
if score > max {
|
||||
max = score;
|
||||
}
|
||||
sum += score;
|
||||
}
|
||||
|
||||
let median = if len == 0 {
|
||||
0.0
|
||||
} else if len % 2 == 0 {
|
||||
(scores[len / 2 - 1] + scores[len / 2]) / 2.0
|
||||
} else {
|
||||
scores[len / 2]
|
||||
};
|
||||
|
||||
metrics::set_gauge_entry(
|
||||
&metrics::SCORES_BELOW_ZERO_PER_CLIENT,
|
||||
c,
|
||||
below0 as f64 / count,
|
||||
);
|
||||
metrics::set_gauge_entry(
|
||||
&metrics::SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT,
|
||||
c,
|
||||
below_gossip_threshold as f64 / count,
|
||||
);
|
||||
metrics::set_gauge_entry(
|
||||
&metrics::SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT,
|
||||
c,
|
||||
below_publish_threshold as f64 / count,
|
||||
);
|
||||
metrics::set_gauge_entry(
|
||||
&metrics::SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT,
|
||||
c,
|
||||
below_greylist_threshold as f64 / count,
|
||||
);
|
||||
|
||||
metrics::set_gauge_entry(&metrics::MIN_SCORES_PER_CLIENT, c, min);
|
||||
metrics::set_gauge_entry(&metrics::MEDIAN_SCORES_PER_CLIENT, c, median);
|
||||
metrics::set_gauge_entry(&metrics::MEAN_SCORES_PER_CLIENT, c, sum / count);
|
||||
metrics::set_gauge_entry(&metrics::MAX_SCORES_PER_CLIENT, c, max);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,6 +30,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
/*
|
||||
* Network parameters.
|
||||
*/
|
||||
.arg(
|
||||
Arg::with_name("subscribe-all-subnets")
|
||||
.long("subscribe-all-subnets")
|
||||
.help("Subscribe to all subnets regardless of validator count. \
|
||||
This will also advertise the beacon node as being long-lived subscribed to all subnets.")
|
||||
.takes_value(false),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("zero-ports")
|
||||
.long("zero-ports")
|
||||
|
||||
@@ -354,6 +354,10 @@ pub fn set_network_config(
|
||||
config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR);
|
||||
};
|
||||
|
||||
if cli_args.is_present("subscribe-all-subnets") {
|
||||
config.subscribe_all_subnets = true;
|
||||
}
|
||||
|
||||
if let Some(listen_address_str) = cli_args.value_of("listen-address") {
|
||||
let listen_address = listen_address_str
|
||||
.parse()
|
||||
|
||||
@@ -79,6 +79,7 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
|
||||
let builder = ClientBuilder::new(context.eth_spec_instance.clone())
|
||||
.runtime_context(context)
|
||||
.chain_spec(spec)
|
||||
.http_api_config(client_config.http_api.clone())
|
||||
.disk_store(&db_path, &freezer_db_path_res?, store_config)?;
|
||||
|
||||
let builder = builder
|
||||
@@ -123,7 +124,6 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
|
||||
.network(&client_config.network)
|
||||
.await?
|
||||
.notifier()?
|
||||
.http_api_config(client_config.http_api.clone())
|
||||
.http_metrics_config(client_config.http_metrics.clone())
|
||||
.build()
|
||||
.map(Self)
|
||||
|
||||
@@ -10,7 +10,8 @@ where
|
||||
{
|
||||
/// Clean up the database by performing one-off maintenance at start-up.
|
||||
pub fn remove_garbage(&self) -> Result<(), Error> {
|
||||
self.delete_temp_states()
|
||||
self.delete_temp_states()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete the temporary states that were leftover by failed block imports.
|
||||
|
||||
@@ -9,7 +9,8 @@ use crate::leveldb_store::BytesKey;
|
||||
use crate::leveldb_store::LevelDB;
|
||||
use crate::memory_store::MemoryStore;
|
||||
use crate::metadata::{
|
||||
SchemaVersion, CONFIG_KEY, CURRENT_SCHEMA_VERSION, SCHEMA_VERSION_KEY, SPLIT_KEY,
|
||||
PruningCheckpoint, SchemaVersion, CONFIG_KEY, CURRENT_SCHEMA_VERSION, PRUNING_CHECKPOINT_KEY,
|
||||
SCHEMA_VERSION_KEY, SPLIT_KEY,
|
||||
};
|
||||
use crate::metrics;
|
||||
use crate::{
|
||||
@@ -924,6 +925,25 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Run a compaction pass to free up space used by deleted states.
|
||||
pub fn compact(&self) -> Result<(), Error> {
|
||||
self.hot_db.compact()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load the checkpoint to begin pruning from (the "old finalized checkpoint").
|
||||
pub fn load_pruning_checkpoint(&self) -> Result<Option<Checkpoint>, Error> {
|
||||
Ok(self
|
||||
.hot_db
|
||||
.get(&PRUNING_CHECKPOINT_KEY)?
|
||||
.map(|pc: PruningCheckpoint| pc.checkpoint))
|
||||
}
|
||||
|
||||
/// Create a staged store for the pruning checkpoint.
|
||||
pub fn pruning_checkpoint_store_op(&self, checkpoint: Checkpoint) -> KeyValueStoreOp {
|
||||
PruningCheckpoint { checkpoint }.as_kv_store_op(PRUNING_CHECKPOINT_KEY)
|
||||
}
|
||||
}
|
||||
|
||||
/// Advance the split point of the store, moving new finalized states to the freezer.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use super::*;
|
||||
use crate::metrics;
|
||||
use db_key::Key;
|
||||
use leveldb::compaction::Compaction;
|
||||
use leveldb::database::batch::{Batch, Writebatch};
|
||||
use leveldb::database::kv::KV;
|
||||
use leveldb::database::Database;
|
||||
@@ -152,6 +153,27 @@ impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
|
||||
fn begin_rw_transaction(&self) -> MutexGuard<()> {
|
||||
self.transaction_mutex.lock()
|
||||
}
|
||||
|
||||
/// Compact all values in the states and states flag columns.
|
||||
fn compact(&self) -> Result<(), Error> {
|
||||
let endpoints = |column: DBColumn| {
|
||||
(
|
||||
BytesKey::from_vec(get_key_for_col(column.as_str(), Hash256::zero().as_bytes())),
|
||||
BytesKey::from_vec(get_key_for_col(
|
||||
column.as_str(),
|
||||
Hash256::repeat_byte(0xff).as_bytes(),
|
||||
)),
|
||||
)
|
||||
};
|
||||
|
||||
for (start_key, end_key) in vec![
|
||||
endpoints(DBColumn::BeaconStateTemporary),
|
||||
endpoints(DBColumn::BeaconState),
|
||||
] {
|
||||
self.db.compact(&start_key, &end_key);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ItemStore<E> for LevelDB<E> {}
|
||||
|
||||
@@ -67,6 +67,9 @@ pub trait KeyValueStore<E: EthSpec>: Sync + Send + Sized + 'static {
|
||||
/// This doesn't prevent other threads writing to the DB unless they also use
|
||||
/// this method. In future we may implement a safer mandatory locking scheme.
|
||||
fn begin_rw_transaction(&self) -> MutexGuard<()>;
|
||||
|
||||
/// Compact the database, freeing space used by deleted items.
|
||||
fn compact(&self) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec<u8> {
|
||||
|
||||
@@ -84,6 +84,10 @@ impl<E: EthSpec> KeyValueStore<E> for MemoryStore<E> {
|
||||
fn begin_rw_transaction(&self) -> MutexGuard<()> {
|
||||
self.transaction_mutex.lock()
|
||||
}
|
||||
|
||||
fn compact(&self) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> ItemStore<E> for MemoryStore<E> {}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use crate::{DBColumn, Error, StoreItem};
|
||||
use ssz::{Decode, Encode};
|
||||
use types::Hash256;
|
||||
use types::{Checkpoint, Hash256};
|
||||
|
||||
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(2);
|
||||
|
||||
@@ -10,6 +10,7 @@ pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(2);
|
||||
pub const SCHEMA_VERSION_KEY: Hash256 = Hash256::repeat_byte(0);
|
||||
pub const CONFIG_KEY: Hash256 = Hash256::repeat_byte(1);
|
||||
pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2);
|
||||
pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3);
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub struct SchemaVersion(pub u64);
|
||||
@@ -33,3 +34,27 @@ impl StoreItem for SchemaVersion {
|
||||
Ok(SchemaVersion(u64::from_ssz_bytes(bytes)?))
|
||||
}
|
||||
}
|
||||
|
||||
/// The checkpoint used for pruning the database.
|
||||
///
|
||||
/// Updated whenever pruning is successful.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct PruningCheckpoint {
|
||||
pub checkpoint: Checkpoint,
|
||||
}
|
||||
|
||||
impl StoreItem for PruningCheckpoint {
|
||||
fn db_column() -> DBColumn {
|
||||
DBColumn::BeaconMeta
|
||||
}
|
||||
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
self.checkpoint.as_ssz_bytes()
|
||||
}
|
||||
|
||||
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
|
||||
Ok(PruningCheckpoint {
|
||||
checkpoint: Checkpoint::from_ssz_bytes(bytes)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,14 +50,14 @@ Several dependencies may be required to compile Lighthouse. The following
|
||||
packages may be required in addition a base Ubuntu Server installation:
|
||||
|
||||
```bash
|
||||
sudo apt install -y git gcc g++ make cmake pkg-config libssl-dev
|
||||
sudo apt install -y git gcc g++ make cmake pkg-config
|
||||
```
|
||||
|
||||
#### macOS
|
||||
|
||||
You will need `cmake`. You can install via homebrew:
|
||||
|
||||
brew install openssl cmake
|
||||
brew install cmake
|
||||
|
||||
### Command is not found
|
||||
|
||||
@@ -71,12 +71,4 @@ See ["Configuring the `PATH` environment variable"
|
||||
|
||||
Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `$ rustup update`.
|
||||
|
||||
### OpenSSL
|
||||
|
||||
If you get a build failure relating to OpenSSL, try installing `openssl-dev` or
|
||||
`libssl-dev` using your OS package manager.
|
||||
|
||||
- Ubuntu: `$ apt-get install libssl-dev`.
|
||||
- Amazon Linux: `$ yum install openssl-devel`.
|
||||
|
||||
[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about
|
||||
|
||||
@@ -13,6 +13,10 @@ clients to form a resilient and decentralized proof-of-stake blockchain.
|
||||
We implement the specification as defined in the
|
||||
[ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs) repository.
|
||||
|
||||
**🚨🚨🚨 Note: Lighthouse is not *yet* ready to produce mainnet deposits. The developers will require some
|
||||
time to test against the mainnet deposit contract, once it is released. DO NOT SUBMIT VALIDATOR
|
||||
DEPOSITS WITH LIGHTHOUSE. 🚨🚨🚨**
|
||||
|
||||
## Topics
|
||||
|
||||
You may read this book from start to finish, or jump to some of these topics:
|
||||
|
||||
@@ -60,11 +60,12 @@ Examples where it is **ineffective** are:
|
||||
|
||||
## Import and Export
|
||||
|
||||
Lighthouse supports v4 of the slashing protection interchange format described
|
||||
Lighthouse supports v5 of the slashing protection interchange format described
|
||||
[here][interchange-spec]. An interchange file is a record of all blocks and attestations
|
||||
signing by a set of validator keys – basically a portable slashing protection database!
|
||||
|
||||
You can import a `.json` interchange file from another client using this command:
|
||||
With your validator client stopped, you can import a `.json` interchange file from another client
|
||||
using this command:
|
||||
|
||||
```bash
|
||||
lighthouse account validator slashing-protection import <my_interchange.json>
|
||||
@@ -85,6 +86,8 @@ You can export Lighthouse's database for use with another client with this comma
|
||||
lighthouse account validator slashing-protection export <lighthouse_interchange.json>
|
||||
```
|
||||
|
||||
The validator client needs to be stopped in order to export.
|
||||
|
||||
[interchange-spec]: https://hackmd.io/@sproul/Bk0Y0qdGD
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
@@ -3,6 +3,10 @@
|
||||
Joining an Eth2 testnet is a great way to get familiar with staking in Phase 0.
|
||||
All users should experiment with a testnet prior to staking mainnet ETH.
|
||||
|
||||
**🚨🚨🚨 Note: Lighthouse is not *yet* ready to produce mainnet deposits. The developers will require some
|
||||
time to test against the mainnet deposit contract, once it is released. DO NOT SUBMIT VALIDATOR
|
||||
DEPOSITS WITH LIGHTHOUSE. 🚨🚨🚨**
|
||||
|
||||
## Supported Testnets
|
||||
|
||||
Lighthouse supports four testnets:
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
# Create a validator
|
||||
|
||||
**🚨🚨🚨 Note: Lighthouse is not *yet* ready to produce mainnet deposits. The developers will require some
|
||||
time to test against the mainnet deposit contract, once it is released. DO NOT SUBMIT VALIDATOR
|
||||
DEPOSITS WITH LIGHTHOUSE. 🚨🚨🚨**
|
||||
|
||||
Validators are fundamentally represented by a BLS keypair. In Lighthouse, we
|
||||
use a [wallet](./wallet-create.md) to generate these keypairs. Once a wallet
|
||||
exists, the `lighthouse account validator create` command is used to generate
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "boot_node"
|
||||
version = "0.3.3"
|
||||
version = "0.3.4"
|
||||
authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
|
||||
@@ -28,6 +28,8 @@ pub enum Error {
|
||||
Reqwest(reqwest::Error),
|
||||
/// The server returned an error message where the body was able to be parsed.
|
||||
ServerMessage(ErrorMessage),
|
||||
/// The server returned an error message with an array of errors.
|
||||
ServerIndexedMessage(IndexedErrorMessage),
|
||||
/// The server returned an error message where the body was unable to be parsed.
|
||||
StatusCode(StatusCode),
|
||||
/// The supplied URL is badly formatted. It should look something like `http://127.0.0.1:5052`.
|
||||
@@ -50,6 +52,7 @@ impl Error {
|
||||
match self {
|
||||
Error::Reqwest(error) => error.status(),
|
||||
Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(),
|
||||
Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(),
|
||||
Error::StatusCode(status) => Some(*status),
|
||||
Error::InvalidUrl(_) => None,
|
||||
Error::InvalidSecret(_) => None,
|
||||
@@ -137,6 +140,26 @@ impl BeaconNodeHttpClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Perform a HTTP POST request, returning a JSON response.
|
||||
async fn post_with_response<T: DeserializeOwned, U: IntoUrl, V: Serialize>(
|
||||
&self,
|
||||
url: U,
|
||||
body: &V,
|
||||
) -> Result<T, Error> {
|
||||
let response = self
|
||||
.client
|
||||
.post(url)
|
||||
.json(body)
|
||||
.send()
|
||||
.await
|
||||
.map_err(Error::Reqwest)?;
|
||||
ok_or_error(response)
|
||||
.await?
|
||||
.json()
|
||||
.await
|
||||
.map_err(Error::Reqwest)
|
||||
}
|
||||
|
||||
/// `GET beacon/genesis`
|
||||
///
|
||||
/// ## Errors
|
||||
@@ -210,6 +233,35 @@ impl BeaconNodeHttpClient {
|
||||
self.get_opt(path).await
|
||||
}
|
||||
|
||||
/// `GET beacon/states/{state_id}/validator_balances?id`
|
||||
///
|
||||
/// Returns `Ok(None)` on a 404 error.
|
||||
pub async fn get_beacon_states_validator_balances(
|
||||
&self,
|
||||
state_id: StateId,
|
||||
ids: Option<&[ValidatorId]>,
|
||||
) -> Result<Option<GenericResponse<Vec<ValidatorBalanceData>>>, Error> {
|
||||
let mut path = self.eth_path()?;
|
||||
|
||||
path.path_segments_mut()
|
||||
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
|
||||
.push("beacon")
|
||||
.push("states")
|
||||
.push(&state_id.to_string())
|
||||
.push("validator_balances");
|
||||
|
||||
if let Some(ids) = ids {
|
||||
let id_string = ids
|
||||
.iter()
|
||||
.map(|i| i.to_string())
|
||||
.collect::<Vec<_>>()
|
||||
.join(",");
|
||||
path.query_pairs_mut().append_pair("id", &id_string);
|
||||
}
|
||||
|
||||
self.get_opt(path).await
|
||||
}
|
||||
|
||||
/// `GET beacon/states/{state_id}/validators?id,status`
|
||||
///
|
||||
/// Returns `Ok(None)` on a 404 error.
|
||||
@@ -670,7 +722,11 @@ impl BeaconNodeHttpClient {
|
||||
}
|
||||
|
||||
/// `GET node/peers`
|
||||
pub async fn get_node_peers(&self) -> Result<GenericResponse<Vec<PeerData>>, Error> {
|
||||
pub async fn get_node_peers(
|
||||
&self,
|
||||
states: Option<&[PeerState]>,
|
||||
directions: Option<&[PeerDirection]>,
|
||||
) -> Result<PeersData, Error> {
|
||||
let mut path = self.eth_path()?;
|
||||
|
||||
path.path_segments_mut()
|
||||
@@ -678,6 +734,36 @@ impl BeaconNodeHttpClient {
|
||||
.push("node")
|
||||
.push("peers");
|
||||
|
||||
if let Some(states) = states {
|
||||
let state_string = states
|
||||
.iter()
|
||||
.map(|i| i.to_string())
|
||||
.collect::<Vec<_>>()
|
||||
.join(",");
|
||||
path.query_pairs_mut().append_pair("state", &state_string);
|
||||
}
|
||||
|
||||
if let Some(directions) = directions {
|
||||
let dir_string = directions
|
||||
.iter()
|
||||
.map(|i| i.to_string())
|
||||
.collect::<Vec<_>>()
|
||||
.join(",");
|
||||
path.query_pairs_mut().append_pair("direction", &dir_string);
|
||||
}
|
||||
|
||||
self.get(path).await
|
||||
}
|
||||
|
||||
/// `GET node/peer_count`
|
||||
pub async fn get_node_peer_count(&self) -> Result<GenericResponse<PeerCount>, Error> {
|
||||
let mut path = self.eth_path()?;
|
||||
|
||||
path.path_segments_mut()
|
||||
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
|
||||
.push("node")
|
||||
.push("peer_count");
|
||||
|
||||
self.get(path).await
|
||||
}
|
||||
|
||||
@@ -713,37 +799,6 @@ impl BeaconNodeHttpClient {
|
||||
self.get(path).await
|
||||
}
|
||||
|
||||
/// `GET validator/duties/attester/{epoch}?index`
|
||||
///
|
||||
/// ## Note
|
||||
///
|
||||
/// The `index` query parameter accepts a list of validator indices.
|
||||
pub async fn get_validator_duties_attester(
|
||||
&self,
|
||||
epoch: Epoch,
|
||||
index: Option<&[u64]>,
|
||||
) -> Result<GenericResponse<Vec<AttesterData>>, Error> {
|
||||
let mut path = self.eth_path()?;
|
||||
|
||||
path.path_segments_mut()
|
||||
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
|
||||
.push("validator")
|
||||
.push("duties")
|
||||
.push("attester")
|
||||
.push(&epoch.to_string());
|
||||
|
||||
if let Some(index) = index {
|
||||
let string = index
|
||||
.iter()
|
||||
.map(|i| i.to_string())
|
||||
.collect::<Vec<_>>()
|
||||
.join(",");
|
||||
path.query_pairs_mut().append_pair("index", &string);
|
||||
}
|
||||
|
||||
self.get(path).await
|
||||
}
|
||||
|
||||
/// `GET validator/duties/proposer/{epoch}`
|
||||
pub async fn get_validator_duties_proposer(
|
||||
&self,
|
||||
@@ -761,11 +816,7 @@ impl BeaconNodeHttpClient {
|
||||
self.get(path).await
|
||||
}
|
||||
|
||||
/// `GET validator/duties/attester/{epoch}?index`
|
||||
///
|
||||
/// ## Note
|
||||
///
|
||||
/// The `index` query parameter accepts a list of validator indices.
|
||||
/// `GET validator/blocks/{slot}`
|
||||
pub async fn get_validator_blocks<T: EthSpec>(
|
||||
&self,
|
||||
slot: Slot,
|
||||
@@ -834,10 +885,28 @@ impl BeaconNodeHttpClient {
|
||||
self.get_opt(path).await
|
||||
}
|
||||
|
||||
/// `POST validator/duties/attester/{epoch}`
|
||||
pub async fn post_validator_duties_attester(
|
||||
&self,
|
||||
epoch: Epoch,
|
||||
indices: &[u64],
|
||||
) -> Result<GenericResponse<Vec<AttesterData>>, Error> {
|
||||
let mut path = self.eth_path()?;
|
||||
|
||||
path.path_segments_mut()
|
||||
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
|
||||
.push("validator")
|
||||
.push("duties")
|
||||
.push("attester")
|
||||
.push(&epoch.to_string());
|
||||
|
||||
self.post_with_response(path, &indices).await
|
||||
}
|
||||
|
||||
/// `POST validator/aggregate_and_proofs`
|
||||
pub async fn post_validator_aggregate_and_proof<T: EthSpec>(
|
||||
&self,
|
||||
aggregate: &SignedAggregateAndProof<T>,
|
||||
aggregates: &[SignedAggregateAndProof<T>],
|
||||
) -> Result<(), Error> {
|
||||
let mut path = self.eth_path()?;
|
||||
|
||||
@@ -846,7 +915,14 @@ impl BeaconNodeHttpClient {
|
||||
.push("validator")
|
||||
.push("aggregate_and_proofs");
|
||||
|
||||
self.post(path, aggregate).await?;
|
||||
let response = self
|
||||
.client
|
||||
.post(path)
|
||||
.json(aggregates)
|
||||
.send()
|
||||
.await
|
||||
.map_err(Error::Reqwest)?;
|
||||
ok_or_indexed_error(response).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -882,3 +958,17 @@ async fn ok_or_error(response: Response) -> Result<Response, Error> {
|
||||
Err(Error::StatusCode(status))
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an
|
||||
/// appropriate indexed error message.
|
||||
async fn ok_or_indexed_error(response: Response) -> Result<Response, Error> {
|
||||
let status = response.status();
|
||||
|
||||
if status == StatusCode::OK {
|
||||
Ok(response)
|
||||
} else if let Ok(message) = response.json().await {
|
||||
Err(Error::ServerIndexedMessage(message))
|
||||
} else {
|
||||
Err(Error::StatusCode(status))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,30 @@ pub struct ErrorMessage {
|
||||
pub stacktraces: Vec<String>,
|
||||
}
|
||||
|
||||
/// An indexed API error serializable to JSON.
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct IndexedErrorMessage {
|
||||
pub code: u16,
|
||||
pub message: String,
|
||||
pub failures: Vec<Failure>,
|
||||
}
|
||||
|
||||
/// A single failure in an index of API errors, serializable to JSON.
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct Failure {
|
||||
pub index: u64,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
impl Failure {
|
||||
pub fn new(index: usize, message: String) -> Self {
|
||||
Self {
|
||||
index: index as u64,
|
||||
message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct GenesisData {
|
||||
#[serde(with = "serde_utils::quoted_u64")]
|
||||
@@ -206,6 +230,14 @@ pub struct ValidatorData {
|
||||
pub validator: Validator,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct ValidatorBalanceData {
|
||||
#[serde(with = "serde_utils::quoted_u64")]
|
||||
pub index: u64,
|
||||
#[serde(with = "serde_utils::quoted_u64")]
|
||||
pub balance: u64,
|
||||
}
|
||||
|
||||
// TODO: This does not currently match the spec, but I'm going to try and change the spec using
|
||||
// this proposal:
|
||||
//
|
||||
@@ -415,10 +447,14 @@ impl<T: FromStr> TryFrom<String> for QueryVec<T> {
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize)]
|
||||
pub struct ValidatorDutiesQuery {
|
||||
pub index: Option<QueryVec<u64>>,
|
||||
pub struct ValidatorBalancesQuery {
|
||||
pub id: Option<QueryVec<ValidatorId>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
#[serde(transparent)]
|
||||
pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec<u64>);
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct AttesterData {
|
||||
pub pubkey: PublicKeyBytes,
|
||||
@@ -438,6 +474,8 @@ pub struct AttesterData {
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct ProposerData {
|
||||
pub pubkey: PublicKeyBytes,
|
||||
#[serde(with = "serde_utils::quoted_u64")]
|
||||
pub validator_index: u64,
|
||||
pub slot: Slot,
|
||||
}
|
||||
|
||||
@@ -471,6 +509,12 @@ pub struct BeaconCommitteeSubscription {
|
||||
pub is_aggregator: bool,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct PeersQuery {
|
||||
pub state: Option<QueryVec<PeerState>>,
|
||||
pub direction: Option<QueryVec<PeerDirection>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct PeerData {
|
||||
pub peer_id: String,
|
||||
@@ -480,6 +524,17 @@ pub struct PeerData {
|
||||
pub direction: PeerDirection,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct PeersData {
|
||||
pub data: Vec<PeerData>,
|
||||
pub meta: PeersMetaData,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct PeersMetaData {
|
||||
pub count: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum PeerState {
|
||||
@@ -516,6 +571,17 @@ impl FromStr for PeerState {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for PeerState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
PeerState::Connected => write!(f, "connected"),
|
||||
PeerState::Connecting => write!(f, "connecting"),
|
||||
PeerState::Disconnected => write!(f, "disconnected"),
|
||||
PeerState::Disconnecting => write!(f, "disconnecting"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum PeerDirection {
|
||||
@@ -544,6 +610,27 @@ impl FromStr for PeerDirection {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for PeerDirection {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
PeerDirection::Inbound => write!(f, "inbound"),
|
||||
PeerDirection::Outbound => write!(f, "outbound"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct PeerCount {
|
||||
#[serde(with = "serde_utils::quoted_u64")]
|
||||
pub connected: u64,
|
||||
#[serde(with = "serde_utils::quoted_u64")]
|
||||
pub connecting: u64,
|
||||
#[serde(with = "serde_utils::quoted_u64")]
|
||||
pub disconnected: u64,
|
||||
#[serde(with = "serde_utils::quoted_u64")]
|
||||
pub disconnecting: u64,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -122,6 +122,10 @@ define_net!(spadina, include_spadina_file, "spadina", true);
|
||||
|
||||
define_net!(zinken, include_zinken_file, "zinken", true);
|
||||
|
||||
define_net!(mainnet, include_mainnet_file, "mainnet", false);
|
||||
|
||||
define_net!(toledo, include_toledo_file, "toledo", true);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
1
common/eth2_testnet_config/.gitignore
vendored
1
common/eth2_testnet_config/.gitignore
vendored
@@ -3,3 +3,4 @@ schlesi-*
|
||||
witti-*
|
||||
/altona*
|
||||
built_in_testnet_configs/*/genesis.ssz
|
||||
!built_in_testnet_configs/mainnet/genesis.ssz
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
//! Downloads a testnet configuration from Github.
|
||||
|
||||
use eth2_config::{
|
||||
altona, medalla, spadina, zinken, Eth2NetArchiveAndDirectory, GENESIS_FILE_NAME,
|
||||
altona, medalla, spadina, toledo, zinken, Eth2NetArchiveAndDirectory, GENESIS_FILE_NAME,
|
||||
};
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
@@ -12,6 +12,7 @@ const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[
|
||||
medalla::ETH2_NET_DIR,
|
||||
spadina::ETH2_NET_DIR,
|
||||
zinken::ETH2_NET_DIR,
|
||||
toledo::ETH2_NET_DIR,
|
||||
];
|
||||
|
||||
fn main() {
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
[]
|
||||
@@ -0,0 +1,155 @@
|
||||
# Mainnet preset
|
||||
|
||||
CONFIG_NAME: "mainnet"
|
||||
|
||||
# Misc
|
||||
# ---------------------------------------------------------------
|
||||
# 2**6 (= 64)
|
||||
MAX_COMMITTEES_PER_SLOT: 64
|
||||
# 2**7 (= 128)
|
||||
TARGET_COMMITTEE_SIZE: 128
|
||||
# 2**11 (= 2,048)
|
||||
MAX_VALIDATORS_PER_COMMITTEE: 2048
|
||||
# 2**2 (= 4)
|
||||
MIN_PER_EPOCH_CHURN_LIMIT: 4
|
||||
# 2**16 (= 65,536)
|
||||
CHURN_LIMIT_QUOTIENT: 65536
|
||||
# See issue 563
|
||||
SHUFFLE_ROUND_COUNT: 90
|
||||
# `2**14` (= 16,384)
|
||||
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
|
||||
# Dec 1, 2020, 12pm UTC
|
||||
MIN_GENESIS_TIME: 1606824000
|
||||
# 4
|
||||
HYSTERESIS_QUOTIENT: 4
|
||||
# 1 (minus 0.25)
|
||||
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
|
||||
# 5 (plus 1.25)
|
||||
HYSTERESIS_UPWARD_MULTIPLIER: 5
|
||||
|
||||
|
||||
# Fork Choice
|
||||
# ---------------------------------------------------------------
|
||||
# 2**3 (= 8)
|
||||
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
|
||||
|
||||
|
||||
# Validator
|
||||
# ---------------------------------------------------------------
|
||||
# 2**11 (= 2,048)
|
||||
ETH1_FOLLOW_DISTANCE: 2048
|
||||
# 2**4 (= 16)
|
||||
TARGET_AGGREGATORS_PER_COMMITTEE: 16
|
||||
# 2**0 (= 1)
|
||||
RANDOM_SUBNETS_PER_VALIDATOR: 1
|
||||
# 2**8 (= 256)
|
||||
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
|
||||
# 14 (estimate from Eth1 mainnet)
|
||||
SECONDS_PER_ETH1_BLOCK: 14
|
||||
|
||||
|
||||
# Deposit contract
|
||||
# ---------------------------------------------------------------
|
||||
# Ethereum PoW Mainnet
|
||||
DEPOSIT_CHAIN_ID: 1
|
||||
DEPOSIT_NETWORK_ID: 1
|
||||
# **TBD**
|
||||
DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
|
||||
|
||||
|
||||
# Gwei values
|
||||
# ---------------------------------------------------------------
|
||||
# 2**0 * 10**9 (= 1,000,000,000) Gwei
|
||||
MIN_DEPOSIT_AMOUNT: 1000000000
|
||||
# 2**5 * 10**9 (= 32,000,000,000) Gwei
|
||||
MAX_EFFECTIVE_BALANCE: 32000000000
|
||||
# 2**4 * 10**9 (= 16,000,000,000) Gwei
|
||||
EJECTION_BALANCE: 16000000000
|
||||
# 2**0 * 10**9 (= 1,000,000,000) Gwei
|
||||
EFFECTIVE_BALANCE_INCREMENT: 1000000000
|
||||
|
||||
|
||||
# Initial values
|
||||
# ---------------------------------------------------------------
|
||||
# Mainnet initial fork version, recommend altering for testnets
|
||||
GENESIS_FORK_VERSION: 0x00000000
|
||||
BLS_WITHDRAWAL_PREFIX: 0x00
|
||||
|
||||
|
||||
# Time parameters
|
||||
# ---------------------------------------------------------------
|
||||
# 604800 seconds (7 days)
|
||||
GENESIS_DELAY: 604800
|
||||
# 12 seconds
|
||||
SECONDS_PER_SLOT: 12
|
||||
# 2**0 (= 1) slots 12 seconds
|
||||
MIN_ATTESTATION_INCLUSION_DELAY: 1
|
||||
# 2**5 (= 32) slots 6.4 minutes
|
||||
SLOTS_PER_EPOCH: 32
|
||||
# 2**0 (= 1) epochs 6.4 minutes
|
||||
MIN_SEED_LOOKAHEAD: 1
|
||||
# 2**2 (= 4) epochs 25.6 minutes
|
||||
MAX_SEED_LOOKAHEAD: 4
|
||||
# 2**6 (= 64) epochs ~6.8 hours
|
||||
EPOCHS_PER_ETH1_VOTING_PERIOD: 64
|
||||
# 2**13 (= 8,192) slots ~13 hours
|
||||
SLOTS_PER_HISTORICAL_ROOT: 8192
|
||||
# 2**8 (= 256) epochs ~27 hours
|
||||
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
|
||||
# 2**8 (= 256) epochs ~27 hours
|
||||
SHARD_COMMITTEE_PERIOD: 256
|
||||
# 2**2 (= 4) epochs 25.6 minutes
|
||||
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
|
||||
|
||||
|
||||
# State vector lengths
|
||||
# ---------------------------------------------------------------
|
||||
# 2**16 (= 65,536) epochs ~0.8 years
|
||||
EPOCHS_PER_HISTORICAL_VECTOR: 65536
|
||||
# 2**13 (= 8,192) epochs ~36 days
|
||||
EPOCHS_PER_SLASHINGS_VECTOR: 8192
|
||||
# 2**24 (= 16,777,216) historical roots, ~26,131 years
|
||||
HISTORICAL_ROOTS_LIMIT: 16777216
|
||||
# 2**40 (= 1,099,511,627,776) validator spots
|
||||
VALIDATOR_REGISTRY_LIMIT: 1099511627776
|
||||
|
||||
|
||||
# Reward and penalty quotients
|
||||
# ---------------------------------------------------------------
|
||||
# 2**6 (= 64)
|
||||
BASE_REWARD_FACTOR: 64
|
||||
# 2**9 (= 512)
|
||||
WHISTLEBLOWER_REWARD_QUOTIENT: 512
|
||||
# 2**3 (= 8)
|
||||
PROPOSER_REWARD_QUOTIENT: 8
|
||||
# 2**26 (= 67,108,864)
|
||||
INACTIVITY_PENALTY_QUOTIENT: 67108864
|
||||
# 2**7 (= 128) (lower safety margin at Phase 0 genesis)
|
||||
MIN_SLASHING_PENALTY_QUOTIENT: 128
|
||||
# 1 (lower safety margin at Phase 0 genesis)
|
||||
PROPORTIONAL_SLASHING_MULTIPLIER: 1
|
||||
|
||||
|
||||
# Max operations per block
|
||||
# ---------------------------------------------------------------
|
||||
# 2**4 (= 16)
|
||||
MAX_PROPOSER_SLASHINGS: 16
|
||||
# 2**1 (= 2)
|
||||
MAX_ATTESTER_SLASHINGS: 2
|
||||
# 2**7 (= 128)
|
||||
MAX_ATTESTATIONS: 128
|
||||
# 2**4 (= 16)
|
||||
MAX_DEPOSITS: 16
|
||||
# 2**4 (= 16)
|
||||
MAX_VOLUNTARY_EXITS: 16
|
||||
|
||||
|
||||
# Signature domains
|
||||
# ---------------------------------------------------------------
|
||||
DOMAIN_BEACON_PROPOSER: 0x00000000
|
||||
DOMAIN_BEACON_ATTESTER: 0x01000000
|
||||
DOMAIN_RANDAO: 0x02000000
|
||||
DOMAIN_DEPOSIT: 0x03000000
|
||||
DOMAIN_VOLUNTARY_EXIT: 0x04000000
|
||||
DOMAIN_SELECTION_PROOF: 0x05000000
|
||||
DOMAIN_AGGREGATE_AND_PROOF: 0x06000000
|
||||
@@ -0,0 +1 @@
|
||||
11184524
|
||||
@@ -0,0 +1 @@
|
||||
0x00000000219ab540356cBB839Cbe05303d7705Fa
|
||||
@@ -0,0 +1,20 @@
|
||||
# discv5.1-only bootnode @protolambda
|
||||
- enr:-Ku4QL5E378NT4-vqP6v1mZ7kHxiTHJvuBvQixQsuTTCffa0PJNWMBlG3Mduvsvd6T2YP1U3l5tBKO5H-9wyX2SCtPkBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC4EvfsAHAe0P__________gmlkgnY0gmlwhDaetEeJc2VjcDI1NmsxoQKtGC2CAuba7goLLdle899M3esUmoWRvzi7GBVhq6ViCYN1ZHCCIyg
|
||||
|
||||
# lighthouse (Canada) @protolambda
|
||||
- enr:-LK4QHLujdDjOwm2siyFJ2XGz19_ip-qTtozG3ceZ3_56G-LMWb4um67gTSYRJg0WsSkyvRMBEpz8uuIYl-7HfWvktgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhCO3C5OJc2VjcDI1NmsxoQKXw9BLDY6YwmqTtfkzUnlJQb82UrlX4lIAnSSYWHFRlYN0Y3CCIyiDdWRwgiMo
|
||||
|
||||
# lighthouse (Sao Paulo) @protolambda
|
||||
- enr:-LK4QMxmk7obupScBebKFaasSH3QmYUg-HaEmMAljfmGQCLbKwdOhszzx-VfVPvlH7bZZbOmg3-SNWbJsFfytdjD7a4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhBLkdWuJc2VjcDI1NmsxoQOwYsJyLOjJcDIqiQSSZtDi_EwwSaUjPBSnLVY_PYu-HoN0Y3CCIyiDdWRwgiMo
|
||||
|
||||
# Teku @protolambda
|
||||
- enr:-KG4QKqo0mG4C35ntJg8icO54wd973aZ7aBiAnC2t1XkGvgqNDOEHwNe2ykxYVUj9AWjm_lKD7brlhXKCZEskGbie2cDhGV0aDKQl5uvZwBwHtD__________4JpZIJ2NIJpcIQNOThwiXNlY3AyNTZrMaECn1dwC8MRt8rk2VUT8RjzEBaceF09d4CEQI20O_SWYcqDdGNwgiMog3VkcIIjKA
|
||||
|
||||
# Prysm @protolambda
|
||||
- enr:-LK4QAhU5smiLgU0AgrdFv8eCKmDPCBkXCMCIy8Aktaci5qvCYOsW98xVqJS6OoPWt4Sz_YoTdLQBWxd-RZ756vmGPMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhDTTDL2Jc2VjcDI1NmsxoQOmSJ0mKsQjab7Zralm1Hi0AEReZ2SEqYdKoOPmoA98DoN0Y3CCIyiDdWRwgiMo
|
||||
|
||||
# Lighthouse: @sigp
|
||||
- enr:-LK4QBsu_4I-tmA5WgxkJWRuVUCj2_QE2mmrwX0sFvAc3NR_YPrub4kpvPCb_OjKLwEefxey81SAcvQ7mr2Vvh8xhbgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhA3UHZWJc2VjcDI1NmsxoQL9FPylFeunleHuPXlbB938eIMd3X9y9cJ8ZI8y3Li0u4N0Y3CCIyiDdWRwgiMo
|
||||
|
||||
# Lighthouse: @sigp
|
||||
- enr:-LK4QEfW9TCASUUy8L5xamlTVs3JbgT8iYOUspJkbh3rj-BuUndLjtonockiN2K_0g-cBQGq-wvsgAiz5Q3-ic-Wz_ABh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhCLV8-OJc2VjcDI1NmsxoQKYJuiXbqPzkbT0NAKIJneNWiX0136HiYI9qtx5NF1IloN0Y3CCIyiDdWRwgiMo
|
||||
@@ -0,0 +1,154 @@
|
||||
# Toledo preset, variant of mainnet
|
||||
|
||||
CONFIG_NAME: "toledo"
|
||||
|
||||
# Misc
|
||||
# ---------------------------------------------------------------
|
||||
# 2**6 (= 64)
|
||||
MAX_COMMITTEES_PER_SLOT: 64
|
||||
# 2**7 (= 128)
|
||||
TARGET_COMMITTEE_SIZE: 128
|
||||
# 2**11 (= 2,048)
|
||||
MAX_VALIDATORS_PER_COMMITTEE: 2048
|
||||
# 2**2 (= 4)
|
||||
MIN_PER_EPOCH_CHURN_LIMIT: 4
|
||||
# 2**16 (= 65,536)
|
||||
CHURN_LIMIT_QUOTIENT: 65536
|
||||
# See issue 563
|
||||
SHUFFLE_ROUND_COUNT: 90
|
||||
# `2**14` (= 16,384)
|
||||
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
|
||||
# Nov 10, 2020, 12pm UTC
|
||||
MIN_GENESIS_TIME: 1605009600
|
||||
# 4
|
||||
HYSTERESIS_QUOTIENT: 4
|
||||
# 1 (minus 0.25)
|
||||
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
|
||||
# 5 (plus 1.25)
|
||||
HYSTERESIS_UPWARD_MULTIPLIER: 5
|
||||
|
||||
|
||||
# Fork Choice
|
||||
# ---------------------------------------------------------------
|
||||
# 2**3 (= 8)
|
||||
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
|
||||
|
||||
|
||||
# Validator
|
||||
# ---------------------------------------------------------------
|
||||
# 2**11 (= 2,048)
|
||||
ETH1_FOLLOW_DISTANCE: 2048
|
||||
# 2**4 (= 16)
|
||||
TARGET_AGGREGATORS_PER_COMMITTEE: 16
|
||||
# 2**0 (= 1)
|
||||
RANDOM_SUBNETS_PER_VALIDATOR: 1
|
||||
# 2**8 (= 256)
|
||||
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
|
||||
# 14 (estimate from Eth1 mainnet)
|
||||
SECONDS_PER_ETH1_BLOCK: 14
|
||||
|
||||
|
||||
# Deposit contract
|
||||
# ---------------------------------------------------------------
|
||||
# Ethereum Goerli testnet
|
||||
DEPOSIT_CHAIN_ID: 5
|
||||
DEPOSIT_NETWORK_ID: 5
|
||||
# Toledo permissioned test deposit contract on Goerli
|
||||
DEPOSIT_CONTRACT_ADDRESS: 0x47709dC7a8c18688a1f051761fc34ac253970bC0
|
||||
|
||||
|
||||
# Gwei values
|
||||
# ---------------------------------------------------------------
|
||||
# 2**0 * 10**9 (= 1,000,000,000) Gwei
|
||||
MIN_DEPOSIT_AMOUNT: 1000000000
|
||||
# 2**5 * 10**9 (= 32,000,000,000) Gwei
|
||||
MAX_EFFECTIVE_BALANCE: 32000000000
|
||||
# 2**4 * 10**9 (= 16,000,000,000) Gwei
|
||||
EJECTION_BALANCE: 16000000000
|
||||
# 2**0 * 10**9 (= 1,000,000,000) Gwei
|
||||
EFFECTIVE_BALANCE_INCREMENT: 1000000000
|
||||
|
||||
|
||||
# Initial values
|
||||
# ---------------------------------------------------------------
|
||||
GENESIS_FORK_VERSION: 0x00701ED0
|
||||
BLS_WITHDRAWAL_PREFIX: 0x00
|
||||
|
||||
|
||||
# Time parameters
|
||||
# ---------------------------------------------------------------
|
||||
# 86400 seconds (1 day)
|
||||
GENESIS_DELAY: 86400
|
||||
# 12 seconds
|
||||
SECONDS_PER_SLOT: 12
|
||||
# 2**0 (= 1) slots 12 seconds
|
||||
MIN_ATTESTATION_INCLUSION_DELAY: 1
|
||||
# 2**5 (= 32) slots 6.4 minutes
|
||||
SLOTS_PER_EPOCH: 32
|
||||
# 2**0 (= 1) epochs 6.4 minutes
|
||||
MIN_SEED_LOOKAHEAD: 1
|
||||
# 2**2 (= 4) epochs 25.6 minutes
|
||||
MAX_SEED_LOOKAHEAD: 4
|
||||
# 2**6 (= 64) epochs ~6.8 hours
|
||||
EPOCHS_PER_ETH1_VOTING_PERIOD: 64
|
||||
# 2**13 (= 8,192) slots ~13 hours
|
||||
SLOTS_PER_HISTORICAL_ROOT: 8192
|
||||
# 2**8 (= 256) epochs ~27 hours
|
||||
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
|
||||
# 2**8 (= 256) epochs ~27 hours
|
||||
SHARD_COMMITTEE_PERIOD: 256
|
||||
# 2**2 (= 4) epochs 25.6 minutes
|
||||
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
|
||||
|
||||
|
||||
# State vector lengths
|
||||
# ---------------------------------------------------------------
|
||||
# 2**16 (= 65,536) epochs ~0.8 years
|
||||
EPOCHS_PER_HISTORICAL_VECTOR: 65536
|
||||
# 2**13 (= 8,192) epochs ~36 days
|
||||
EPOCHS_PER_SLASHINGS_VECTOR: 8192
|
||||
# 2**24 (= 16,777,216) historical roots, ~26,131 years
|
||||
HISTORICAL_ROOTS_LIMIT: 16777216
|
||||
# 2**40 (= 1,099,511,627,776) validator spots
|
||||
VALIDATOR_REGISTRY_LIMIT: 1099511627776
|
||||
|
||||
|
||||
# Reward and penalty quotients
|
||||
# ---------------------------------------------------------------
|
||||
# 2**6 (= 64)
|
||||
BASE_REWARD_FACTOR: 64
|
||||
# 2**9 (= 512)
|
||||
WHISTLEBLOWER_REWARD_QUOTIENT: 512
|
||||
# 2**3 (= 8)
|
||||
PROPOSER_REWARD_QUOTIENT: 8
|
||||
# 2**26 (= 67,108,864)
|
||||
INACTIVITY_PENALTY_QUOTIENT: 67108864
|
||||
# 2**7 (= 128) (lower safety margin at Phase 0 genesis)
|
||||
MIN_SLASHING_PENALTY_QUOTIENT: 128
|
||||
# 1 (lower safety margin at Phase 0 genesis)
|
||||
PROPORTIONAL_SLASHING_MULTIPLIER: 1
|
||||
|
||||
|
||||
# Max operations per block
|
||||
# ---------------------------------------------------------------
|
||||
# 2**4 (= 16)
|
||||
MAX_PROPOSER_SLASHINGS: 16
|
||||
# 2**1 (= 2)
|
||||
MAX_ATTESTER_SLASHINGS: 2
|
||||
# 2**7 (= 128)
|
||||
MAX_ATTESTATIONS: 128
|
||||
# 2**4 (= 16)
|
||||
MAX_DEPOSITS: 16
|
||||
# 2**4 (= 16)
|
||||
MAX_VOLUNTARY_EXITS: 16
|
||||
|
||||
|
||||
# Signature domains
|
||||
# ---------------------------------------------------------------
|
||||
DOMAIN_BEACON_PROPOSER: 0x00000000
|
||||
DOMAIN_BEACON_ATTESTER: 0x01000000
|
||||
DOMAIN_RANDAO: 0x02000000
|
||||
DOMAIN_DEPOSIT: 0x03000000
|
||||
DOMAIN_VOLUNTARY_EXIT: 0x04000000
|
||||
DOMAIN_SELECTION_PROOF: 0x05000000
|
||||
DOMAIN_AGGREGATE_AND_PROOF: 0x06000000
|
||||
@@ -0,0 +1 @@
|
||||
3702432
|
||||
@@ -0,0 +1 @@
|
||||
0x47709dC7a8c18688a1f051761fc34ac253970bC0
|
||||
Binary file not shown.
@@ -7,10 +7,7 @@
|
||||
//!
|
||||
//! https://github.com/sigp/lighthouse/pull/605
|
||||
//!
|
||||
use eth2_config::{
|
||||
include_altona_file, include_medalla_file, include_spadina_file, include_zinken_file,
|
||||
testnets_dir,
|
||||
};
|
||||
use eth2_config::{testnets_dir, *};
|
||||
|
||||
use enr::{CombinedKey, Enr};
|
||||
use ssz::Decode;
|
||||
@@ -56,8 +53,10 @@ const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
|
||||
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
|
||||
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
|
||||
const ZINKEN: HardcodedNet = define_net!(zinken, include_zinken_file);
|
||||
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
|
||||
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
|
||||
|
||||
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, ZINKEN];
|
||||
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, ZINKEN, MAINNET, TOLEDO];
|
||||
pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
|
||||
|
||||
/// Specifies an Eth2 testnet.
|
||||
@@ -66,6 +65,8 @@ pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
pub struct Eth2TestnetConfig {
|
||||
pub deposit_contract_address: String,
|
||||
/// Note: instead of the block where the contract is deployed, it is acceptable to set this
|
||||
/// value to be the block number where the first deposit occurs.
|
||||
pub deposit_contract_deploy_block: u64,
|
||||
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
|
||||
pub genesis_state_bytes: Option<Vec<u8>>,
|
||||
@@ -239,7 +240,8 @@ impl Eth2TestnetConfig {
|
||||
file.read_to_end(&mut bytes)
|
||||
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
|
||||
})?;
|
||||
Some(bytes)
|
||||
|
||||
Some(bytes).filter(|bytes| !bytes.is_empty())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
@@ -269,7 +271,7 @@ mod tests {
|
||||
use super::*;
|
||||
use ssz::Encode;
|
||||
use tempdir::TempDir;
|
||||
use types::{Eth1Data, Hash256, V012LegacyEthSpec, YamlConfig};
|
||||
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
|
||||
|
||||
type E = V012LegacyEthSpec;
|
||||
|
||||
@@ -279,13 +281,23 @@ mod tests {
|
||||
let config =
|
||||
Eth2TestnetConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
|
||||
|
||||
// Ensure we can parse the YAML config to a chain spec.
|
||||
config
|
||||
.yaml_config
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.apply_to_chain_spec::<E>(&E::default_spec())
|
||||
.unwrap();
|
||||
if net.name == "mainnet" || net.name == "toledo" {
|
||||
// Ensure we can parse the YAML config to a chain spec.
|
||||
config
|
||||
.yaml_config
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
|
||||
.unwrap();
|
||||
} else {
|
||||
// Ensure we can parse the YAML config to a chain spec.
|
||||
config
|
||||
.yaml_config
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
config.genesis_state_bytes.is_some(),
|
||||
|
||||
@@ -57,6 +57,7 @@
|
||||
use prometheus::{HistogramOpts, HistogramTimer, Opts};
|
||||
use std::time::Duration;
|
||||
|
||||
use prometheus::core::{Atomic, GenericGauge, GenericGaugeVec};
|
||||
pub use prometheus::{
|
||||
Encoder, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge,
|
||||
IntGaugeVec, Result, TextEncoder,
|
||||
@@ -164,6 +165,27 @@ pub fn get_int_gauge(int_gauge_vec: &Result<IntGaugeVec>, name: &[&str]) -> Opti
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_gauge<P: Atomic>(
|
||||
gauge_vec: &Result<GenericGaugeVec<P>>,
|
||||
name: &[&str],
|
||||
) -> Option<GenericGauge<P>> {
|
||||
if let Ok(gauge_vec) = gauge_vec {
|
||||
Some(gauge_vec.get_metric_with_label_values(name).ok()?)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_gauge_entry<P: Atomic>(
|
||||
gauge_vec: &Result<GenericGaugeVec<P>>,
|
||||
name: &[&str],
|
||||
value: P::T,
|
||||
) {
|
||||
if let Some(v) = get_gauge(gauge_vec, name) {
|
||||
v.set(value)
|
||||
};
|
||||
}
|
||||
|
||||
/// If `int_gauge_vec.is_ok()`, sets the gauge with the given `name` to the given `value`
|
||||
/// otherwise returns false.
|
||||
pub fn set_int_gauge(int_gauge_vec: &Result<IntGaugeVec>, name: &[&str], value: i64) -> bool {
|
||||
|
||||
@@ -10,7 +10,7 @@ use target_info::Target;
|
||||
/// `Lighthouse/v0.2.0-1419501f2+`
|
||||
pub const VERSION: &str = git_version!(
|
||||
args = ["--always", "--dirty=+"],
|
||||
prefix = "Lighthouse/v0.3.3-",
|
||||
prefix = "Lighthouse/v0.3.4-",
|
||||
fallback = "unknown"
|
||||
);
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use eth2::types::ErrorMessage;
|
||||
use eth2::types::{ErrorMessage, Failure, IndexedErrorMessage};
|
||||
use std::convert::Infallible;
|
||||
use warp::{http::StatusCode, reject::Reject};
|
||||
|
||||
@@ -110,12 +110,37 @@ pub fn invalid_auth(msg: String) -> warp::reject::Rejection {
|
||||
warp::reject::custom(InvalidAuthorization(msg))
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct IndexedBadRequestErrors {
|
||||
pub message: String,
|
||||
pub failures: Vec<Failure>,
|
||||
}
|
||||
|
||||
impl Reject for IndexedBadRequestErrors {}
|
||||
|
||||
pub fn indexed_bad_request(message: String, failures: Vec<Failure>) -> warp::reject::Rejection {
|
||||
warp::reject::custom(IndexedBadRequestErrors { message, failures })
|
||||
}
|
||||
|
||||
/// This function receives a `Rejection` and tries to return a custom
|
||||
/// value, otherwise simply passes the rejection along.
|
||||
pub async fn handle_rejection(err: warp::Rejection) -> Result<impl warp::Reply, Infallible> {
|
||||
let code;
|
||||
let message;
|
||||
|
||||
if let Some(e) = err.find::<crate::reject::IndexedBadRequestErrors>() {
|
||||
message = format!("BAD_REQUEST: {}", e.message);
|
||||
code = StatusCode::BAD_REQUEST;
|
||||
|
||||
let json = warp::reply::json(&IndexedErrorMessage {
|
||||
code: code.as_u16(),
|
||||
message,
|
||||
failures: e.failures.clone(),
|
||||
});
|
||||
|
||||
return Ok(warp::reply::with_status(json, code));
|
||||
}
|
||||
|
||||
if err.is_not_found() {
|
||||
code = StatusCode::NOT_FOUND;
|
||||
message = "NOT_FOUND".to_string();
|
||||
|
||||
@@ -260,7 +260,7 @@ impl ChainSpec {
|
||||
churn_limit_quotient: 65_536,
|
||||
shuffle_round_count: 90,
|
||||
min_genesis_active_validator_count: 16_384,
|
||||
min_genesis_time: 1_578_009_600, // Jan 3, 2020
|
||||
min_genesis_time: 1606824000, // Dec 1, 2020
|
||||
hysteresis_quotient: 4,
|
||||
hysteresis_downward_multiplier: 1,
|
||||
hysteresis_upward_multiplier: 5,
|
||||
@@ -324,7 +324,7 @@ impl ChainSpec {
|
||||
seconds_per_eth1_block: 14,
|
||||
deposit_chain_id: 1,
|
||||
deposit_network_id: 1,
|
||||
deposit_contract_address: "1234567890123456789012345678901234567890"
|
||||
deposit_contract_address: "00000000219ab540356cbb839cbe05303d7705fa"
|
||||
.parse()
|
||||
.expect("chain spec deposit contract address"),
|
||||
|
||||
@@ -354,6 +354,7 @@ impl ChainSpec {
|
||||
target_committee_size: 4,
|
||||
shuffle_round_count: 10,
|
||||
min_genesis_active_validator_count: 64,
|
||||
min_genesis_time: 1578009600,
|
||||
eth1_follow_distance: 16,
|
||||
genesis_fork_version: [0x00, 0x00, 0x00, 0x01],
|
||||
shard_committee_period: 64,
|
||||
@@ -366,6 +367,9 @@ impl ChainSpec {
|
||||
network_id: 2, // lighthouse testnet network id
|
||||
deposit_chain_id: 5,
|
||||
deposit_network_id: 5,
|
||||
deposit_contract_address: "1234567890123456789012345678901234567890"
|
||||
.parse()
|
||||
.expect("minimal chain spec deposit address"),
|
||||
boot_nodes,
|
||||
..ChainSpec::mainnet()
|
||||
}
|
||||
@@ -598,6 +602,7 @@ impl YamlConfig {
|
||||
Some(match self.config_name.as_str() {
|
||||
"mainnet" => EthSpecId::Mainnet,
|
||||
"minimal" => EthSpecId::Minimal,
|
||||
"toledo" => EthSpecId::Mainnet,
|
||||
"zinken" => EthSpecId::V012Legacy,
|
||||
"spadina" => EthSpecId::V012Legacy,
|
||||
"medalla" => EthSpecId::V012Legacy,
|
||||
|
||||
@@ -109,16 +109,28 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
|
||||
fn get_committee_count_per_slot(
|
||||
active_validator_count: usize,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<usize, Error> {
|
||||
Self::get_committee_count_per_slot_with(
|
||||
active_validator_count,
|
||||
spec.max_committees_per_slot,
|
||||
spec.target_committee_size,
|
||||
)
|
||||
}
|
||||
|
||||
fn get_committee_count_per_slot_with(
|
||||
active_validator_count: usize,
|
||||
max_committees_per_slot: usize,
|
||||
target_committee_size: usize,
|
||||
) -> Result<usize, Error> {
|
||||
let slots_per_epoch = Self::SlotsPerEpoch::to_usize();
|
||||
|
||||
Ok(std::cmp::max(
|
||||
1,
|
||||
std::cmp::min(
|
||||
spec.max_committees_per_slot,
|
||||
max_committees_per_slot,
|
||||
active_validator_count
|
||||
.safe_div(slots_per_epoch)?
|
||||
.safe_div(spec.target_committee_size)?,
|
||||
.safe_div(target_committee_size)?,
|
||||
),
|
||||
))
|
||||
}
|
||||
|
||||
@@ -75,3 +75,9 @@ impl Into<u64> for SubnetId {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<u64> for &SubnetId {
|
||||
fn into(self) -> u64 {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ uuid = { version = "0.8.1", features = ["serde", "v4"] }
|
||||
rand = "0.7.3"
|
||||
eth2_keystore = { path = "../eth2_keystore" }
|
||||
eth2_key_derivation = { path = "../eth2_key_derivation" }
|
||||
tiny-bip39 = { git = "https://github.com/sigp/tiny-bip39.git", rev = "1137c32da91bd5e75db4305a84ddd15255423f7f" }
|
||||
tiny-bip39 = "0.8.0"
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4.2"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "lcli"
|
||||
description = "Lighthouse CLI (modeled after zcli)"
|
||||
version = "0.3.3"
|
||||
version = "0.3.4"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
||||
edition = "2018"
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lighthouse"
|
||||
version = "0.3.3"
|
||||
version = "0.3.4"
|
||||
authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
@@ -36,6 +36,7 @@ eth2_testnet_config = { path = "../common/eth2_testnet_config" }
|
||||
directory = { path = "../common/directory" }
|
||||
lighthouse_version = { path = "../common/lighthouse_version" }
|
||||
account_utils = { path = "../common/account_utils" }
|
||||
remote_signer = { "path" = "../remote_signer" }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.1.0"
|
||||
|
||||
@@ -15,7 +15,7 @@ use futures::channel::{
|
||||
};
|
||||
use futures::{future, StreamExt};
|
||||
|
||||
use slog::{info, o, Drain, Level, Logger};
|
||||
use slog::{error, info, o, Drain, Level, Logger};
|
||||
use sloggers::{null::NullLoggerBuilder, Build};
|
||||
use std::cell::RefCell;
|
||||
use std::ffi::OsStr;
|
||||
@@ -395,9 +395,16 @@ impl<E: EthSpec> Environment<E> {
|
||||
// setup for handling a Ctrl-C
|
||||
let (ctrlc_send, ctrlc_oneshot) = oneshot::channel();
|
||||
let ctrlc_send_c = RefCell::new(Some(ctrlc_send));
|
||||
let log = self.log.clone();
|
||||
ctrlc::set_handler(move || {
|
||||
if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() {
|
||||
ctrlc_send.send(()).expect("Error sending ctrl-c message");
|
||||
if let Err(e) = ctrlc_send.send(()) {
|
||||
error!(
|
||||
log,
|
||||
"Error sending ctrl-c message";
|
||||
"error" => e
|
||||
);
|
||||
}
|
||||
}
|
||||
})
|
||||
.map_err(|e| format!("Could not set ctrlc handler: {:?}", e))?;
|
||||
|
||||
@@ -115,7 +115,7 @@ fn main() {
|
||||
.long("testnet")
|
||||
.value_name("testnet")
|
||||
.help("Name of network lighthouse will connect to")
|
||||
.possible_values(&["medalla", "altona", "spadina", "zinken"])
|
||||
.possible_values(&["medalla", "altona", "spadina", "zinken", "mainnet", "toledo"])
|
||||
.conflicts_with("testnet-dir")
|
||||
.takes_value(true)
|
||||
.global(true)
|
||||
@@ -125,6 +125,7 @@ fn main() {
|
||||
.subcommand(boot_node::cli_app())
|
||||
.subcommand(validator_client::cli_app())
|
||||
.subcommand(account_manager::cli_app())
|
||||
.subcommand(remote_signer::cli_app())
|
||||
.get_matches();
|
||||
|
||||
// Debugging output for libp2p and external crates.
|
||||
@@ -264,15 +265,22 @@ fn run<E: EthSpec>(
|
||||
|
||||
warn!(
|
||||
log,
|
||||
"Ethereum 2.0 is pre-release. This software is experimental."
|
||||
"Ethereum 2.0 is pre-release. This software is experimental"
|
||||
);
|
||||
info!(log, "Lighthouse started"; "version" => VERSION);
|
||||
info!(
|
||||
log,
|
||||
"Configured for testnet";
|
||||
"name" => testnet_name
|
||||
"name" => &testnet_name
|
||||
);
|
||||
|
||||
if testnet_name == "mainnet" {
|
||||
warn!(
|
||||
log,
|
||||
"The mainnet specification is being used. This not recommended (yet)."
|
||||
)
|
||||
}
|
||||
|
||||
match matches.subcommand() {
|
||||
("beacon_node", Some(matches)) => {
|
||||
let context = environment.core_context();
|
||||
@@ -292,7 +300,7 @@ fn run<E: EthSpec>(
|
||||
.shutdown_sender()
|
||||
.try_send("Failed to start beacon node");
|
||||
}
|
||||
})
|
||||
});
|
||||
}
|
||||
("validator_client", Some(matches)) => {
|
||||
let context = environment.core_context();
|
||||
@@ -316,7 +324,17 @@ fn run<E: EthSpec>(
|
||||
.shutdown_sender()
|
||||
.try_send("Failed to start validator client");
|
||||
}
|
||||
})
|
||||
});
|
||||
}
|
||||
("remote_signer", Some(matches)) => {
|
||||
if let Err(e) = remote_signer::run(&mut environment, matches) {
|
||||
crit!(log, "Failed to start remote signer"; "reason" => e);
|
||||
let _ = environment
|
||||
.core_context()
|
||||
.executor
|
||||
.shutdown_sender()
|
||||
.try_send("Failed to start remote signer");
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
crit!(log, "No subcommand supplied. See --help .");
|
||||
|
||||
@@ -283,10 +283,14 @@ impl TestValidator {
|
||||
|
||||
let pubkeys = stdout[..stdout.len() - 1]
|
||||
.split("\n")
|
||||
.map(|line| {
|
||||
let tab = line.find("\t").expect("line must have tab");
|
||||
let (_, pubkey) = line.split_at(tab + 1);
|
||||
pubkey.to_string()
|
||||
.filter_map(|line| {
|
||||
if line.starts_with(MAINNET_WARNING) {
|
||||
None
|
||||
} else {
|
||||
let tab = line.find("\t").expect("line must have tab");
|
||||
let (_, pubkey) = line.split_at(tab + 1);
|
||||
Some(pubkey.to_string())
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
|
||||
24
remote_signer/Cargo.toml
Normal file
24
remote_signer/Cargo.toml
Normal file
@@ -0,0 +1,24 @@
|
||||
[package]
|
||||
name = "remote_signer"
|
||||
version = "0.2.0"
|
||||
authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[features]
|
||||
# Compiles the BLS crypto code so that the binary is portable across machines.
|
||||
portable = ["bls/supranational-portable"]
|
||||
# Uses the slower Milagro BLS library, which is written in native Rust.
|
||||
milagro = ["bls/milagro"]
|
||||
|
||||
[dev-dependencies]
|
||||
client_backend = { path = "./backend", package = "remote_signer_backend" }
|
||||
helpers = { path = "../testing/remote_signer_test", package = "remote_signer_test" }
|
||||
|
||||
[dependencies]
|
||||
bls = { path = "../crypto/bls" }
|
||||
clap = "2.33.3"
|
||||
client = { path = "./client", package = "remote_signer_client" }
|
||||
environment = { path = "../lighthouse/environment" }
|
||||
serde_json = "1.0.58"
|
||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||
types = { path = "../consensus/types"}
|
||||
134
remote_signer/README.md
Normal file
134
remote_signer/README.md
Normal file
@@ -0,0 +1,134 @@
|
||||
# Remote BLS Signer
|
||||
|
||||
## Overview
|
||||
|
||||
Simple HTTP BLS signer service.
|
||||
|
||||
This service is designed to be consumed by Ethereum 2.0 clients, looking for a more secure avenue to store their BLS12-381 secret keys, while running their validators in more permisive and/or scalable environments.
|
||||
|
||||
One goal of this package is to be standard compliant. There is a [current draft for an Ethereum Improvement Proposal (EIP)](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3030.md) in progress. Please refer to the [roadmap](#roadmap) for a list of advanced features.
|
||||
|
||||
## API
|
||||
|
||||
### Standard
|
||||
|
||||
### `GET /upcheck`
|
||||
|
||||
_**Responses**_
|
||||
|
||||
Success | <br>
|
||||
--- | ---
|
||||
Code | `200`
|
||||
Content | `{"status": "OK"}`
|
||||
|
||||
---
|
||||
|
||||
### `GET /keys`
|
||||
|
||||
Returns the identifiers of the keys available to the signer.
|
||||
|
||||
_**Responses**_
|
||||
|
||||
Success | <br>
|
||||
--- | ---
|
||||
Code | `200`
|
||||
Content | `{"keys": "[identifier]"}`
|
||||
|
||||
---
|
||||
|
||||
### `POST /sign/:identifier`
|
||||
|
||||
URL Parameter | <br>
|
||||
--- | ---
|
||||
`:identifier` | `public_key_hex_string_without_0x`
|
||||
|
||||
_**Request**_
|
||||
|
||||
JSON Body | <br> | <br>
|
||||
--- | --- | ---
|
||||
`bls_domain` | **Required** | The BLS Signature domain.<br>As defined in the [specification](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/beacon-chain.md#domain-types), in lowercase, omitting the `domain` prefix.<br>Supporting `beacon_proposer`, `beacon_attester`, and `randao`.
|
||||
`data` | **Required** | The data to be signed.<br>As defined in the specifications for [block](https://github.com/ethereum/eth2.0-APIs/blob/master/types/block.yaml), [attestation](https://github.com/ethereum/eth2.0-APIs/blob/master/types/attestation.yaml), and [epoch](https://github.com/ethereum/eth2.0-APIs/blob/master/types/misc.yaml).
|
||||
`fork` | **Required** | A `Fork` object containing previous and current versions.<br>As defined in the [specification](https://github.com/ethereum/eth2.0-APIs/blob/master/types/misc.yaml)
|
||||
`genesis_validators_root` | **Required** | A `Hash256` for domain separation and chain versioning.
|
||||
<br> | Optional | Any other field will be ignored by the signer
|
||||
|
||||
_**Responses**_
|
||||
|
||||
Success | <br>
|
||||
--- | ---
|
||||
Code | `200`
|
||||
Content | `{"signature": "<signature_hex_string>"}`
|
||||
|
||||
_or_
|
||||
|
||||
Error | <br>
|
||||
--- | ---
|
||||
Code | `400`
|
||||
Content | `{"error": "<Bad Request Error Message>"}`
|
||||
|
||||
_or_
|
||||
|
||||
Error | <br>
|
||||
--- | ---
|
||||
Code | `404`
|
||||
Content | `{"error": "Key not found: <identifier>"}`
|
||||
|
||||
## Build instructions
|
||||
|
||||
1. [Get Rust](https://www.rust-lang.org/learn/get-started).
|
||||
2. Go to the root directory of this repository.
|
||||
3. Execute `make`
|
||||
4. The binary `lighthouse` will most likely be found in `./target/release`.
|
||||
5. Run it as `lighthouse remote_signer` or `lighthouse rs`.
|
||||
|
||||
## Running the signer
|
||||
|
||||
### Storing the secret keys as raw files
|
||||
|
||||
* Steps to store a secret key
|
||||
* Choose an empty directory, as the backend will parse every file looking for keys.
|
||||
* Create a file named after the **hex representation of the public key without 0x**.
|
||||
* Write the **hex representation of the secret key without 0x**.
|
||||
* Store the file in your chosen directory.
|
||||
* Use this directory as a command line parameter (`--storage-raw-dir`)
|
||||
|
||||
### Command line flags
|
||||
|
||||
```
|
||||
USAGE:
|
||||
lighthouse remote_signer [OPTIONS]
|
||||
|
||||
FLAGS:
|
||||
-h, --help Prints help information
|
||||
-V, --version Prints version information
|
||||
|
||||
OPTIONS:
|
||||
--debug-level <LEVEL> The verbosity level for emitting logs. [default: info] [possible values:
|
||||
info, debug, trace, warn, error, crit]
|
||||
--listen-address <ADDRESS> The address to listen for TCP connections. [default: 0.0.0.0]
|
||||
--log-format <FORMAT> Specifies the format used for logging. [possible values: JSON]
|
||||
--logfile <FILE> File path where output will be written.
|
||||
--port <PORT> The TCP port to listen on. [default: 9000]
|
||||
--spec <TITLE> Specifies the default eth2 spec type. [default: mainnet] [possible values:
|
||||
mainnet, minimal, interop]
|
||||
--storage-raw-dir <DIR> Data directory for secret keys in raw files.
|
||||
```
|
||||
|
||||
## Roadmap
|
||||
|
||||
- [X] EIP standard compliant
|
||||
- [ ] Metrics
|
||||
- [ ] Benchmarking & Profiling
|
||||
- [ ] Release management
|
||||
- [ ] Architecture builds
|
||||
- [ ] Support EIP-2335, BLS12-381 keystore
|
||||
- [ ] Support storage in AWS Cloud HSM
|
||||
- [ ] Route with the `warp` library
|
||||
- [ ] Slashing protection pipeline
|
||||
- [ ] TLS/SSL support for requests
|
||||
- [ ] Authentication by HTTP Header support
|
||||
- [ ] Confidential computing support (e.g. Intel SGX)
|
||||
|
||||
## LICENSE
|
||||
|
||||
* Apache 2.0.
|
||||
20
remote_signer/backend/Cargo.toml
Normal file
20
remote_signer/backend/Cargo.toml
Normal file
@@ -0,0 +1,20 @@
|
||||
[package]
|
||||
name = "remote_signer_backend"
|
||||
version = "0.2.0"
|
||||
authors = ["Herman Junge <herman@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dev-dependencies]
|
||||
helpers = { path = "../../testing/remote_signer_test", package = "remote_signer_test" }
|
||||
sloggers = "1.0.1"
|
||||
tempdir = "0.3.7"
|
||||
|
||||
[dependencies]
|
||||
bls = { path = "../../crypto/bls" }
|
||||
clap = "2.33.3"
|
||||
hex = "0.4.2"
|
||||
lazy_static = "1.4.0"
|
||||
regex = "1.3.9"
|
||||
slog = "2.5.2"
|
||||
types = { path = "../../consensus/types" }
|
||||
zeroize = { version = "1.1.1", features = ["zeroize_derive"] }
|
||||
46
remote_signer/backend/src/error.rs
Normal file
46
remote_signer/backend/src/error.rs
Normal file
@@ -0,0 +1,46 @@
|
||||
#[derive(Debug)]
|
||||
pub enum BackendError {
|
||||
/// Parameter is not a hexadecimal representation of a BLS public key.
|
||||
InvalidPublicKey(String),
|
||||
|
||||
/// Retrieved value is not a hexadecimal representation of a BLS secret key.
|
||||
InvalidSecretKey(String),
|
||||
|
||||
/// Public and Secret key won't match.
|
||||
KeyMismatch(String),
|
||||
|
||||
/// Item requested by its public key is not found.
|
||||
KeyNotFound(String),
|
||||
|
||||
/// Errors from the storage medium.
|
||||
///
|
||||
/// When converted from `std::io::Error`, stores `std::io::ErrorKind`
|
||||
/// and `std::io::Error` both formatted to string.
|
||||
StorageError(String, String),
|
||||
}
|
||||
|
||||
impl std::fmt::Display for BackendError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
match self {
|
||||
BackendError::InvalidPublicKey(e) => write!(f, "Invalid public key: {}", e),
|
||||
|
||||
// Feed it with the public key value used to retrieve it.
|
||||
BackendError::InvalidSecretKey(e) => write!(f, "Invalid secret key: {}", e),
|
||||
|
||||
// Feed it with the public key value used to retrieve it.
|
||||
BackendError::KeyMismatch(e) => write!(f, "Key mismatch: {}", e),
|
||||
|
||||
BackendError::KeyNotFound(e) => write!(f, "Key not found: {}", e),
|
||||
|
||||
// Only outputs to string the first component of the tuple, accounting
|
||||
// for potential differences on error displays between OS distributions.
|
||||
BackendError::StorageError(e, _) => write!(f, "Storage error: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for BackendError {
|
||||
fn from(e: std::io::Error) -> BackendError {
|
||||
BackendError::StorageError(format!("{:?}", e.kind()), format!("{}", e))
|
||||
}
|
||||
}
|
||||
336
remote_signer/backend/src/lib.rs
Normal file
336
remote_signer/backend/src/lib.rs
Normal file
@@ -0,0 +1,336 @@
|
||||
mod error;
|
||||
mod storage;
|
||||
mod storage_raw_dir;
|
||||
mod utils;
|
||||
mod zeroize_string;
|
||||
|
||||
use crate::zeroize_string::ZeroizeString;
|
||||
use bls::SecretKey;
|
||||
use clap::ArgMatches;
|
||||
pub use error::BackendError;
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use slog::{info, Logger};
|
||||
pub use storage::Storage;
|
||||
use storage_raw_dir::StorageRawDir;
|
||||
use types::Hash256;
|
||||
use utils::{bytes96_to_hex_string, validate_bls_pair};
|
||||
|
||||
lazy_static! {
|
||||
static ref PUBLIC_KEY_REGEX: Regex = Regex::new(r"[0-9a-fA-F]{96}").unwrap();
|
||||
}
|
||||
|
||||
/// A backend to be used by the Remote Signer HTTP API.
|
||||
///
|
||||
/// Designed to support several types of storages.
|
||||
#[derive(Clone)]
|
||||
pub struct Backend<T> {
|
||||
storage: T,
|
||||
}
|
||||
|
||||
impl Backend<StorageRawDir> {
|
||||
/// Creates a Backend with the given storage type at the CLI arguments.
|
||||
///
|
||||
/// # Storage types supported
|
||||
///
|
||||
/// * Raw files in directory: `--storage-raw-dir <DIR>`
|
||||
///
|
||||
pub fn new(cli_args: &ArgMatches<'_>, log: &Logger) -> Result<Self, String> {
|
||||
// Storage types are mutually exclusive.
|
||||
if let Some(path) = cli_args.value_of("storage-raw-dir") {
|
||||
info!(
|
||||
log,
|
||||
"Loading Backend";
|
||||
"storage type" => "raw dir",
|
||||
"directory" => path
|
||||
);
|
||||
|
||||
StorageRawDir::new(path)
|
||||
.map(|storage| Self { storage })
|
||||
.map_err(|e| format!("Storage Raw Dir: {}", e))
|
||||
} else {
|
||||
Err("No storage type supplied.".to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Storage> Backend<T> {
|
||||
/// Returns the available public keys in storage.
|
||||
pub fn get_keys(&self) -> Result<Vec<String>, BackendError> {
|
||||
self.storage.get_keys()
|
||||
}
|
||||
|
||||
/// Signs the message with the requested key in storage.
|
||||
pub fn sign_message(
|
||||
&self,
|
||||
public_key: &str,
|
||||
signing_root: Hash256,
|
||||
) -> Result<String, BackendError> {
|
||||
if !PUBLIC_KEY_REGEX.is_match(public_key) || public_key.len() != 96 {
|
||||
return Err(BackendError::InvalidPublicKey(public_key.to_string()));
|
||||
}
|
||||
|
||||
let secret_key: ZeroizeString = self.storage.get_secret_key(public_key)?;
|
||||
let secret_key: SecretKey = validate_bls_pair(public_key, secret_key)?;
|
||||
|
||||
let signature = secret_key.sign(signing_root);
|
||||
|
||||
let signature: String = bytes96_to_hex_string(signature.serialize())
|
||||
.expect("Writing to a string should never error.");
|
||||
|
||||
Ok(signature)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
pub mod tests_commons {
    //! Shared fixtures for the backend test modules.
    use super::*;
    pub use crate::Storage;
    use helpers::*;
    use sloggers::{null::NullLoggerBuilder, Build};
    use tempdir::TempDir;

    // All of these helpers exercise the raw-dir storage flavor.
    type T = StorageRawDir;

    // Builds a `StorageRawDir` over a fresh temporary directory. The
    // `TempDir` is returned so it outlives (and cleans up after) the test.
    pub fn new_storage_with_tmp_dir() -> (T, TempDir) {
        let tmp_dir = TempDir::new("bls-remote-signer-test").unwrap();
        let storage = StorageRawDir::new(tmp_dir.path().to_str().unwrap()).unwrap();
        (storage, tmp_dir)
    }

    // Discards all log output during tests.
    pub fn get_null_logger() -> Logger {
        let log_builder = NullLoggerBuilder;
        log_builder.build().unwrap()
    }

    // Backend over an EMPTY temporary directory (for `get_keys` tests).
    pub fn new_backend_for_get_keys() -> (Backend<T>, TempDir) {
        let tmp_dir = TempDir::new("bls-remote-signer-test").unwrap();

        let matches = set_matches(vec![
            "this_test",
            "--storage-raw-dir",
            tmp_dir.path().to_str().unwrap(),
        ]);

        let backend = match Backend::new(&matches, &get_null_logger()) {
            Ok(backend) => (backend),
            Err(e) => panic!("We should not be getting an err here: {}", e),
        };

        (backend, tmp_dir)
    }

    // Backend over a populated directory: valid key files, sub-dirs,
    // non-key files and one mismatched key file (for signing tests).
    pub fn new_backend_for_signing() -> (Backend<T>, TempDir) {
        let (backend, tmp_dir) = new_backend_for_get_keys();

        // This one has the whole fauna.
        add_sub_dirs(&tmp_dir);
        add_key_files(&tmp_dir);
        add_non_key_files(&tmp_dir);
        add_mismatched_key_file(&tmp_dir);

        (backend, tmp_dir)
    }

    // Asserts that `Backend::new` fails with exactly `error_msg`.
    pub fn assert_backend_new_error(matches: &ArgMatches, error_msg: &str) {
        match Backend::new(matches, &get_null_logger()) {
            Ok(_) => panic!("This invocation to Backend::new() should return error"),
            Err(e) => assert_eq!(e.to_string(), error_msg),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
pub mod backend_new {
    //! Tests for `Backend::new` (CLI parsing / storage bootstrap).
    use super::*;
    use crate::tests_commons::*;
    use helpers::*;
    use tempdir::TempDir;

    #[test]
    fn no_storage_type_supplied() {
        let matches = set_matches(vec!["this_test"]);

        assert_backend_new_error(&matches, "No storage type supplied.");
    }

    #[test]
    fn given_path_does_not_exist() {
        let matches = set_matches(vec!["this_test", "--storage-raw-dir", "/dev/null/foo"]);

        assert_backend_new_error(&matches, "Storage Raw Dir: Path does not exist.");
    }

    #[test]
    fn given_path_is_not_a_dir() {
        let matches = set_matches(vec!["this_test", "--storage-raw-dir", "/dev/null"]);

        assert_backend_new_error(&matches, "Storage Raw Dir: Path is not a directory.");
    }

    #[test]
    fn given_inaccessible() {
        let tmp_dir = TempDir::new("bls-remote-signer-test").unwrap();
        // `0o40311` = `d-wx--x--x`: traversable/writable but not listable.
        set_permissions(tmp_dir.path(), 0o40311);

        let matches = set_matches(vec![
            "this_test",
            "--storage-raw-dir",
            tmp_dir.path().to_str().unwrap(),
        ]);

        let result = Backend::new(&matches, &get_null_logger());

        // A `d-wx--x--x` directory is inaccessible but not unwritable.
        // By switching back to `drwxr-xr-x` we can get rid of the
        // temporary directory once we leave this scope.
        set_permissions(tmp_dir.path(), 0o40755);

        match result {
            Ok(_) => panic!("This invocation to Backend::new() should return error"),
            Err(e) => assert_eq!(e.to_string(), "Storage Raw Dir: PermissionDenied",),
        }
    }

    #[test]
    fn happy_path() {
        let (_backend, _tmp_dir) = new_backend_for_get_keys();
    }
}
|
||||
|
||||
#[cfg(test)]
pub mod backend_raw_dir_get_keys {
    //! Tests for `Backend::get_keys` over raw-dir storage.
    use crate::tests_commons::*;
    use helpers::*;

    #[test]
    fn empty_dir() {
        let (backend, _tmp_dir) = new_backend_for_get_keys();

        assert_eq!(backend.get_keys().unwrap().len(), 0);
    }

    #[test]
    fn some_files_are_not_public_keys() {
        let (backend, tmp_dir) = new_backend_for_get_keys();

        // Sub-directories and non-key files must be filtered out;
        // only the three key files should be counted.
        add_sub_dirs(&tmp_dir);
        add_key_files(&tmp_dir);
        add_non_key_files(&tmp_dir);

        assert_eq!(backend.get_keys().unwrap().len(), 3);
    }

    #[test]
    fn all_files_are_public_keys() {
        let (backend, tmp_dir) = new_backend_for_get_keys();
        add_key_files(&tmp_dir);

        assert_eq!(backend.get_keys().unwrap().len(), 3);
    }
}
|
||||
|
||||
#[cfg(test)]
pub mod backend_raw_dir_sign_message {
    //! Tests for `Backend::sign_message` over raw-dir storage.
    use crate::tests_commons::*;
    use helpers::*;
    use types::Hash256;

    #[test]
    fn invalid_public_key() {
        let (backend, _tmp_dir) = new_backend_for_signing();

        let test_case = |public_key_param: &str| {
            assert_eq!(
                backend
                    .clone()
                    .sign_message(
                        public_key_param,
                        Hash256::from_slice(&hex::decode(SIGNING_ROOT).unwrap())
                    )
                    .unwrap_err()
                    .to_string(),
                format!("Invalid public key: {}", public_key_param)
            );
        };

        test_case("abcdef"); // Length < 96.
        test_case(&format!("{}55", PUBLIC_KEY_1)); // Length > 96.
        test_case(SILLY_FILE_NAME_1); // Length == 96; Invalid hex characters.
    }

    #[test]
    fn storage_error() {
        let (backend, tmp_dir) = new_backend_for_signing();

        // Make both the directory and the key file unreadable.
        set_permissions(tmp_dir.path(), 0o40311);
        set_permissions(&tmp_dir.path().join(PUBLIC_KEY_1), 0o40311);

        let result = backend.sign_message(
            PUBLIC_KEY_1,
            Hash256::from_slice(&hex::decode(SIGNING_ROOT).unwrap()),
        );

        // Restore permissions so the temp dir can be cleaned up on drop.
        set_permissions(tmp_dir.path(), 0o40755);
        set_permissions(&tmp_dir.path().join(PUBLIC_KEY_1), 0o40755);

        assert_eq!(
            result.unwrap_err().to_string(),
            "Storage error: PermissionDenied"
        );
    }

    #[test]
    fn key_not_found() {
        let (backend, _tmp_dir) = new_backend_for_signing();

        assert_eq!(
            backend
                .sign_message(
                    ABSENT_PUBLIC_KEY,
                    Hash256::from_slice(&hex::decode(SIGNING_ROOT).unwrap())
                )
                .unwrap_err()
                .to_string(),
            format!("Key not found: {}", ABSENT_PUBLIC_KEY)
        );
    }

    #[test]
    fn key_mismatch() {
        let (backend, _tmp_dir) = new_backend_for_signing();

        // The fixture wrote a key file whose contents do not pair with
        // its file name; signing with it must be rejected.
        assert_eq!(
            backend
                .sign_message(
                    MISMATCHED_PUBLIC_KEY,
                    Hash256::from_slice(&hex::decode(SIGNING_ROOT).unwrap())
                )
                .unwrap_err()
                .to_string(),
            format!("Key mismatch: {}", MISMATCHED_PUBLIC_KEY)
        );
    }

    #[test]
    fn happy_path() {
        let (backend, _tmp_dir) = new_backend_for_signing();

        // Each signature is compared against a precomputed expected value.
        let test_case = |public_key: &str, signature: &str| {
            assert_eq!(
                backend
                    .clone()
                    .sign_message(
                        public_key,
                        Hash256::from_slice(&hex::decode(SIGNING_ROOT).unwrap())
                    )
                    .unwrap(),
                signature
            );
        };

        test_case(PUBLIC_KEY_1, EXPECTED_SIGNATURE_1);
        test_case(PUBLIC_KEY_2, EXPECTED_SIGNATURE_2);
        test_case(PUBLIC_KEY_3, EXPECTED_SIGNATURE_3);
    }
}
|
||||
10
remote_signer/backend/src/storage.rs
Normal file
10
remote_signer/backend/src/storage.rs
Normal file
@@ -0,0 +1,10 @@
|
||||
use crate::{BackendError, ZeroizeString};
|
||||
|
||||
/// The storage medium for the secret keys used by a `Backend`.
///
/// NOTE(review): the `'static + Clone + Send + Sync` bounds presumably
/// exist so a storage handle can be cloned into HTTP worker tasks —
/// confirm against the server wiring.
pub trait Storage: 'static + Clone + Send + Sync {
    /// Queries storage for the available keys to sign.
    fn get_keys(&self) -> Result<Vec<String>, BackendError>;

    /// Retrieves secret key from storage, using its public key as reference.
    fn get_secret_key(&self, input: &str) -> Result<ZeroizeString, BackendError>;
}
|
||||
181
remote_signer/backend/src/storage_raw_dir.rs
Normal file
181
remote_signer/backend/src/storage_raw_dir.rs
Normal file
@@ -0,0 +1,181 @@
|
||||
use crate::{BackendError, Storage, ZeroizeString, PUBLIC_KEY_REGEX};
|
||||
use std::fs::read_dir;
|
||||
use std::fs::File;
|
||||
use std::io::prelude::Read;
|
||||
use std::io::BufReader;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Storage backed by a plain directory: one file per key, named after
/// the hex-encoded public key and containing the hex-encoded secret key.
#[derive(Clone)]
pub struct StorageRawDir {
    // Directory checked at construction time (exists, is a dir, readable).
    path: PathBuf,
}
|
||||
|
||||
impl StorageRawDir {
|
||||
/// Initializes the storage with the given path, verifying
|
||||
/// whether it is a directory and if its available to the user.
|
||||
/// Does not list, nor verify the contents of the directory.
|
||||
pub fn new<P: AsRef<Path>>(path: P) -> Result<Self, String> {
|
||||
let path = path.as_ref();
|
||||
|
||||
if !path.exists() {
|
||||
return Err("Path does not exist.".to_string());
|
||||
}
|
||||
|
||||
if !path.is_dir() {
|
||||
return Err("Path is not a directory.".to_string());
|
||||
}
|
||||
|
||||
read_dir(path).map_err(|e| format!("{:?}", e.kind()))?;
|
||||
|
||||
Ok(Self {
|
||||
path: path.to_path_buf(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Storage for StorageRawDir {
|
||||
/// List all the files in the directory having a BLS public key name.
|
||||
/// This function DOES NOT check the contents of each file.
|
||||
fn get_keys(&self) -> Result<Vec<String>, BackendError> {
|
||||
let entries = read_dir(&self.path).map_err(BackendError::from)?;
|
||||
|
||||
// We are silently suppressing errors in this chain
|
||||
// because we only care about files actually passing these filters.
|
||||
let keys: Vec<String> = entries
|
||||
.filter_map(|entry| entry.ok())
|
||||
.filter(|entry| !entry.path().is_dir())
|
||||
.map(|entry| entry.file_name().into_string())
|
||||
.filter_map(|entry| entry.ok())
|
||||
.filter(|name| PUBLIC_KEY_REGEX.is_match(name))
|
||||
.collect();
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
/// Gets a requested secret key by their reference, its public key.
|
||||
/// This function DOES NOT check the contents of the retrieved file.
|
||||
fn get_secret_key(&self, input: &str) -> Result<ZeroizeString, BackendError> {
|
||||
let file = File::open(self.path.join(input)).map_err(|e| match e.kind() {
|
||||
std::io::ErrorKind::NotFound => BackendError::KeyNotFound(input.to_string()),
|
||||
_ => e.into(),
|
||||
})?;
|
||||
let mut buf_reader = BufReader::new(file);
|
||||
|
||||
let mut secret_key = String::new();
|
||||
buf_reader.read_to_string(&mut secret_key)?;
|
||||
|
||||
// Remove that `\n` without cloning.
|
||||
secret_key.pop();
|
||||
|
||||
Ok(ZeroizeString::from(secret_key))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod get_keys {
    //! Tests for `StorageRawDir::get_keys`.
    use crate::tests_commons::*;
    use helpers::*;

    #[test]
    fn problem_with_path() {
        let (storage, tmp_dir) = new_storage_with_tmp_dir();
        add_key_files(&tmp_dir);

        // All good and fancy, let's make the dir inaccessible now.
        set_permissions(tmp_dir.path(), 0o40311);

        let result = storage.get_keys();

        // Give permissions back, we want the tempdir to be deleted.
        set_permissions(tmp_dir.path(), 0o40755);

        assert_eq!(
            result.unwrap_err().to_string(),
            "Storage error: PermissionDenied"
        );
    }

    #[test]
    fn no_files_in_dir() {
        let (storage, _tmp_dir) = new_storage_with_tmp_dir();

        assert_eq!(storage.get_keys().unwrap().len(), 0);
    }

    #[test]
    fn no_files_in_dir_are_public_keys() {
        let (storage, tmp_dir) = new_storage_with_tmp_dir();
        add_sub_dirs(&tmp_dir);
        add_non_key_files(&tmp_dir);

        assert_eq!(storage.get_keys().unwrap().len(), 0);
    }

    #[test]
    fn not_all_files_have_public_key_names() {
        let (storage, tmp_dir) = new_storage_with_tmp_dir();
        add_sub_dirs(&tmp_dir);
        add_key_files(&tmp_dir);
        add_non_key_files(&tmp_dir);

        // Only the three actual key files should be listed.
        assert_eq!(storage.get_keys().unwrap().len(), 3);
    }

    #[test]
    fn all_files_do_have_public_key_names() {
        let (storage, tmp_dir) = new_storage_with_tmp_dir();
        add_key_files(&tmp_dir);

        assert_eq!(storage.get_keys().unwrap().len(), 3);
    }
}
|
||||
|
||||
#[cfg(test)]
mod get_secret_key {
    //! Tests for `StorageRawDir::get_secret_key`.
    use crate::tests_commons::*;
    use helpers::*;

    #[test]
    fn unaccessible_file() {
        let (storage, tmp_dir) = new_storage_with_tmp_dir();
        add_key_files(&tmp_dir);

        // Make both the directory and the key file unreadable.
        set_permissions(tmp_dir.path(), 0o40311);
        set_permissions(&tmp_dir.path().join(PUBLIC_KEY_1), 0o40311);

        let result = storage.get_secret_key(PUBLIC_KEY_1);

        // Restore permissions so the tempdir can be cleaned up on drop.
        set_permissions(tmp_dir.path(), 0o40755);
        set_permissions(&tmp_dir.path().join(PUBLIC_KEY_1), 0o40755);

        assert_eq!(
            result.unwrap_err().to_string(),
            "Storage error: PermissionDenied"
        );
    }

    #[test]
    fn key_does_not_exist() {
        let (storage, _tmp_dir) = new_storage_with_tmp_dir();

        assert_eq!(
            storage
                .get_secret_key(PUBLIC_KEY_1)
                .unwrap_err()
                .to_string(),
            format!("Key not found: {}", PUBLIC_KEY_1)
        );
    }

    #[test]
    fn happy_path() {
        let (storage, tmp_dir) = new_storage_with_tmp_dir();
        add_key_files(&tmp_dir);

        // The stored file contents (minus trailing newline) round-trip.
        assert_eq!(
            storage.get_secret_key(PUBLIC_KEY_1).unwrap().as_ref(),
            SECRET_KEY_1.as_bytes()
        );
    }
}
|
||||
123
remote_signer/backend/src/utils.rs
Normal file
123
remote_signer/backend/src/utils.rs
Normal file
@@ -0,0 +1,123 @@
|
||||
use crate::{BackendError, ZeroizeString};
|
||||
use bls::SecretKey;
|
||||
use hex::decode;
|
||||
use std::fmt::{Error, Write};
|
||||
use std::str;
|
||||
|
||||
// hex::encode only allows up to 32 bytes.
/// Encodes 96 bytes as a `0x`-prefixed, lowercase hex string.
pub fn bytes96_to_hex_string(data: [u8; 96]) -> Result<String, Error> {
    const HEX_DIGITS: &[u8; 16] = b"0123456789abcdef";

    // "0x" prefix plus two hex digits per byte.
    let mut encoded = String::with_capacity(2 + 96 * 2);
    encoded.push_str("0x");

    for &byte in &data {
        let high = HEX_DIGITS[usize::from(byte >> 4)];
        let low = HEX_DIGITS[usize::from(byte & 0x0f)];
        encoded.write_char(char::from(high))?;
        encoded.write_char(char::from(low))?;
    }

    Ok(encoded)
}
|
||||
|
||||
/// Validates the match as a BLS pair of the public and secret keys given,
|
||||
/// consuming the secret key parameter, and returning a deserialized SecretKey.
|
||||
pub fn validate_bls_pair(
|
||||
public_key: &str,
|
||||
secret_key: ZeroizeString,
|
||||
) -> Result<SecretKey, BackendError> {
|
||||
let secret_key: SecretKey = secret_key.into_bls_sk().map_err(|e| {
|
||||
BackendError::InvalidSecretKey(format!("public_key: {}; {}", public_key, e))
|
||||
})?;
|
||||
|
||||
let pk_param_as_bytes = decode(&public_key)
|
||||
.map_err(|e| BackendError::InvalidPublicKey(format!("{}; {}", public_key, e)))?;
|
||||
|
||||
if &secret_key.public_key().serialize()[..] != pk_param_as_bytes.as_slice() {
|
||||
return Err(BackendError::KeyMismatch(public_key.to_string()));
|
||||
}
|
||||
|
||||
Ok(secret_key)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod functions {
    //! Tests for the utils helpers.
    use super::*;
    use helpers::*;

    #[test]
    fn fn_bytes96_to_hex_string() {
        assert_eq!(
            bytes96_to_hex_string(EXPECTED_SIGNATURE_1_BYTES).unwrap(),
            EXPECTED_SIGNATURE_1
        );

        assert_eq!(
            bytes96_to_hex_string(EXPECTED_SIGNATURE_2_BYTES).unwrap(),
            EXPECTED_SIGNATURE_2
        );

        assert_eq!(
            bytes96_to_hex_string(EXPECTED_SIGNATURE_3_BYTES).unwrap(),
            EXPECTED_SIGNATURE_3
        );
    }

    #[test]
    fn fn_validate_bls_pair() {
        // A valid pair deserializes back to the expected secret key bytes.
        let test_ok_case = |pk: &str, sk: ZeroizeString, sk_bytes: &[u8; 32]| {
            let serialized_secret_key = validate_bls_pair(pk, sk).unwrap().serialize();
            assert_eq!(serialized_secret_key.as_bytes().to_vec(), sk_bytes.to_vec());
        };

        test_ok_case(
            PUBLIC_KEY_1,
            ZeroizeString::from(SECRET_KEY_1.to_string()),
            &SECRET_KEY_1_BYTES,
        );

        let test_error_case = |pk: &str, sk: ZeroizeString, expected_error: &str| {
            assert_eq!(
                validate_bls_pair(pk, sk).err().unwrap().to_string(),
                expected_error
            );
        };

        // Secret key is not valid hex.
        test_error_case(
            PUBLIC_KEY_2,
            ZeroizeString::from("TamperedKey%#$#%#$$&##00£$%$$£%$".to_string()),
            &format!(
                "Invalid secret key: public_key: {}; Invalid hex character: T at index 0",
                PUBLIC_KEY_2
            ),
        );

        // Secret key is valid hex, but the wrong length for BLS.
        test_error_case(
            PUBLIC_KEY_2,
            ZeroizeString::from("deadbeef".to_string()),
            &format!(
                "Invalid secret key: public_key: {}; InvalidSecretKeyLength {{ got: 4, expected: 32 }}",
                PUBLIC_KEY_2
            ),
        );

        // A malformed public-key parameter fails at the hex-decode step.
        let bad_pk_param = "not_validated_by_the_api_handler!";
        test_error_case(
            bad_pk_param,
            ZeroizeString::from(SECRET_KEY_1.to_string()),
            &format!("Invalid public key: {}; Odd number of digits", bad_pk_param),
        );

        // Valid keys that do not pair with each other.
        test_error_case(
            PUBLIC_KEY_1,
            ZeroizeString::from(SECRET_KEY_2.to_string()),
            &format!("Key mismatch: {}", PUBLIC_KEY_1),
        );

        test_error_case(
            PUBLIC_KEY_2,
            ZeroizeString::from(SECRET_KEY_3.to_string()),
            &format!("Key mismatch: {}", PUBLIC_KEY_2),
        );
    }
}
|
||||
222
remote_signer/backend/src/zeroize_string.rs
Normal file
222
remote_signer/backend/src/zeroize_string.rs
Normal file
@@ -0,0 +1,222 @@
|
||||
use bls::SecretKey;
|
||||
use std::str;
|
||||
use zeroize::Zeroize;
|
||||
|
||||
/// Provides a new-type wrapper around `String` that is zeroized on `Drop`.
///
/// Useful for ensuring that secret key memory is zeroed-out on drop.
// `#[zeroize(drop)]` makes the derived `Drop` impl overwrite the
// underlying buffer before the memory is released.
#[derive(Debug, Zeroize)]
#[zeroize(drop)]
pub struct ZeroizeString(String);
|
||||
|
||||
impl From<String> for ZeroizeString {
    // Takes ownership of `s`; no copy is made, so the original buffer
    // itself is the one zeroized on drop.
    fn from(s: String) -> Self {
        Self(s)
    }
}
|
||||
|
||||
impl AsRef<[u8]> for ZeroizeString {
    // Borrows the underlying UTF-8 bytes without copying.
    fn as_ref(&self) -> &[u8] {
        self.0.as_bytes()
    }
}
|
||||
|
||||
impl ZeroizeString {
|
||||
/// Consumes the ZeroizeString, attempting to return a BLS SecretKey.
|
||||
pub fn into_bls_sk(self) -> Result<SecretKey, String> {
|
||||
let mut decoded_bytes = hex_string_to_bytes(&self.0)?;
|
||||
|
||||
let secret_key = SecretKey::deserialize(&decoded_bytes).map_err(|e| format!("{:?}", e))?;
|
||||
decoded_bytes.zeroize();
|
||||
|
||||
Ok(secret_key)
|
||||
}
|
||||
}
|
||||
|
||||
// An alternative to `hex::decode`, to allow for more control of
// the objects created while decoding the secret key.
//
// Rejects odd-length input with `"Odd length"`, and any non-hex byte
// with `"Invalid hex character: <c> at index <i>"`.
fn hex_string_to_bytes(data: &str) -> Result<Vec<u8>, String> {
    if data.len() % 2 != 0 {
        return Err("Odd length".to_string());
    }

    let bytes = data.as_bytes();

    // Preallocate: the output is exactly half the input length.
    let mut decoded: Vec<u8> = Vec::with_capacity(bytes.len() / 2);
    for i in 0..bytes.len() / 2 {
        let high = val(&bytes[2 * i], 2 * i)?;
        let low = val(&bytes[2 * i + 1], 2 * i + 1)?;
        decoded.push(high << 4 | low);
    }

    Ok(decoded)
}

// Auxiliary function for `hex_string_to_bytes`: maps one ASCII hex digit
// to its numeric value, reporting the offending byte (rendered as a
// char) and its index on failure.
fn val(c: &u8, idx: usize) -> Result<u8, String> {
    match c {
        b'A'..=b'F' => Ok(c - b'A' + 10),
        b'a'..=b'f' => Ok(c - b'a' + 10),
        b'0'..=b'9' => Ok(c - b'0'),
        _ => Err(format!(
            "Invalid hex character: {} at index {}",
            *c as char, idx
        )),
    }
}
|
||||
|
||||
#[cfg(test)]
mod object {
    //! Tests of `ZeroizeString` and buffer zeroization behavior.
    use super::*;
    use helpers::*;
    use zeroize::Zeroize;

    #[test]
    fn v_u8_zeroized() {
        // Create from `hex_string_to_bytes`, and record the pointer to its buffer.
        let mut decoded_bytes = hex_string_to_bytes(&SECRET_KEY_1.to_string()).unwrap();
        let old_pointer = decoded_bytes.as_ptr() as usize;

        // Do something with the borrowed vector, and zeroize.
        let _ = SecretKey::deserialize(&decoded_bytes)
            .map_err(|e| format!("{:?}", e))
            .unwrap();
        decoded_bytes.zeroize();

        // Check it is pointing to the same buffer, and that it was deleted.
        assert_eq!(old_pointer as usize, decoded_bytes.as_ptr() as usize);
        assert!(decoded_bytes.is_empty());

        // Check if the underlying bytes were zeroized.
        // NOTE(review): this reads freed-ish memory through a raw pointer;
        // it relies on the allocator not having reused the buffer yet.
        for i in 0..SECRET_KEY_1.len() / 2 {
            unsafe {
                assert_eq!(*((old_pointer + i) as *const u8), 0);
            }
        }
    }

    #[test]
    fn fn_to_bls_sk() {
        let test_ok_case = |sk: &str, sk_b: &[u8]| {
            let z = ZeroizeString::from(sk.to_string());
            let sk: SecretKey = z.into_bls_sk().unwrap();
            assert_eq!(sk.serialize().as_bytes(), sk_b);
        };

        let test_error_case = |sk: &str, err_msg: &str| {
            let z = ZeroizeString::from(sk.to_string());
            let err = z.into_bls_sk().err();
            assert_eq!(err, Some(err_msg.to_string()));
        };

        test_ok_case(SECRET_KEY_1, &SECRET_KEY_1_BYTES);

        test_error_case("Trolololololo", "Odd length");
        test_error_case("Trololololol", "Invalid hex character: T at index 0");
        // Multi-byte UTF-8 input: the error reports the first raw byte.
        test_error_case(
            "そんなことないでしょうけどう",
            "Invalid hex character: ã at index 0",
        );
    }

    #[test]
    fn zeroized_after_drop() {
        let some_scope = |s: &str| -> usize {
            // Convert our literal into a String, then store the pointer
            // to the first byte of its slice.
            let s: String = s.to_string();
            let s_ptr = s.as_ptr();

            // Just to make sure that the pointer of the string is NOT
            // the same as the pointer of the underlying buffer.
            assert_ne!(&s as *const String as usize, s_ptr as usize);

            let z = ZeroizeString::from(s);

            // Get the pointer to the underlying buffer,
            // We want to make sure is the same as the received string literal.
            // That is, no copies of the contents.
            let ptr_to_buf = z.as_ref().as_ptr();
            assert_eq!(ptr_to_buf, s_ptr);

            // We exit this scope, returning to the caller the pointer to
            // the buffer, that we'll use to verify the zeroization.
            ptr_to_buf as usize
        };

        // Call the closure; the ZeroizeString is dropped when it returns.
        let ptr_to_buf = some_scope(SECRET_KEY_1);

        // Check if the underlying bytes were zeroized.
        // At this point the first half is already reclaimed and assigned,
        // so we will just examine the other half.
        for i in SECRET_KEY_1.len() / 2..SECRET_KEY_1.len() {
            unsafe {
                assert_eq!(*((ptr_to_buf + i) as *const u8), 0);
            }
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod functions {
    //! Tests for the private hex-decoding helpers.
    use super::*;
    use helpers::*;

    #[test]
    fn fn_hex_string_to_bytes() {
        // Odd-length inputs are rejected before any decoding.
        assert_eq!(
            hex_string_to_bytes(&"0aa".to_string()).err(),
            Some("Odd length".to_string())
        );

        // A `0x` prefix is NOT accepted by this decoder.
        assert_eq!(
            hex_string_to_bytes(&"0xdeadbeef".to_string()).err(),
            Some("Invalid hex character: x at index 1".to_string())
        );

        assert_eq!(
            hex_string_to_bytes(&"n00bn00b".to_string()).err(),
            Some("Invalid hex character: n at index 0".to_string())
        );

        assert_eq!(
            hex_string_to_bytes(&"abcdefgh".to_string()).err(),
            Some("Invalid hex character: g at index 6".to_string())
        );

        assert_eq!(
            hex_string_to_bytes(&SECRET_KEY_1).unwrap(),
            SECRET_KEY_1_BYTES
        );

        assert_eq!(
            hex_string_to_bytes(&PUBLIC_KEY_1).unwrap(),
            PUBLIC_KEY_1_BYTES.to_vec()
        );

        assert_eq!(
            hex_string_to_bytes(&SIGNING_ROOT).unwrap(),
            SIGNING_ROOT_BYTES.to_vec()
        );

        // Expected signatures carry a `0x` prefix, hence the `[2..]`.
        assert_eq!(
            hex_string_to_bytes(&EXPECTED_SIGNATURE_1[2..]).unwrap(),
            EXPECTED_SIGNATURE_1_BYTES.to_vec()
        );

        assert_eq!(
            hex_string_to_bytes(&EXPECTED_SIGNATURE_2[2..]).unwrap(),
            EXPECTED_SIGNATURE_2_BYTES.to_vec()
        );

        assert_eq!(
            hex_string_to_bytes(&EXPECTED_SIGNATURE_3[2..]).unwrap(),
            EXPECTED_SIGNATURE_3_BYTES.to_vec()
        );

        assert_eq!(
            hex_string_to_bytes(&"0a0b11".to_string()).unwrap(),
            vec![10, 11, 17]
        );
    }
}
|
||||
19
remote_signer/client/Cargo.toml
Normal file
19
remote_signer/client/Cargo.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
[package]
|
||||
name = "remote_signer_client"
|
||||
version = "0.2.0"
|
||||
authors = ["Herman Junge <herman@sigmaprime.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.3"
|
||||
client_backend = { path = "../backend", package = "remote_signer_backend" }
|
||||
environment = { path = "../../lighthouse/environment" }
|
||||
futures = "0.3.6"
|
||||
hyper = "0.13.8"
|
||||
lazy_static = "1.4.0"
|
||||
regex = "1.3.9"
|
||||
serde = { version = "1.0.116", features = ["derive"] }
|
||||
serde_json = "1.0.58"
|
||||
slog = "2.5.2"
|
||||
types = { path = "../../consensus/types" }
|
||||
task_executor = { path = "../../common/task_executor" }
|
||||
57
remote_signer/client/src/api_error.rs
Normal file
57
remote_signer/client/src/api_error.rs
Normal file
@@ -0,0 +1,57 @@
|
||||
use hyper::{Body, Response, StatusCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::to_string;
|
||||
use std::error::Error as StdError;
|
||||
|
||||
/// Errors returned by the HTTP API, each mapping to an HTTP status code
/// (see `ApiError::status_code`). The payload is the human-readable
/// description sent back to the client.
#[derive(PartialEq, Debug, Clone)]
pub enum ApiError {
    /// 500 Internal Server Error.
    ServerError(String),
    /// 501 Not Implemented.
    NotImplemented(String),
    /// 400 Bad Request.
    BadRequest(String),
    /// 404 Not Found.
    NotFound(String),
}
|
||||
|
||||
/// JSON body used for error responses: `{"error": "<description>"}`.
#[derive(Deserialize, Serialize)]
pub struct ApiErrorDesc {
    pub error: String,
}

/// Convenience alias for HTTP handler results.
pub type ApiResult = Result<Response<Body>, ApiError>;
|
||||
|
||||
impl ApiError {
    /// Consumes the error, returning the HTTP status code it maps to
    /// together with its description string.
    pub fn status_code(self) -> (StatusCode, String) {
        match self {
            ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc),
            ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc),
            ApiError::BadRequest(desc) => (StatusCode::BAD_REQUEST, desc),
            ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc),
        }
    }
}
|
||||
|
||||
impl Into<Response<Body>> for ApiError {
|
||||
fn into(self) -> Response<Body> {
|
||||
let (status_code, desc) = self.status_code();
|
||||
|
||||
let json_desc = to_string(&ApiErrorDesc { error: desc })
|
||||
.expect("The struct ApiErrorDesc should always serialize.");
|
||||
|
||||
Response::builder()
|
||||
.status(status_code)
|
||||
.body(Body::from(json_desc))
|
||||
.expect("Response should always be created.")
|
||||
}
|
||||
}
|
||||
|
||||
impl StdError for ApiError {
    // The default `cause`/`source` implementations already return `None`;
    // the previous explicit `cause` override (a deprecated method) was
    // redundant, so no overrides are needed here.
}
|
||||
|
||||
impl std::fmt::Display for ApiError {
    // Renders as `<STATUS>: "<description>"`. The clone is needed because
    // `status_code` consumes `self` while `fmt` only borrows it.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let status = self.clone().status_code();
        write!(f, "{:?}: {:?}", status.0, status.1)
    }
}
|
||||
18
remote_signer/client/src/api_response.rs
Normal file
18
remote_signer/client/src/api_response.rs
Normal file
@@ -0,0 +1,18 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Contains the response to the upcheck API.
#[derive(Serialize)]
pub struct UpcheckApiResponse {
    pub status: String,
}

/// Contains the response to the `get_keys` API.
#[derive(Deserialize, Serialize)]
pub struct KeysApiResponse {
    pub keys: Vec<String>,
}

/// Contains the response to the `sign_message` API.
#[derive(Deserialize, Serialize)]
pub struct SignatureApiResponse {
    pub signature: String,
}
|
||||
70
remote_signer/client/src/backend.rs
Normal file
70
remote_signer/client/src/backend.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
use crate::api_error::ApiError;
|
||||
use crate::api_response::{KeysApiResponse, SignatureApiResponse};
|
||||
use crate::rest_api::Context;
|
||||
use crate::signing_root::get_signing_root;
|
||||
use client_backend::{BackendError, Storage};
|
||||
use hyper::Request;
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use std::sync::Arc;
|
||||
use types::EthSpec;
|
||||
|
||||
lazy_static! {
    /// Extracts the second path segment as capture group 1,
    /// e.g. `/<endpoint>/<public-key>` -> `<public-key>`.
    static ref PUBLIC_KEY_FROM_PATH_REGEX: Regex = Regex::new(r"^/[^/]+/([^/]*)").unwrap();
}
|
||||
|
||||
/// HTTP handler to get the list of public keys in the backend.
|
||||
pub fn get_keys<E: EthSpec, S: Storage, U>(
|
||||
_: U,
|
||||
ctx: Arc<Context<E, S>>,
|
||||
) -> Result<KeysApiResponse, ApiError> {
|
||||
let keys = ctx
|
||||
.backend
|
||||
.get_keys()
|
||||
.map_err(|e| ApiError::ServerError(format!("{}", e)))?;
|
||||
|
||||
if keys.is_empty() {
|
||||
return Err(ApiError::NotFound("No keys found in storage.".to_string()));
|
||||
}
|
||||
|
||||
Ok(KeysApiResponse { keys })
|
||||
}
|
||||
|
||||
/// HTTP handler to sign a message with the requested key.
///
/// The public key is taken from the second segment of the request path;
/// backend failures are mapped onto 404 / 400 / 500 responses.
pub fn sign_message<E: EthSpec, S: Storage>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<E, S>>,
) -> Result<SignatureApiResponse, ApiError> {
    // Parse the request body and compute the signing root.
    let signing_root = get_signing_root::<E>(&req, ctx.spec.clone())?;

    // This public key parameter should have been validated by the router.
    // We are just going to extract it from the request.
    let path = req.uri().path().to_string();

    // Capture group 1 of PUBLIC_KEY_FROM_PATH_REGEX is the second path
    // segment, e.g. `/<endpoint>/<public-key>`.
    let rc = |path: &str| -> Result<String, String> {
        let caps = PUBLIC_KEY_FROM_PATH_REGEX.captures(path).ok_or("")?;
        let re_match = caps.get(1).ok_or("")?;
        Ok(re_match.as_str().to_string())
    };

    let public_key = rc(&path).map_err(|_| {
        ApiError::BadRequest(format!("Unable to get public key from path: {:?}", path))
    })?;

    // Map backend failures onto the appropriate HTTP status codes.
    match ctx.backend.sign_message(&public_key, signing_root) {
        Ok(signature) => Ok(SignatureApiResponse { signature }),

        Err(BackendError::KeyNotFound(_)) => {
            Err(ApiError::NotFound(format!("Key not found: {}", public_key)))
        }

        Err(BackendError::InvalidPublicKey(_)) => Err(ApiError::BadRequest(format!(
            "Invalid public key: {}",
            public_key
        ))),

        // Catches InvalidSecretKey, KeyMismatch and StorageError.
        Err(e) => Err(ApiError::ServerError(e.to_string())),
    }
}
|
||||
20
remote_signer/client/src/config.rs
Normal file
20
remote_signer/client/src/config.rs
Normal file
@@ -0,0 +1,20 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
/// HTTP REST API Configuration
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Config {
|
||||
/// The IPv4 address the REST API HTTP server will listen on.
|
||||
pub listen_address: Ipv4Addr,
|
||||
/// The port the REST API HTTP server will listen on.
|
||||
pub port: u16,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Config {
|
||||
listen_address: Ipv4Addr::new(127, 0, 0, 1),
|
||||
port: 9000,
|
||||
}
|
||||
}
|
||||
}
|
||||
113
remote_signer/client/src/handler.rs
Normal file
113
remote_signer/client/src/handler.rs
Normal file
@@ -0,0 +1,113 @@
|
||||
use crate::api_error::{ApiError, ApiResult};
|
||||
use crate::rest_api::Context;
|
||||
use hyper::{Body, Request, Response, StatusCode};
|
||||
use serde::Serialize;
|
||||
use std::sync::Arc;
|
||||
use types::EthSpec;
|
||||
|
||||
/// Provides a HTTP request handler with specific functionality.
|
||||
pub struct Handler<E: EthSpec, S: Send + Sync> {
|
||||
req: Request<()>,
|
||||
body: Body,
|
||||
ctx: Arc<Context<E, S>>,
|
||||
allow_body: bool,
|
||||
}
|
||||
|
||||
impl<E: EthSpec, S: 'static + Send + Sync> Handler<E, S> {
|
||||
/// Start handling a new request.
|
||||
pub fn new(req: Request<Body>, ctx: Arc<Context<E, S>>) -> Result<Self, ApiError> {
|
||||
let (req_parts, body) = req.into_parts();
|
||||
let req = Request::from_parts(req_parts, ());
|
||||
|
||||
Ok(Self {
|
||||
req,
|
||||
body,
|
||||
ctx,
|
||||
allow_body: false,
|
||||
})
|
||||
}
|
||||
|
||||
/// Return a simple static value.
|
||||
///
|
||||
/// Does not use the blocking executor.
|
||||
pub async fn static_value<V>(self, value: V) -> Result<HandledRequest<V>, ApiError> {
|
||||
// Always check and disallow a body for a static value.
|
||||
let _ = Self::get_body(self.body, false).await?;
|
||||
|
||||
Ok(HandledRequest { value })
|
||||
}
|
||||
|
||||
/// The default behaviour is to return an error if any body is supplied in the request. Calling
|
||||
/// this function disables that error.
|
||||
pub fn allow_body(mut self) -> Self {
|
||||
self.allow_body = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Spawns `func` on the blocking executor.
|
||||
///
|
||||
/// This method is suitable for handling long-running or intensive tasks.
|
||||
pub async fn in_blocking_task<F, V>(self, func: F) -> Result<HandledRequest<V>, ApiError>
|
||||
where
|
||||
V: Send + Sync + 'static,
|
||||
F: Fn(Request<Vec<u8>>, Arc<Context<E, S>>) -> Result<V, ApiError> + Send + Sync + 'static,
|
||||
{
|
||||
let ctx = self.ctx;
|
||||
let executor = ctx.executor.clone();
|
||||
let body = Self::get_body(self.body, self.allow_body).await?;
|
||||
let (req_parts, _) = self.req.into_parts();
|
||||
let req = Request::from_parts(req_parts, body);
|
||||
|
||||
let value = executor
|
||||
.runtime_handle()
|
||||
.spawn_blocking(move || func(req, ctx))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ApiError::ServerError(format!(
|
||||
"Failed to get blocking join handle: {}",
|
||||
e.to_string()
|
||||
))
|
||||
})??;
|
||||
|
||||
Ok(HandledRequest { value })
|
||||
}
|
||||
|
||||
/// Downloads the bytes for `body`.
|
||||
async fn get_body(body: Body, allow_body: bool) -> Result<Vec<u8>, ApiError> {
|
||||
let bytes = hyper::body::to_bytes(body)
|
||||
.await
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?;
|
||||
|
||||
if !allow_body && !bytes[..].is_empty() {
|
||||
Err(ApiError::BadRequest(
|
||||
"The request body must be empty".to_string(),
|
||||
))
|
||||
} else {
|
||||
Ok(bytes.into_iter().collect())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A request that has been "handled" and now a result (`value`) needs to be serialized and
|
||||
/// returned.
|
||||
pub struct HandledRequest<V> {
|
||||
value: V,
|
||||
}
|
||||
|
||||
impl<V: Serialize> HandledRequest<V> {
|
||||
/// Suitable for items which only implement `serde`.
|
||||
pub fn serde_encodings(self) -> ApiResult {
|
||||
let body = Body::from(serde_json::to_string(&self.value).map_err(|e| {
|
||||
ApiError::ServerError(format!(
|
||||
"Unable to serialize response body as JSON: {:?}",
|
||||
e
|
||||
))
|
||||
})?);
|
||||
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("content-type", "application/json")
|
||||
.body(body)
|
||||
.map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e)))
|
||||
}
|
||||
}
|
||||
58
remote_signer/client/src/lib.rs
Normal file
58
remote_signer/client/src/lib.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
pub mod api_error;
|
||||
pub mod api_response;
|
||||
|
||||
mod backend;
|
||||
mod config;
|
||||
mod handler;
|
||||
mod rest_api;
|
||||
mod router;
|
||||
mod signing_root;
|
||||
mod upcheck;
|
||||
|
||||
use clap::ArgMatches;
|
||||
use client_backend::Backend;
|
||||
use config::Config;
|
||||
use environment::RuntimeContext;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::net::SocketAddr;
|
||||
use types::EthSpec;
|
||||
|
||||
pub struct Client {
|
||||
listening_address: SocketAddr,
|
||||
}
|
||||
|
||||
impl Client {
|
||||
pub async fn new<E: EthSpec>(
|
||||
context: RuntimeContext<E>,
|
||||
cli_args: &ArgMatches<'_>,
|
||||
) -> Result<Self, String> {
|
||||
let log = context.executor.log();
|
||||
|
||||
let mut config = Config::default();
|
||||
|
||||
if let Some(address) = cli_args.value_of("listen-address") {
|
||||
config.listen_address = address
|
||||
.parse::<Ipv4Addr>()
|
||||
.map_err(|_| "listen-address is not a valid IPv4 address.")?;
|
||||
}
|
||||
|
||||
if let Some(port) = cli_args.value_of("port") {
|
||||
config.port = port
|
||||
.parse::<u16>()
|
||||
.map_err(|_| "port is not a valid u16.")?;
|
||||
}
|
||||
|
||||
let backend = Backend::new(cli_args, log)?;
|
||||
|
||||
// It is useful to get the listening address if you have set up your port to be 0.
|
||||
let listening_address =
|
||||
rest_api::start_server(context.executor, config, backend, context.eth_spec_instance)
|
||||
.map_err(|e| format!("Failed to start HTTP API: {:?}", e))?;
|
||||
|
||||
Ok(Self { listening_address })
|
||||
}
|
||||
|
||||
pub fn get_listening_address(&self) -> SocketAddr {
|
||||
self.listening_address
|
||||
}
|
||||
}
|
||||
91
remote_signer/client/src/rest_api.rs
Normal file
91
remote_signer/client/src/rest_api.rs
Normal file
@@ -0,0 +1,91 @@
|
||||
use crate::config::Config;
|
||||
use client_backend::{Backend, Storage};
|
||||
use futures::future::TryFutureExt;
|
||||
use hyper::server::conn::AddrStream;
|
||||
use hyper::service::{make_service_fn, service_fn};
|
||||
use hyper::{Body, Request, Server};
|
||||
use slog::{info, warn};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use task_executor::TaskExecutor;
|
||||
use types::{ChainSpec, EthSpec};
|
||||
|
||||
pub struct Context<E: EthSpec, S: Send + Sync> {
|
||||
pub config: Config,
|
||||
pub executor: TaskExecutor,
|
||||
pub log: slog::Logger,
|
||||
pub backend: Backend<S>,
|
||||
pub eth_spec_instance: E,
|
||||
pub spec: ChainSpec,
|
||||
}
|
||||
|
||||
pub fn start_server<E: EthSpec, S: Storage>(
|
||||
executor: TaskExecutor,
|
||||
config: Config,
|
||||
backend: Backend<S>,
|
||||
eth_spec_instance: E,
|
||||
) -> Result<SocketAddr, hyper::Error> {
|
||||
let log = executor.log();
|
||||
|
||||
let context = Arc::new(Context {
|
||||
executor: executor.clone(),
|
||||
log: log.clone(),
|
||||
config: config.clone(),
|
||||
backend,
|
||||
eth_spec_instance,
|
||||
spec: E::default_spec(),
|
||||
});
|
||||
|
||||
// Define the function that will build the request handler.
|
||||
let make_service = make_service_fn(move |_socket: &AddrStream| {
|
||||
let ctx = context.clone();
|
||||
|
||||
async move {
|
||||
Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| {
|
||||
crate::router::on_http_request(req, ctx.clone())
|
||||
}))
|
||||
}
|
||||
});
|
||||
|
||||
let bind_addr = (config.listen_address, config.port).into();
|
||||
let server = Server::bind(&bind_addr).serve(make_service);
|
||||
|
||||
// Determine the address the server is actually listening on.
|
||||
//
|
||||
// This may be different to `bind_addr` if bind port was 0 (this allows the OS to choose a free
|
||||
// port).
|
||||
let actual_listen_addr = server.local_addr();
|
||||
|
||||
// Build a channel to kill the HTTP server.
|
||||
let exit = executor.exit();
|
||||
let inner_log = log.clone();
|
||||
let server_exit = async move {
|
||||
let _ = exit.await;
|
||||
info!(inner_log, "HTTP service shutdown");
|
||||
};
|
||||
|
||||
// Configure the `hyper` server to gracefully shutdown when the shutdown channel is triggered.
|
||||
let inner_log = log.clone();
|
||||
let server_future = server
|
||||
.with_graceful_shutdown(async {
|
||||
server_exit.await;
|
||||
})
|
||||
.map_err(move |e| {
|
||||
warn!(
|
||||
inner_log,
|
||||
"HTTP server failed to start, Unable to bind"; "address" => format!("{:?}", e)
|
||||
)
|
||||
})
|
||||
.unwrap_or_else(|_| ());
|
||||
|
||||
info!(
|
||||
log,
|
||||
"HTTP API started";
|
||||
"address" => format!("{}", actual_listen_addr.ip()),
|
||||
"port" => actual_listen_addr.port(),
|
||||
);
|
||||
|
||||
executor.spawn_without_exit(server_future, "http");
|
||||
|
||||
Ok(actual_listen_addr)
|
||||
}
|
||||
101
remote_signer/client/src/router.rs
Normal file
101
remote_signer/client/src/router.rs
Normal file
@@ -0,0 +1,101 @@
|
||||
use crate::api_error::ApiError;
|
||||
use crate::backend::{get_keys, sign_message};
|
||||
use crate::handler::Handler;
|
||||
use crate::rest_api::Context;
|
||||
use crate::upcheck::upcheck;
|
||||
use client_backend::Storage;
|
||||
use hyper::{Body, Method, Request, Response};
|
||||
use slog::debug;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use types::EthSpec;
|
||||
|
||||
pub async fn on_http_request<E: EthSpec, S: Storage>(
|
||||
req: Request<Body>,
|
||||
ctx: Arc<Context<E, S>>,
|
||||
) -> Result<Response<Body>, ApiError> {
|
||||
let path = req.uri().path().to_string();
|
||||
let received_instant = Instant::now();
|
||||
let log = ctx.log.clone();
|
||||
|
||||
match route(req, ctx).await {
|
||||
Ok(response) => {
|
||||
debug!(
|
||||
log,
|
||||
"HTTP API request successful";
|
||||
"path" => path,
|
||||
"duration_ms" => Instant::now().duration_since(received_instant).as_millis()
|
||||
);
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
Err(error) => {
|
||||
debug!(
|
||||
log,
|
||||
"HTTP API request failure";
|
||||
"path" => path,
|
||||
"duration_ms" => Instant::now().duration_since(received_instant).as_millis()
|
||||
);
|
||||
Ok(error.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn route<E: EthSpec, S: Storage>(
|
||||
req: Request<Body>,
|
||||
ctx: Arc<Context<E, S>>,
|
||||
) -> Result<Response<Body>, ApiError> {
|
||||
let path = req.uri().path().to_string();
|
||||
let method = req.method().clone();
|
||||
let ctx = ctx.clone();
|
||||
let handler = Handler::new(req, ctx)?;
|
||||
|
||||
match (method, path.as_ref()) {
|
||||
(Method::GET, "/upcheck") => handler.static_value(upcheck()).await?.serde_encodings(),
|
||||
|
||||
(Method::GET, "/keys") => handler.in_blocking_task(get_keys).await?.serde_encodings(),
|
||||
|
||||
(Method::POST, _) => route_post(&path, handler).await,
|
||||
|
||||
_ => Err(ApiError::NotFound(
|
||||
"Request path and/or method not found.".to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Responds to all the POST requests.
|
||||
///
|
||||
/// Should be deprecated once a better routing library is used, such as `warp`
|
||||
async fn route_post<E: EthSpec, S: Storage>(
|
||||
path: &str,
|
||||
handler: Handler<E, S>,
|
||||
) -> Result<Response<Body>, ApiError> {
|
||||
let mut path_segments = path[1..].trim_end_matches('/').split('/');
|
||||
|
||||
match path_segments.next() {
|
||||
Some("sign") => {
|
||||
let path_segments_count = path_segments.clone().count();
|
||||
|
||||
if path_segments_count == 0 {
|
||||
return Err(ApiError::BadRequest(
|
||||
"Parameter public_key needed in route /sign/:public_key".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if path_segments_count > 1 {
|
||||
return Err(ApiError::BadRequest(
|
||||
"Only one path segment is allowed after /sign".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
handler
|
||||
.allow_body()
|
||||
.in_blocking_task(sign_message)
|
||||
.await?
|
||||
.serde_encodings()
|
||||
}
|
||||
_ => Err(ApiError::NotFound(
|
||||
"Request path and/or method not found.".to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
78
remote_signer/client/src/signing_root.rs
Normal file
78
remote_signer/client/src/signing_root.rs
Normal file
@@ -0,0 +1,78 @@
|
||||
use crate::api_error::ApiError;
|
||||
use serde::Deserialize;
|
||||
use serde_json::{from_value, Value};
|
||||
|
||||
use types::{
|
||||
AttestationData, BeaconBlock, ChainSpec, Domain, Epoch, EthSpec, Fork, Hash256, SignedRoot,
|
||||
};
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct SignMessageRequestBody {
|
||||
/// BLS Signature domain.
|
||||
/// Supporting `beacon_proposer`, `beacon_attester`, and `randao`.
|
||||
/// As defined in
|
||||
/// * https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/beacon-chain.md#domain-types
|
||||
/// * in lowercase, omitting the `domain` prefix.
|
||||
bls_domain: String,
|
||||
|
||||
/// Supporting `block`, `attestation`, and `epoch`.
|
||||
/// (In LH these are `BeaconBlock`, `AttestationData`, and `Epoch`).
|
||||
/// As defined in
|
||||
/// * https://github.com/ethereum/eth2.0-APIs/blob/master/types/block.yaml
|
||||
/// * https://github.com/ethereum/eth2.0-APIs/blob/master/types/attestation.yaml
|
||||
/// * https://github.com/ethereum/eth2.0-APIs/blob/master/types/misc.yaml
|
||||
data: Value,
|
||||
|
||||
/// A `Fork` object containing previous and current versions.
|
||||
/// As defined in
|
||||
/// * https://github.com/ethereum/eth2.0-APIs/blob/master/types/misc.yaml
|
||||
fork: Fork,
|
||||
|
||||
/// A `Hash256` for domain separation and chain versioning.
|
||||
genesis_validators_root: Hash256,
|
||||
}
|
||||
|
||||
pub fn get_signing_root<E: EthSpec>(
|
||||
req: &hyper::Request<std::vec::Vec<u8>>,
|
||||
spec: ChainSpec,
|
||||
) -> Result<Hash256, ApiError> {
|
||||
let body: SignMessageRequestBody = serde_json::from_slice(req.body()).map_err(|e| {
|
||||
ApiError::BadRequest(format!("Unable to parse body message from JSON: {:?}", e))
|
||||
})?;
|
||||
|
||||
let get_domain = |epoch, bls_domain| {
|
||||
spec.get_domain(epoch, bls_domain, &body.fork, body.genesis_validators_root)
|
||||
};
|
||||
|
||||
match body.bls_domain.as_str() {
|
||||
"beacon_proposer" => {
|
||||
let block = from_value::<BeaconBlock<E>>(body.data.clone()).map_err(|e| {
|
||||
ApiError::BadRequest(format!("Unable to parse block from JSON: {:?}", e))
|
||||
})?;
|
||||
|
||||
Ok(block.signing_root(get_domain(block.epoch(), Domain::BeaconProposer)))
|
||||
}
|
||||
|
||||
"beacon_attester" => {
|
||||
let attestation = from_value::<AttestationData>(body.data.clone()).map_err(|e| {
|
||||
ApiError::BadRequest(format!("Unable to parse attestation from JSON: {:?}", e))
|
||||
})?;
|
||||
|
||||
Ok(attestation
|
||||
.signing_root(get_domain(attestation.target.epoch, Domain::BeaconAttester)))
|
||||
}
|
||||
|
||||
"randao" => {
|
||||
let epoch = from_value::<Epoch>(body.data.clone()).map_err(|e| {
|
||||
ApiError::BadRequest(format!("Unable to parse attestation from JSON: {:?}", e))
|
||||
})?;
|
||||
|
||||
Ok(epoch.signing_root(get_domain(epoch, Domain::Randao)))
|
||||
}
|
||||
|
||||
s => Err(ApiError::BadRequest(format!(
|
||||
"Unsupported bls_domain parameter: {}",
|
||||
s
|
||||
))),
|
||||
}
|
||||
}
|
||||
7
remote_signer/client/src/upcheck.rs
Normal file
7
remote_signer/client/src/upcheck.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
use crate::api_response::UpcheckApiResponse;
|
||||
|
||||
pub fn upcheck() -> UpcheckApiResponse {
|
||||
UpcheckApiResponse {
|
||||
status: "OK".to_string(),
|
||||
}
|
||||
}
|
||||
37
remote_signer/src/cli.rs
Normal file
37
remote_signer/src/cli.rs
Normal file
@@ -0,0 +1,37 @@
|
||||
use clap::{App, Arg};
|
||||
|
||||
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
// Parse the CLI parameters.
|
||||
App::new("remote_signer")
|
||||
.visible_alias("rs")
|
||||
.author("Sigma Prime <contact@sigmaprime.io>")
|
||||
.setting(clap::AppSettings::ColoredHelp)
|
||||
.about(
|
||||
"Simple HTTP BLS signer service. \
|
||||
This service is designed to be consumed by Ethereum 2.0 clients, \
|
||||
looking for a more secure avenue to store their BLS12-381 secret keys, \
|
||||
while running their validators in more permisive and/or scalable environments.",
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("storage-raw-dir")
|
||||
.long("storage-raw-dir")
|
||||
.value_name("DIR")
|
||||
.help("Data directory for secret keys in raw files."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("listen-address")
|
||||
.long("listen-address")
|
||||
.value_name("ADDRESS")
|
||||
.help("The address to listen for TCP connections.")
|
||||
.default_value("0.0.0.0")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("port")
|
||||
.long("port")
|
||||
.value_name("PORT")
|
||||
.help("The TCP port to listen on.")
|
||||
.default_value("9000")
|
||||
.takes_value(true),
|
||||
)
|
||||
}
|
||||
34
remote_signer/src/lib.rs
Normal file
34
remote_signer/src/lib.rs
Normal file
@@ -0,0 +1,34 @@
|
||||
mod cli;
|
||||
|
||||
use clap::ArgMatches;
|
||||
use client::Client;
|
||||
use environment::Environment;
|
||||
use slog::info;
|
||||
use types::EthSpec;
|
||||
|
||||
pub use cli::cli_app;
|
||||
|
||||
pub fn run<E: EthSpec>(
|
||||
environment: &mut Environment<E>,
|
||||
matches: &ArgMatches,
|
||||
) -> Result<(), String> {
|
||||
let context = environment.core_context();
|
||||
let exit = context.executor.exit();
|
||||
|
||||
info!(
|
||||
context.log(),
|
||||
"Starting remote signer";
|
||||
);
|
||||
|
||||
let client = environment
|
||||
.runtime()
|
||||
.block_on(Client::new(context, matches))
|
||||
.map_err(|e| format!("Failed to init Rest API: {}", e))?;
|
||||
|
||||
environment.runtime().spawn(async move {
|
||||
exit.await;
|
||||
drop(client);
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
84
remote_signer/tests/get_keys.rs
Normal file
84
remote_signer/tests/get_keys.rs
Normal file
@@ -0,0 +1,84 @@
|
||||
mod get_keys {
|
||||
use client::api_response::KeysApiResponse;
|
||||
use helpers::*;
|
||||
|
||||
fn assert_ok(resp: ApiTestResponse, expected_keys_len: usize) {
|
||||
assert_eq!(resp.status, 200);
|
||||
assert_eq!(
|
||||
serde_json::from_value::<KeysApiResponse>(resp.json)
|
||||
.unwrap()
|
||||
.keys
|
||||
.len(),
|
||||
expected_keys_len
|
||||
);
|
||||
}
|
||||
|
||||
fn assert_error(resp: ApiTestResponse, http_status: u16, error_msg: &str) {
|
||||
assert_eq!(resp.status, http_status);
|
||||
assert_eq!(resp.json["error"], error_msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn all_files_in_dir_are_public_keys() {
|
||||
let (test_signer, tmp_dir) = set_up_api_test_signer_raw_dir();
|
||||
add_key_files(&tmp_dir);
|
||||
|
||||
let url = format!("{}/keys", test_signer.address);
|
||||
|
||||
let resp = http_get(&url);
|
||||
assert_ok(resp, 3);
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn some_files_in_dir_are_public_keys() {
|
||||
let (test_signer, tmp_dir) = set_up_api_test_signer_raw_dir();
|
||||
add_sub_dirs(&tmp_dir);
|
||||
add_key_files(&tmp_dir);
|
||||
add_non_key_files(&tmp_dir);
|
||||
|
||||
let url = format!("{}/keys", test_signer.address);
|
||||
|
||||
let resp = http_get(&url);
|
||||
assert_ok(resp, 3);
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_files_in_dir_are_public_keys() {
|
||||
let (test_signer, tmp_dir) = set_up_api_test_signer_raw_dir();
|
||||
add_sub_dirs(&tmp_dir);
|
||||
add_non_key_files(&tmp_dir);
|
||||
|
||||
let url = format!("{}/keys", test_signer.address);
|
||||
|
||||
let resp = http_get(&url);
|
||||
assert_error(resp, 404, "No keys found in storage.");
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn directory_failure() {
|
||||
let (test_signer, tmp_dir) = set_up_api_test_signer_raw_dir();
|
||||
add_sub_dirs(&tmp_dir);
|
||||
add_key_files(&tmp_dir);
|
||||
add_non_key_files(&tmp_dir);
|
||||
|
||||
// Somebody tripped over a wire.
|
||||
set_permissions(tmp_dir.path(), 0o40311);
|
||||
|
||||
let url = format!("{}/keys", test_signer.address);
|
||||
|
||||
let resp = http_get(&url);
|
||||
|
||||
// Be able to delete the tempdir afterward, regardless of this test result.
|
||||
set_permissions(tmp_dir.path(), 0o40755);
|
||||
|
||||
assert_error(resp, 500, "Storage error: PermissionDenied");
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
}
|
||||
402
remote_signer/tests/sign.rs
Normal file
402
remote_signer/tests/sign.rs
Normal file
@@ -0,0 +1,402 @@
|
||||
mod sign {
|
||||
use helpers::*;
|
||||
|
||||
#[test]
|
||||
fn additional_field() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let url = format!("{}/sign/{}", test_signer.address, PUBLIC_KEY_1);
|
||||
|
||||
let test_block_body = get_test_block_body(0xc137).replace(
|
||||
",\"genesis_validators_root\":\"0x000000000000000000000000000000000000000000000000000000000000c137\"",
|
||||
",\"genesis_validators_root\":\"0x000000000000000000000000000000000000000000000000000000000000c137\", \"foo\":\"bar\"",
|
||||
);
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
assert_sign_ok(response, HAPPY_PATH_BLOCK_SIGNATURE_C137);
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn storage_error() {
|
||||
let (test_signer, tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let test_block_body = get_test_block_body(0xc137);
|
||||
set_permissions(tmp_dir.path(), 0o40311);
|
||||
set_permissions(&tmp_dir.path().join(PUBLIC_KEY_1), 0o40311);
|
||||
|
||||
let url = format!("{}/sign/{}", test_signer.address, PUBLIC_KEY_1);
|
||||
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
set_permissions(tmp_dir.path(), 0o40755);
|
||||
set_permissions(&tmp_dir.path().join(PUBLIC_KEY_1), 0o40755);
|
||||
|
||||
assert_sign_error(response, 500, "Storage error: PermissionDenied");
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_public_key_in_path() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let test_block_body = get_test_block_body(0xc137);
|
||||
|
||||
let testcase = |url: String| {
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
assert_sign_error(
|
||||
response,
|
||||
400,
|
||||
"Parameter public_key needed in route /sign/:public_key",
|
||||
);
|
||||
};
|
||||
|
||||
testcase(format!("{}/sign/", test_signer.address));
|
||||
testcase(format!("{}/sign", test_signer.address));
|
||||
testcase(format!("{}/sign//", test_signer.address));
|
||||
testcase(format!("{}/sign///", test_signer.address));
|
||||
testcase(format!("{}/sign/?'or 1 = 1 --", test_signer.address));
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn additional_path_segments() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let test_block_body = get_test_block_body(0xc137);
|
||||
|
||||
let testcase = |url: String| {
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
assert_sign_error(
|
||||
response,
|
||||
400,
|
||||
"Only one path segment is allowed after /sign",
|
||||
);
|
||||
};
|
||||
|
||||
testcase(format!("{}/sign/this/receipt", test_signer.address));
|
||||
testcase(format!("{}/sign/this/receipt/please", test_signer.address));
|
||||
testcase(format!("{}/sign/this/receipt/please?", test_signer.address));
|
||||
testcase(format!(
|
||||
"{}/sign//{}/valid/pk",
|
||||
test_signer.address, PUBLIC_KEY_1
|
||||
));
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_public_key() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let test_block_body = get_test_block_body(0xc137);
|
||||
|
||||
let testcase = |url: String, expected_err: &str| {
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
assert_sign_error(response, 400, expected_err);
|
||||
};
|
||||
|
||||
testcase(
|
||||
format!("{}/sign/{}", test_signer.address, "ScottBakula"),
|
||||
"Invalid public key: ScottBakula",
|
||||
);
|
||||
testcase(
|
||||
format!("{}/sign/{}", test_signer.address, "deadbeef"),
|
||||
"Invalid public key: deadbeef",
|
||||
);
|
||||
testcase(
|
||||
format!("{}/sign/{}", test_signer.address, SILLY_FILE_NAME_1),
|
||||
&format!("Invalid public key: {}", SILLY_FILE_NAME_1),
|
||||
);
|
||||
testcase(
|
||||
format!("{}/sign/{}", test_signer.address, SILLY_FILE_NAME_1),
|
||||
&format!("Invalid public key: {}", SILLY_FILE_NAME_1),
|
||||
);
|
||||
testcase(
|
||||
format!("{}/sign/0x{}", test_signer.address, PUBLIC_KEY_1),
|
||||
&format!("Invalid public key: 0x{}", PUBLIC_KEY_1),
|
||||
);
|
||||
testcase(
|
||||
format!("{}/sign/{}55", test_signer.address, PUBLIC_KEY_1),
|
||||
&format!("Invalid public key: {}55", PUBLIC_KEY_1),
|
||||
);
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn key_not_found() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let url = format!("{}/sign/{}", test_signer.address, ABSENT_PUBLIC_KEY);
|
||||
let test_block_body = get_test_block_body(0xc137);
|
||||
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
assert_sign_error(
|
||||
response,
|
||||
404,
|
||||
&format!("Key not found: {}", ABSENT_PUBLIC_KEY),
|
||||
);
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_secret_key() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let url = format!(
|
||||
"{}/sign/{}",
|
||||
test_signer.address, PUBLIC_KEY_FOR_INVALID_SECRET_KEY
|
||||
);
|
||||
let test_block_body = get_test_block_body(0xc137);
|
||||
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
assert_sign_error(
|
||||
response,
|
||||
500,
|
||||
&format!(
|
||||
"Invalid secret key: public_key: {}; Invalid hex character: W at index 0",
|
||||
PUBLIC_KEY_FOR_INVALID_SECRET_KEY
|
||||
),
|
||||
);
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn key_mismatch() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let url = format!("{}/sign/{}", test_signer.address, MISMATCHED_PUBLIC_KEY);
|
||||
let test_block_body = get_test_block_body(0xc137);
|
||||
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
assert_sign_error(
|
||||
response,
|
||||
500,
|
||||
&format!("Key mismatch: {}", MISMATCHED_PUBLIC_KEY),
|
||||
);
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_json() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let url = format!("{}/sign/{}", test_signer.address, PUBLIC_KEY_1);
|
||||
|
||||
let testcase = |custom_body: &str, expected_err: &str| {
|
||||
let response = http_post_custom_body(&url, custom_body);
|
||||
assert_sign_error(response, 400, expected_err);
|
||||
};
|
||||
|
||||
testcase(
|
||||
"Trolololololo",
|
||||
"Unable to parse body message from JSON: Error(\"expected value\", line: 1, column: 1)",
|
||||
);
|
||||
testcase(
|
||||
"{\"bls_domain\"}",
|
||||
"Unable to parse body message from JSON: Error(\"expected `:`\", line: 1, column: 14)",
|
||||
);
|
||||
testcase(
|
||||
"{\"bls_domain\":}",
|
||||
"Unable to parse body message from JSON: Error(\"expected value\", line: 1, column: 15)",
|
||||
);
|
||||
|
||||
testcase(
|
||||
"{\"bls_domain\":\"}",
|
||||
"Unable to parse body message from JSON: Error(\"EOF while parsing a string\", line: 1, column: 16)",
|
||||
);
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_field_bls_domain() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let url = format!("{}/sign/{}", test_signer.address, PUBLIC_KEY_1);
|
||||
|
||||
let testcase = |json_patch, expected_err| {
|
||||
let test_block_body = get_test_block_body(0xc137).replace(
|
||||
"\"bls_domain\":\"beacon_proposer\"",
|
||||
&format!("\"bls_domain\":{}", json_patch),
|
||||
);
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
assert_sign_error(response, 400, expected_err);
|
||||
};
|
||||
|
||||
testcase("\"blah\"", "Unsupported bls_domain parameter: blah");
|
||||
testcase("\"domain\"", "Unsupported bls_domain parameter: domain");
|
||||
testcase("\"\"", "Unsupported bls_domain parameter: ");
|
||||
testcase("", "Unable to parse body message from JSON: Error(\"expected value\", line: 1, column: 15)");
|
||||
testcase("1", "Unable to parse body message from JSON: Error(\"invalid type: integer `1`, expected a string\", line: 1, column: 15)");
|
||||
testcase("true", "Unable to parse body message from JSON: Error(\"invalid type: boolean `true`, expected a string\", line: 1, column: 18)");
|
||||
testcase("{\"cats\":\"3\"}", "Unable to parse body message from JSON: Error(\"invalid type: map, expected a string\", line: 1, column: 14)");
|
||||
testcase("[\"a\"]", "Unable to parse body message from JSON: Error(\"invalid type: sequence, expected a string\", line: 1, column: 14)");
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_field_bls_domain() {
|
||||
let (test_signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
|
||||
let url = format!("{}/sign/{}", test_signer.address, PUBLIC_KEY_1);
|
||||
|
||||
let test_block_body =
|
||||
get_test_block_body(0xc137).replace("\"bls_domain\":\"beacon_proposer\",", "");
|
||||
let response = http_post_custom_body(&url, &test_block_body);
|
||||
assert_sign_error(response, 400, "Unable to parse body message from JSON: Error(\"missing field `bls_domain`\", line: 1, column: 237203)");
|
||||
|
||||
test_signer.shutdown();
|
||||
}
|
||||
|
||||
/// Exercises malformed variants of the `fork` object in a sign request:
/// each case swaps the known-good `fork` JSON for a broken one and expects
/// HTTP 400 with the exact serde/deserializer error message.
#[test]
fn invalid_field_fork() {
    let (signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
    let sign_url = format!("{}/sign/{}", signer.address, PUBLIC_KEY_1);

    // The well-formed `fork` object that every case below replaces.
    let good_fork = "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"0x02020202\",\"epoch\":\"1545\"},";

    // (broken fork JSON, expected error message) pairs.
    let cases: &[(&str, &str)] = &[
        (
            "\"fork\":{\"current_version\":\"0x02020202\",\"epoch\":\"1545\"},",
            "Unable to parse body message from JSON: Error(\"missing field `previous_version`\", line: 1, column: 237106)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0x01010101\",\"epoch\":\"1545\"},",
            "Unable to parse body message from JSON: Error(\"missing field `current_version`\", line: 1, column: 237107)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"0x02020202\",",
            "Unable to parse body message from JSON: Error(\"missing field `epoch`\", line: 1, column: 237218)",
        ),
        (
            "\"fork\":{\"previous_version\":\"INVALID_VALUE\",\"current_version\":\"0x02020202\",\"epoch\":\"1545\"},",
            "Unable to parse body message from JSON: Error(\"missing 0x prefix\", line: 1, column: 237094)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0xINVALID_VALUE\",\"current_version\":\"0x02020202\",\"epoch\":\"1545\"},",
            "Unable to parse body message from JSON: Error(\"invalid hex (OddLength)\", line: 1, column: 237096)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0xINVALID_VALUE_\",\"current_version\":\"0x02020202\",\"epoch\":\"1545\"},",
            "Unable to parse body message from JSON: Error(\"invalid hex (InvalidHexCharacter { c: \\\'I\\\', index: 0 })\", line: 1, column: 237097)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"INVALID_VALUE\",\"epoch\":\"1545\"},",
            "Unable to parse body message from JSON: Error(\"missing 0x prefix\", line: 1, column: 237125)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"0xINVALID_VALUE\",\"epoch\":\"1545\"},",
            "Unable to parse body message from JSON: Error(\"invalid hex (OddLength)\", line: 1, column: 237127)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"0xINVALID_VALUE_\",\"epoch\":\"1545\"},",
            "Unable to parse body message from JSON: Error(\"invalid hex (InvalidHexCharacter { c: \\\'I\\\', index: 0 })\", line: 1, column: 237128)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"0x02020202\",\"epoch\":},",
            "Unable to parse body message from JSON: Error(\"expected value\", line: 1, column: 237132)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"0x02020202\",\"epoch\":\"zzz\"},",
            "Unable to parse body message from JSON: Error(\"invalid digit found in string\", line: 1, column: 237136)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"0x02020202\",\"epoch\":true},",
            "Unable to parse body message from JSON: Error(\"invalid type: boolean `true`, expected a quoted or unquoted integer\", line: 1, column: 237135)",
        ),
        (
            "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"0x02020202\",\"epoch\":[\"a\"]},",
            "Unable to parse body message from JSON: Error(\"invalid type: sequence, expected a quoted or unquoted integer\", line: 1, column: 237132)",
        ),
    ];

    for &(json_patch, expected_err) in cases {
        let body = get_test_block_body(0xc137).replace(good_fork, json_patch);
        let resp = http_post_custom_body(&sign_url, &body);
        assert_sign_error(resp, 400, expected_err);
    }

    signer.shutdown();
}
|
||||
|
||||
/// A sign request whose JSON body drops the whole `fork` object must be
/// rejected with HTTP 400 and serde's "missing field `fork`" parse error.
#[test]
fn missing_field_fork() {
    let (signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
    let sign_url = format!("{}/sign/{}", signer.address, PUBLIC_KEY_1);

    // Delete the entire well-formed `fork` object from the body.
    let body = get_test_block_body(0xc137).replace(
        "\"fork\":{\"previous_version\":\"0x01010101\",\"current_version\":\"0x02020202\",\"epoch\":\"1545\"},",
        "",
    );

    let resp = http_post_custom_body(&sign_url, &body);
    assert_sign_error(resp, 400, "Unable to parse body message from JSON: Error(\"missing field `fork`\", line: 1, column: 237147)");

    signer.shutdown();
}
|
||||
|
||||
/// Renaming the `data` key hides the field from the deserializer, so the
/// request must fail with HTTP 400 and a "missing field `data`" error.
#[test]
fn missing_field_data() {
    let (signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
    let sign_url = format!("{}/sign/{}", signer.address, PUBLIC_KEY_1);

    // Rename the key so `data` is effectively absent from the payload.
    let body = get_test_block_body(0xc137).replace("\"data\":", "\"not-data\":");

    let resp = http_post_custom_body(&sign_url, &body);
    assert_sign_error(resp, 400, "Unable to parse body message from JSON: Error(\"missing field `data`\", line: 1, column: 237830)");

    signer.shutdown();
}
|
||||
|
||||
/// Exercises malformed `genesis_validators_root` values: each case substitutes
/// the known-good 0x-prefixed 64-char hex root with a broken value and expects
/// HTTP 400 with the exact deserializer error message.
#[test]
fn invalid_field_genesis_validators_root() {
    let (signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
    let sign_url = format!("{}/sign/{}", signer.address, PUBLIC_KEY_1);

    // (replacement JSON value, expected error message) pairs.
    let cases: &[(&str, &str)] = &[
        (
            "\"0\"",
            "Unable to parse body message from JSON: Error(\"0x prefix is missing\", line: 1, column: 237168)",
        ),
        (
            "\"0x\"",
            "Unable to parse body message from JSON: Error(\"invalid length 0, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237169)",
        ),
        (
            "\"0xa\"",
            "Unable to parse body message from JSON: Error(\"invalid length 1, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237170)",
        ),
        (
            "\"deadbeef\"",
            "Unable to parse body message from JSON: Error(\"0x prefix is missing\", line: 1, column: 237175)",
        ),
        (
            "\"0xdeadbeefzz\"",
            "Unable to parse body message from JSON: Error(\"invalid length 10, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237179)",
        ),
        (
            "\"0xdeadbeef1\"",
            "Unable to parse body message from JSON: Error(\"invalid length 9, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237178)",
        ),
        (
            "",
            "Unable to parse body message from JSON: Error(\"expected value\", line: 1, column: 237166)",
        ),
        (
            "1",
            "Unable to parse body message from JSON: Error(\"invalid type: integer `1`, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237166)",
        ),
        (
            "true",
            "Unable to parse body message from JSON: Error(\"invalid type: boolean `true`, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237169)",
        ),
        (
            "{\"cats\":\"3\"}",
            "Unable to parse body message from JSON: Error(\"invalid type: map, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237165)",
        ),
        (
            "[\"a\"]",
            "Unable to parse body message from JSON: Error(\"invalid type: sequence, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237165)",
        ),
        (
            "\"0x000000000000000000000000000000000000000000000000000000000000c1370\"",
            "Unable to parse body message from JSON: Error(\"invalid length 65, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237234)",
        ),
        (
            "\"0x000000000000000000000000000000000000000000000000000000000000c13700\"",
            "Unable to parse body message from JSON: Error(\"invalid length 66, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237235)",
        ),
        (
            "\"0x000000000000000000000000000000000000000000000000000000000000c1370000\"",
            "Unable to parse body message from JSON: Error(\"invalid length 68, expected a 0x-prefixed hex string with length of 64\", line: 1, column: 237237)",
        ),
    ];

    for &(json_patch, expected_err) in cases {
        let body = get_test_block_body(0xc137).replace(
            ",\"genesis_validators_root\":\"0x000000000000000000000000000000000000000000000000000000000000c137\"",
            &format!(",\"genesis_validators_root\":{}", json_patch),
        );
        let resp = http_post_custom_body(&sign_url, &body);
        assert_sign_error(resp, 400, expected_err);
    }

    signer.shutdown();
}
|
||||
|
||||
/// A sign request whose JSON body drops the `genesis_validators_root` field
/// must be rejected with HTTP 400 and serde's "missing field" parse error.
#[test]
fn missing_field_genesis_validators_root() {
    let (signer, _tmp_dir) = set_up_api_test_signer_to_sign_message();
    let sign_url = format!("{}/sign/{}", signer.address, PUBLIC_KEY_1);

    // Delete the whole `,"genesis_validators_root":"0x..."` key/value pair.
    let body = get_test_block_body(0xc137).replace(
        ",\"genesis_validators_root\":\"0x000000000000000000000000000000000000000000000000000000000000c137\"",
        "",
    );

    let resp = http_post_custom_body(&sign_url, &body);
    assert_sign_error(resp, 400, "Unable to parse body message from JSON: Error(\"missing field `genesis_validators_root`\", line: 1, column: 237139)");

    signer.shutdown();
}
|
||||
}
|
||||
Some files were not shown because too many files changed in this diff. Show more
Reference in New Issue
Block a user