Updates to latest master.

This commit is contained in:
Age Manning
2019-03-08 11:15:38 +11:00
129 changed files with 6696 additions and 2862 deletions

View File

@@ -1,10 +1,8 @@
use log::trace;
use state_processing::validate_attestation_without_signature;
use ssz::TreeHash;
use state_processing::per_block_processing::validate_attestation_without_signature;
use std::collections::{HashMap, HashSet};
use types::{
AggregateSignature, Attestation, AttestationData, BeaconState, BeaconStateError, Bitfield,
ChainSpec, FreeAttestation, Signature,
};
use types::*;
const PHASE_0_CUSTODY_BIT: bool = false;
@@ -84,11 +82,11 @@ impl AttestationAggregator {
/// - The signature is verified against that of the validator at `validator_index`.
pub fn process_free_attestation(
&mut self,
cached_state: &BeaconState,
state: &BeaconState,
free_attestation: &FreeAttestation,
spec: &ChainSpec,
) -> Result<Outcome, BeaconStateError> {
let attestation_duties = match cached_state.attestation_slot_and_shard_for_validator(
let attestation_duties = match state.attestation_slot_and_shard_for_validator(
free_attestation.validator_index as usize,
spec,
) {
@@ -119,9 +117,13 @@ impl AttestationAggregator {
invalid_outcome!(Message::BadShard);
}
let signable_message = free_attestation.data.signable_message(PHASE_0_CUSTODY_BIT);
let signable_message = AttestationDataAndCustodyBit {
data: free_attestation.data.clone(),
custody_bit: PHASE_0_CUSTODY_BIT,
}
.hash_tree_root();
let validator_record = match cached_state
let validator_record = match state
.validator_registry
.get(free_attestation.validator_index as usize)
{
@@ -131,9 +133,7 @@ impl AttestationAggregator {
if !free_attestation.signature.verify(
&signable_message,
cached_state
.fork
.get_domain(cached_state.current_epoch(spec), spec.domain_attestation),
spec.get_domain(state.current_epoch(spec), Domain::Attestation, &state.fork),
&validator_record.pubkey,
) {
invalid_outcome!(Message::BadSignature);

View File

@@ -1,5 +1,6 @@
use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome};
use crate::checkpoint::CheckPoint;
use crate::errors::{BeaconChainError as Error, BlockProductionError};
use db::{
stores::{BeaconBlockStore, BeaconStateStore},
ClientDB, DBError,
@@ -10,28 +11,15 @@ use parking_lot::{RwLock, RwLockReadGuard};
use slot_clock::SlotClock;
use ssz::ssz_encode;
use state_processing::{
BlockProcessable, BlockProcessingError, SlotProcessable, SlotProcessingError,
per_block_processing, per_block_processing_without_verifying_block_signature,
per_slot_processing, BlockProcessingError, SlotProcessingError,
};
use std::sync::Arc;
use types::{
readers::{BeaconBlockReader, BeaconStateReader},
AttestationData, BeaconBlock, BeaconBlockBody, BeaconState, BeaconStateError, ChainSpec,
Crosslink, Deposit, Epoch, Eth1Data, FreeAttestation, Hash256, PublicKey, RelativeEpoch,
Signature, Slot,
*,
};
#[derive(Debug, PartialEq)]
pub enum Error {
InsufficientValidators,
BadRecentBlockRoots,
BeaconStateError(BeaconStateError),
DBInconsistent(String),
DBError(String),
ForkChoiceError(ForkChoiceError),
MissingBeaconBlock(Hash256),
MissingBeaconState(Hash256),
}
#[derive(Debug, PartialEq)]
pub enum ValidBlock {
/// The block was successfully processed.
@@ -66,6 +54,11 @@ pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
pub state_store: Arc<BeaconStateStore<T>>,
pub slot_clock: U,
pub attestation_aggregator: RwLock<AttestationAggregator>,
pub deposits_for_inclusion: RwLock<Vec<Deposit>>,
pub exits_for_inclusion: RwLock<Vec<VoluntaryExit>>,
pub transfers_for_inclusion: RwLock<Vec<Transfer>>,
pub proposer_slashings_for_inclusion: RwLock<Vec<ProposerSlashing>>,
pub attester_slashings_for_inclusion: RwLock<Vec<AttesterSlashing>>,
canonical_head: RwLock<CheckPoint>,
finalized_head: RwLock<CheckPoint>,
pub state: RwLock<BeaconState>,
@@ -80,6 +73,7 @@ where
F: ForkChoice,
{
/// Instantiate a new Beacon Chain, from genesis.
#[allow(clippy::too_many_arguments)] // Will be re-factored in the coming weeks.
pub fn genesis(
state_store: Arc<BeaconStateStore<T>>,
block_store: Arc<BeaconBlockStore<T>>,
@@ -132,6 +126,11 @@ where
state_store,
slot_clock,
attestation_aggregator,
deposits_for_inclusion: RwLock::new(vec![]),
exits_for_inclusion: RwLock::new(vec![]),
transfers_for_inclusion: RwLock::new(vec![]),
proposer_slashings_for_inclusion: RwLock::new(vec![]),
attester_slashings_for_inclusion: RwLock::new(vec![]),
state: RwLock::new(genesis_state),
finalized_head,
canonical_head,
@@ -208,9 +207,7 @@ where
let state_slot = self.state.read().slot;
let head_block_root = self.head().beacon_block_root;
for _ in state_slot.as_u64()..slot.as_u64() {
self.state
.write()
.per_slot_processing(head_block_root, &self.spec)?;
per_slot_processing(&mut *self.state.write(), head_block_root, &self.spec)?;
}
Ok(())
}
@@ -308,7 +305,7 @@ where
.state
.read()
.get_block_root(
justified_epoch.start_slot(self.spec.epoch_length),
justified_epoch.start_slot(self.spec.slots_per_epoch),
&self.spec,
)
.ok_or_else(|| Error::BadRecentBlockRoots)?;
@@ -327,10 +324,10 @@ where
shard,
beacon_block_root: self.head().beacon_block_root,
epoch_boundary_root,
shard_block_root: Hash256::zero(),
crosslink_data_root: Hash256::zero(),
latest_crosslink: Crosslink {
epoch: self.state.read().slot.epoch(self.spec.epoch_length),
shard_block_root: Hash256::zero(),
epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch),
crosslink_data_root: Hash256::zero(),
},
justified_epoch,
justified_block_root,
@@ -364,6 +361,222 @@ where
Ok(aggregation_outcome)
}
/// Accept some deposit and queue it for inclusion in an appropriate block.
pub fn receive_deposit_for_inclusion(&self, deposit: Deposit) {
// TODO: deposits are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.deposits_for_inclusion.write().push(deposit);
}
/// Return a vec of deposits suitable for inclusion in some block.
pub fn get_deposits_for_block(&self) -> Vec<Deposit> {
// TODO: deposits are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.deposits_for_inclusion.read().clone()
}
/// Takes a list of `Deposits` that were included in recent blocks and removes them from the
/// inclusion queue.
///
/// This ensures that `Deposits` are not included twice in successive blocks.
pub fn set_deposits_as_included(&self, included_deposits: &[Deposit]) {
// TODO: method does not take forks into account; consider this.
//
// https://github.com/sigp/lighthouse/issues/275
let mut indices_to_delete = vec![];
for included in included_deposits {
for (i, for_inclusion) in self.deposits_for_inclusion.read().iter().enumerate() {
if included == for_inclusion {
indices_to_delete.push(i);
}
}
}
let deposits_for_inclusion = &mut self.deposits_for_inclusion.write();
for i in indices_to_delete {
deposits_for_inclusion.remove(i);
}
}
/// Accept some exit and queue it for inclusion in an appropriate block.
pub fn receive_exit_for_inclusion(&self, exit: VoluntaryExit) {
// TODO: exits are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.exits_for_inclusion.write().push(exit);
}
/// Return a vec of exits suitable for inclusion in some block.
pub fn get_exits_for_block(&self) -> Vec<VoluntaryExit> {
// TODO: exits are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.exits_for_inclusion.read().clone()
}
/// Takes a list of `VoluntaryExit`s that were included in recent blocks and removes them from
/// the inclusion queue.
///
/// This ensures that `VoluntaryExit`s are not included twice in successive blocks.
pub fn set_exits_as_included(&self, included_exits: &[VoluntaryExit]) {
// TODO: method does not take forks into account; consider this.
let mut indices_to_delete = vec![];
for included in included_exits {
for (i, for_inclusion) in self.exits_for_inclusion.read().iter().enumerate() {
if included == for_inclusion {
indices_to_delete.push(i);
}
}
}
let exits_for_inclusion = &mut self.exits_for_inclusion.write();
for i in indices_to_delete {
exits_for_inclusion.remove(i);
}
}
/// Accept some transfer and queue it for inclusion in an appropriate block.
pub fn receive_transfer_for_inclusion(&self, transfer: Transfer) {
// TODO: transfers are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.transfers_for_inclusion.write().push(transfer);
}
/// Return a vec of transfers suitable for inclusion in some block.
pub fn get_transfers_for_block(&self) -> Vec<Transfer> {
// TODO: transfers are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.transfers_for_inclusion.read().clone()
}
/// Takes a list of `Transfer`s that were included in recent blocks and removes them from the
/// inclusion queue.
///
/// This ensures that `Transfer`s are not included twice in successive blocks.
pub fn set_transfers_as_included(&self, included_transfers: &[Transfer]) {
// TODO: method does not take forks into account; consider this.
let mut indices_to_delete = vec![];
for included in included_transfers {
for (i, for_inclusion) in self.transfers_for_inclusion.read().iter().enumerate() {
if included == for_inclusion {
indices_to_delete.push(i);
}
}
}
let transfers_for_inclusion = &mut self.transfers_for_inclusion.write();
for i in indices_to_delete {
transfers_for_inclusion.remove(i);
}
}
/// Accept some proposer slashing and queue it for inclusion in an appropriate block.
pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) {
// TODO: proposer_slashings are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.proposer_slashings_for_inclusion
.write()
.push(proposer_slashing);
}
/// Return a vec of proposer slashings suitable for inclusion in some block.
pub fn get_proposer_slashings_for_block(&self) -> Vec<ProposerSlashing> {
// TODO: proposer_slashings are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.proposer_slashings_for_inclusion.read().clone()
}
/// Takes a list of `ProposerSlashings` that were included in recent blocks and removes them
/// from the inclusion queue.
///
/// This ensures that `ProposerSlashings` are not included twice in successive blocks.
pub fn set_proposer_slashings_as_included(
&self,
included_proposer_slashings: &[ProposerSlashing],
) {
// TODO: method does not take forks into account; consider this.
//
// https://github.com/sigp/lighthouse/issues/275
let mut indices_to_delete = vec![];
for included in included_proposer_slashings {
for (i, for_inclusion) in self
.proposer_slashings_for_inclusion
.read()
.iter()
.enumerate()
{
if included == for_inclusion {
indices_to_delete.push(i);
}
}
}
let proposer_slashings_for_inclusion = &mut self.proposer_slashings_for_inclusion.write();
for i in indices_to_delete {
proposer_slashings_for_inclusion.remove(i);
}
}
/// Accept some attester slashing and queue it for inclusion in an appropriate block.
pub fn receive_attester_slashing_for_inclusion(&self, attester_slashing: AttesterSlashing) {
// TODO: attester_slashings are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.attester_slashings_for_inclusion
.write()
.push(attester_slashing);
}
/// Return a vec of attester slashings suitable for inclusion in some block.
pub fn get_attester_slashings_for_block(&self) -> Vec<AttesterSlashing> {
// TODO: attester_slashings are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.attester_slashings_for_inclusion.read().clone()
}
/// Takes a list of `AttesterSlashings` that were included in recent blocks and removes them
/// from the inclusion queue.
///
/// This ensures that `AttesterSlashings` are not included twice in successive blocks.
pub fn set_attester_slashings_as_included(
&self,
included_attester_slashings: &[AttesterSlashing],
) {
// TODO: method does not take forks into account; consider this.
//
// https://github.com/sigp/lighthouse/issues/275
let mut indices_to_delete = vec![];
for included in included_attester_slashings {
for (i, for_inclusion) in self
.attester_slashings_for_inclusion
.read()
.iter()
.enumerate()
{
if included == for_inclusion {
indices_to_delete.push(i);
}
}
}
let attester_slashings_for_inclusion = &mut self.attester_slashings_for_inclusion.write();
for i in indices_to_delete {
attester_slashings_for_inclusion.remove(i);
}
}
/// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
///
/// This could be a very expensive operation and should only be done in testing/analysis
@@ -412,6 +625,8 @@ where
last_slot = slot;
}
dump.reverse();
Ok(dump)
}
@@ -461,7 +676,7 @@ where
// Transition the parent state to the present slot.
let mut state = parent_state;
for _ in state.slot.as_u64()..present_slot.as_u64() {
if let Err(e) = state.per_slot_processing(parent_block_root, &self.spec) {
if let Err(e) = per_slot_processing(&mut state, parent_block_root, &self.spec) {
return Ok(BlockProcessingOutcome::InvalidBlock(
InvalidBlock::SlotProcessingError(e),
));
@@ -470,7 +685,7 @@ where
// Apply the received block to its parent state (which has been transitioned into this
// slot).
if let Err(e) = state.per_block_processing(&block, &self.spec) {
if let Err(e) = per_block_processing(&mut state, &block, &self.spec) {
return Ok(BlockProcessingOutcome::InvalidBlock(
InvalidBlock::PerBlockProcessingError(e),
));
@@ -488,6 +703,13 @@ where
self.block_store.put(&block_root, &ssz_encode(&block)[..])?;
self.state_store.put(&state_root, &ssz_encode(&state)[..])?;
// Update the inclusion queues so they aren't re-submitted.
self.set_deposits_as_included(&block.body.deposits[..]);
self.set_transfers_as_included(&block.body.transfers[..]);
self.set_exits_as_included(&block.body.voluntary_exits[..]);
self.set_proposer_slashings_as_included(&block.body.proposer_slashings[..]);
self.set_attester_slashings_as_included(&block.body.attester_slashings[..]);
// run the fork_choice add_block logic
self.fork_choice
.write()
@@ -500,7 +722,7 @@ where
if self.head().beacon_block_root == parent_block_root {
self.update_canonical_head(block.clone(), block_root, state.clone(), state_root);
// Update the local state variable.
*self.state.write() = state.clone();
*self.state.write() = state;
}
Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed))
@@ -510,7 +732,10 @@ where
///
/// The produced block will not be inherently valid, it must be signed by a block producer.
/// Block signing is out of the scope of this function and should be done by a separate program.
pub fn produce_block(&self, randao_reveal: Signature) -> Option<(BeaconBlock, BeaconState)> {
pub fn produce_block(
&self,
randao_reveal: Signature,
) -> Result<(BeaconBlock, BeaconState), BlockProductionError> {
debug!("Producing block at slot {}...", self.state.read().slot);
let mut state = self.state.read().clone();
@@ -527,7 +752,9 @@ where
attestations.len()
);
let parent_root = *state.get_block_root(state.slot.saturating_sub(1_u64), &self.spec)?;
let parent_root = *state
.get_block_root(state.slot.saturating_sub(1_u64), &self.spec)
.ok_or_else(|| BlockProductionError::UnableToGetBlockRootFromState)?;
let mut block = BeaconBlock {
slot: state.slot,
@@ -541,31 +768,24 @@ where
},
signature: self.spec.empty_signature.clone(), // To be completed by a validator.
body: BeaconBlockBody {
proposer_slashings: vec![],
attester_slashings: vec![],
proposer_slashings: self.get_proposer_slashings_for_block(),
attester_slashings: self.get_attester_slashings_for_block(),
attestations,
deposits: vec![],
exits: vec![],
deposits: self.get_deposits_for_block(),
voluntary_exits: self.get_exits_for_block(),
transfers: self.get_transfers_for_block(),
},
};
trace!("BeaconChain::produce_block: updating state for new block.",);
let result =
state.per_block_processing_without_verifying_block_signature(&block, &self.spec);
trace!(
"BeaconNode::produce_block: state processing result: {:?}",
result
);
result.ok()?;
per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?;
let state_root = state.canonical_root();
block.state_root = state_root;
trace!("Block produced.");
Some((block, state))
Ok((block, state))
}
// TODO: Left this as is, modify later

View File

@@ -0,0 +1,33 @@
use fork_choice::ForkChoiceError;
use state_processing::BlockProcessingError;
use types::*;
macro_rules! easy_from_to {
($from: ident, $to: ident) => {
impl From<$from> for $to {
fn from(e: $from) -> $to {
$to::$from(e)
}
}
};
}
#[derive(Debug, PartialEq)]
pub enum BeaconChainError {
InsufficientValidators,
BadRecentBlockRoots,
BeaconStateError(BeaconStateError),
DBInconsistent(String),
DBError(String),
ForkChoiceError(ForkChoiceError),
MissingBeaconBlock(Hash256),
MissingBeaconState(Hash256),
}
#[derive(Debug, PartialEq)]
pub enum BlockProductionError {
UnableToGetBlockRootFromState,
BlockProcessingError(BlockProcessingError),
}
easy_from_to!(BlockProcessingError, BlockProductionError);

View File

@@ -1,9 +1,9 @@
mod attestation_aggregator;
mod beacon_chain;
mod checkpoint;
mod errors;
pub use self::beacon_chain::{
BeaconChain, BlockProcessingOutcome, Error, InvalidBlock, ValidBlock,
};
pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock};
pub use self::checkpoint::CheckPoint;
pub use self::errors::BeaconChainError;
pub use fork_choice::{ForkChoice, ForkChoiceAlgorithm, ForkChoiceError};

View File

@@ -4,12 +4,21 @@ version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[[bin]]
name = "test_harness"
path = "src/bin.rs"
[lib]
name = "test_harness"
path = "src/lib.rs"
[[bench]]
name = "state_transition"
harness = false
[dev-dependencies]
criterion = "0.2"
state_processing = { path = "../../../eth2/state_processing" }
[dependencies]
attester = { path = "../../../eth2/attester" }
@@ -17,6 +26,7 @@ beacon_chain = { path = "../../beacon_chain" }
block_proposer = { path = "../../../eth2/block_proposer" }
bls = { path = "../../../eth2/utils/bls" }
boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" }
clap = "2.32.0"
db = { path = "../../db" }
parking_lot = "0.7"
failure = "0.1"
@@ -32,3 +42,4 @@ serde_json = "1.0"
slot_clock = { path = "../../../eth2/utils/slot_clock" }
ssz = { path = "../../../eth2/utils/ssz" }
types = { path = "../../../eth2/types" }
yaml-rust = "0.4.2"

View File

@@ -0,0 +1,150 @@
# Test Harness
Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
directly to the `BeaconChain` via an `Arc`.
The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
producing blocks and attestations.
The crate consists of a library and binary, examples for using both are
described below.
## YAML
Both the library and the binary are capable of parsing tests from a YAML file,
in fact this is the sole purpose of the binary.
You can find YAML test cases [here](specs/). An example is included below:
```yaml
title: Validator Registry Tests
summary: Tests deposit and slashing effects on validator registry.
test_suite: validator_registry
fork: tchaikovsky
version: 1.0
test_cases:
- config:
slots_per_epoch: 64
deposits_for_chain_start: 1000
num_slots: 64
skip_slots: [2, 3]
deposits:
# At slot 1, create a new validator deposit of 32 ETH.
- slot: 1
amount: 32
# Trigger more deposits...
- slot: 3
amount: 32
- slot: 5
amount: 32
proposer_slashings:
# At slot 2, trigger a proposer slashing for validator #42.
- slot: 2
validator_index: 42
# Trigger another slashing...
- slot: 8
validator_index: 13
attester_slashings:
# At slot 2, trigger an attester slashing for validators #11 and #12.
- slot: 2
validator_indices: [11, 12]
# Trigger another slashing...
- slot: 5
validator_indices: [14]
results:
num_skipped_slots: 2
states:
- slot: 63
num_validators: 1003
slashed_validators: [11, 12, 13, 14, 42]
exited_validators: []
```
Thanks to [prysm](https://github.com/prysmaticlabs/prysm) for coming up with the
base YAML format.
### Notes
Wherever `slot` is used, it is actually the "slot height", or slots since
genesis. This allows the tests to disregard the `GENESIS_EPOCH`.
### Differences from Prysmatic's format
1. The detail for `deposits`, `proposer_slashings` and `attester_slashings` is
omitted from the test specification. It is assumed they should be valid
objects.
2. There is a `states` list in `results` that runs checks against any state
specified by a `slot` number. This is in contrast to the variables in
`results` that assume the last (highest) state should be inspected.
#### Reasoning
Respective reasonings for above changes:
1. This removes the concerns of the actual object structure from the tests.
This allows for more variation in the deposits/slashings objects without
needing to update the tests. Also, it makes it easier to create
tests.
2. This gives more fine-grained control over the tests. It allows for checking
that certain events happened at certain times whilst making the tests only
slightly more verbose.
_Notes: it may be useful to add an extra field to each slashing type to
indicate if it should be valid or not. It also may be useful to add an option
for double-vote/surround-vote attester slashings. The `amount` field was left
on `deposits` as it changes the behaviour of state significantly._
## Binary Usage Example
Follow these steps to run as a binary:
1. Navigate to the root of this crate (where this readme is located)
2. Run `$ cargo run --release -- --yaml examples/validator_registry.yaml`
_Note: the `--release` flag builds the binary without all the debugging
instrumentation. The test is much faster built using `--release`. As is
customary in cargo, the flags before `--` are passed to cargo and the flags
after are passed to the binary._
### CLI Options
```
Lighthouse Test Harness Runner 0.0.1
Sigma Prime <contact@sigmaprime.io>
Runs `test_harness` using a YAML test_case.
USAGE:
test_harness --log-level <LOG_LEVEL> --yaml <FILE>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
--log-level <LOG_LEVEL> Logging level. [default: debug] [possible values: error, warn, info, debug, trace]
--yaml <FILE> YAML file test_case.
```
## Library Usage Example
```rust
use test_harness::BeaconChainHarness;
use types::ChainSpec;
let validator_count = 8;
let spec = ChainSpec::few_validators();
let mut harness = BeaconChainHarness::new(spec, validator_count);
harness.advance_chain_with_block();
let chain = harness.chain_dump().unwrap();
// One block should have been built on top of the genesis block.
assert_eq!(chain.len(), 2);
```

View File

@@ -1,6 +1,7 @@
use criterion::Criterion;
use criterion::{black_box, criterion_group, criterion_main, Benchmark};
// use env_logger::{Builder, Env};
use state_processing::SlotProcessable;
use test_harness::BeaconChainHarness;
use types::{ChainSpec, Hash256};
@@ -10,7 +11,7 @@ fn mid_epoch_state_transition(c: &mut Criterion) {
let validator_count = 1000;
let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
let epoch_depth = (rig.spec.epoch_length * 2) + (rig.spec.epoch_length / 2);
let epoch_depth = (rig.spec.slots_per_epoch * 2) + (rig.spec.slots_per_epoch / 2);
for _ in 0..epoch_depth {
rig.advance_chain_with_block();
@@ -18,7 +19,7 @@ fn mid_epoch_state_transition(c: &mut Criterion) {
let state = rig.beacon_chain.state.read().clone();
assert!((state.slot + 1) % rig.spec.epoch_length != 0);
assert!((state.slot + 1) % rig.spec.slots_per_epoch != 0);
c.bench_function("mid-epoch state transition 10k validators", move |b| {
let state = state.clone();
@@ -35,7 +36,7 @@ fn epoch_boundary_state_transition(c: &mut Criterion) {
let validator_count = 10000;
let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
let epoch_depth = rig.spec.epoch_length * 2;
let epoch_depth = rig.spec.slots_per_epoch * 2;
for _ in 0..(epoch_depth - 1) {
rig.advance_chain_with_block();
@@ -43,7 +44,7 @@ fn epoch_boundary_state_transition(c: &mut Criterion) {
let state = rig.beacon_chain.state.read().clone();
assert_eq!((state.slot + 1) % rig.spec.epoch_length, 0);
assert_eq!((state.slot + 1) % rig.spec.slots_per_epoch, 0);
c.bench(
"routines",

View File

@@ -0,0 +1,59 @@
title: Validator Registry Tests
summary: Tests deposit and slashing effects on validator registry.
test_suite: validator_registry
fork: tchaikovsky
version: 1.0
test_cases:
- config:
slots_per_epoch: 64
deposits_for_chain_start: 1000
num_slots: 64
skip_slots: [2, 3]
deposits:
# At slot 1, create a new validator deposit of 5 ETH.
- slot: 1
amount: 5000000000
# Trigger more deposits...
- slot: 3
amount: 5000000000
- slot: 5
amount: 32000000000
exits:
# At slot 10, submit an exit for validator #50.
- slot: 10
validator_index: 50
transfers:
- slot: 6
from: 1000
to: 1001
amount: 5000000000
proposer_slashings:
# At slot 2, trigger a proposer slashing for validator #42.
- slot: 2
validator_index: 42
# Trigger another slashing...
- slot: 8
validator_index: 13
attester_slashings:
# At slot 2, trigger an attester slashing for validators #11 and #12.
- slot: 2
validator_indices: [11, 12]
# Trigger another slashing...
- slot: 5
validator_indices: [14]
results:
num_skipped_slots: 2
states:
- slot: 63
num_validators: 1003
slashed_validators: [11, 12, 13, 14, 42]
exited_validators: []
exit_initiated_validators: [50]
balances:
- validator_index: 1000
comparison: "eq"
balance: 0
- validator_index: 1001
comparison: "eq"
balance: 10000000000

View File

@@ -1,7 +1,7 @@
use super::ValidatorHarness;
use beacon_chain::{BeaconChain, BlockProcessingOutcome};
pub use beacon_chain::{CheckPoint, Error as BeaconChainError};
use bls::create_proof_of_possession;
pub use beacon_chain::{BeaconChainError, CheckPoint};
use bls::{create_proof_of_possession, get_withdrawal_credentials};
use db::{
stores::{BeaconBlockStore, BeaconStateStore},
MemoryDB,
@@ -11,14 +11,9 @@ use log::debug;
use rayon::prelude::*;
use slot_clock::TestingSlotClock;
use std::collections::HashSet;
use std::fs::File;
use std::io::prelude::*;
use std::iter::FromIterator;
use std::sync::Arc;
use types::{
BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, FreeAttestation, Hash256,
Keypair, Slot,
};
use types::*;
/// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
/// to it. Each validator is provided a borrow to the beacon chain, where it may read
@@ -72,7 +67,13 @@ impl BeaconChainHarness {
timestamp: genesis_time - 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
// Validator can withdraw using their main keypair.
withdrawal_credentials: Hash256::from_slice(
&get_withdrawal_credentials(
&keypair.pk,
spec.bls_withdrawal_prefix_byte,
)[..],
),
proof_of_possession: create_proof_of_possession(&keypair),
},
},
@@ -130,13 +131,13 @@ impl BeaconChainHarness {
let nth_slot = slot
- slot
.epoch(self.spec.epoch_length)
.start_slot(self.spec.epoch_length);
let nth_epoch = slot.epoch(self.spec.epoch_length) - self.spec.genesis_epoch;
.epoch(self.spec.slots_per_epoch)
.start_slot(self.spec.slots_per_epoch);
let nth_epoch = slot.epoch(self.spec.slots_per_epoch) - self.spec.genesis_epoch;
debug!(
"Advancing BeaconChain to slot {}, epoch {} (epoch height: {}, slot {} in epoch.).",
slot,
slot.epoch(self.spec.epoch_length),
slot.epoch(self.spec.slots_per_epoch),
nth_epoch,
nth_slot
);
@@ -245,6 +246,70 @@ impl BeaconChainHarness {
debug!("Free attestations processed.");
}
/// Signs a message using some validators secret key with the `Fork` info from the latest state
/// of the `BeaconChain`.
///
/// Useful for producing slashable messages and other objects that `BeaconChainHarness` does
/// not produce naturally.
pub fn validator_sign(
&self,
validator_index: usize,
message: &[u8],
epoch: Epoch,
domain_type: Domain,
) -> Option<Signature> {
let validator = self.validators.get(validator_index)?;
let domain = self
.spec
.get_domain(epoch, domain_type, &self.beacon_chain.state.read().fork);
Some(Signature::new(message, domain, &validator.keypair.sk))
}
/// Submit a deposit to the `BeaconChain` and, if given a keypair, create a new
/// `ValidatorHarness` instance for this validator.
///
/// If a new `ValidatorHarness` was created, the validator should become fully operational as
/// if the validator were created during `BeaconChainHarness` instantiation.
pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option<Keypair>) {
self.beacon_chain.receive_deposit_for_inclusion(deposit);
// If a keypair is present, add a new `ValidatorHarness` to the rig.
if let Some(keypair) = keypair {
let validator =
ValidatorHarness::new(keypair, self.beacon_chain.clone(), self.spec.clone());
self.validators.push(validator);
}
}
/// Submit an exit to the `BeaconChain` for inclusion in some block.
///
/// Note: the `ValidatorHarness` for this validator continues to exist. Once it is exited it
/// will stop receiving duties from the beacon chain and just do nothing when prompted to
/// produce/attest.
pub fn add_exit(&mut self, exit: VoluntaryExit) {
self.beacon_chain.receive_exit_for_inclusion(exit);
}
/// Submit a transfer to the `BeaconChain` for inclusion in some block.
pub fn add_transfer(&mut self, transfer: Transfer) {
self.beacon_chain.receive_transfer_for_inclusion(transfer);
}
/// Submit a proposer slashing to the `BeaconChain` for inclusion in some block.
pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) {
self.beacon_chain
.receive_proposer_slashing_for_inclusion(proposer_slashing);
}
/// Submit an attester slashing to the `BeaconChain` for inclusion in some block.
pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) {
self.beacon_chain
.receive_attester_slashing_for_inclusion(attester_slashing);
}
/// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head.
pub fn run_fork_choice(&mut self) {
self.beacon_chain.fork_choice().unwrap()
}
@@ -253,12 +318,4 @@ impl BeaconChainHarness {
pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, BeaconChainError> {
self.beacon_chain.chain_dump()
}
/// Write the output of `chain_dump` to a JSON file.
pub fn dump_to_file(&self, filename: String, chain_dump: &[CheckPoint]) {
let json = serde_json::to_string(chain_dump).unwrap();
let mut file = File::create(filename).unwrap();
file.write_all(json.as_bytes())
.expect("Failed writing dump to file.");
}
}

View File

@@ -0,0 +1,69 @@
use clap::{App, Arg};
use env_logger::{Builder, Env};
use std::{fs::File, io::prelude::*};
use test_case::TestCase;
use yaml_rust::YamlLoader;
mod beacon_chain_harness;
mod test_case;
mod validator_harness;
use validator_harness::ValidatorHarness;
/// Entry point: parses CLI arguments, configures logging, then loads and runs every
/// `test_case` found in the supplied YAML file.
fn main() {
    // Declare the CLI. Note: `log` carries a `default_value`, so it always resolves.
    let matches = App::new("Lighthouse Test Harness Runner")
        .version("0.0.1")
        .author("Sigma Prime <contact@sigmaprime.io>")
        .about("Runs `test_harness` using a YAML test_case.")
        .arg(
            Arg::with_name("yaml")
                .long("yaml")
                .value_name("FILE")
                .help("YAML file test_case.")
                .required(true),
        )
        .arg(
            Arg::with_name("log")
                .long("log-level")
                .value_name("LOG_LEVEL")
                .help("Logging level.")
                .possible_values(&["error", "warn", "info", "debug", "trace"])
                .default_value("debug")
                .required(true),
        )
        .get_matches();

    // Initialise env_logger at the requested level (always `Some` due to the default value).
    if let Some(log_level) = matches.value_of("log") {
        Builder::from_env(Env::default().default_filter_or(log_level)).init();
    }

    if let Some(yaml_file) = matches.value_of("yaml") {
        // Read the whole file and parse it into one or more YAML documents.
        let docs = {
            let mut file = File::open(yaml_file).unwrap();

            let mut yaml_str = String::new();
            file.read_to_string(&mut yaml_str).unwrap();

            YamlLoader::load_from_str(&yaml_str).unwrap()
        };

        for doc in &docs {
            // For each `test_cases` YAML in the document, build a `TestCase`, execute it and
            // assert that the execution result matches the test_case description.
            //
            // In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis
            // and a new `BeaconChain` is built as per the test_case.
            //
            // After the `BeaconChain` has been built out as per the test_case, a dump of all blocks
            // and states in the chain is obtained and checked against the `results` specified in
            // the `test_case`.
            //
            // If any of the expectations in the results are not met, the process
            // panics with a message.
            for test_case in doc["test_cases"].as_vec().unwrap() {
                let test_case = TestCase::from_yaml(test_case);
                test_case.assert_result_valid(test_case.execute())
            }
        }
    }
}

View File

@@ -1,4 +1,32 @@
//! Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
//!
//! This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
//! directly to the `BeaconChain` via an `Arc`.
//!
//! The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
//! instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
//! producing blocks and attestations.
//!
//! Example:
//! ```
//! use test_harness::BeaconChainHarness;
//! use types::ChainSpec;
//!
//! let validator_count = 8;
//! let spec = ChainSpec::few_validators();
//!
//! let mut harness = BeaconChainHarness::new(spec, validator_count);
//!
//! harness.advance_chain_with_block();
//!
//! let chain = harness.chain_dump().unwrap();
//!
//! // One block should have been built on top of the genesis block.
//! assert_eq!(chain.len(), 2);
//! ```
mod beacon_chain_harness;
pub mod test_case;
mod validator_harness;
pub use self::beacon_chain_harness::BeaconChainHarness;

View File

@@ -0,0 +1,335 @@
//! Defines execution and testing specs for a `BeaconChainHarness` instance. Supports loading from
//! a YAML file.
use crate::beacon_chain_harness::BeaconChainHarness;
use beacon_chain::CheckPoint;
use bls::{create_proof_of_possession, get_withdrawal_credentials};
use log::{info, warn};
use ssz::SignedRoot;
use types::*;
use types::{
attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder,
};
use yaml_rust::Yaml;
mod config;
mod results;
mod state_check;
mod yaml_helpers;
pub use config::Config;
pub use results::Results;
pub use state_check::StateCheck;
/// Defines the execution and testing of a `BeaconChainHarness` instantiation.
///
/// Typical workflow is:
///
/// 1. Instantiate the `TestCase` from YAML: `let test_case = TestCase::from_yaml(&my_yaml);`
/// 2. Execute the test_case: `let result = test_case.execute();`
/// 3. Test the results against the test_case: `test_case.assert_result_valid(result);`
#[derive(Debug)]
pub struct TestCase {
    /// Defines the execution (validator count, slots, operations to include).
    pub config: Config,
    /// Defines tests to run against the execution result.
    pub results: Results,
}
/// The result of executing a `TestCase`.
pub struct ExecutionResult {
    /// The canonical beacon chain generated from the execution.
    pub chain: Vec<CheckPoint>,
    /// The spec used for execution.
    pub spec: ChainSpec,
}
impl TestCase {
    /// Load the test case from a YAML document.
    ///
    /// Expects a single element of the document's `test_cases` list.
    pub fn from_yaml(test_case: &Yaml) -> Self {
        Self {
            results: Results::from_yaml(&test_case["results"]),
            config: Config::from_yaml(&test_case["config"]),
        }
    }

    /// Return a `ChainSpec::foundation()`.
    ///
    /// If specified in `config`, returns it with a modified `slots_per_epoch`.
    fn spec(&self) -> ChainSpec {
        let mut spec = ChainSpec::foundation();

        if let Some(n) = self.config.slots_per_epoch {
            spec.slots_per_epoch = n;
        }

        spec
    }

    /// Executes the test case, returning an `ExecutionResult`.
    ///
    /// Builds a `BeaconChainHarness` from genesis, then for each slot feeds in any deposits,
    /// slashings, exits and transfers scheduled for that slot before producing a block (or
    /// skipping the slot, when so configured).
    #[allow(clippy::cyclomatic_complexity)]
    pub fn execute(&self) -> ExecutionResult {
        let spec = self.spec();
        let validator_count = self.config.deposits_for_chain_start;
        let slots = self.config.num_slots;

        info!(
            "Building BeaconChainHarness with {} validators...",
            validator_count
        );

        let mut harness = BeaconChainHarness::new(spec, validator_count);

        info!("Starting simulation across {} slots...", slots);

        // Start at 1 because genesis counts as a slot.
        for slot_height in 1..slots {
            // Used to ensure that deposits in the same slot have incremental deposit indices.
            let mut deposit_index_offset = 0;

            // Feed deposits to the BeaconChain.
            if let Some(ref deposits) = self.config.deposits {
                for (slot, amount) in deposits {
                    if *slot == slot_height {
                        info!("Including deposit at slot height {}.", slot_height);
                        let (deposit, keypair) =
                            build_deposit(&harness, *amount, deposit_index_offset);
                        harness.add_deposit(deposit, Some(keypair.clone()));
                        deposit_index_offset += 1;
                    }
                }
            }

            // Feed proposer slashings to the BeaconChain.
            if let Some(ref slashings) = self.config.proposer_slashings {
                for (slot, validator_index) in slashings {
                    if *slot == slot_height {
                        info!(
                            "Including proposer slashing at slot height {} for validator #{}.",
                            slot_height, validator_index
                        );
                        let slashing = build_proposer_slashing(&harness, *validator_index);
                        harness.add_proposer_slashing(slashing);
                    }
                }
            }

            // Feed attester slashings to the BeaconChain.
            if let Some(ref slashings) = self.config.attester_slashings {
                for (slot, validator_indices) in slashings {
                    if *slot == slot_height {
                        info!(
                            "Including attester slashing at slot height {} for validators {:?}.",
                            slot_height, validator_indices
                        );
                        let slashing =
                            build_double_vote_attester_slashing(&harness, &validator_indices[..]);
                        harness.add_attester_slashing(slashing);
                    }
                }
            }

            // Feed exits to the BeaconChain.
            if let Some(ref exits) = self.config.exits {
                for (slot, validator_index) in exits {
                    if *slot == slot_height {
                        info!(
                            "Including exit at slot height {} for validator {}.",
                            slot_height, validator_index
                        );
                        let exit = build_exit(&harness, *validator_index);
                        harness.add_exit(exit);
                    }
                }
            }

            // Feed transfers to the BeaconChain.
            if let Some(ref transfers) = self.config.transfers {
                for (slot, from, to, amount) in transfers {
                    if *slot == slot_height {
                        info!(
                            "Including transfer at slot height {} from validator {}.",
                            slot_height, from
                        );
                        let transfer = build_transfer(&harness, *from, *to, *amount);
                        harness.add_transfer(transfer);
                    }
                }
            }

            // Build a block or skip a slot.
            match self.config.skip_slots {
                Some(ref skip_slots) if skip_slots.contains(&slot_height) => {
                    warn!("Skipping slot at height {}.", slot_height);
                    harness.increment_beacon_chain_slot();
                }
                _ => {
                    info!("Producing block at slot height {}.", slot_height);
                    harness.advance_chain_with_block();
                }
            }
        }

        harness.run_fork_choice();

        info!("Test execution complete!");

        info!("Building chain dump for analysis...");

        ExecutionResult {
            chain: harness.chain_dump().expect("Chain dump failed."),
            spec: (*harness.spec).clone(),
        }
    }

    /// Checks that the `ExecutionResult` is consistent with the specifications in `self.results`.
    ///
    /// # Panics
    ///
    /// Panics with a message if any result does not match expectations.
    pub fn assert_result_valid(&self, execution_result: ExecutionResult) {
        info!("Verifying test results...");
        let spec = &execution_result.spec;

        // Compare chain length against the expected number of non-skipped slots.
        if let Some(num_skipped_slots) = self.results.num_skipped_slots {
            assert_eq!(
                execution_result.chain.len(),
                self.config.num_slots as usize - num_skipped_slots,
                "actual skipped slots != expected."
            );
            info!(
                "OK: Chain length is {} ({} skipped slots).",
                execution_result.chain.len(),
                num_skipped_slots
            );
        }

        // Run each state check against every checkpoint whose genesis-relative slot matches.
        if let Some(ref state_checks) = self.results.state_checks {
            for checkpoint in &execution_result.chain {
                let state = &checkpoint.beacon_state;

                for state_check in state_checks {
                    // Express the state slot as a height above genesis, matching the YAML.
                    let adjusted_state_slot =
                        state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch);

                    if state_check.slot == adjusted_state_slot {
                        state_check.assert_valid(state, spec);
                    }
                }
            }
        }
    }
}
/// Builds a `Transfer` that is valid for the given `BeaconChainHarness` at its next slot.
fn build_transfer(harness: &BeaconChainHarness, from: u64, to: u64, amount: u64) -> Transfer {
    // Schedule the transfer for the slot after the chain's current slot.
    let slot = harness.beacon_chain.state.read().slot + 1;

    let mut transfer = Transfer {
        from,
        to,
        amount,
        fee: 0,
        slot,
        pubkey: harness.validators[from as usize].keypair.pk.clone(),
        signature: Signature::empty_signature(),
    };

    // Sign over the transfer's signed-root with the sender's key.
    let message = transfer.signed_root();
    let epoch = slot.epoch(harness.spec.slots_per_epoch);

    transfer.signature = harness
        .validator_sign(from as usize, &message[..], epoch, Domain::Transfer)
        .expect("Unable to sign Transfer");

    transfer
}
/// Builds a `Deposit` that is valid for the given `BeaconChainHarness`.
///
/// `index_offset` is used to ensure that `deposit.index == state.index` when adding multiple
/// deposits.
fn build_deposit(
    harness: &BeaconChainHarness,
    amount: u64,
    index_offset: u64,
) -> (Deposit, Keypair) {
    // A fresh keypair for the new validator; returned so the harness can sign for it later.
    let keypair = Keypair::random();
    let proof_of_possession = create_proof_of_possession(&keypair);
    let index = harness.beacon_chain.state.read().deposit_index + index_offset;
    let withdrawal_credentials = Hash256::from_slice(
        &get_withdrawal_credentials(&keypair.pk, harness.spec.bls_withdrawal_prefix_byte)[..],
    );

    let deposit = Deposit {
        // Note: `branch` and `index` will need to be updated once the spec defines their
        // validity.
        branch: vec![],
        index,
        deposit_data: DepositData {
            amount,
            timestamp: 1,
            deposit_input: DepositInput {
                pubkey: keypair.pk.clone(),
                withdrawal_credentials,
                proof_of_possession,
            },
        },
    };

    (deposit, keypair)
}
/// Builds a `VoluntaryExit` that is valid for the given `BeaconChainHarness`.
fn build_exit(harness: &BeaconChainHarness, validator_index: u64) -> VoluntaryExit {
    let epoch = harness
        .beacon_chain
        .state
        .read()
        .current_epoch(&harness.spec);

    let mut exit = VoluntaryExit {
        epoch,
        validator_index,
        signature: Signature::empty_signature(),
    };

    // Sign over the exit's signed-root with the exiting validator's key.
    let message = exit.signed_root();

    exit.signature = harness
        .validator_sign(validator_index as usize, &message[..], epoch, Domain::Exit)
        .expect("Unable to sign VoluntaryExit");

    exit
}
/// Builds an `AttesterSlashing` for some `validator_indices`.
///
/// Signs the message using a `BeaconChainHarness`.
fn build_double_vote_attester_slashing(
    harness: &BeaconChainHarness,
    validator_indices: &[u64],
) -> AttesterSlashing {
    // Closure the builder uses to obtain signatures from the harness's validator keypairs.
    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
        harness
            .validator_sign(validator_index as usize, message, epoch, domain)
            .expect("Unable to sign AttesterSlashing")
    };

    AttesterSlashingBuilder::double_vote(validator_indices, signer)
}
/// Builds a `ProposerSlashing` for some `validator_index`.
///
/// Signs the message using a `BeaconChainHarness`.
fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) -> ProposerSlashing {
    // Closure the builder uses to obtain signatures from the harness's validator keypairs.
    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
        harness
            .validator_sign(validator_index as usize, message, epoch, domain)
            // Fixed copy-paste error: this expect message previously said "AttesterSlashing".
            .expect("Unable to sign ProposerSlashing")
    };

    ProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec)
}

View File

@@ -0,0 +1,132 @@
use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
use types::*;
use yaml_rust::Yaml;
/// Index of a validator in the registry.
pub type ValidatorIndex = u64;
/// A list of validator registry indices.
pub type ValidatorIndices = Vec<u64>;
/// An amount denominated in Gwei.
pub type GweiAmount = u64;
/// (slot_height, deposit_amount)
pub type DepositTuple = (SlotHeight, GweiAmount);
/// (slot_height, validator_index)
pub type ExitTuple = (SlotHeight, ValidatorIndex);
/// (slot_height, validator_index)
pub type ProposerSlashingTuple = (SlotHeight, ValidatorIndex);
/// (slot_height, validator_indices)
pub type AttesterSlashingTuple = (SlotHeight, ValidatorIndices);
/// (slot_height, from, to, amount)
pub type TransferTuple = (SlotHeight, ValidatorIndex, ValidatorIndex, GweiAmount);
/// Defines the execution of a `BeaconStateHarness` across a series of slots.
#[derive(Debug)]
pub struct Config {
    /// Number of initial validators (deposits made prior to genesis).
    pub deposits_for_chain_start: usize,
    /// Number of slots in an epoch; overrides the spec default when present.
    pub slots_per_epoch: Option<u64>,
    /// Number of slots to build before ending execution.
    pub num_slots: u64,
    /// Number of slots that should be skipped due to inactive validator.
    pub skip_slots: Option<Vec<u64>>,
    /// Deposits to be included during execution.
    pub deposits: Option<Vec<DepositTuple>>,
    /// Proposer slashings to be included during execution.
    pub proposer_slashings: Option<Vec<ProposerSlashingTuple>>,
    /// Attester slashings to be included during execution.
    pub attester_slashings: Option<Vec<AttesterSlashingTuple>>,
    /// Exits to be included during execution.
    pub exits: Option<Vec<ExitTuple>>,
    /// Transfers to be included during execution.
    pub transfers: Option<Vec<TransferTuple>>,
}
impl Config {
    /// Load from a YAML document.
    ///
    /// Expects to receive the `config` section of the document.
    ///
    /// # Panics
    ///
    /// Panics if a mandatory field (`deposits_for_chain_start`, `num_slots`) is missing.
    pub fn from_yaml(yaml: &Yaml) -> Self {
        // Pass `yaml` straight through: it is already a `&Yaml`, so the previous `&yaml`
        // double-references were needless borrows (clippy::needless_borrow).
        Self {
            deposits_for_chain_start: as_usize(yaml, "deposits_for_chain_start")
                .expect("Must specify validator count"),
            slots_per_epoch: as_u64(yaml, "slots_per_epoch"),
            num_slots: as_u64(yaml, "num_slots").expect("Must specify `config.num_slots`"),
            skip_slots: as_vec_u64(yaml, "skip_slots"),
            deposits: parse_deposits(yaml),
            proposer_slashings: parse_proposer_slashings(yaml),
            attester_slashings: parse_attester_slashings(yaml),
            exits: parse_exits(yaml),
            transfers: parse_transfers(yaml),
        }
    }
}
/// Parse the `transfers` section of the YAML document.
///
/// Returns `None` if the `transfers` key is absent or not an array; panics if any entry is
/// missing a mandatory field.
fn parse_transfers(yaml: &Yaml) -> Option<Vec<TransferTuple>> {
    let entries = yaml["transfers"].as_vec()?;

    let mut tuples = Vec::with_capacity(entries.len());
    for transfer in entries {
        let slot = as_u64(transfer, "slot").expect("Incomplete transfer (slot)");
        let from = as_u64(transfer, "from").expect("Incomplete transfer (from)");
        let to = as_u64(transfer, "to").expect("Incomplete transfer (to)");
        let amount = as_u64(transfer, "amount").expect("Incomplete transfer (amount)");
        tuples.push((SlotHeight::from(slot), from, to, amount));
    }

    Some(tuples)
}
/// Parse the `exits` section of the YAML document.
///
/// Returns `None` if the `exits` key is absent or not an array; panics if any entry is
/// missing a mandatory field.
fn parse_exits(yaml: &Yaml) -> Option<Vec<ExitTuple>> {
    let mut tuples = vec![];

    for exit in yaml["exits"].as_vec()? {
        let slot = as_u64(exit, "slot").expect("Incomplete exit (slot)");
        let validator_index =
            as_u64(exit, "validator_index").expect("Incomplete exit (validator_index)");

        tuples.push((SlotHeight::from(slot), validator_index));
    }

    Some(tuples)
}
/// Parse the `attester_slashings` section of the YAML document.
///
/// Returns `None` if the `attester_slashings` key is absent or not an array; panics if any
/// entry is missing a mandatory field.
fn parse_attester_slashings(yaml: &Yaml) -> Option<Vec<AttesterSlashingTuple>> {
    let entries = yaml["attester_slashings"].as_vec()?;

    let slashings = entries
        .iter()
        .map(|slashing| {
            let slot = as_u64(slashing, "slot").expect("Incomplete attester_slashing (slot)");
            let validator_indices = as_vec_u64(slashing, "validator_indices")
                .expect("Incomplete attester_slashing (validator_indices)");
            (SlotHeight::from(slot), validator_indices)
        })
        .collect();

    Some(slashings)
}
/// Parse the `proposer_slashings` section of the YAML document.
///
/// Returns `None` if the `proposer_slashings` key is absent or not an array; panics if any
/// entry is missing a mandatory field.
fn parse_proposer_slashings(yaml: &Yaml) -> Option<Vec<ProposerSlashingTuple>> {
    let mut slashings = vec![];

    for slashing in yaml["proposer_slashings"].as_vec()? {
        // Fixed typo: the message previously ended with a stray "(slot)_" underscore.
        let slot = as_u64(slashing, "slot").expect("Incomplete proposer slashing (slot)");
        let validator_index = as_u64(slashing, "validator_index")
            .expect("Incomplete proposer slashing (validator_index)");

        slashings.push((SlotHeight::from(slot), validator_index));
    }

    Some(slashings)
}
/// Parse the `deposits` section of the YAML document.
///
/// Returns `None` if the `deposits` key is absent or not an array; panics if any entry is
/// missing a mandatory field.
fn parse_deposits(yaml: &Yaml) -> Option<Vec<DepositTuple>> {
    let deposits = yaml["deposits"]
        .as_vec()?
        .iter()
        .map(|deposit| {
            let slot = as_u64(deposit, "slot").expect("Incomplete deposit (slot)");
            let amount = as_u64(deposit, "amount").expect("Incomplete deposit (amount)");
            (SlotHeight::from(slot), amount)
        })
        .collect();

    Some(deposits)
}

View File

@@ -0,0 +1,34 @@
use super::state_check::StateCheck;
use super::yaml_helpers::as_usize;
use yaml_rust::Yaml;
/// A series of tests to be carried out upon an `ExecutionResult`, returned from executing a
/// `TestCase`.
#[derive(Debug)]
pub struct Results {
    /// Expected number of skipped slots; checked against the chain-dump length when present.
    pub num_skipped_slots: Option<usize>,
    /// Per-state checks to run against matching checkpoints in the chain dump.
    pub state_checks: Option<Vec<StateCheck>>,
}
impl Results {
    /// Load from a YAML document.
    ///
    /// Expects the `results` section of the YAML document.
    pub fn from_yaml(yaml: &Yaml) -> Self {
        let num_skipped_slots = as_usize(yaml, "num_skipped_slots");
        let state_checks = parse_state_checks(yaml);

        Self {
            num_skipped_slots,
            state_checks,
        }
    }
}
/// Parse the `state_checks` section of the YAML document.
///
/// Returns `None` if the `states` key is absent or not an array.
fn parse_state_checks(yaml: &Yaml) -> Option<Vec<StateCheck>> {
    let state_yamls = yaml["states"].as_vec()?;

    Some(state_yamls.iter().map(StateCheck::from_yaml).collect())
}

View File

@@ -0,0 +1,178 @@
use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
use log::info;
use types::*;
use yaml_rust::Yaml;
/// Index of a validator in the registry.
type ValidatorIndex = u64;
/// A balance denominated in Gwei.
type BalanceGwei = u64;
/// (validator_index, comparison operator (currently only "eq"), expected balance in Gwei)
type BalanceCheckTuple = (ValidatorIndex, String, BalanceGwei);
/// Tests to be conducted upon a `BeaconState` object generated during the execution of a
/// `TestCase`.
#[derive(Debug)]
pub struct StateCheck {
    /// Checked against `beacon_state.slot`, expressed as a height above genesis (genesis == 0).
    pub slot: Slot,
    /// Checked against `beacon_state.validator_registry.len()`.
    pub num_validators: Option<usize>,
    /// A list of validator indices which have been slashed. Must be in ascending order.
    pub slashed_validators: Option<Vec<u64>>,
    /// A list of validator indices which have been fully exited. Must be in ascending order.
    pub exited_validators: Option<Vec<u64>>,
    /// A list of validator indices which have had an exit initiated. Must be in ascending order.
    pub exit_initiated_validators: Option<Vec<u64>>,
    /// A list of balances to check, as `(validator_index, comparison, expected_balance)` tuples.
    pub balances: Option<Vec<BalanceCheckTuple>>,
}
impl StateCheck {
    /// Load from a YAML document.
    ///
    /// Expects the `state_check` section of the YAML document.
    ///
    /// # Panics
    ///
    /// Panics if the mandatory `slot` field is missing.
    pub fn from_yaml(yaml: &Yaml) -> Self {
        // `yaml` is already a `&Yaml`; the previous `&yaml` double-references were needless
        // borrows (clippy::needless_borrow).
        Self {
            slot: Slot::from(as_u64(yaml, "slot").expect("State must specify slot")),
            num_validators: as_usize(yaml, "num_validators"),
            slashed_validators: as_vec_u64(yaml, "slashed_validators"),
            exited_validators: as_vec_u64(yaml, "exited_validators"),
            exit_initiated_validators: as_vec_u64(yaml, "exit_initiated_validators"),
            balances: parse_balances(yaml),
        }
    }

    /// Performs all checks against a `BeaconState`
    ///
    /// # Panics
    ///
    /// Panics with an error message if any test fails.
    pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) {
        let state_epoch = state.slot.epoch(spec.slots_per_epoch);

        info!("Running state check for slot height {}.", self.slot);

        // Check the state slot, offset by the genesis slot so checks are expressed as slot
        // heights (genesis == 0).
        assert_eq!(
            self.slot,
            state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch),
            "State slot is invalid."
        );

        if let Some(num_validators) = self.num_validators {
            assert_eq!(
                state.validator_registry.len(),
                num_validators,
                "State validator count != expected."
            );
            info!("OK: num_validators = {}.", num_validators);
        }

        // Check for slashed validators.
        if let Some(ref slashed_validators) = self.slashed_validators {
            let actually_slashed_validators: Vec<u64> = state
                .validator_registry
                .iter()
                .enumerate()
                .filter_map(|(i, validator)| {
                    if validator.slashed {
                        Some(i as u64)
                    } else {
                        None
                    }
                })
                .collect();
            assert_eq!(
                actually_slashed_validators, *slashed_validators,
                "Slashed validators != expected."
            );
            info!("OK: slashed_validators = {:?}.", slashed_validators);
        }

        // Check for exited validators.
        if let Some(ref exited_validators) = self.exited_validators {
            let actually_exited_validators: Vec<u64> = state
                .validator_registry
                .iter()
                .enumerate()
                .filter_map(|(i, validator)| {
                    if validator.is_exited_at(state_epoch) {
                        Some(i as u64)
                    } else {
                        None
                    }
                })
                .collect();
            assert_eq!(
                actually_exited_validators, *exited_validators,
                "Exited validators != expected."
            );
            info!("OK: exited_validators = {:?}.", exited_validators);
        }

        // Check for validators that have initiated exit.
        if let Some(ref exit_initiated_validators) = self.exit_initiated_validators {
            let actual: Vec<u64> = state
                .validator_registry
                .iter()
                .enumerate()
                .filter_map(|(i, validator)| {
                    if validator.initiated_exit {
                        Some(i as u64)
                    } else {
                        None
                    }
                })
                .collect();
            assert_eq!(
                actual, *exit_initiated_validators,
                "Exit initiated validators != expected."
            );
            info!(
                "OK: exit_initiated_validators = {:?}.",
                exit_initiated_validators
            );
        }

        // Check validator balances.
        if let Some(ref balances) = self.balances {
            for (index, comparison, expected) in balances {
                let actual = *state
                    .validator_balances
                    .get(*index as usize)
                    .expect("Balance check specifies unknown validator");

                let result = match comparison.as_ref() {
                    "eq" => actual == *expected,
                    _ => panic!("Unknown balance comparison (use `eq`)"),
                };

                // `assert!(cond, format!(..))` uses a non-literal panic message (deprecated
                // `non_fmt_panics` pattern); pass the format arguments directly instead.
                assert!(
                    result,
                    "Validator balance for {}: {} !{} {}.",
                    index, actual, comparison, expected
                );
                info!("OK: validator balance for {:?}.", index);
            }
        }
    }
}
/// Parse the `balances` section of the YAML document.
///
/// Returns `None` if the `balances` key is absent or not an array; panics if any entry is
/// missing a mandatory field. (The doc previously claimed this parsed `transfers`.)
fn parse_balances(yaml: &Yaml) -> Option<Vec<BalanceCheckTuple>> {
    let mut tuples = vec![];

    // Renamed the loop variable from the misleading `exit` — these are balance checks.
    for check in yaml["balances"].as_vec()? {
        let validator_index =
            as_u64(check, "validator_index").expect("Incomplete balance check (validator_index)");
        let comparison = check["comparison"]
            .clone()
            .into_string()
            // Fixed copy-paste error: this message previously said "(amount)".
            .expect("Incomplete balance check (comparison)");
        let balance = as_u64(check, "balance").expect("Incomplete balance check (balance)");

        tuples.push((validator_index, comparison, balance));
    }

    Some(tuples)
}

View File

@@ -0,0 +1,19 @@
use yaml_rust::Yaml;
/// Reads `yaml[key]` as a `usize`, returning `None` if the key is absent or not an integer.
pub fn as_usize(yaml: &Yaml, key: &str) -> Option<usize> {
    // `map` replaces the previous `and_then(|n| Some(...))` (clippy::bind_instead_of_map).
    yaml[key].as_i64().map(|n| n as usize)
}
/// Reads `yaml[key]` as a `u64`, returning `None` if the key is absent or not an integer.
pub fn as_u64(yaml: &Yaml, key: &str) -> Option<u64> {
    // `map` replaces the previous `and_then(|n| Some(...))` (clippy::bind_instead_of_map).
    yaml[key].as_i64().map(|n| n as u64)
}
/// Reads `yaml[key]` as a vector of `u64`.
///
/// Returns `None` if the key is absent or not an array; panics if any element is not an
/// integer.
pub fn as_vec_u64(yaml: &Yaml, key: &str) -> Option<Vec<u64>> {
    // `as_vec` borrows the node, avoiding the previous clone of the entire `Yaml` value
    // just to call `into_vec`; `map` replaces `and_then(|v| Some(...))`.
    yaml[key].as_vec().map(|vec| {
        vec.iter()
            .map(|item| item.as_i64().unwrap() as u64)
            .collect()
    })
}

View File

@@ -80,8 +80,8 @@ impl<T: ClientDB, U: SlotClock, F: ForkChoice> BeaconBlockNode for DirectBeaconN
let (block, _state) = self
.beacon_chain
.produce_block(randao_reveal.clone())
.ok_or_else(|| {
BeaconBlockNodeError::RemoteFailure("Did not produce block.".to_string())
.map_err(|e| {
BeaconBlockNodeError::RemoteFailure(format!("Did not produce block: {:?}", e))
})?;
if block.slot == slot {

View File

@@ -9,7 +9,7 @@ use db::ClientDB;
use fork_choice::ForkChoice;
use slot_clock::SlotClock;
use std::sync::Arc;
use types::{PublicKey, Slot};
use types::{Fork, PublicKey, Slot};
/// Connects directly to a borrowed `BeaconChain` and reads attester/proposer duties directly from
/// it.
@@ -40,6 +40,10 @@ impl<T: ClientDB, U: SlotClock, F: ForkChoice> ProducerDutiesReader for DirectDu
Err(_) => Err(ProducerDutiesReaderError::UnknownEpoch),
}
}
fn fork(&self) -> Result<Fork, ProducerDutiesReaderError> {
Ok(self.beacon_chain.state.read().fork.clone())
}
}
impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterDutiesReader for DirectDuties<T, U, F> {

View File

@@ -1,27 +1,16 @@
use attester::Signer as AttesterSigner;
use block_proposer::Signer as BlockProposerSigner;
use std::sync::RwLock;
use types::{Keypair, Signature};
/// A test-only struct used to perform signing for a proposer or attester.
pub struct LocalSigner {
keypair: Keypair,
should_sign: RwLock<bool>,
}
impl LocalSigner {
/// Produce a new TestSigner with signing enabled by default.
pub fn new(keypair: Keypair) -> Self {
Self {
keypair,
should_sign: RwLock::new(true),
}
}
/// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages
/// will be signed.
pub fn enable_signing(&self, enabled: bool) {
*self.should_sign.write().unwrap() = enabled;
Self { keypair }
}
/// Sign some message.

View File

@@ -28,24 +28,28 @@ pub enum AttestationProduceError {
PollError(AttestationPollError),
}
type TestingBlockProducer = BlockProducer<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
LocalSigner,
>;
type TestingAttester = Attester<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
LocalSigner,
>;
/// A `BlockProducer` and `Attester` which sign using a common keypair.
///
/// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for
/// testing that the core proposer and attester logic is functioning. Also for supporting beacon
/// chain tests.
pub struct ValidatorHarness {
pub block_producer: BlockProducer<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
LocalSigner,
>,
pub attester: Attester<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
LocalSigner,
>,
pub block_producer: TestingBlockProducer,
pub attester: TestingAttester,
pub spec: Arc<ChainSpec>,
pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
pub keypair: Keypair,

View File

@@ -29,7 +29,7 @@ fn it_can_produce_past_first_epoch_boundary() {
debug!("Harness built, tests starting..");
let blocks = harness.spec.epoch_length * 2 + 1;
let blocks = harness.spec.slots_per_epoch * 2 + 1;
for i in 0..blocks {
harness.advance_chain_with_block();
@@ -41,6 +41,4 @@ fn it_can_produce_past_first_epoch_boundary() {
let dump = harness.chain_dump().expect("Chain dump failed.");
assert_eq!(dump.len() as u64, blocks + 1); // + 1 for genesis block.
harness.dump_to_file("/tmp/chaindump.json".to_string(), &dump);
}

View File

@@ -134,9 +134,9 @@ mod tests {
let store = BeaconBlockStore::new(db.clone());
let ssz = "definitly not a valid block".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert_eq!(
store.block_at_slot(hash, Slot::from(42_u64)),
Err(BeaconBlockAtSlotError::DBError(
@@ -151,10 +151,10 @@ mod tests {
let store = BeaconBlockStore::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let other_hash = &Hash256::from("another hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
let other_hash = &Hash256::from([0xBB; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert_eq!(
store.block_at_slot(other_hash, Slot::from(42_u64)),
Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash))
@@ -169,18 +169,15 @@ mod tests {
let thread_count = 10;
let write_count = 10;
// We're expecting the product of these numbers to fit in one byte.
assert!(thread_count * write_count <= 255);
let mut handles = vec![];
for t in 0..thread_count {
let wc = write_count;
let bs = bs.clone();
let handle = thread::spawn(move || {
for w in 0..wc {
let key = (t * w) as u8;
let key = t * w;
let val = 42;
bs.put(&[key][..].into(), &vec![val]).unwrap();
bs.put(&Hash256::from_low_u64_le(key), &vec![val]).unwrap();
}
});
handles.push(handle);
@@ -192,9 +189,9 @@ mod tests {
for t in 0..thread_count {
for w in 0..write_count {
let key = (t * w) as u8;
assert!(bs.exists(&[key][..].into()).unwrap());
let val = bs.get(&[key][..].into()).unwrap().unwrap();
let key = t * w;
assert!(bs.exists(&Hash256::from_low_u64_le(key)).unwrap());
let val = bs.get(&Hash256::from_low_u64_le(key)).unwrap().unwrap();
assert_eq!(vec![42], val);
}
}
@@ -208,19 +205,20 @@ mod tests {
// Specify test block parameters.
let hashes = [
Hash256::from(&[0; 32][..]),
Hash256::from(&[1; 32][..]),
Hash256::from(&[2; 32][..]),
Hash256::from(&[3; 32][..]),
Hash256::from(&[4; 32][..]),
Hash256::from([0; 32]),
Hash256::from([1; 32]),
Hash256::from([2; 32]),
Hash256::from([3; 32]),
Hash256::from([4; 32]),
];
let parent_hashes = [
Hash256::from(&[255; 32][..]), // Genesis block.
Hash256::from(&[0; 32][..]),
Hash256::from(&[1; 32][..]),
Hash256::from(&[2; 32][..]),
Hash256::from(&[3; 32][..]),
Hash256::from([255; 32]), // Genesis block.
Hash256::from([0; 32]),
Hash256::from([1; 32]),
Hash256::from([2; 32]),
Hash256::from([3; 32]),
];
let unknown_hash = Hash256::from([101; 32]); // different from all above
let slots: Vec<Slot> = vec![0, 1, 3, 4, 5].iter().map(|x| Slot::new(*x)).collect();
// Generate a vec of random blocks and store them in the DB.
@@ -233,7 +231,7 @@ mod tests {
block.slot = slots[i];
let ssz = ssz_encode(&block);
db.put(DB_COLUMN, &hashes[i], &ssz).unwrap();
db.put(DB_COLUMN, hashes[i].as_bytes(), &ssz).unwrap();
blocks.push(block);
}
@@ -255,11 +253,10 @@ mod tests {
let ssz = bs.block_at_slot(&hashes[4], Slot::new(6)).unwrap();
assert_eq!(ssz, None);
let bad_hash = &Hash256::from("unknown".as_bytes());
let ssz = bs.block_at_slot(bad_hash, Slot::new(2));
let ssz = bs.block_at_slot(&unknown_hash, Slot::new(2));
assert_eq!(
ssz,
Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*bad_hash))
Err(BeaconBlockAtSlotError::UnknownBeaconBlock(unknown_hash))
);
}
}

View File

@@ -2,25 +2,25 @@ macro_rules! impl_crud_for_store {
($store: ident, $db_column: expr) => {
impl<T: ClientDB> $store<T> {
pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> {
self.db.put($db_column, hash, ssz)
self.db.put($db_column, hash.as_bytes(), ssz)
}
pub fn get(&self, hash: &Hash256) -> Result<Option<Vec<u8>>, DBError> {
self.db.get($db_column, hash)
self.db.get($db_column, hash.as_bytes())
}
pub fn exists(&self, hash: &Hash256) -> Result<bool, DBError> {
self.db.exists($db_column, hash)
self.db.exists($db_column, hash.as_bytes())
}
pub fn delete(&self, hash: &Hash256) -> Result<(), DBError> {
self.db.delete($db_column, hash)
self.db.delete($db_column, hash.as_bytes())
}
}
};
}
#[allow(unused_macros)]
#[cfg(test)]
macro_rules! test_crud_for_store {
($store: ident, $db_column: expr) => {
#[test]
@@ -29,10 +29,10 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
store.put(hash, ssz).unwrap();
assert_eq!(db.get(DB_COLUMN, hash).unwrap().unwrap(), ssz);
assert_eq!(db.get(DB_COLUMN, hash.as_bytes()).unwrap().unwrap(), ssz);
}
#[test]
@@ -41,9 +41,9 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert_eq!(store.get(hash).unwrap().unwrap(), ssz);
}
@@ -53,10 +53,10 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let other_hash = &Hash256::from("another hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
let other_hash = &Hash256::from([0xBB; 32]);
db.put(DB_COLUMN, other_hash, ssz).unwrap();
db.put(DB_COLUMN, other_hash.as_bytes(), ssz).unwrap();
assert_eq!(store.get(hash).unwrap(), None);
}
@@ -66,9 +66,9 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert!(store.exists(hash).unwrap());
}
@@ -78,10 +78,10 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let other_hash = &Hash256::from("another hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
let other_hash = &Hash256::from([0xBB; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert!(!store.exists(other_hash).unwrap());
}
@@ -91,13 +91,13 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
assert!(db.exists(DB_COLUMN, hash).unwrap());
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert!(db.exists(DB_COLUMN, hash.as_bytes()).unwrap());
store.delete(hash).unwrap();
assert!(!db.exists(DB_COLUMN, hash).unwrap());
assert!(!db.exists(DB_COLUMN, hash.as_bytes()).unwrap());
}
};
}

View File

@@ -37,7 +37,7 @@ mod tests {
let db = Arc::new(MemoryDB::open());
let store = PoWChainStore::new(db.clone());
let hash = &Hash256::from("some hash".as_bytes()).to_vec();
let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
store.put_block_hash(hash).unwrap();
assert!(db.exists(DB_COLUMN, hash).unwrap());
@@ -48,7 +48,7 @@ mod tests {
let db = Arc::new(MemoryDB::open());
let store = PoWChainStore::new(db.clone());
let hash = &Hash256::from("some hash".as_bytes()).to_vec();
let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
db.put(DB_COLUMN, hash, &[0]).unwrap();
assert!(store.block_hash_exists(hash).unwrap());
@@ -59,8 +59,8 @@ mod tests {
let db = Arc::new(MemoryDB::open());
let store = PoWChainStore::new(db.clone());
let hash = &Hash256::from("some hash".as_bytes()).to_vec();
let other_hash = &Hash256::from("another hash".as_bytes()).to_vec();
let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
let other_hash = &Hash256::from([0xBB; 32]).as_bytes().to_vec();
db.put(DB_COLUMN, hash, &[0]).unwrap();
assert!(!store.block_hash_exists(other_hash).unwrap());