Implement slasher (#1567)

This is an implementation of a slasher that lives inside the BN and can be enabled via `lighthouse bn --slasher`.

Features included in this PR:

- [x] Detection of attester slashing conditions (double votes, surrounds existing, surrounded by existing)
- [x] Integration into Lighthouse's attestation verification flow
- [x] Detection of proposer slashing conditions
- [x] Extraction of attestations from blocks as they are verified
- [x] Compression of chunks
- [x] Configurable history length
- [x] Pruning of old attestations and blocks
- [x] More tests

Future work:

* Focus on a slice of history separate from the most recent N epochs (e.g. epochs `current - K` to `current - M`)
* Run out-of-process
* Ingest attestations from the chain without a resync

The design notes are available here: https://hackmd.io/@sproul/HJSEklmPL
This commit is contained in:
Michael Sproul
2020-11-23 03:43:22 +00:00
parent 59b2247ab8
commit 5828ff1204
44 changed files with 3662 additions and 87 deletions

631
slasher/src/array.rs Normal file
View File

@@ -0,0 +1,631 @@
use crate::metrics::{self, SLASHER_COMPRESSION_RATIO, SLASHER_NUM_CHUNKS_UPDATED};
use crate::{AttesterRecord, AttesterSlashingStatus, Config, Error, SlasherDB};
use flate2::bufread::{ZlibDecoder, ZlibEncoder};
use lmdb::{RwTransaction, Transaction};
use serde_derive::{Deserialize, Serialize};
use std::collections::{btree_map::Entry, BTreeMap, HashSet};
use std::convert::TryFrom;
use std::io::Read;
use std::iter::Extend;
use std::sync::Arc;
use types::{AttesterSlashing, Epoch, EthSpec, IndexedAttestation};
pub const MAX_DISTANCE: u16 = u16::MAX;
/// Terminology:
///
/// Let
/// N = config.history_length
/// C = config.chunk_size
/// K = config.validator_chunk_size
///
/// Then
///
/// `chunk_index` in [0..N/C) is the column of a chunk in the 2D matrix
/// `validator_chunk_index` in [0..N/K) is the row of a chunk in the 2D matrix
/// `chunk_offset` in [0..C) is the horizontal (epoch) offset of a value within a 2D chunk
/// `validator_offset` in [0..K) is the vertical (validator) offset of a value within a 2D chunk
#[derive(Debug, Serialize, Deserialize)]
pub struct Chunk {
    // Flattened 2D array of `u16` target distances, indexed via `Config::cell_index`.
    data: Vec<u16>,
}
impl Chunk {
pub fn get_target(
&self,
validator_index: u64,
epoch: Epoch,
config: &Config,
) -> Result<Epoch, Error> {
assert_eq!(
self.data.len(),
config.chunk_size * config.validator_chunk_size
);
let validator_offset = config.validator_offset(validator_index);
let chunk_offset = config.chunk_offset(epoch);
let cell_index = config.cell_index(validator_offset, chunk_offset);
self.data
.get(cell_index)
.map(|distance| epoch + u64::from(*distance))
.ok_or_else(|| Error::ChunkIndexOutOfBounds(cell_index))
}
pub fn set_target(
&mut self,
validator_index: u64,
epoch: Epoch,
target_epoch: Epoch,
config: &Config,
) -> Result<(), Error> {
let distance = Self::epoch_distance(target_epoch, epoch)?;
self.set_raw_distance(validator_index, epoch, distance, config)
}
pub fn set_raw_distance(
&mut self,
validator_index: u64,
epoch: Epoch,
target_distance: u16,
config: &Config,
) -> Result<(), Error> {
let validator_offset = config.validator_offset(validator_index);
let chunk_offset = config.chunk_offset(epoch);
let cell_index = config.cell_index(validator_offset, chunk_offset);
let cell = self
.data
.get_mut(cell_index)
.ok_or_else(|| Error::ChunkIndexOutOfBounds(cell_index))?;
*cell = target_distance;
Ok(())
}
/// Compute the distance (difference) between two epochs.
///
/// Error if the distance is greater than or equal to `MAX_DISTANCE`.
pub fn epoch_distance(epoch: Epoch, base_epoch: Epoch) -> Result<u16, Error> {
let distance_u64 = epoch
.as_u64()
.checked_sub(base_epoch.as_u64())
.ok_or(Error::DistanceCalculationOverflow)?;
let distance = u16::try_from(distance_u64).map_err(|_| Error::DistanceTooLarge)?;
if distance < MAX_DISTANCE {
Ok(distance)
} else {
Err(Error::DistanceTooLarge)
}
}
}
/// Chunk of the min-target array, used to detect attestations that surround
/// an existing attestation (see `check_slashable` for `MinTargetChunk`).
#[derive(Debug, Serialize, Deserialize)]
#[serde(transparent)]
pub struct MinTargetChunk {
    chunk: Chunk,
}
/// Chunk of the max-target array, used to detect attestations that are
/// surrounded by an existing attestation (see `check_slashable` for `MaxTargetChunk`).
#[derive(Debug, Serialize, Deserialize)]
#[serde(transparent)]
pub struct MaxTargetChunk {
    chunk: Chunk,
}
/// Behaviour common to both target arrays.
///
/// Implemented by `MinTargetChunk` and `MaxTargetChunk`, which share an on-disk
/// representation (a zlib-compressed, bincode-encoded `Chunk`) but differ in
/// update direction and in the slashing condition they detect.
pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwned {
    /// Short name used as a metrics label ("min" or "max").
    fn name() -> &'static str;
    /// A chunk with every cell set to this array's neutral element.
    fn empty(config: &Config) -> Self;
    /// Mutable access to the underlying 2D chunk.
    fn chunk(&mut self) -> &mut Chunk;
    /// Distance value meaning "no relevant attestation known" for this array.
    fn neutral_element() -> u16;
    /// Check whether `attestation` is slashable against the targets in this chunk.
    fn check_slashable<E: EthSpec>(
        &self,
        db: &SlasherDB<E>,
        txn: &mut RwTransaction<'_>,
        validator_index: u64,
        attestation: &IndexedAttestation<E>,
        config: &Config,
    ) -> Result<AttesterSlashingStatus<E>, Error>;
    /// Apply `new_target_epoch` to this chunk starting from `start_epoch`.
    ///
    /// Returns `Ok(true)` if the update needs to continue into the adjacent chunk.
    fn update(
        &mut self,
        chunk_index: usize,
        validator_index: u64,
        start_epoch: Epoch,
        new_target_epoch: Epoch,
        current_epoch: Epoch,
        config: &Config,
    ) -> Result<bool, Error>;
    /// First epoch to update for an attestation with the given source epoch,
    /// or `None` if no update is required at all.
    fn first_start_epoch(
        source_epoch: Epoch,
        current_epoch: Epoch,
        config: &Config,
    ) -> Option<Epoch>;
    /// Epoch at which the update resumes in the next chunk to be processed.
    fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch;
    /// The LMDB database storing chunks of this type.
    fn select_db<E: EthSpec>(db: &SlasherDB<E>) -> lmdb::Database;
    /// Load the chunk at the given coordinates from disk, or `None` if absent.
    fn load<E: EthSpec>(
        db: &SlasherDB<E>,
        txn: &mut RwTransaction<'_>,
        validator_chunk_index: usize,
        chunk_index: usize,
        config: &Config,
    ) -> Result<Option<Self>, Error> {
        let disk_key = config.disk_key(validator_chunk_index, chunk_index);
        let chunk_bytes = match txn.get(Self::select_db(db), &disk_key.to_be_bytes()) {
            Ok(chunk_bytes) => chunk_bytes,
            // A missing chunk is not an error: it simply hasn't been written yet.
            Err(lmdb::Error::NotFound) => return Ok(None),
            Err(e) => return Err(e.into()),
        };
        // Chunks are stored zlib-compressed; decompress while deserializing.
        let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes))?;
        Ok(Some(chunk))
    }
    /// Serialize, compress and store this chunk, recording the compression ratio metric.
    fn store<E: EthSpec>(
        &self,
        db: &SlasherDB<E>,
        txn: &mut RwTransaction<'_>,
        validator_chunk_index: usize,
        chunk_index: usize,
        config: &Config,
    ) -> Result<(), Error> {
        let disk_key = config.disk_key(validator_chunk_index, chunk_index);
        let value = bincode::serialize(self)?;
        let mut encoder = ZlibEncoder::new(&value[..], flate2::Compression::default());
        let mut compressed_value = vec![];
        encoder.read_to_end(&mut compressed_value)?;
        // Track how well the chunk compresses (uncompressed / compressed size).
        let compression_ratio = value.len() as f64 / compressed_value.len() as f64;
        metrics::set_float_gauge(&SLASHER_COMPRESSION_RATIO, compression_ratio);
        txn.put(
            Self::select_db(db),
            &disk_key.to_be_bytes(),
            &compressed_value,
            SlasherDB::<E>::write_flags(),
        )?;
        Ok(())
    }
}
impl TargetArrayChunk for MinTargetChunk {
    fn name() -> &'static str {
        "min"
    }
    /// A chunk where every cell holds `MAX_DISTANCE`, i.e. no constraining target.
    fn empty(config: &Config) -> Self {
        MinTargetChunk {
            chunk: Chunk {
                data: vec![
                    Self::neutral_element();
                    config.chunk_size * config.validator_chunk_size
                ],
            },
        }
    }
    fn neutral_element() -> u16 {
        MAX_DISTANCE
    }
    fn chunk(&mut self) -> &mut Chunk {
        &mut self.chunk
    }
    /// An attestation surrounds an existing one if its target exceeds the minimum
    /// target recorded for its source epoch.
    fn check_slashable<E: EthSpec>(
        &self,
        db: &SlasherDB<E>,
        txn: &mut RwTransaction<'_>,
        validator_index: u64,
        attestation: &IndexedAttestation<E>,
        config: &Config,
    ) -> Result<AttesterSlashingStatus<E>, Error> {
        let min_target =
            self.chunk
                .get_target(validator_index, attestation.data.source.epoch, config)?;
        if attestation.data.target.epoch > min_target {
            let existing_attestation =
                db.get_attestation_for_validator(txn, validator_index, min_target)?;
            // Only a strictly smaller source constitutes a surround; otherwise the
            // pair was already handled as a double vote.
            if attestation.data.source.epoch < existing_attestation.data.source.epoch {
                Ok(AttesterSlashingStatus::SurroundsExisting(Box::new(
                    existing_attestation,
                )))
            } else {
                Ok(AttesterSlashingStatus::AlreadyDoubleVoted)
            }
        } else {
            Ok(AttesterSlashingStatus::NotSlashable)
        }
    }
    /// Walk backwards from `start_epoch` within this chunk, lowering each stored
    /// target to `new_target_epoch` until a cell already holds a smaller-or-equal
    /// target, or the epoch leaves the history window.
    fn update(
        &mut self,
        chunk_index: usize,
        validator_index: u64,
        start_epoch: Epoch,
        new_target_epoch: Epoch,
        current_epoch: Epoch,
        config: &Config,
    ) -> Result<bool, Error> {
        // Lower bound of the history window retained by the slasher.
        let min_epoch = Epoch::from(
            current_epoch
                .as_usize()
                .saturating_sub(config.history_length - 1),
        );
        let mut epoch = start_epoch;
        while config.chunk_index(epoch) == chunk_index && epoch >= min_epoch {
            if new_target_epoch < self.chunk.get_target(validator_index, epoch, config)? {
                self.chunk
                    .set_target(validator_index, epoch, new_target_epoch, config)?;
            } else {
                // We can stop.
                return Ok(false);
            }
            epoch -= 1;
        }
        // Continue into the previous chunk only while still inside the history window.
        Ok(epoch >= min_epoch)
    }
    /// Updates proceed from `source_epoch - 1` downwards; no update is needed if
    /// the source epoch lies outside the history window.
    fn first_start_epoch(
        source_epoch: Epoch,
        current_epoch: Epoch,
        config: &Config,
    ) -> Option<Epoch> {
        if source_epoch > current_epoch - config.history_length as u64 {
            assert_ne!(source_epoch, 0);
            Some(source_epoch - 1)
        } else {
            None
        }
    }
    // Move to last epoch of previous chunk
    fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch {
        let chunk_size = config.chunk_size as u64;
        start_epoch / chunk_size * chunk_size - 1
    }
    fn select_db<E: EthSpec>(db: &SlasherDB<E>) -> lmdb::Database {
        db.min_targets_db
    }
}
impl TargetArrayChunk for MaxTargetChunk {
    fn name() -> &'static str {
        "max"
    }
    /// A chunk where every cell holds 0, i.e. no constraining target.
    fn empty(config: &Config) -> Self {
        MaxTargetChunk {
            chunk: Chunk {
                data: vec![
                    Self::neutral_element();
                    config.chunk_size * config.validator_chunk_size
                ],
            },
        }
    }
    fn neutral_element() -> u16 {
        0
    }
    fn chunk(&mut self) -> &mut Chunk {
        &mut self.chunk
    }
    /// An attestation is surrounded by an existing one if its target is less than
    /// the maximum target recorded for its source epoch.
    fn check_slashable<E: EthSpec>(
        &self,
        db: &SlasherDB<E>,
        txn: &mut RwTransaction<'_>,
        validator_index: u64,
        attestation: &IndexedAttestation<E>,
        config: &Config,
    ) -> Result<AttesterSlashingStatus<E>, Error> {
        let max_target =
            self.chunk
                .get_target(validator_index, attestation.data.source.epoch, config)?;
        if attestation.data.target.epoch < max_target {
            let existing_attestation =
                db.get_attestation_for_validator(txn, validator_index, max_target)?;
            // Only a strictly smaller existing source constitutes a surround; otherwise
            // the pair was already handled as a double vote.
            if existing_attestation.data.source.epoch < attestation.data.source.epoch {
                Ok(AttesterSlashingStatus::SurroundedByExisting(Box::new(
                    existing_attestation,
                )))
            } else {
                Ok(AttesterSlashingStatus::AlreadyDoubleVoted)
            }
        } else {
            Ok(AttesterSlashingStatus::NotSlashable)
        }
    }
    /// Walk forwards from `start_epoch` within this chunk, raising each stored
    /// target to `new_target_epoch` until a cell already holds a greater-or-equal
    /// target, or the epoch passes `current_epoch`.
    fn update(
        &mut self,
        chunk_index: usize,
        validator_index: u64,
        start_epoch: Epoch,
        new_target_epoch: Epoch,
        current_epoch: Epoch,
        config: &Config,
    ) -> Result<bool, Error> {
        let mut epoch = start_epoch;
        while config.chunk_index(epoch) == chunk_index && epoch <= current_epoch {
            if new_target_epoch > self.chunk.get_target(validator_index, epoch, config)? {
                self.chunk
                    .set_target(validator_index, epoch, new_target_epoch, config)?;
            } else {
                // We can stop.
                return Ok(false);
            }
            epoch += 1;
        }
        // If the epoch to update now lies beyond the current chunk and is less than
        // or equal to the current epoch, then continue to the next chunk to update it.
        Ok(epoch <= current_epoch)
    }
    /// Updates proceed from `source_epoch + 1` upwards; no update is needed if the
    /// source epoch is not strictly before the current epoch.
    fn first_start_epoch(
        source_epoch: Epoch,
        current_epoch: Epoch,
        _config: &Config,
    ) -> Option<Epoch> {
        if source_epoch < current_epoch {
            Some(source_epoch + 1)
        } else {
            None
        }
    }
    // Move to first epoch of next chunk
    fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch {
        let chunk_size = config.chunk_size as u64;
        (start_epoch / chunk_size + 1) * chunk_size
    }
    fn select_db<E: EthSpec>(db: &SlasherDB<E>) -> lmdb::Database {
        db.max_targets_db
    }
}
/// Fetch the chunk at `(validator_chunk_index, chunk_index)` for mutation.
///
/// Chunks already present in the `updated_chunks` cache are returned directly;
/// otherwise the chunk is loaded from disk (or freshly initialised if absent)
/// and inserted into the cache before a mutable reference is handed back.
pub fn get_chunk_for_update<'a, E: EthSpec, T: TargetArrayChunk>(
    db: &SlasherDB<E>,
    txn: &mut RwTransaction<'_>,
    updated_chunks: &'a mut BTreeMap<usize, T>,
    validator_chunk_index: usize,
    chunk_index: usize,
    config: &Config,
) -> Result<&'a mut T, Error> {
    match updated_chunks.entry(chunk_index) {
        Entry::Occupied(occupied) => Ok(occupied.into_mut()),
        Entry::Vacant(vacant) => {
            let chunk = T::load(db, txn, validator_chunk_index, chunk_index, config)?
                .unwrap_or_else(|| T::empty(config));
            Ok(vacant.insert(chunk))
        }
    }
}
/// Check one attestation for one validator against target array `T`, updating the array.
///
/// First checks the chunk containing the attestation's source epoch for a slashing
/// against existing data; if none is found, propagates the new target epoch through
/// successive chunks (via `T::update`) until no further changes are required.
#[allow(clippy::too_many_arguments)]
pub fn apply_attestation_for_validator<E: EthSpec, T: TargetArrayChunk>(
    db: &SlasherDB<E>,
    txn: &mut RwTransaction<'_>,
    updated_chunks: &mut BTreeMap<usize, T>,
    validator_chunk_index: usize,
    validator_index: u64,
    attestation: &IndexedAttestation<E>,
    current_epoch: Epoch,
    config: &Config,
) -> Result<AttesterSlashingStatus<E>, Error> {
    // Slashing check happens against the chunk holding the attestation's source epoch.
    let mut chunk_index = config.chunk_index(attestation.data.source.epoch);
    let mut current_chunk = get_chunk_for_update(
        db,
        txn,
        updated_chunks,
        validator_chunk_index,
        chunk_index,
        config,
    )?;
    let slashing_status =
        current_chunk.check_slashable(db, txn, validator_index, attestation, config)?;
    if slashing_status != AttesterSlashingStatus::NotSlashable {
        return Ok(slashing_status);
    }
    // If `T` says no update is required for this attestation, we're done.
    let mut start_epoch = if let Some(start_epoch) =
        T::first_start_epoch(attestation.data.source.epoch, current_epoch, config)
    {
        start_epoch
    } else {
        return Ok(slashing_status);
    };
    // Propagate the new target through chunks until `update` reports completion.
    loop {
        chunk_index = config.chunk_index(start_epoch);
        current_chunk = get_chunk_for_update(
            db,
            txn,
            updated_chunks,
            validator_chunk_index,
            chunk_index,
            config,
        )?;
        let keep_going = current_chunk.update(
            chunk_index,
            validator_index,
            start_epoch,
            attestation.data.target.epoch,
            current_epoch,
            config,
        )?;
        if !keep_going {
            break;
        }
        start_epoch = T::next_start_epoch(start_epoch, config);
    }
    Ok(AttesterSlashingStatus::NotSlashable)
}
/// Process a batch of attestations for one row of validators, returning any
/// slashings detected against the min- and max-target arrays.
pub fn update<E: EthSpec>(
    db: &SlasherDB<E>,
    txn: &mut RwTransaction<'_>,
    validator_chunk_index: usize,
    batch: Vec<Arc<(IndexedAttestation<E>, AttesterRecord)>>,
    current_epoch: Epoch,
    config: &Config,
) -> Result<HashSet<AttesterSlashing<E>>, Error> {
    // Split the batch up into horizontal segments: map each chunk index (in the
    // range `0..self.config.chunk_size`) to the attestations for that chunk.
    let mut chunk_attestations: BTreeMap<_, Vec<_>> = BTreeMap::new();
    for attestation in batch {
        let chunk_index = config.chunk_index(attestation.0.data.source.epoch);
        chunk_attestations
            .entry(chunk_index)
            .or_default()
            .push(attestation);
    }
    // Check and update the min-target array first, then the max-target array.
    let mut slashings = update_array::<_, MinTargetChunk>(
        db,
        txn,
        validator_chunk_index,
        &chunk_attestations,
        current_epoch,
        config,
    )?;
    slashings.extend(update_array::<_, MaxTargetChunk>(
        db,
        txn,
        validator_chunk_index,
        &chunk_attestations,
        current_epoch,
        config,
    )?);
    // Update all current epochs.
    for validator_index in config.validator_indices_in_chunk(validator_chunk_index) {
        db.update_current_epoch_for_validator(validator_index, current_epoch, txn)?;
    }
    Ok(slashings)
}
/// Reset stale cells for `validator_index` when its current epoch advances.
///
/// Writes the neutral element into every cell from the validator's previously
/// recorded current epoch up to `current_epoch` (inclusive), implementing the
/// wrap-around semantics of the circular target arrays.
pub fn epoch_update_for_validator<E: EthSpec, T: TargetArrayChunk>(
    db: &SlasherDB<E>,
    txn: &mut RwTransaction<'_>,
    updated_chunks: &mut BTreeMap<usize, T>,
    validator_chunk_index: usize,
    validator_index: u64,
    current_epoch: Epoch,
    config: &Config,
) -> Result<(), Error> {
    // Nothing to do for a validator we have never recorded an epoch for.
    let previous_current_epoch =
        if let Some(epoch) = db.get_current_epoch_for_validator(validator_index, txn)? {
            epoch
        } else {
            return Ok(());
        };
    let mut epoch = previous_current_epoch;
    while epoch <= current_epoch {
        let chunk_index = config.chunk_index(epoch);
        let current_chunk = get_chunk_for_update(
            db,
            txn,
            updated_chunks,
            validator_chunk_index,
            chunk_index,
            config,
        )?;
        // Neutralise every cell of this chunk that falls in the update range.
        while config.chunk_index(epoch) == chunk_index && epoch <= current_epoch {
            current_chunk.chunk().set_raw_distance(
                validator_index,
                epoch,
                T::neutral_element(),
                config,
            )?;
            epoch += 1;
        }
    }
    Ok(())
}
/// Apply a pre-grouped batch of attestations to one target array (`T`) for the
/// row of validators at `validator_chunk_index`, returning detected slashings.
#[allow(clippy::type_complexity)]
pub fn update_array<E: EthSpec, T: TargetArrayChunk>(
    db: &SlasherDB<E>,
    txn: &mut RwTransaction<'_>,
    validator_chunk_index: usize,
    chunk_attestations: &BTreeMap<usize, Vec<Arc<(IndexedAttestation<E>, AttesterRecord)>>>,
    current_epoch: Epoch,
    config: &Config,
) -> Result<HashSet<AttesterSlashing<E>>, Error> {
    let mut slashings = HashSet::new();
    // Map from chunk index to updated chunk at that index.
    let mut updated_chunks = BTreeMap::new();
    // Update the arrays for the change of current epoch.
    for validator_index in config.validator_indices_in_chunk(validator_chunk_index) {
        epoch_update_for_validator(
            db,
            txn,
            &mut updated_chunks,
            validator_chunk_index,
            validator_index,
            current_epoch,
            config,
        )?;
    }
    // Apply each attestation for every attesting validator that belongs to this row.
    for attestations in chunk_attestations.values() {
        for attestation in attestations {
            for validator_index in
                config.attesting_validators_in_chunk(&attestation.0, validator_chunk_index)
            {
                let slashing_status = apply_attestation_for_validator::<E, T>(
                    db,
                    txn,
                    &mut updated_chunks,
                    validator_chunk_index,
                    validator_index,
                    &attestation.0,
                    current_epoch,
                    config,
                )?;
                if let Some(slashing) = slashing_status.into_slashing(&attestation.0) {
                    slashings.insert(slashing);
                }
            }
        }
    }
    // Store chunks on disk.
    metrics::inc_counter_vec_by(
        &SLASHER_NUM_CHUNKS_UPDATED,
        &[T::name()],
        updated_chunks.len() as i64,
    );
    for (chunk_index, chunk) in updated_chunks {
        chunk.store(db, txn, validator_chunk_index, chunk_index, config)?;
    }
    Ok(slashings)
}

View File

@@ -0,0 +1,93 @@
use crate::{AttesterRecord, Config};
use parking_lot::Mutex;
use std::collections::BTreeSet;
use std::sync::Arc;
use types::{EthSpec, IndexedAttestation};
/// Staging area for attestations received from the network.
///
/// To be added to the database in batches, for efficiency and to prevent data races.
#[derive(Debug, Default)]
pub struct AttestationQueue<E: EthSpec> {
    /// All attestations (unique) for storage on disk.
    pub queue: Mutex<AttestationBatch<E>>,
}
/// Attestations grouped by validator index range.
#[derive(Debug)]
pub struct GroupedAttestations<E: EthSpec> {
    /// Subqueues indexed by validator chunk index, as produced by
    /// `AttestationBatch::group_by_validator_index`.
    pub subqueues: Vec<AttestationBatch<E>>,
}
/// A queue of attestations for a range of validator indices.
#[derive(Debug, Default)]
pub struct AttestationBatch<E: EthSpec> {
    /// Attestations paired with their pre-computed `AttesterRecord` hashes,
    /// shared via `Arc` so one attestation can appear in multiple subqueues.
    pub attestations: Vec<Arc<(IndexedAttestation<E>, AttesterRecord)>>,
}
impl<E: EthSpec> AttestationBatch<E> {
    /// Number of attestations in the batch.
    pub fn len(&self) -> usize {
        self.attestations.len()
    }
    pub fn is_empty(&self) -> bool {
        self.attestations.is_empty()
    }
    /// Group the attestations by validator index.
    ///
    /// Each attestation is pushed (via a cheap `Arc` clone) into the subqueue of
    /// every validator chunk that one of its attesting indices belongs to.
    pub fn group_by_validator_index(self, config: &Config) -> GroupedAttestations<E> {
        let mut grouped_attestations = GroupedAttestations { subqueues: vec![] };
        for attestation in self.attestations {
            // Distinct subqueue (validator chunk) ids touched by this attestation.
            let subqueue_ids = attestation
                .0
                .attesting_indices
                .iter()
                .map(|validator_index| config.validator_chunk_index(*validator_index))
                .collect::<BTreeSet<_>>();
            // Grow the subqueue list so that the largest id is addressable.
            if let Some(max_subqueue_id) = subqueue_ids.iter().next_back() {
                if *max_subqueue_id >= grouped_attestations.subqueues.len() {
                    grouped_attestations
                        .subqueues
                        .resize_with(max_subqueue_id + 1, AttestationBatch::default);
                }
            }
            for subqueue_id in subqueue_ids {
                grouped_attestations.subqueues[subqueue_id]
                    .attestations
                    .push(attestation.clone());
            }
        }
        grouped_attestations
    }
}
impl<E: EthSpec> AttestationQueue<E> {
    /// Add an attestation to the queue, pre-computing its `AttesterRecord` hashes.
    pub fn queue(&self, attestation: IndexedAttestation<E>) {
        let attester_record = AttesterRecord::from(attestation.clone());
        let entry = Arc::new((attestation, attester_record));
        self.queue.lock().attestations.push(entry);
    }
    /// Take the entire queued batch, leaving an empty queue behind.
    pub fn dequeue(&self) -> AttestationBatch<E> {
        let mut guard = self.queue.lock();
        std::mem::take(&mut *guard)
    }
    /// Put a batch back onto the queue (e.g. after a failed processing attempt).
    pub fn requeue(&self, batch: AttestationBatch<E>) {
        let mut guard = self.queue.lock();
        guard.attestations.extend(batch.attestations);
    }
    /// Number of attestations currently queued.
    pub fn len(&self) -> usize {
        let guard = self.queue.lock();
        guard.len()
    }
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

View File

@@ -0,0 +1,57 @@
use ssz_derive::{Decode, Encode};
use tree_hash::TreeHash as _;
use tree_hash_derive::TreeHash;
use types::{AggregateSignature, EthSpec, Hash256, IndexedAttestation, VariableList};
/// Compact summary of an attestation, stored per `(validator, target_epoch)`.
#[derive(Debug, Clone, Copy, Encode, Decode)]
pub struct AttesterRecord {
    /// The hash of the attestation data, for checking double-voting.
    pub attestation_data_hash: Hash256,
    /// The hash of the indexed attestation, so it can be loaded.
    pub indexed_attestation_hash: Hash256,
}
/// Mirror of `IndexedAttestation` with `data` replaced by its tree hash root.
///
/// Hashing this header yields the same tree hash root as hashing the full
/// indexed attestation (checked by the `fast_hash` test), while avoiding
/// re-hashing the attestation data.
#[derive(Debug, Clone, Encode, Decode, TreeHash)]
struct IndexedAttestationHeader<T: EthSpec> {
    pub attesting_indices: VariableList<u64, T::MaxValidatorsPerCommittee>,
    pub data_root: Hash256,
    pub signature: AggregateSignature,
}
impl<T: EthSpec> From<IndexedAttestation<T>> for AttesterRecord {
    /// Compute the attestation data hash and the (fast) indexed attestation hash.
    fn from(indexed_attestation: IndexedAttestation<T>) -> AttesterRecord {
        // Hash the attestation data once, then reuse that root inside the header so
        // the full data doesn't need to be hashed a second time.
        let data_root = indexed_attestation.data.tree_hash_root();
        let header = IndexedAttestationHeader::<T> {
            attesting_indices: indexed_attestation.attesting_indices,
            data_root,
            signature: indexed_attestation.signature,
        };
        AttesterRecord {
            attestation_data_hash: data_root,
            indexed_attestation_hash: header.tree_hash_root(),
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::test_utils::indexed_att;
    // Check that the "fast" hash computed via `IndexedAttestationHeader` matches
    // the tree hash root of the full indexed attestation.
    #[test]
    fn fast_hash() {
        let data = vec![
            indexed_att(vec![], 0, 0, 0),
            indexed_att(vec![1, 2, 3], 12, 14, 1),
            indexed_att(vec![4], 0, 5, u64::MAX),
        ];
        for att in data {
            assert_eq!(
                att.tree_hash_root(),
                AttesterRecord::from(att).indexed_attestation_hash
            );
        }
    }
}

View File

@@ -0,0 +1,26 @@
use parking_lot::Mutex;
use types::SignedBeaconBlockHeader;
/// Staging area for beacon block headers awaiting slasher processing.
#[derive(Debug, Default)]
pub struct BlockQueue {
    /// Queued headers; pushed individually and drained in batches by `dequeue`.
    blocks: Mutex<Vec<SignedBeaconBlockHeader>>,
}
impl BlockQueue {
    /// Add a block header to the queue.
    pub fn queue(&self, block_header: SignedBeaconBlockHeader) {
        self.blocks.lock().push(block_header)
    }
    /// Take all queued block headers, leaving the queue empty.
    pub fn dequeue(&self) -> Vec<SignedBeaconBlockHeader> {
        let mut blocks = self.blocks.lock();
        // `mem::take` is the idiomatic form of `mem::replace(_, vec![])`.
        std::mem::take(&mut *blocks)
    }
    /// Number of headers currently queued.
    pub fn len(&self) -> usize {
        self.blocks.lock().len()
    }
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

111
slasher/src/config.rs Normal file
View File

@@ -0,0 +1,111 @@
use crate::Error;
use serde_derive::{Deserialize, Serialize};
use std::path::PathBuf;
use types::{Epoch, EthSpec, IndexedAttestation};
/// Default epoch width of each chunk (C in the array terminology).
pub const DEFAULT_CHUNK_SIZE: usize = 16;
/// Default number of validators per chunk row (K in the array terminology).
pub const DEFAULT_VALIDATOR_CHUNK_SIZE: usize = 256;
/// Default number of epochs of history to keep (N in the array terminology).
pub const DEFAULT_HISTORY_LENGTH: usize = 4096;
/// Default update frequency in seconds.
pub const DEFAULT_UPDATE_PERIOD: u64 = 12;
/// Default maximum LMDB database size in gigabytes.
pub const DEFAULT_MAX_DB_SIZE: usize = 256;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// Directory in which the LMDB database is stored.
    pub database_path: PathBuf,
    /// Number of epochs per chunk.
    pub chunk_size: usize,
    /// Number of validators per chunk.
    pub validator_chunk_size: usize,
    /// Number of epochs of history to keep.
    pub history_length: usize,
    /// Update frequency in seconds.
    pub update_period: u64,
    /// Maximum size of the LMDB database in gigabytes.
    pub max_db_size_gbs: usize,
}
impl Config {
pub fn new(database_path: PathBuf) -> Self {
Self {
database_path,
chunk_size: DEFAULT_CHUNK_SIZE,
validator_chunk_size: DEFAULT_VALIDATOR_CHUNK_SIZE,
history_length: DEFAULT_HISTORY_LENGTH,
update_period: DEFAULT_UPDATE_PERIOD,
max_db_size_gbs: DEFAULT_MAX_DB_SIZE,
}
}
pub fn validate(&self) -> Result<(), Error> {
if self.chunk_size == 0
|| self.validator_chunk_size == 0
|| self.history_length == 0
|| self.max_db_size_gbs == 0
{
Err(Error::ConfigInvalidZeroParameter {
config: self.clone(),
})
} else if self.history_length % self.chunk_size != 0 {
Err(Error::ConfigInvalidChunkSize {
chunk_size: self.chunk_size,
history_length: self.history_length,
})
} else {
Ok(())
}
}
pub fn is_compatible(&self, other: &Config) -> bool {
self.chunk_size == other.chunk_size
&& self.validator_chunk_size == other.validator_chunk_size
&& self.history_length == other.history_length
}
pub fn chunk_index(&self, epoch: Epoch) -> usize {
(epoch.as_usize() % self.history_length) / self.chunk_size
}
pub fn validator_chunk_index(&self, validator_index: u64) -> usize {
validator_index as usize / self.validator_chunk_size
}
pub fn chunk_offset(&self, epoch: Epoch) -> usize {
epoch.as_usize() % self.chunk_size
}
pub fn validator_offset(&self, validator_index: u64) -> usize {
validator_index as usize % self.validator_chunk_size
}
/// Map the validator and epoch chunk indexes into a single value for use as a database key.
pub fn disk_key(&self, validator_chunk_index: usize, chunk_index: usize) -> usize {
let width = self.history_length / self.chunk_size;
validator_chunk_index * width + chunk_index
}
/// Map the validator and epoch offsets into an index for `Chunk::data`.
pub fn cell_index(&self, validator_offset: usize, chunk_offset: usize) -> usize {
validator_offset * self.chunk_size + chunk_offset
}
/// Return an iterator over all the validator indices in a validator chunk.
pub fn validator_indices_in_chunk(
&self,
validator_chunk_index: usize,
) -> impl Iterator<Item = u64> {
(validator_chunk_index * self.validator_chunk_size
..(validator_chunk_index + 1) * self.validator_chunk_size)
.map(|index| index as u64)
}
/// Iterate over the attesting indices which belong to the `validator_chunk_index` chunk.
pub fn attesting_validators_in_chunk<'a, E: EthSpec>(
&'a self,
attestation: &'a IndexedAttestation<E>,
validator_chunk_index: usize,
) -> impl Iterator<Item = u64> + 'a {
attestation
.attesting_indices
.iter()
.filter(move |v| self.validator_chunk_index(**v) == validator_chunk_index)
.copied()
}
}

535
slasher/src/database.rs Normal file
View File

@@ -0,0 +1,535 @@
use crate::{
utils::TxnOptional, AttesterRecord, AttesterSlashingStatus, Config, Error,
ProposerSlashingStatus,
};
use byteorder::{BigEndian, ByteOrder};
use lmdb::{Cursor, Database, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags};
use ssz::{Decode, Encode};
use std::collections::HashSet;
use std::marker::PhantomData;
use std::sync::Arc;
use types::{
Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, Slot,
};
/// Current database schema version, to check compatibility of on-disk DB with software.
const CURRENT_SCHEMA_VERSION: u64 = 0;
/// Metadata about the slashing database itself.
const METADATA_DB: &str = "metadata";
/// Map from `(target_epoch, validator_index)` to `AttesterRecord`.
const ATTESTERS_DB: &str = "attesters";
/// Map from `indexed_attestation_hash` to `IndexedAttestation`.
const INDEXED_ATTESTATION_DB: &str = "indexed_attestations";
/// Table of minimum targets for every source epoch within range.
const MIN_TARGETS_DB: &str = "min_targets";
/// Table of maximum targets for every source epoch within range.
const MAX_TARGETS_DB: &str = "max_targets";
/// Map from `validator_index` to the `current_epoch` for that validator.
///
/// Used to implement wrap-around semantics for the min and max target arrays.
const CURRENT_EPOCHS_DB: &str = "current_epochs";
/// Map from `(slot, validator_index)` to `SignedBeaconBlockHeader`.
const PROPOSERS_DB: &str = "proposers";
/// The number of DBs for LMDB to use (equal to the number of DBs defined above).
const LMDB_MAX_DBS: u32 = 7;
/// Constant key under which the schema version is stored in the `metadata_db`.
const METADATA_VERSION_KEY: &[u8] = &[0];
/// Constant key under which the slasher configuration is stored in the `metadata_db`.
const METADATA_CONFIG_KEY: &[u8] = &[1];
/// Keys in the attester/proposer DBs are two big-endian `u64`s (16 bytes total).
const ATTESTER_KEY_SIZE: usize = 16;
const PROPOSER_KEY_SIZE: usize = 16;
/// Bytes per gigabyte, for converting `max_db_size_gbs` into an LMDB map size.
const GIGABYTE: usize = 1 << 30;
/// LMDB-backed database holding all slasher state.
///
/// One handle per sub-database; see the `*_DB` constants for the contents of each.
#[derive(Debug)]
pub struct SlasherDB<E: EthSpec> {
    pub(crate) env: Environment,
    pub(crate) indexed_attestation_db: Database,
    pub(crate) attesters_db: Database,
    pub(crate) min_targets_db: Database,
    pub(crate) max_targets_db: Database,
    pub(crate) current_epochs_db: Database,
    pub(crate) proposers_db: Database,
    pub(crate) metadata_db: Database,
    // Configuration, checked for compatibility against the on-disk config in `open`.
    config: Arc<Config>,
    // Ties the database to a specific `EthSpec` without storing a value of it.
    _phantom: PhantomData<E>,
}
/// Database key for the `attesters` database.
///
/// Stored as big-endian `(target_epoch, validator_index)` to enable efficient iteration
/// while pruning.
#[derive(Debug)]
pub struct AttesterKey {
data: [u8; ATTESTER_KEY_SIZE],
}
impl AttesterKey {
pub fn new(validator_index: u64, target_epoch: Epoch) -> Self {
let mut data = [0; ATTESTER_KEY_SIZE];
data[0..8].copy_from_slice(&target_epoch.as_u64().to_be_bytes());
data[8..ATTESTER_KEY_SIZE].copy_from_slice(&validator_index.to_be_bytes());
AttesterKey { data }
}
pub fn parse(data: &[u8]) -> Result<(Epoch, u64), Error> {
if data.len() == ATTESTER_KEY_SIZE {
let target_epoch = Epoch::new(BigEndian::read_u64(&data[..8]));
let validator_index = BigEndian::read_u64(&data[8..]);
Ok((target_epoch, validator_index))
} else {
Err(Error::AttesterKeyCorrupt { length: data.len() })
}
}
}
impl AsRef<[u8]> for AttesterKey {
fn as_ref(&self) -> &[u8] {
&self.data
}
}
/// Database key for the `proposers` database.
///
/// Stored as big-endian `(slot, validator_index)` to enable efficient iteration
/// while pruning.
#[derive(Debug)]
pub struct ProposerKey {
data: [u8; PROPOSER_KEY_SIZE],
}
impl ProposerKey {
pub fn new(validator_index: u64, slot: Slot) -> Self {
let mut data = [0; PROPOSER_KEY_SIZE];
data[0..8].copy_from_slice(&slot.as_u64().to_be_bytes());
data[8..PROPOSER_KEY_SIZE].copy_from_slice(&validator_index.to_be_bytes());
ProposerKey { data }
}
pub fn parse(data: &[u8]) -> Result<(Slot, u64), Error> {
if data.len() == PROPOSER_KEY_SIZE {
let slot = Slot::new(BigEndian::read_u64(&data[..8]));
let validator_index = BigEndian::read_u64(&data[8..]);
Ok((slot, validator_index))
} else {
Err(Error::ProposerKeyCorrupt { length: data.len() })
}
}
}
impl AsRef<[u8]> for ProposerKey {
fn as_ref(&self) -> &[u8] {
&self.data
}
}
/// Key for the `current_epochs` database: a big-endian validator index.
pub struct CurrentEpochKey {
    validator_index: [u8; 8],
}
impl CurrentEpochKey {
    /// Encode a validator index into a key.
    pub fn new(validator_index: u64) -> Self {
        let validator_index = validator_index.to_be_bytes();
        Self { validator_index }
    }
}
impl AsRef<[u8]> for CurrentEpochKey {
    fn as_ref(&self) -> &[u8] {
        &self.validator_index
    }
}
impl<E: EthSpec> SlasherDB<E> {
    /// Open (or create) the slasher database at `config.database_path`.
    ///
    /// Verifies that the on-disk schema version and configuration are compatible
    /// with the running software before returning, then writes both back.
    pub fn open(config: Arc<Config>) -> Result<Self, Error> {
        std::fs::create_dir_all(&config.database_path)?;
        let env = Environment::new()
            .set_max_dbs(LMDB_MAX_DBS)
            .set_map_size(config.max_db_size_gbs * GIGABYTE)
            .open_with_permissions(&config.database_path, 0o600)?;
        let indexed_attestation_db =
            env.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?;
        let attesters_db = env.create_db(Some(ATTESTERS_DB), Self::db_flags())?;
        let min_targets_db = env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?;
        let max_targets_db = env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?;
        let current_epochs_db = env.create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?;
        let proposers_db = env.create_db(Some(PROPOSERS_DB), Self::db_flags())?;
        let metadata_db = env.create_db(Some(METADATA_DB), Self::db_flags())?;
        let db = Self {
            env,
            indexed_attestation_db,
            attesters_db,
            min_targets_db,
            max_targets_db,
            current_epochs_db,
            proposers_db,
            metadata_db,
            config,
            _phantom: PhantomData,
        };
        let mut txn = db.begin_rw_txn()?;
        // Refuse to open a database written by a different schema version.
        if let Some(schema_version) = db.load_schema_version(&mut txn)? {
            if schema_version != CURRENT_SCHEMA_VERSION {
                return Err(Error::IncompatibleSchemaVersion {
                    database_schema_version: schema_version,
                    software_schema_version: CURRENT_SCHEMA_VERSION,
                });
            }
        }
        db.store_schema_version(&mut txn)?;
        // Refuse to open if the on-disk config has incompatible array parameters.
        if let Some(on_disk_config) = db.load_config(&mut txn)? {
            if !db.config.is_compatible(&on_disk_config) {
                return Err(Error::ConfigIncompatible {
                    on_disk_config,
                    config: (*db.config).clone(),
                });
            }
        }
        db.store_config(&mut txn)?;
        txn.commit()?;
        Ok(db)
    }
    /// Flags used when creating LMDB sub-databases.
    pub fn db_flags() -> DatabaseFlags {
        DatabaseFlags::default()
    }
    /// Flags used for all LMDB writes.
    pub fn write_flags() -> WriteFlags {
        WriteFlags::default()
    }
    /// Begin a read-write transaction on the environment.
    pub fn begin_rw_txn(&self) -> Result<RwTransaction<'_>, Error> {
        Ok(self.env.begin_rw_txn()?)
    }
    /// Load the schema version stored in the metadata DB, if any.
    pub fn load_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<Option<u64>, Error> {
        Ok(txn
            .get(self.metadata_db, &METADATA_VERSION_KEY)
            .optional()?
            .map(bincode::deserialize)
            .transpose()?)
    }
    /// Write the software's schema version into the metadata DB.
    pub fn store_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> {
        txn.put(
            self.metadata_db,
            &METADATA_VERSION_KEY,
            &bincode::serialize(&CURRENT_SCHEMA_VERSION)?,
            Self::write_flags(),
        )?;
        Ok(())
    }
    /// Load the slasher configuration stored in the metadata DB, if any.
    pub fn load_config(&self, txn: &mut RwTransaction<'_>) -> Result<Option<Config>, Error> {
        Ok(txn
            .get(self.metadata_db, &METADATA_CONFIG_KEY)
            .optional()?
            .map(bincode::deserialize)
            .transpose()?)
    }
    /// Write the current configuration into the metadata DB.
    pub fn store_config(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> {
        txn.put(
            self.metadata_db,
            &METADATA_CONFIG_KEY,
            &bincode::serialize(self.config.as_ref())?,
            Self::write_flags(),
        )?;
        Ok(())
    }
    /// Load the most recently recorded current epoch for a validator, if any.
    pub fn get_current_epoch_for_validator(
        &self,
        validator_index: u64,
        txn: &mut RwTransaction<'_>,
    ) -> Result<Option<Epoch>, Error> {
        Ok(txn
            .get(
                self.current_epochs_db,
                &CurrentEpochKey::new(validator_index),
            )
            .optional()?
            .map(Epoch::from_ssz_bytes)
            .transpose()?)
    }
    /// Record `current_epoch` as the latest epoch processed for a validator.
    pub fn update_current_epoch_for_validator(
        &self,
        validator_index: u64,
        current_epoch: Epoch,
        txn: &mut RwTransaction<'_>,
    ) -> Result<(), Error> {
        txn.put(
            self.current_epochs_db,
            &CurrentEpochKey::new(validator_index),
            &current_epoch.as_ssz_bytes(),
            Self::write_flags(),
        )?;
        Ok(())
    }
    /// Store an indexed attestation, keyed by its (fast) tree hash root.
    pub fn store_indexed_attestation(
        &self,
        txn: &mut RwTransaction<'_>,
        indexed_attestation_hash: Hash256,
        indexed_attestation: &IndexedAttestation<E>,
    ) -> Result<(), Error> {
        let data = indexed_attestation.as_ssz_bytes();
        txn.put(
            self.indexed_attestation_db,
            &indexed_attestation_hash.as_bytes(),
            &data,
            Self::write_flags(),
        )?;
        Ok(())
    }
    /// Load an indexed attestation by hash, erroring if it is missing.
    pub fn get_indexed_attestation(
        &self,
        txn: &mut RwTransaction<'_>,
        indexed_attestation_hash: Hash256,
    ) -> Result<IndexedAttestation<E>, Error> {
        let bytes = txn
            .get(self.indexed_attestation_db, &indexed_attestation_hash)
            .optional()?
            .ok_or_else(|| Error::MissingIndexedAttestation {
                root: indexed_attestation_hash,
            })?;
        Ok(IndexedAttestation::from_ssz_bytes(bytes)?)
    }
/// Check one validator's vote in `attestation` against their stored attester record,
/// detecting double votes for the same target epoch.
///
/// If no record exists for `(validator_index, target_epoch)`, `record` is stored and
/// `NotSlashable` is returned. An identical record is `NotSlashable`; a differing one
/// is resolved by loading the full indexed attestation and checking `is_double_vote`.
pub fn check_and_update_attester_record(
    &self,
    txn: &mut RwTransaction<'_>,
    validator_index: u64,
    attestation: &IndexedAttestation<E>,
    record: AttesterRecord,
) -> Result<AttesterSlashingStatus<E>, Error> {
    // See if there's an existing attestation for this attester.
    if let Some(existing_record) =
        self.get_attester_record(txn, validator_index, attestation.data.target.epoch)?
    {
        // If the existing attestation data is identical, then this attestation is not
        // slashable and no update is required.
        if existing_record.attestation_data_hash == record.attestation_data_hash {
            return Ok(AttesterSlashingStatus::NotSlashable);
        }

        // Otherwise, load the indexed attestation so we can confirm that it's slashable.
        let existing_attestation =
            self.get_indexed_attestation(txn, existing_record.indexed_attestation_hash)?;
        if attestation.is_double_vote(&existing_attestation) {
            Ok(AttesterSlashingStatus::DoubleVote(Box::new(
                existing_attestation,
            )))
        } else {
            // The data hashes differ but the attestations are not a double vote: the
            // stored record disagrees with its own indexed attestation, i.e. corruption.
            Err(Error::AttesterRecordInconsistentRoot)
        }
    }
    // If no attestation exists, insert a record for this validator.
    else {
        txn.put(
            self.attesters_db,
            &AttesterKey::new(validator_index, attestation.data.target.epoch),
            &record.as_ssz_bytes(),
            Self::write_flags(),
        )?;
        Ok(AttesterSlashingStatus::NotSlashable)
    }
}
/// Load the indexed attestation that `validator_index` cast for `target_epoch`.
///
/// Errors with `MissingAttesterRecord` if no record exists for that pair.
pub fn get_attestation_for_validator(
    &self,
    txn: &mut RwTransaction<'_>,
    validator_index: u64,
    target_epoch: Epoch,
) -> Result<IndexedAttestation<E>, Error> {
    let record = self
        .get_attester_record(txn, validator_index, target_epoch)?
        .ok_or_else(|| Error::MissingAttesterRecord {
            validator_index,
            target_epoch,
        })?;
    self.get_indexed_attestation(txn, record.indexed_attestation_hash)
}
/// Load the attester record for `(validator_index, target)`, if one is stored.
pub fn get_attester_record(
    &self,
    txn: &mut RwTransaction<'_>,
    validator_index: u64,
    target: Epoch,
) -> Result<Option<AttesterRecord>, Error> {
    let attester_key = AttesterKey::new(validator_index, target);
    match txn.get(self.attesters_db, &attester_key).optional()? {
        Some(bytes) => Ok(Some(AttesterRecord::from_ssz_bytes(bytes)?)),
        None => Ok(None),
    }
}
/// Load the stored block header for `(proposer_index, slot)`, if one exists.
pub fn get_block_proposal(
    &self,
    txn: &mut RwTransaction<'_>,
    proposer_index: u64,
    slot: Slot,
) -> Result<Option<SignedBeaconBlockHeader>, Error> {
    let proposer_key = ProposerKey::new(proposer_index, slot);
    match txn.get(self.proposers_db, &proposer_key).optional()? {
        Some(bytes) => Ok(Some(SignedBeaconBlockHeader::from_ssz_bytes(bytes)?)),
        None => Ok(None),
    }
}
/// Check a block proposal against any stored proposal for the same proposer and slot.
///
/// If no proposal exists, the header is stored and `NotSlashable` is returned.
/// An identical existing header is also `NotSlashable`; a differing one yields a
/// `DoubleVote` proposer slashing pairing the stored and new headers.
pub fn check_or_insert_block_proposal(
    &self,
    txn: &mut RwTransaction<'_>,
    block_header: SignedBeaconBlockHeader,
) -> Result<ProposerSlashingStatus, Error> {
    let proposer_index = block_header.message.proposer_index;
    let slot = block_header.message.slot;

    if let Some(existing_block) = self.get_block_proposal(txn, proposer_index, slot)? {
        if existing_block == block_header {
            // Re-observing the identical header is benign.
            Ok(ProposerSlashingStatus::NotSlashable)
        } else {
            // Two distinct headers signed for the same (proposer, slot).
            Ok(ProposerSlashingStatus::DoubleVote(Box::new(
                ProposerSlashing {
                    signed_header_1: existing_block,
                    signed_header_2: block_header,
                },
            )))
        }
    } else {
        txn.put(
            self.proposers_db,
            &ProposerKey::new(proposer_index, slot),
            &block_header.as_ssz_bytes(),
            Self::write_flags(),
        )?;
        Ok(ProposerSlashingStatus::NotSlashable)
    }
}
/// Prune proposals and attester records older than the configured history length,
/// committing all deletions in a single transaction.
pub fn prune(&self, current_epoch: Epoch) -> Result<(), Error> {
    let mut txn = self.begin_rw_txn()?;
    self.prune_proposers(current_epoch, &mut txn)?;
    self.prune_attesters(current_epoch, &mut txn)?;
    txn.commit()?;
    Ok(())
}
fn prune_proposers(
&self,
current_epoch: Epoch,
txn: &mut RwTransaction<'_>,
) -> Result<(), Error> {
let min_slot = current_epoch
.saturating_add(1u64)
.saturating_sub(self.config.history_length)
.start_slot(E::slots_per_epoch());
let mut cursor = txn.open_rw_cursor(self.proposers_db)?;
// Position cursor at first key, bailing out if the database is empty.
if cursor
.get(None, None, lmdb_sys::MDB_FIRST)
.optional()?
.is_none()
{
return Ok(());
}
loop {
let key_bytes = cursor
.get(None, None, lmdb_sys::MDB_GET_CURRENT)?
.0
.ok_or_else(|| Error::MissingProposerKey)?;
let (slot, _) = ProposerKey::parse(key_bytes)?;
if slot < min_slot {
cursor.del(Self::write_flags())?;
// End the loop if there is no next entry.
if cursor
.get(None, None, lmdb_sys::MDB_NEXT)
.optional()?
.is_none()
{
break;
}
} else {
break;
}
}
Ok(())
}
fn prune_attesters(
&self,
current_epoch: Epoch,
txn: &mut RwTransaction<'_>,
) -> Result<(), Error> {
let min_epoch = current_epoch
.saturating_add(1u64)
.saturating_sub(self.config.history_length as u64);
let mut cursor = txn.open_rw_cursor(self.attesters_db)?;
// Position cursor at first key, bailing out if the database is empty.
if cursor
.get(None, None, lmdb_sys::MDB_FIRST)
.optional()?
.is_none()
{
return Ok(());
}
let mut indexed_attestations_to_delete = HashSet::new();
loop {
let (optional_key, value) = cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT)?;
let key_bytes = optional_key.ok_or_else(|| Error::MissingAttesterKey)?;
let (target_epoch, _validator_index) = AttesterKey::parse(key_bytes)?;
if target_epoch < min_epoch {
// Stage the indexed attestation for deletion and delete the record itself.
let attester_record = AttesterRecord::from_ssz_bytes(value)?;
indexed_attestations_to_delete.insert(attester_record.indexed_attestation_hash);
cursor.del(Self::write_flags())?;
// End the loop if there is no next entry.
if cursor
.get(None, None, lmdb_sys::MDB_NEXT)
.optional()?
.is_none()
{
break;
}
} else {
break;
}
}
drop(cursor);
for indexed_attestation_hash in indexed_attestations_to_delete {
txn.del(self.indexed_attestation_db, &indexed_attestation_hash, None)?;
}
Ok(())
}
}

83
slasher/src/error.rs Normal file
View File

@@ -0,0 +1,83 @@
use crate::Config;
use std::io;
use types::{Epoch, Hash256};
/// Top-level error type for the slasher crate.
#[derive(Debug)]
pub enum Error {
    /// Error from the underlying LMDB database.
    DatabaseError(lmdb::Error),
    /// I/O error surfaced through the database layer.
    DatabaseIOError(io::Error),
    /// Failure decoding an SSZ-encoded value read from disk.
    SszDecodeError(ssz::DecodeError),
    /// Failure (de)serializing a bincode-encoded metadata value.
    BincodeError(bincode::Error),
    /// Arithmetic error propagated from `safe_arith`.
    ArithError(safe_arith::ArithError),
    /// A chunk index fell outside the valid range.
    ChunkIndexOutOfBounds(usize),
    /// The on-disk schema version does not match this software's version.
    IncompatibleSchemaVersion {
        database_schema_version: u64,
        software_schema_version: u64,
    },
    /// Invalid combination of `chunk_size` and `history_length` in the config.
    ConfigInvalidChunkSize {
        chunk_size: usize,
        history_length: usize,
    },
    /// A config parameter that must be non-zero was set to zero.
    ConfigInvalidZeroParameter {
        config: Config,
    },
    /// The config stored on disk conflicts with the runtime config.
    ConfigIncompatible {
        on_disk_config: Config,
        config: Config,
    },
    /// An epoch distance exceeded the maximum storable value.
    DistanceTooLarge,
    /// Overflow while computing an epoch distance.
    DistanceCalculationOverflow,
    /// Missing an attester record that we expected to exist.
    MissingAttesterRecord {
        validator_index: u64,
        target_epoch: Epoch,
    },
    /// A stored attester record had an unexpected byte length.
    AttesterRecordCorrupt {
        length: usize,
    },
    /// A stored attester key had an unexpected byte length.
    AttesterKeyCorrupt {
        length: usize,
    },
    /// A stored proposer key had an unexpected byte length.
    ProposerKeyCorrupt {
        length: usize,
    },
    /// An indexed attestation referenced by a record was not found on disk.
    MissingIndexedAttestation {
        root: Hash256,
    },
    /// A cursor entry unexpectedly had no key (attesters table).
    MissingAttesterKey,
    /// A cursor entry unexpectedly had no key (proposers table).
    MissingProposerKey,
    /// An attester record's data hash disagrees with its indexed attestation.
    AttesterRecordInconsistentRoot,
}
impl From<lmdb::Error> for Error {
fn from(e: lmdb::Error) -> Self {
match e {
lmdb::Error::Other(os_error) => Error::from(io::Error::from_raw_os_error(os_error)),
_ => Error::DatabaseError(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::DatabaseIOError(e)
}
}
impl From<ssz::DecodeError> for Error {
    /// Wrap an SSZ decoding failure.
    fn from(e: ssz::DecodeError) -> Self {
        Self::SszDecodeError(e)
    }
}
impl From<bincode::Error> for Error {
    /// Wrap a bincode (de)serialization failure.
    fn from(e: bincode::Error) -> Self {
        Self::BincodeError(e)
    }
}
impl From<safe_arith::ArithError> for Error {
    /// Wrap a safe-arithmetic failure.
    fn from(e: safe_arith::ArithError) -> Self {
        Self::ArithError(e)
    }
}

66
slasher/src/lib.rs Normal file
View File

@@ -0,0 +1,66 @@
#![deny(missing_debug_implementations)]
mod array;
mod attestation_queue;
mod attester_record;
mod block_queue;
pub mod config;
mod database;
mod error;
mod metrics;
mod slasher;
mod slasher_server;
pub mod test_utils;
mod utils;
pub use crate::slasher::Slasher;
pub use attestation_queue::{AttestationBatch, AttestationQueue};
pub use attester_record::AttesterRecord;
pub use block_queue::BlockQueue;
pub use config::Config;
pub use database::SlasherDB;
pub use error::Error;
pub use slasher_server::SlasherServer;
use types::{AttesterSlashing, EthSpec, IndexedAttestation, ProposerSlashing};
/// Outcome of checking a new attestation against stored data for attester slashings.
#[derive(Debug, PartialEq)]
pub enum AttesterSlashingStatus<E: EthSpec> {
    NotSlashable,
    /// A weird outcome that can occur when we go to lookup an attestation by its target
    /// epoch for a surround slashing, but find a different attestation -- indicating that
    /// the validator has already been caught double voting.
    AlreadyDoubleVoted,
    /// The new attestation double-votes against the boxed existing attestation.
    DoubleVote(Box<IndexedAttestation<E>>),
    /// The new attestation surrounds the boxed existing attestation.
    SurroundsExisting(Box<IndexedAttestation<E>>),
    /// The new attestation is surrounded by the boxed existing attestation.
    SurroundedByExisting(Box<IndexedAttestation<E>>),
}
/// Outcome of checking a block proposal against stored data for proposer slashings.
#[derive(Debug, PartialEq)]
pub enum ProposerSlashingStatus {
    NotSlashable,
    /// Two distinct blocks were signed for the same proposer and slot.
    DoubleVote(Box<ProposerSlashing>),
}
impl<E: EthSpec> AttesterSlashingStatus<E> {
    /// Convert this status into an `AttesterSlashing`, if it represents a slashable
    /// offence, pairing the stored attestation with `new_attestation`.
    pub fn into_slashing(
        self,
        new_attestation: &IndexedAttestation<E>,
    ) -> Option<AttesterSlashing<E>> {
        use AttesterSlashingStatus::*;

        // The surrounding attestation must be in `attestation_1` to be valid.
        match self {
            NotSlashable => None,
            AlreadyDoubleVoted => None,
            DoubleVote(existing) | SurroundedByExisting(existing) => Some(AttesterSlashing {
                attestation_1: *existing,
                attestation_2: new_attestation.clone(),
            }),
            SurroundsExisting(existing) => Some(AttesterSlashing {
                attestation_1: new_attestation.clone(),
                attestation_2: *existing,
            }),
        }
    }
}

38
slasher/src/metrics.rs Normal file
View File

@@ -0,0 +1,38 @@
use lazy_static::lazy_static;
pub use lighthouse_metrics::*;
// Prometheus metrics for the slasher, registered lazily via `lighthouse_metrics`.
lazy_static! {
    pub static ref SLASHER_DATABASE_SIZE: Result<IntGauge> = try_create_int_gauge(
        "slasher_database_size",
        "Size of the LMDB database backing the slasher, in bytes"
    );
    pub static ref SLASHER_RUN_TIME: Result<Histogram> = try_create_histogram(
        "slasher_process_batch_time",
        "Time taken to process a batch of blocks and attestations"
    );
    pub static ref SLASHER_NUM_ATTESTATIONS_DROPPED: Result<IntGauge> = try_create_int_gauge(
        "slasher_num_attestations_dropped",
        "Number of attestations dropped per batch"
    );
    pub static ref SLASHER_NUM_ATTESTATIONS_DEFERRED: Result<IntGauge> = try_create_int_gauge(
        "slasher_num_attestations_deferred",
        "Number of attestations deferred per batch"
    );
    pub static ref SLASHER_NUM_ATTESTATIONS_VALID: Result<IntGauge> = try_create_int_gauge(
        "slasher_num_attestations_valid",
        "Number of valid attestations per batch"
    );
    pub static ref SLASHER_NUM_BLOCKS_PROCESSED: Result<IntGauge> = try_create_int_gauge(
        "slasher_num_blocks_processed",
        "Number of blocks processed per batch",
    );
    // Labelled by array kind ("min" / "max" -- confirm label values at call sites).
    pub static ref SLASHER_NUM_CHUNKS_UPDATED: Result<IntCounterVec> = try_create_int_counter_vec(
        "slasher_num_chunks_updated",
        "Number of min or max target chunks updated on disk",
        &["array"],
    );
    pub static ref SLASHER_COMPRESSION_RATIO: Result<Gauge> = try_create_float_gauge(
        "slasher_compression_ratio",
        "Compression ratio for min-max array chunks (higher is better)"
    );
}

299
slasher/src/slasher.rs Normal file
View File

@@ -0,0 +1,299 @@
use crate::metrics::{
self, SLASHER_NUM_ATTESTATIONS_DEFERRED, SLASHER_NUM_ATTESTATIONS_DROPPED,
SLASHER_NUM_ATTESTATIONS_VALID, SLASHER_NUM_BLOCKS_PROCESSED,
};
use crate::{
array, AttestationBatch, AttestationQueue, AttesterRecord, BlockQueue, Config, Error,
ProposerSlashingStatus, SlasherDB,
};
use lmdb::{RwTransaction, Transaction};
use parking_lot::Mutex;
use slog::{debug, error, info, Logger};
use std::collections::HashSet;
use std::sync::Arc;
use types::{
AttesterSlashing, Epoch, EthSpec, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader,
};
/// The top-level slasher object: in-memory queues of incoming data, the on-disk
/// database, and accumulators for detected slashings.
#[derive(Debug)]
pub struct Slasher<E: EthSpec> {
    /// On-disk database for attestations, blocks and related records.
    db: SlasherDB<E>,
    /// Attestations accepted from the network, awaiting batch processing.
    pub(crate) attestation_queue: AttestationQueue<E>,
    /// Block headers accepted from the network, awaiting batch processing.
    pub(crate) block_queue: BlockQueue,
    /// Attester slashings detected so far, removed when harvested.
    attester_slashings: Mutex<HashSet<AttesterSlashing<E>>>,
    /// Proposer slashings detected so far, removed when harvested.
    proposer_slashings: Mutex<HashSet<ProposerSlashing>>,
    /// Validated configuration shared with the database.
    config: Arc<Config>,
    pub(crate) log: Logger,
}
impl<E: EthSpec> Slasher<E> {
    /// Open (or create) the slasher database described by `config`, validating the
    /// config first.
    pub fn open(config: Config, log: Logger) -> Result<Self, Error> {
        config.validate()?;
        let config = Arc::new(config);
        let db = SlasherDB::open(config.clone())?;
        let attester_slashings = Mutex::new(HashSet::new());
        let proposer_slashings = Mutex::new(HashSet::new());
        let attestation_queue = AttestationQueue::default();
        let block_queue = BlockQueue::default();
        Ok(Self {
            db,
            attester_slashings,
            proposer_slashings,
            attestation_queue,
            block_queue,
            config,
            log,
        })
    }

    /// Harvest all attester slashings found, removing them from the slasher.
    pub fn get_attester_slashings(&self) -> HashSet<AttesterSlashing<E>> {
        std::mem::take(&mut self.attester_slashings.lock())
    }

    /// Harvest all proposer slashings found, removing them from the slasher.
    pub fn get_proposer_slashings(&self) -> HashSet<ProposerSlashing> {
        std::mem::take(&mut self.proposer_slashings.lock())
    }

    /// The configuration this slasher was opened with.
    pub fn config(&self) -> &Config {
        &self.config
    }

    /// Accept an attestation from the network and queue it for processing.
    pub fn accept_attestation(&self, attestation: IndexedAttestation<E>) {
        self.attestation_queue.queue(attestation);
    }

    /// Accept a block from the network and queue it for processing.
    pub fn accept_block_header(&self, block_header: SignedBeaconBlockHeader) {
        self.block_queue.queue(block_header);
    }

    /// Apply queued blocks and attestations to the on-disk database, and detect slashings!
    pub fn process_queued(&self, current_epoch: Epoch) -> Result<(), Error> {
        // Blocks and attestations are applied within a single committed transaction.
        let mut txn = self.db.begin_rw_txn()?;
        self.process_blocks(&mut txn)?;
        self.process_attestations(current_epoch, &mut txn)?;
        txn.commit()?;
        Ok(())
    }

    /// Apply queued blocks to the on-disk database.
    pub fn process_blocks(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> {
        let blocks = self.block_queue.dequeue();
        let mut slashings = vec![];

        metrics::set_gauge(&SLASHER_NUM_BLOCKS_PROCESSED, blocks.len() as i64);

        for block in blocks {
            if let ProposerSlashingStatus::DoubleVote(slashing) =
                self.db.check_or_insert_block_proposal(txn, block)?
            {
                slashings.push(*slashing);
            }
        }

        if !slashings.is_empty() {
            info!(
                self.log,
                "Found {} new proposer slashings!",
                slashings.len(),
            );
            self.proposer_slashings.lock().extend(slashings);
        }

        Ok(())
    }

    /// Apply queued attestations to the on-disk database.
    pub fn process_attestations(
        &self,
        current_epoch: Epoch,
        txn: &mut RwTransaction<'_>,
    ) -> Result<(), Error> {
        let snapshot = self.attestation_queue.dequeue();

        // Filter attestations for relevance.
        let (snapshot, deferred, num_dropped) = self.validate(snapshot, current_epoch);
        let num_deferred = deferred.len();
        // Deferred attestations go back on the queue for a future batch.
        self.attestation_queue.requeue(deferred);

        // Insert attestations into database.
        debug!(
            self.log,
            "Storing attestations in slasher DB";
            "num_valid" => snapshot.len(),
            "num_deferred" => num_deferred,
            "num_dropped" => num_dropped,
        );
        metrics::set_gauge(&SLASHER_NUM_ATTESTATIONS_VALID, snapshot.len() as i64);
        metrics::set_gauge(&SLASHER_NUM_ATTESTATIONS_DEFERRED, num_deferred as i64);
        metrics::set_gauge(&SLASHER_NUM_ATTESTATIONS_DROPPED, num_dropped as i64);

        // Tuples are (indexed_attestation, attester_record).
        for attestation in snapshot.attestations.iter() {
            self.db.store_indexed_attestation(
                txn,
                attestation.1.indexed_attestation_hash,
                &attestation.0,
            )?;
        }

        // Group attestations into batches and process them.
        let grouped_attestations = snapshot.group_by_validator_index(&self.config);

        for (subqueue_id, subqueue) in grouped_attestations.subqueues.into_iter().enumerate() {
            self.process_batch(txn, subqueue_id, subqueue.attestations, current_epoch)?;
        }
        Ok(())
    }

    /// Process a batch of attestations for a range of validator indices.
    fn process_batch(
        &self,
        txn: &mut RwTransaction<'_>,
        subqueue_id: usize,
        batch: Vec<Arc<(IndexedAttestation<E>, AttesterRecord)>>,
        current_epoch: Epoch,
    ) -> Result<(), Error> {
        // First, check for double votes.
        for attestation in &batch {
            match self.check_double_votes(txn, subqueue_id, &attestation.0, attestation.1) {
                Ok(slashings) => {
                    if !slashings.is_empty() {
                        info!(
                            self.log,
                            "Found {} new double-vote slashings!",
                            slashings.len()
                        );
                    }
                    self.attester_slashings.lock().extend(slashings);
                }
                Err(e) => {
                    error!(
                        self.log,
                        "Error checking for double votes";
                        "error" => format!("{:?}", e)
                    );
                    return Err(e);
                }
            }
        }

        // Then check for surrounds using the min-max arrays.
        match array::update(
            &self.db,
            txn,
            subqueue_id,
            batch,
            current_epoch,
            &self.config,
        ) {
            Ok(slashings) => {
                if !slashings.is_empty() {
                    info!(
                        self.log,
                        "Found {} new surround slashings!",
                        slashings.len()
                    );
                }
                self.attester_slashings.lock().extend(slashings);
            }
            Err(e) => {
                error!(
                    self.log,
                    "Error processing array update";
                    "error" => format!("{:?}", e),
                );
                return Err(e);
            }
        }

        Ok(())
    }

    /// Check for double votes from all validators on `attestation` who match the `subqueue_id`.
    fn check_double_votes(
        &self,
        txn: &mut RwTransaction<'_>,
        subqueue_id: usize,
        attestation: &IndexedAttestation<E>,
        attester_record: AttesterRecord,
    ) -> Result<HashSet<AttesterSlashing<E>>, Error> {
        let mut slashings = HashSet::new();

        for validator_index in self
            .config
            .attesting_validators_in_chunk(attestation, subqueue_id)
        {
            let slashing_status = self.db.check_and_update_attester_record(
                txn,
                validator_index,
                &attestation,
                attester_record,
            )?;

            if let Some(slashing) = slashing_status.into_slashing(attestation) {
                debug!(
                    self.log,
                    "Found double-vote slashing";
                    "validator_index" => validator_index,
                    "epoch" => slashing.attestation_1.data.target.epoch,
                );
                slashings.insert(slashing);
            }
        }

        Ok(slashings)
    }

    /// Validate the attestations in `batch` for ingestion during `current_epoch`.
    ///
    /// Drop any attestations that are too old to ever be relevant, and return any attestations
    /// that might be valid in the future.
    ///
    /// Returns `(valid, deferred, num_dropped)`.
    fn validate(
        &self,
        batch: AttestationBatch<E>,
        current_epoch: Epoch,
    ) -> (AttestationBatch<E>, AttestationBatch<E>, usize) {
        let mut keep = Vec::with_capacity(batch.len());
        let mut defer = vec![];
        let mut drop_count = 0;

        for tuple in batch.attestations.into_iter() {
            let attestation = &tuple.0;
            let target_epoch = attestation.data.target.epoch;
            let source_epoch = attestation.data.source.epoch;

            // Drop attestations that are invalid (source > target) or older than
            // the history window.
            if source_epoch > target_epoch
                || source_epoch + self.config.history_length as u64 <= current_epoch
            {
                drop_count += 1;
                continue;
            }

            // Check that the attestation's target epoch is acceptable, and defer it
            // if it's not.
            if target_epoch > current_epoch {
                defer.push(tuple);
            } else {
                // Otherwise the attestation is OK to process.
                keep.push(tuple);
            }
        }

        (
            AttestationBatch { attestations: keep },
            AttestationBatch {
                attestations: defer,
            },
            drop_count,
        )
    }

    /// Prune unnecessary attestations and blocks from the on-disk database.
    pub fn prune_database(&self, current_epoch: Epoch) -> Result<(), Error> {
        self.db.prune(current_epoch)
    }
}

View File

@@ -0,0 +1,95 @@
use crate::metrics::{self, SLASHER_DATABASE_SIZE, SLASHER_RUN_TIME};
use crate::Slasher;
use directory::size_of_dir;
use slog::{debug, error, info, trace};
use slot_clock::SlotClock;
use std::sync::mpsc::{sync_channel, TrySendError};
use std::sync::Arc;
use task_executor::TaskExecutor;
use tokio::stream::StreamExt;
use tokio::time::{interval_at, Duration, Instant};
use types::EthSpec;
/// Unit struct that drives the slasher's periodic processing loop.
#[derive(Debug)]
pub struct SlasherServer;
impl SlasherServer {
    /// Spawn the slasher's background tasks on `executor`:
    ///
    /// 1. An async timer that sends the current epoch over a channel every
    ///    `update_period` seconds.
    /// 2. A blocking worker that drains the channel, processes queued blocks and
    ///    attestations, prunes the database, and updates metrics.
    pub fn run<E: EthSpec, C: SlotClock + 'static>(
        slasher: Arc<Slasher<E>>,
        slot_clock: C,
        executor: &TaskExecutor,
    ) {
        info!(slasher.log, "Starting slasher to detect misbehaviour");

        // Buffer just a single message in the channel. If the receiver is still processing, we
        // don't need to burden them with more work (we can wait).
        let (sender, receiver) = sync_channel(1);
        let log = slasher.log.clone();
        let update_period = slasher.config().update_period;

        executor.spawn(
            async move {
                // NOTE: could align each run to some fixed point in each slot, see:
                // https://github.com/sigp/lighthouse/issues/1861
                let slot_clock = Arc::new(slot_clock);
                let mut interval = interval_at(Instant::now(), Duration::from_secs(update_period));
                while interval.next().await.is_some() {
                    if let Some(current_slot) = slot_clock.clone().now() {
                        let current_epoch = current_slot.epoch(E::slots_per_epoch());
                        // Stop ticking once the processing task has gone away.
                        if let Err(TrySendError::Disconnected(_)) = sender.try_send(current_epoch) {
                            break;
                        }
                    } else {
                        trace!(log, "Slasher has nothing to do: we are pre-genesis");
                    }
                }
            },
            "slasher_server",
        );

        executor.spawn_blocking(
            move || {
                while let Ok(current_epoch) = receiver.recv() {
                    let t = Instant::now();
                    // Queue lengths are sampled before processing drains them.
                    let num_attestations = slasher.attestation_queue.len();
                    let num_blocks = slasher.block_queue.len();

                    let batch_timer = metrics::start_timer(&SLASHER_RUN_TIME);
                    if let Err(e) = slasher.process_queued(current_epoch) {
                        error!(
                            slasher.log,
                            "Error during scheduled slasher processing";
                            "epoch" => current_epoch,
                            "error" => format!("{:?}", e)
                        );
                    }
                    drop(batch_timer);

                    // Prune the database, even in the case where batch processing failed.
                    // If the LMDB database is full then pruning could help to free it up.
                    if let Err(e) = slasher.prune_database(current_epoch) {
                        error!(
                            slasher.log,
                            "Error during slasher database pruning";
                            "epoch" => current_epoch,
                            "error" => format!("{:?}", e),
                        );
                        continue;
                    }

                    debug!(
                        slasher.log,
                        "Completed slasher update";
                        "epoch" => current_epoch,
                        "time_taken" => format!("{}ms", t.elapsed().as_millis()),
                        "num_attestations" => num_attestations,
                        "num_blocks" => num_blocks,
                    );

                    let database_size = size_of_dir(&slasher.config().database_path);
                    metrics::set_gauge(&SLASHER_DATABASE_SIZE, database_size as i64);
                }
            },
            "slasher_server_process_queued",
        );
    }
}

115
slasher/src/test_utils.rs Normal file
View File

@@ -0,0 +1,115 @@
use slog::Logger;
use sloggers::Build;
use std::collections::HashSet;
use std::iter::FromIterator;
use types::{
AggregateSignature, AttestationData, AttesterSlashing, BeaconBlockHeader, Checkpoint, Epoch,
Hash256, IndexedAttestation, MainnetEthSpec, Signature, SignedBeaconBlockHeader, Slot,
};
pub type E = MainnetEthSpec;
/// Construct a test logger: a terminal trace-level logger when the `test_logger`
/// feature is enabled, otherwise a null logger.
pub fn logger() -> Logger {
    if !cfg!(feature = "test_logger") {
        return sloggers::null::NullLoggerBuilder.build().unwrap();
    }
    sloggers::terminal::TerminalLoggerBuilder::new()
        .level(sloggers::types::Severity::Trace)
        .build()
        .unwrap()
}
/// Build an `IndexedAttestation` for `attesting_indices` voting `source_epoch ->
/// target_epoch`, with the target root derived from `target_root` and an empty signature.
pub fn indexed_att(
    attesting_indices: impl AsRef<[u64]>,
    source_epoch: u64,
    target_epoch: u64,
    target_root: u64,
) -> IndexedAttestation<E> {
    IndexedAttestation {
        attesting_indices: attesting_indices.as_ref().to_vec().into(),
        data: AttestationData {
            slot: Slot::new(0),
            index: 0,
            beacon_block_root: Hash256::zero(),
            source: Checkpoint {
                epoch: Epoch::new(source_epoch),
                root: Hash256::from_low_u64_be(0),
            },
            target: Checkpoint {
                epoch: Epoch::new(target_epoch),
                root: Hash256::from_low_u64_be(target_root),
            },
        },
        signature: AggregateSignature::empty(),
    }
}
/// Build an `AttesterSlashing` from clones of the two given attestations.
pub fn att_slashing(
    attestation_1: &IndexedAttestation<E>,
    attestation_2: &IndexedAttestation<E>,
) -> AttesterSlashing<E> {
    let attestation_1 = attestation_1.clone();
    let attestation_2 = attestation_2.clone();
    AttesterSlashing {
        attestation_1,
        attestation_2,
    }
}
/// Compute the set of validator indices common to both attestations' index lists.
pub fn hashset_intersection(
    attestation_1_indices: &[u64],
    attestation_2_indices: &[u64],
) -> HashSet<u64> {
    let first: HashSet<u64> = attestation_1_indices.iter().copied().collect();
    let second: HashSet<u64> = attestation_2_indices.iter().copied().collect();
    first.intersection(&second).copied().collect()
}
/// Return the set of validator indices slashed by `slashings`, asserting that each
/// slashing is a valid double or surround vote.
pub fn slashed_validators_from_slashings(slashings: &HashSet<AttesterSlashing<E>>) -> HashSet<u64> {
    slashings
        .iter()
        .flat_map(|slashing| {
            let att1 = &slashing.attestation_1;
            let att2 = &slashing.attestation_2;
            assert!(
                att1.is_double_vote(att2) || att1.is_surround_vote(att2),
                "invalid slashing: {:#?}",
                slashing
            );
            // Only validators present in both attestations are slashed.
            hashset_intersection(&att1.attesting_indices, &att2.attesting_indices)
        })
        .collect()
}
/// Compute the set of validators that *should* be slashed given every pair of
/// attestations in `attestations`, for comparison against the slasher's output.
pub fn slashed_validators_from_attestations(
    attestations: &[IndexedAttestation<E>],
) -> HashSet<u64> {
    let mut slashed_validators = HashSet::new();
    // O(n^2) code, watch out.
    for att1 in attestations {
        for att2 in attestations {
            if att1 == att2 {
                continue;
            }

            if att1.is_double_vote(att2) || att1.is_surround_vote(att2) {
                slashed_validators.extend(hashset_intersection(
                    &att1.attesting_indices,
                    &att2.attesting_indices,
                ));
            }
        }
    }
    slashed_validators
}
/// Build a signed beacon block header for tests, with the body root derived from
/// `block_root` and an empty signature.
pub fn block(slot: u64, proposer_index: u64, block_root: u64) -> SignedBeaconBlockHeader {
    SignedBeaconBlockHeader {
        message: BeaconBlockHeader {
            slot: Slot::new(slot),
            proposer_index,
            parent_root: Hash256::zero(),
            state_root: Hash256::zero(),
            body_root: Hash256::from_low_u64_be(block_root),
        },
        signature: Signature::empty(),
    }
}

16
slasher/src/utils.rs Normal file
View File

@@ -0,0 +1,16 @@
use crate::Error;
/// Mix-in trait for loading values from LMDB that may or may not exist.
pub trait TxnOptional<T, E> {
    /// Convert a "not found" outcome into `Ok(None)`, passing other results through.
    fn optional(self) -> Result<Option<T>, E>;
}
impl<T> TxnOptional<T, Error> for Result<T, lmdb::Error> {
fn optional(self) -> Result<Option<T>, Error> {
match self {
Ok(x) => Ok(Some(x)),
Err(lmdb::Error::NotFound) => Ok(None),
Err(e) => Err(e.into()),
}
}
}