Mirror of https://github.com/sigp/lighthouse.git, synced 2026-03-10 04:01:51 +00:00
* Renamed fork_choice::process_attestation_from_block
* Processing attestation in fork choice
* Retrieving state from store and checking signature
* Looser check on beacon state validity.
* Cleaned up get_attestation_state
* Expanded fork choice api to provide latest validator message.
* Checking if an attestation contains a latest message
* Correct process_attestation error handling.
* Copy-paste error in comment fixed.
* Tidy ancestor iterators
* Getting attestation slot via helper method
* Refactored attestation creation in test utils
* Revert "Refactored attestation creation in test utils" (reverts commit 4d277fe4239a7194758b18fb5c00dfe0b8231306)
* Integration tests for free attestation processing
* Implicit conflicts resolved.
* Formatting
* Do first pass on Grant's code
* Add another attestation processing test
* Tidy attestation processing
* Remove old code fragment
* Add non-compiling, half-finished changes
* Simplify, fix bugs, add tests for chain iters
* Remove attestation processing from op pool
* Fix bug with fork choice, tidy
* Fix overly restrictive check in fork choice.
* Ensure committee cache is built during attn proc
* Ignore unknown blocks at fork choice
* Various minor fixes
* Make fork choice write lock into read lock
* Remove unused method
* Tidy comments
* Fix attestation prod. target roots change
* Fix compile error in store iters
* Reject any attestation prior to finalization
* Begin metrics refactor
* Move beacon_chain to new metrics structure.
* Make metrics not panic if already defined
* Use global prometheus gather at rest api
* Unify common metric fns into a crate
* Add heavy metering to block processing
* Remove hyphen from prometheus metric name
* Add more beacon chain metrics
* Add beacon chain persistence metric
* Prune op pool on finalization
* Add extra prom beacon chain metrics
* Prefix BeaconChain metrics with "beacon_"
* Add more store metrics
* Add basic metrics to libp2p
* Add metrics to HTTP server
* Remove old `http_server` crate
* Update metrics names to be more like standard
* Fix broken beacon chain metrics, add slot clock metrics
* Add lighthouse_metrics gather fn
* Remove http args
* Fix wrong state given to op pool prune
* Make prom metric names more consistent
* Add more metrics, tidy existing metrics
* Fix store block read metrics
* Tidy attestation metrics
* Fix minor PR comments
* Allow Travis failures on beta (see desc). There's a non-backward-compatible change in `cargo fmt`; stable and beta do not agree.
* Tidy `lighthouse_metrics` docs
* Fix typo
123 lines
3.3 KiB
Rust
use super::*;
use crate::metrics;
use db_key::Key;
use leveldb::database::kv::KV;
use leveldb::database::Database;
use leveldb::error::Error as LevelDBError;
use leveldb::options::{Options, ReadOptions, WriteOptions};
use std::path::Path;
use std::sync::Arc;

/// A wrapped leveldb database.
#[derive(Clone)]
pub struct LevelDB {
    // Note: this `Arc` is only included because of an artificial constraint by gRPC. Hopefully we
    // can remove this one day.
    db: Arc<Database<BytesKey>>,
}

impl LevelDB {
    /// Open a database at `path`, creating a new database if one does not already exist.
    pub fn open(path: &Path) -> Result<Self, Error> {
        let mut options = Options::new();

        options.create_if_missing = true;

        let db = Arc::new(Database::open(path, options)?);

        Ok(Self { db })
    }

    fn read_options(&self) -> ReadOptions<BytesKey> {
        ReadOptions::new()
    }

    fn write_options(&self) -> WriteOptions {
        WriteOptions::new()
    }

    fn get_key_for_col(col: &str, key: &[u8]) -> BytesKey {
        let mut col = col.as_bytes().to_vec();
        col.append(&mut key.to_vec());
        BytesKey { key: col }
    }
}

/// Used for keying leveldb.
pub struct BytesKey {
    key: Vec<u8>,
}

impl Key for BytesKey {
    fn from_u8(key: &[u8]) -> Self {
        Self { key: key.to_vec() }
    }

    fn as_slice<T, F: Fn(&[u8]) -> T>(&self, f: F) -> T {
        f(self.key.as_slice())
    }
}

impl Store for LevelDB {
    /// Retrieve some bytes in `column` with `key`.
    fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
        let column_key = Self::get_key_for_col(col, key);

        metrics::inc_counter(&metrics::DISK_DB_READ_COUNT);

        let result = self
            .db
            .get(self.read_options(), column_key)
            .map_err(Into::into);

        if let Ok(Some(bytes)) = &result {
            metrics::inc_counter_by(&metrics::DISK_DB_READ_BYTES, bytes.len() as i64)
        }

        result
    }

    /// Store some `value` in `column`, indexed with `key`.
    fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> {
        let column_key = Self::get_key_for_col(col, key);

        metrics::inc_counter(&metrics::DISK_DB_WRITE_COUNT);
        metrics::inc_counter_by(&metrics::DISK_DB_WRITE_BYTES, val.len() as i64);

        self.db
            .put(self.write_options(), column_key, val)
            .map_err(Into::into)
    }

    /// Return `true` if `key` exists in `column`.
    fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error> {
        let column_key = Self::get_key_for_col(col, key);

        metrics::inc_counter(&metrics::DISK_DB_EXISTS_COUNT);

        self.db
            .get(self.read_options(), column_key)
            .map(|val| val.is_some())
            .map_err(Into::into)
    }

    /// Removes `key` from `column`.
    fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> {
        let column_key = Self::get_key_for_col(col, key);

        metrics::inc_counter(&metrics::DISK_DB_DELETE_COUNT);

        self.db
            .delete(self.write_options(), column_key)
            .map_err(Into::into)
    }
}

impl From<LevelDBError> for Error {
    fn from(e: LevelDBError) -> Error {
        Error::DBError {
            message: format!("{:?}", e),
        }
    }
}
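
A minimal sketch of how this wrapper might be exercised, assuming the `Store` trait and `Error` type brought into scope by `use super::*;` above. The `demo` function name and the "testdb" path are hypothetical, for illustration only:

fn demo() -> Result<(), Error> {
    // Hypothetical path; any writable directory would do.
    let store = LevelDB::open(Path::new("testdb"))?;

    // Keys are namespaced by column: `get_key_for_col` builds the real
    // leveldb key by concatenating the column name bytes and the key bytes.
    store.put_bytes("block", b"0x00", &[1u8, 2, 3])?;
    assert!(store.key_exists("block", b"0x00")?);
    assert_eq!(store.get_bytes("block", b"0x00")?, Some(vec![1, 2, 3]));

    // The same key under a different column name maps to a different leveldb key.
    assert!(!store.key_exists("state", b"0x00")?);

    store.key_delete("block", b"0x00")?;
    assert!(!store.key_exists("block", b"0x00")?);
    Ok(())
}

One consequence of the plain concatenation in `get_key_for_col` is that two (column, key) pairs only stay distinct as long as no column name plus key bytes spells out another column name plus key bytes; the calling crate presumably chooses column names for which such collisions cannot occur.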