mirror of
https://github.com/sigp/lighthouse.git
synced 2026-05-09 03:17:55 +00:00
Add beacon.watch (#3362)
> This is currently a WIP and all features are subject to alteration or removal at any time. ## Overview The successor to #2873. Contains the backbone of `beacon.watch` including syncing code, the initial API, and several core database tables. See `watch/README.md` for more information, requirements and usage.
This commit is contained in:
140
watch/src/block_packing/database.rs
Normal file
140
watch/src/block_packing/database.rs
Normal file
@@ -0,0 +1,140 @@
|
||||
use crate::database::{
|
||||
schema::{beacon_blocks, block_packing},
|
||||
watch_types::{WatchHash, WatchSlot},
|
||||
Error, PgConn, MAX_SIZE_BATCH_INSERT,
|
||||
};
|
||||
|
||||
use diesel::prelude::*;
|
||||
use diesel::{Insertable, Queryable};
|
||||
use log::debug;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Instant;
|
||||
|
||||
/// Row of the `block_packing` table, keyed by `slot`.
///
/// Populated from the `lighthouse/analysis/block_packing` API response
/// (see `get_block_packing` in this crate's `block_packing` module).
#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = block_packing)]
pub struct WatchBlockPacking {
    // Slot of the block this packing data describes.
    pub slot: WatchSlot,
    // Number of attestations available for inclusion in the block.
    pub available: i32,
    // Number of attestations actually included in the block.
    pub included: i32,
    // Number of skip slots immediately prior to the block.
    pub prior_skip_slots: i32,
}
|
||||
|
||||
/// Insert a batch of values into the `block_packing` table.
|
||||
///
|
||||
/// On a conflict, it will do nothing, leaving the old value.
|
||||
pub fn insert_batch_block_packing(
|
||||
conn: &mut PgConn,
|
||||
packing: Vec<WatchBlockPacking>,
|
||||
) -> Result<(), Error> {
|
||||
use self::block_packing::dsl::*;
|
||||
|
||||
let mut count = 0;
|
||||
let timer = Instant::now();
|
||||
|
||||
for chunk in packing.chunks(MAX_SIZE_BATCH_INSERT) {
|
||||
count += diesel::insert_into(block_packing)
|
||||
.values(chunk)
|
||||
.on_conflict_do_nothing()
|
||||
.execute(conn)?;
|
||||
}
|
||||
|
||||
let time_taken = timer.elapsed();
|
||||
debug!("Block packing inserted, count: {count}, time taken: {time_taken:?}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Selects the row from the `block_packing` table where `slot` is minimum.
|
||||
pub fn get_lowest_block_packing(conn: &mut PgConn) -> Result<Option<WatchBlockPacking>, Error> {
|
||||
use self::block_packing::dsl::*;
|
||||
let timer = Instant::now();
|
||||
|
||||
let result = block_packing
|
||||
.order_by(slot.asc())
|
||||
.limit(1)
|
||||
.first::<WatchBlockPacking>(conn)
|
||||
.optional()?;
|
||||
|
||||
let time_taken = timer.elapsed();
|
||||
debug!("Block packing requested: lowest, time_taken: {time_taken:?}");
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Selects the row from the `block_packing` table where `slot` is maximum.
|
||||
pub fn get_highest_block_packing(conn: &mut PgConn) -> Result<Option<WatchBlockPacking>, Error> {
|
||||
use self::block_packing::dsl::*;
|
||||
let timer = Instant::now();
|
||||
|
||||
let result = block_packing
|
||||
.order_by(slot.desc())
|
||||
.limit(1)
|
||||
.first::<WatchBlockPacking>(conn)
|
||||
.optional()?;
|
||||
|
||||
let time_taken = timer.elapsed();
|
||||
debug!("Block packing requested: highest, time_taken: {time_taken:?}");
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Selects a single row of the `block_packing` table corresponding to a given `root_query`.
|
||||
pub fn get_block_packing_by_root(
|
||||
conn: &mut PgConn,
|
||||
root_query: WatchHash,
|
||||
) -> Result<Option<WatchBlockPacking>, Error> {
|
||||
use self::beacon_blocks::dsl::{beacon_blocks, root};
|
||||
use self::block_packing::dsl::*;
|
||||
let timer = Instant::now();
|
||||
|
||||
let join = beacon_blocks.inner_join(block_packing);
|
||||
|
||||
let result = join
|
||||
.select((slot, available, included, prior_skip_slots))
|
||||
.filter(root.eq(root_query))
|
||||
.first::<WatchBlockPacking>(conn)
|
||||
.optional()?;
|
||||
|
||||
let time_taken = timer.elapsed();
|
||||
debug!("Block packing requested: {root_query}, time_taken: {time_taken:?}");
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Selects a single row of the `block_packing` table corresponding to a given `slot_query`.
|
||||
pub fn get_block_packing_by_slot(
|
||||
conn: &mut PgConn,
|
||||
slot_query: WatchSlot,
|
||||
) -> Result<Option<WatchBlockPacking>, Error> {
|
||||
use self::block_packing::dsl::*;
|
||||
let timer = Instant::now();
|
||||
|
||||
let result = block_packing
|
||||
.filter(slot.eq(slot_query))
|
||||
.first::<WatchBlockPacking>(conn)
|
||||
.optional()?;
|
||||
|
||||
let time_taken = timer.elapsed();
|
||||
debug!("Block packing requested: {slot_query}, time_taken: {time_taken:?}");
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding
/// row in `block_packing`.
///
/// Rows from epoch 0 are excluded since packing data cannot be retrieved for them.
#[allow(dead_code)]
pub fn get_unknown_block_packing(
    conn: &mut PgConn,
    slots_per_epoch: u64,
) -> Result<Vec<Option<WatchSlot>>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root, slot};
    // Only the table itself is imported here, so `root`/`slot` below unambiguously
    // refer to the `beacon_blocks` columns.
    use self::block_packing::dsl::block_packing;

    let join = beacon_blocks.left_join(block_packing);

    // NOTE(review): `root` belongs to `beacon_blocks`, the *left* side of this left join;
    // an IS NULL filter on a left-table column would normally match nothing. If the intent
    // is "beacon blocks with no packing row", a null check on a `block_packing` column
    // seems expected instead — confirm against the schema/joinable definitions.
    let result = join
        .select(slot)
        .filter(root.is_null())
        // Block packing cannot be retrieved for epoch 0 so we need to exclude them.
        // NOTE(review): `as i32` silently truncates if `slots_per_epoch` ever exceeded
        // i32::MAX — presumably fine for realistic spec values, but worth confirming.
        .filter(slot.ge(slots_per_epoch as i32))
        .order_by(slot.desc())
        .nullable()
        .load::<Option<WatchSlot>>(conn)?;

    Ok(result)
}
|
||||
38
watch/src/block_packing/mod.rs
Normal file
38
watch/src/block_packing/mod.rs
Normal file
@@ -0,0 +1,38 @@
|
||||
pub mod database;
|
||||
pub mod server;
|
||||
pub mod updater;
|
||||
|
||||
use crate::database::watch_types::WatchSlot;
|
||||
use crate::updater::error::Error;
|
||||
|
||||
pub use database::{
|
||||
get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing,
|
||||
get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing,
|
||||
WatchBlockPacking,
|
||||
};
|
||||
pub use server::block_packing_routes;
|
||||
|
||||
use eth2::BeaconNodeHttpClient;
|
||||
use types::Epoch;
|
||||
|
||||
/// Sends a request to `lighthouse/analysis/block_packing`.
|
||||
/// Formats the response into a vector of `WatchBlockPacking`.
|
||||
///
|
||||
/// Will fail if `start_epoch == 0`.
|
||||
pub async fn get_block_packing(
|
||||
bn: &BeaconNodeHttpClient,
|
||||
start_epoch: Epoch,
|
||||
end_epoch: Epoch,
|
||||
) -> Result<Vec<WatchBlockPacking>, Error> {
|
||||
Ok(bn
|
||||
.get_lighthouse_analysis_block_packing(start_epoch, end_epoch)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|data| WatchBlockPacking {
|
||||
slot: WatchSlot::from_slot(data.slot),
|
||||
available: data.available_attestations as i32,
|
||||
included: data.included_attestations as i32,
|
||||
prior_skip_slots: data.prior_skip_slots as i32,
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
31
watch/src/block_packing/server.rs
Normal file
31
watch/src/block_packing/server.rs
Normal file
@@ -0,0 +1,31 @@
|
||||
use crate::block_packing::database::{
|
||||
get_block_packing_by_root, get_block_packing_by_slot, WatchBlockPacking,
|
||||
};
|
||||
use crate::database::{get_connection, PgPool, WatchHash, WatchSlot};
|
||||
use crate::server::Error;
|
||||
|
||||
use axum::{extract::Path, routing::get, Extension, Json, Router};
|
||||
use eth2::types::BlockId;
|
||||
use std::str::FromStr;
|
||||
|
||||
pub async fn get_block_packing(
|
||||
Path(block_query): Path<String>,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
) -> Result<Json<Option<WatchBlockPacking>>, Error> {
|
||||
let mut conn = get_connection(&pool).map_err(Error::Database)?;
|
||||
match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? {
|
||||
BlockId::Root(root) => Ok(Json(get_block_packing_by_root(
|
||||
&mut conn,
|
||||
WatchHash::from_hash(root),
|
||||
)?)),
|
||||
BlockId::Slot(slot) => Ok(Json(get_block_packing_by_slot(
|
||||
&mut conn,
|
||||
WatchSlot::from_slot(slot),
|
||||
)?)),
|
||||
_ => Err(Error::BadRequest),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn block_packing_routes() -> Router {
|
||||
Router::new().route("/v1/blocks/:block/packing", get(get_block_packing))
|
||||
}
|
||||
211
watch/src/block_packing/updater.rs
Normal file
211
watch/src/block_packing/updater.rs
Normal file
@@ -0,0 +1,211 @@
|
||||
use crate::database::{self, Error as DbError};
|
||||
use crate::updater::{Error, UpdateHandler};
|
||||
|
||||
use crate::block_packing::get_block_packing;
|
||||
|
||||
use eth2::types::{Epoch, EthSpec};
|
||||
use log::{debug, error, warn};
|
||||
|
||||
const MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING: u64 = 50;
|
||||
|
||||
impl<T: EthSpec> UpdateHandler<T> {
|
||||
/// Forward fills the `block_packing` table starting from the entry with the
|
||||
/// highest slot.
|
||||
///
|
||||
/// It constructs a request to the `get_block_packing` API with:
|
||||
/// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest beacon block)
|
||||
/// `end_epoch` -> epoch of highest beacon block
|
||||
///
|
||||
/// It will resync the latest epoch if it is not fully filled.
|
||||
/// That is, `if highest_filled_slot % slots_per_epoch != 31`
|
||||
/// This means that if the last slot of an epoch is a skip slot, the whole epoch will be
|
||||
//// resynced during the next head update.
|
||||
///
|
||||
/// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`.
|
||||
pub async fn fill_block_packing(&mut self) -> Result<(), Error> {
|
||||
let mut conn = database::get_connection(&self.pool)?;
|
||||
|
||||
// Get the slot of the highest entry in the `block_packing` table.
|
||||
let highest_filled_slot_opt = if self.config.block_packing {
|
||||
database::get_highest_block_packing(&mut conn)?.map(|packing| packing.slot)
|
||||
} else {
|
||||
return Err(Error::NotEnabled("block_packing".to_string()));
|
||||
};
|
||||
|
||||
let mut start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt {
|
||||
if highest_filled_slot.as_slot() % self.slots_per_epoch
|
||||
== self.slots_per_epoch.saturating_sub(1)
|
||||
{
|
||||
// The whole epoch is filled so we can begin syncing the next one.
|
||||
highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + 1
|
||||
} else {
|
||||
// The epoch is only partially synced. Try to sync it fully.
|
||||
highest_filled_slot.as_slot().epoch(self.slots_per_epoch)
|
||||
}
|
||||
} else {
|
||||
// No entries in the `block_packing` table. Use `beacon_blocks` instead.
|
||||
if let Some(lowest_beacon_block) = database::get_lowest_beacon_block(&mut conn)? {
|
||||
lowest_beacon_block
|
||||
.slot
|
||||
.as_slot()
|
||||
.epoch(self.slots_per_epoch)
|
||||
} else {
|
||||
// There are no blocks in the database, do not fill the `block_packing` table.
|
||||
warn!("Refusing to fill block packing as there are no blocks in the database");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
// The `get_block_packing` API endpoint cannot accept `start_epoch == 0`.
|
||||
if start_epoch == 0 {
|
||||
start_epoch += 1
|
||||
}
|
||||
|
||||
if let Some(highest_block_slot) =
|
||||
database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
|
||||
{
|
||||
let mut end_epoch = highest_block_slot.epoch(self.slots_per_epoch);
|
||||
|
||||
if start_epoch > end_epoch {
|
||||
debug!("Block packing is up to date with the head of the database");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Ensure the size of the request does not exceed the maximum allowed value.
|
||||
if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) {
|
||||
end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING
|
||||
}
|
||||
|
||||
if let Some(lowest_block_slot) =
|
||||
database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
|
||||
{
|
||||
let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?;
|
||||
|
||||
// Since we pull a full epoch of data but are not guaranteed to have all blocks of
|
||||
// that epoch available, only insert blocks with corresponding `beacon_block`s.
|
||||
packing.retain(|packing| {
|
||||
packing.slot.as_slot() >= lowest_block_slot
|
||||
&& packing.slot.as_slot() <= highest_block_slot
|
||||
});
|
||||
database::insert_batch_block_packing(&mut conn, packing)?;
|
||||
} else {
|
||||
return Err(Error::Database(DbError::Other(
|
||||
"Database did not return a lowest block when one exists".to_string(),
|
||||
)));
|
||||
}
|
||||
} else {
|
||||
// There are no blocks in the `beacon_blocks` database, but there are entries in the
|
||||
// `block_packing` table. This is a critical failure. It usually means someone has
|
||||
// manually tampered with the database tables and should not occur during normal
|
||||
// operation.
|
||||
error!("Database is corrupted. Please re-sync the database");
|
||||
return Err(Error::Database(DbError::DatabaseCorrupted));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Backfill the `block_packing` table starting from the entry with the lowest slot.
|
||||
///
|
||||
/// It constructs a request to the `get_block_packing` function with:
|
||||
/// `start_epoch` -> epoch of lowest_beacon_block
|
||||
/// `end_epoch` -> epoch of lowest filled `block_packing` - 1 (or epoch of highest beacon block)
|
||||
///
|
||||
/// It will resync the lowest epoch if it is not fully filled.
|
||||
/// That is, `if lowest_filled_slot % slots_per_epoch != 0`
|
||||
/// This means that if the last slot of an epoch is a skip slot, the whole epoch will be
|
||||
//// resynced during the next head update.
|
||||
///
|
||||
/// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`.
|
||||
pub async fn backfill_block_packing(&mut self) -> Result<(), Error> {
|
||||
let mut conn = database::get_connection(&self.pool)?;
|
||||
let max_block_packing_backfill = self.config.max_backfill_size_epochs;
|
||||
|
||||
// Get the slot of the lowest entry in the `block_packing` table.
|
||||
let lowest_filled_slot_opt = if self.config.block_packing {
|
||||
database::get_lowest_block_packing(&mut conn)?.map(|packing| packing.slot)
|
||||
} else {
|
||||
return Err(Error::NotEnabled("block_packing".to_string()));
|
||||
};
|
||||
|
||||
let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt {
|
||||
if lowest_filled_slot.as_slot() % self.slots_per_epoch == 0 {
|
||||
lowest_filled_slot
|
||||
.as_slot()
|
||||
.epoch(self.slots_per_epoch)
|
||||
.saturating_sub(Epoch::new(1))
|
||||
} else {
|
||||
// The epoch is only partially synced. Try to sync it fully.
|
||||
lowest_filled_slot.as_slot().epoch(self.slots_per_epoch)
|
||||
}
|
||||
} else {
|
||||
// No entries in the `block_packing` table. Use `beacon_blocks` instead.
|
||||
if let Some(highest_beacon_block) =
|
||||
database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot)
|
||||
{
|
||||
highest_beacon_block.as_slot().epoch(self.slots_per_epoch)
|
||||
} else {
|
||||
// There are no blocks in the database, do not backfill the `block_packing` table.
|
||||
warn!("Refusing to backfill block packing as there are no blocks in the database");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
if end_epoch <= 1 {
|
||||
debug!("Block packing backfill is complete");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Some(lowest_block_slot) =
|
||||
database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
|
||||
{
|
||||
let mut start_epoch = lowest_block_slot.epoch(self.slots_per_epoch);
|
||||
|
||||
if start_epoch >= end_epoch {
|
||||
debug!("Block packing is up to date with the base of the database");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Ensure that the request range does not exceed `max_block_packing_backfill` or
|
||||
// `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`.
|
||||
if start_epoch < end_epoch.saturating_sub(max_block_packing_backfill) {
|
||||
start_epoch = end_epoch.saturating_sub(max_block_packing_backfill)
|
||||
}
|
||||
if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) {
|
||||
start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING)
|
||||
}
|
||||
|
||||
// The `block_packing` API cannot accept `start_epoch == 0`.
|
||||
if start_epoch == 0 {
|
||||
start_epoch += 1
|
||||
}
|
||||
|
||||
if let Some(highest_block_slot) =
|
||||
database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
|
||||
{
|
||||
let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?;
|
||||
|
||||
// Only insert blocks with corresponding `beacon_block`s.
|
||||
packing.retain(|packing| {
|
||||
packing.slot.as_slot() >= lowest_block_slot
|
||||
&& packing.slot.as_slot() <= highest_block_slot
|
||||
});
|
||||
|
||||
database::insert_batch_block_packing(&mut conn, packing)?;
|
||||
} else {
|
||||
return Err(Error::Database(DbError::Other(
|
||||
"Database did not return a lowest block when one exists".to_string(),
|
||||
)));
|
||||
}
|
||||
} else {
|
||||
// There are no blocks in the `beacon_blocks` database, but there are entries in the
|
||||
// `block_packing` table. This is a critical failure. It usually means someone has
|
||||
// manually tampered with the database tables and should not occur during normal
|
||||
// operation.
|
||||
error!("Database is corrupted. Please re-sync the database");
|
||||
return Err(Error::Database(DbError::DatabaseCorrupted));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user