diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 416179445f..86f99b53e1 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -258,8 +258,8 @@ jobs:
| | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz.asc) |
| | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz.asc) |
| | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz.asc) |
- | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz.asc) |
- | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz.asc) |
+ | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz.asc) |
+ | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz.asc) |
| | | | |
| **System** | **Option** | - | **Resource** |
| | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}) |
diff --git a/Cargo.lock b/Cargo.lock
index cc6c1a9fd6..431a272be8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1340,6 +1340,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0"
dependencies = [
"clap_builder",
+ "clap_derive",
]
[[package]]
@@ -1355,6 +1356,18 @@ dependencies = [
"terminal_size",
]
+[[package]]
+name = "clap_derive"
+version = "4.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
+dependencies = [
+ "heck 0.5.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.66",
+]
+
[[package]]
name = "clap_lex"
version = "0.7.0"
@@ -1824,6 +1837,7 @@ dependencies = [
"clap_utils",
"environment",
"hex",
+ "serde",
"slog",
"store",
"strum",
@@ -2069,7 +2083,7 @@ dependencies = [
"enr",
"fnv",
"futures",
- "hashlink",
+ "hashlink 0.8.4",
"hex",
"hkdf",
"lazy_static",
@@ -3358,6 +3372,7 @@ dependencies = [
"futures-ticker",
"futures-timer",
"getrandom",
+ "hashlink 0.9.0",
"hex_fmt",
"libp2p",
"prometheus-client",
@@ -3473,6 +3488,15 @@ dependencies = [
"hashbrown 0.14.5",
]
+[[package]]
+name = "hashlink"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee"
+dependencies = [
+ "hashbrown 0.14.5",
+]
+
[[package]]
name = "headers"
version = "0.3.9"
@@ -6957,7 +6981,7 @@ dependencies = [
"bitflags 1.3.2",
"fallible-iterator",
"fallible-streaming-iterator",
- "hashlink",
+ "hashlink 0.8.4",
"libsqlite3-sys",
"smallvec",
]
@@ -9774,7 +9798,7 @@ checksum = "498f4d102a79ea1c9d4dd27573c0fc96ad74c023e8da38484e47883076da25fb"
dependencies = [
"arraydeque",
"encoding_rs",
- "hashlink",
+ "hashlink 0.8.4",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index eedc47470e..fad5fbead1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -101,7 +101,7 @@ bincode = "1"
bitvec = "1"
byteorder = "1"
bytes = "1"
-clap = { version = "4.5.4", features = ["cargo", "wrap_help"] }
+clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] }
# Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable
# feature ourselves when desired.
c-kzg = { version = "1", default-features = false }
@@ -126,6 +126,7 @@ fnv = "1"
fs2 = "0.4"
futures = "0.3"
hex = "0.4"
+hashlink = "0.9.0"
hyper = "1"
itertools = "0.10"
lazy_static = "1"
@@ -153,7 +154,7 @@ serde_json = "1"
serde_repr = "0.1"
serde_yaml = "0.9"
sha2 = "0.9"
-slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] }
+slog = { version = "2", features = ["max_level_debug", "release_max_level_debug", "nested-values"] }
slog-async = "2"
slog-term = "2"
sloggers = { version = "2", features = ["json"] }
diff --git a/Dockerfile b/Dockerfile
index e0c48699bf..ff7f14d534 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,8 +4,8 @@ COPY . lighthouse
ARG FEATURES
ARG PROFILE=release
ARG CARGO_USE_GIT_CLI=true
-ENV FEATURES $FEATURES
-ENV PROFILE $PROFILE
+ENV FEATURES=$FEATURES
+ENV PROFILE=$PROFILE
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_USE_GIT_CLI
RUN cd lighthouse && make
diff --git a/Makefile b/Makefile
index 7d144e55fb..d18a673880 100644
--- a/Makefile
+++ b/Makefile
@@ -174,8 +174,9 @@ test-network-%:
# Run the tests in the `slasher` crate for all supported database backends.
test-slasher:
cargo nextest run --release -p slasher --features "lmdb,$(TEST_FEATURES)"
+ cargo nextest run --release -p slasher --no-default-features --features "redb,$(TEST_FEATURES)"
cargo nextest run --release -p slasher --no-default-features --features "mdbx,$(TEST_FEATURES)"
- cargo nextest run --release -p slasher --features "lmdb,mdbx,$(TEST_FEATURES)" # both backends enabled
+ cargo nextest run --release -p slasher --features "lmdb,mdbx,redb,$(TEST_FEATURES)" # all backends enabled
# Runs only the tests/state_transition_vectors tests.
run-state-transition-tests:
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 7f09430227..19ee3d116c 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -723,13 +723,6 @@ impl BeaconChain {
Ok(())
}
- pub fn persist_data_availability_checker(&self) -> Result<(), Error> {
- let _timer = metrics::start_timer(&metrics::PERSIST_DATA_AVAILABILITY_CHECKER);
- self.data_availability_checker.persist_all()?;
-
- Ok(())
- }
-
/// Returns the slot _right now_ according to `self.slot_clock`. Returns `Err` if the slot is
/// unavailable.
///
@@ -3095,14 +3088,21 @@ impl BeaconChain {
notify_execution_layer,
)?;
publish_fn()?;
+
+ // Record the time it took to complete consensus verification.
+ if let Some(timestamp) = self.slot_clock.now_duration() {
+ self.block_times_cache
+ .write()
+ .set_time_consensus_verified(block_root, block_slot, timestamp)
+ }
+
let executed_block = chain.into_executed_block(execution_pending).await?;
- // Record the time it took to ask the execution layer.
- if let Some(seen_timestamp) = self.slot_clock.now_duration() {
- self.block_times_cache.write().set_execution_time(
- block_root,
- block_slot,
- seen_timestamp,
- )
+
+ // Record the *additional* time it took to wait for execution layer verification.
+ if let Some(timestamp) = self.slot_clock.now_duration() {
+ self.block_times_cache
+ .write()
+ .set_time_executed(block_root, block_slot, timestamp)
}
match executed_block {
@@ -6753,7 +6753,6 @@ impl Drop for BeaconChain {
let drop = || -> Result<(), Error> {
self.persist_head_and_fork_choice()?;
self.persist_op_pool()?;
- self.persist_data_availability_checker()?;
self.persist_eth1_cache()
};
diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
index f7f389c543..f746b68996 100644
--- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
+++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
@@ -20,26 +20,12 @@ use types::{
Hash256, Slot,
};
-/// Ensure this justified checkpoint has an epoch of 0 so that it is never
-/// greater than the justified checkpoint and enshrined as the actual justified
-/// checkpoint.
-const JUNK_BEST_JUSTIFIED_CHECKPOINT: Checkpoint = Checkpoint {
- epoch: Epoch::new(0),
- root: Hash256::repeat_byte(0),
-};
-
#[derive(Debug)]
pub enum Error {
- UnableToReadSlot,
- UnableToReadTime,
- InvalidGenesisSnapshot(Slot),
- AncestorUnknown { ancestor_slot: Slot },
- UninitializedBestJustifiedBalances,
FailedToReadBlock(StoreError),
MissingBlock(Hash256),
FailedToReadState(StoreError),
MissingState(Hash256),
- InvalidPersistedBytes(ssz::DecodeError),
BeaconStateError(BeaconStateError),
Arith(ArithError),
}
@@ -66,7 +52,6 @@ const MAX_BALANCE_CACHE_SIZE: usize = 4;
)]
pub(crate) struct CacheItem {
pub(crate) block_root: Hash256,
- #[superstruct(only(V8))]
pub(crate) epoch: Epoch,
pub(crate) balances: Vec,
}
@@ -79,7 +64,6 @@ pub(crate) type CacheItem = CacheItemV8;
no_enum
)]
pub struct BalancesCache {
- #[superstruct(only(V8))]
pub(crate) items: Vec,
}
@@ -365,59 +349,15 @@ where
pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV17;
/// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
-#[superstruct(
- variants(V11, V17),
- variant_attributes(derive(Encode, Decode)),
- no_enum
-)]
+#[superstruct(variants(V17), variant_attributes(derive(Encode, Decode)), no_enum)]
pub struct PersistedForkChoiceStore {
- #[superstruct(only(V11, V17))]
pub balances_cache: BalancesCacheV8,
pub time: Slot,
pub finalized_checkpoint: Checkpoint,
pub justified_checkpoint: Checkpoint,
pub justified_balances: Vec,
- #[superstruct(only(V11))]
- pub best_justified_checkpoint: Checkpoint,
- #[superstruct(only(V11, V17))]
pub unrealized_justified_checkpoint: Checkpoint,
- #[superstruct(only(V11, V17))]
pub unrealized_finalized_checkpoint: Checkpoint,
- #[superstruct(only(V11, V17))]
pub proposer_boost_root: Hash256,
- #[superstruct(only(V11, V17))]
pub equivocating_indices: BTreeSet,
}
-
-impl From for PersistedForkChoiceStore {
- fn from(from: PersistedForkChoiceStoreV11) -> PersistedForkChoiceStore {
- PersistedForkChoiceStore {
- balances_cache: from.balances_cache,
- time: from.time,
- finalized_checkpoint: from.finalized_checkpoint,
- justified_checkpoint: from.justified_checkpoint,
- justified_balances: from.justified_balances,
- unrealized_justified_checkpoint: from.unrealized_justified_checkpoint,
- unrealized_finalized_checkpoint: from.unrealized_finalized_checkpoint,
- proposer_boost_root: from.proposer_boost_root,
- equivocating_indices: from.equivocating_indices,
- }
- }
-}
-
-impl From for PersistedForkChoiceStoreV11 {
- fn from(from: PersistedForkChoiceStore) -> PersistedForkChoiceStoreV11 {
- PersistedForkChoiceStoreV11 {
- balances_cache: from.balances_cache,
- time: from.time,
- finalized_checkpoint: from.finalized_checkpoint,
- justified_checkpoint: from.justified_checkpoint,
- justified_balances: from.justified_balances,
- best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT,
- unrealized_justified_checkpoint: from.unrealized_justified_checkpoint,
- unrealized_finalized_checkpoint: from.unrealized_finalized_checkpoint,
- proposer_boost_root: from.proposer_boost_root,
- equivocating_indices: from.equivocating_indices,
- }
- }
-}
diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs
index db547a1186..3b75046f3a 100644
--- a/beacon_node/beacon_chain/src/block_times_cache.rs
+++ b/beacon_node/beacon_chain/src/block_times_cache.rs
@@ -19,7 +19,9 @@ type BlockRoot = Hash256;
pub struct Timestamps {
pub observed: Option,
pub all_blobs_observed: Option,
- pub execution_time: Option,
+ pub consensus_verified: Option,
+ pub started_execution: Option,
+ pub executed: Option,
pub attestable: Option,
pub imported: Option,
pub set_as_head: Option,
@@ -32,7 +34,9 @@ pub struct BlockDelays {
pub observed: Option,
/// The time after the start of the slot we saw all blobs.
pub all_blobs_observed: Option,
- /// The time it took to get verification from the EL for the block.
+ /// The time it took to complete consensus verification of the block.
+ pub consensus_verification_time: Option,
+ /// The time it took to complete execution verification of the block.
pub execution_time: Option,
/// The delay from the start of the slot before the block became available
///
@@ -58,13 +62,16 @@ impl BlockDelays {
let all_blobs_observed = times
.all_blobs_observed
.and_then(|all_blobs_observed| all_blobs_observed.checked_sub(slot_start_time));
+ let consensus_verification_time = times
+ .consensus_verified
+ .and_then(|consensus_verified| consensus_verified.checked_sub(times.observed?));
let execution_time = times
- .execution_time
- .and_then(|execution_time| execution_time.checked_sub(times.observed?));
+ .executed
+ .and_then(|executed| executed.checked_sub(times.started_execution?));
// Duration since UNIX epoch at which block became available.
- let available_time = times.execution_time.map(|execution_time| {
- std::cmp::max(execution_time, times.all_blobs_observed.unwrap_or_default())
- });
+ let available_time = times
+ .executed
+ .map(|executed| std::cmp::max(executed, times.all_blobs_observed.unwrap_or_default()));
// Duration from the start of the slot until the block became available.
let available_delay =
available_time.and_then(|available_time| available_time.checked_sub(slot_start_time));
@@ -80,6 +87,7 @@ impl BlockDelays {
BlockDelays {
observed,
all_blobs_observed,
+ consensus_verification_time,
execution_time,
available: available_delay,
attestable,
@@ -155,6 +163,9 @@ impl BlockTimesCache {
slot: Slot,
timestamp: Duration,
) {
+ // Unlike other functions in this file, we update the blob observed time only if it is
+ // *greater* than existing blob observation times. This allows us to know the observation
+ // time of the last blob to arrive.
let block_times = self
.cache
.entry(block_root)
@@ -168,48 +179,89 @@ impl BlockTimesCache {
}
}
- pub fn set_execution_time(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
+ /// Set the timestamp for `field` if that timestamp is less than any previously known value.
+ ///
+ /// If no previous value is known for the field, then the supplied timestamp will always be
+ /// stored.
+ pub fn set_time_if_less(
+ &mut self,
+ block_root: BlockRoot,
+ slot: Slot,
+ field: impl Fn(&mut Timestamps) -> &mut Option,
+ timestamp: Duration,
+ ) {
let block_times = self
.cache
.entry(block_root)
.or_insert_with(|| BlockTimesCacheValue::new(slot));
- if block_times
- .timestamps
- .execution_time
- .map_or(true, |prev| timestamp < prev)
- {
- block_times.timestamps.execution_time = Some(timestamp);
+ let existing_timestamp = field(&mut block_times.timestamps);
+ if existing_timestamp.map_or(true, |prev| timestamp < prev) {
+ *existing_timestamp = Some(timestamp);
}
}
+ pub fn set_time_consensus_verified(
+ &mut self,
+ block_root: BlockRoot,
+ slot: Slot,
+ timestamp: Duration,
+ ) {
+ self.set_time_if_less(
+ block_root,
+ slot,
+ |timestamps| &mut timestamps.consensus_verified,
+ timestamp,
+ )
+ }
+
+ pub fn set_time_executed(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
+ self.set_time_if_less(
+ block_root,
+ slot,
+ |timestamps| &mut timestamps.executed,
+ timestamp,
+ )
+ }
+
+ pub fn set_time_started_execution(
+ &mut self,
+ block_root: BlockRoot,
+ slot: Slot,
+ timestamp: Duration,
+ ) {
+ self.set_time_if_less(
+ block_root,
+ slot,
+ |timestamps| &mut timestamps.started_execution,
+ timestamp,
+ )
+ }
+
pub fn set_time_attestable(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
- let block_times = self
- .cache
- .entry(block_root)
- .or_insert_with(|| BlockTimesCacheValue::new(slot));
- if block_times
- .timestamps
- .attestable
- .map_or(true, |prev| timestamp < prev)
- {
- block_times.timestamps.attestable = Some(timestamp);
- }
+ self.set_time_if_less(
+ block_root,
+ slot,
+ |timestamps| &mut timestamps.attestable,
+ timestamp,
+ )
}
pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
- let block_times = self
- .cache
- .entry(block_root)
- .or_insert_with(|| BlockTimesCacheValue::new(slot));
- block_times.timestamps.imported = Some(timestamp);
+ self.set_time_if_less(
+ block_root,
+ slot,
+ |timestamps| &mut timestamps.imported,
+ timestamp,
+ )
}
pub fn set_time_set_as_head(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
- let block_times = self
- .cache
- .entry(block_root)
- .or_insert_with(|| BlockTimesCacheValue::new(slot));
- block_times.timestamps.set_as_head = Some(timestamp);
+ self.set_time_if_less(
+ block_root,
+ slot,
+ |timestamps| &mut timestamps.set_as_head,
+ timestamp,
+ )
}
pub fn get_block_delays(
diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index b24921e317..d906518ff5 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -67,7 +67,7 @@ use crate::{
metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
};
use derivative::Derivative;
-use eth2::types::{EventKind, PublishBlockRequest};
+use eth2::types::{BlockGossip, EventKind, PublishBlockRequest};
use execution_layer::PayloadStatus;
pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
use parking_lot::RwLockReadGuard;
@@ -93,7 +93,6 @@ use std::io::Write;
use std::sync::Arc;
use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
use task_executor::JoinHandle;
-use tree_hash::TreeHash;
use types::{
BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExecutionBlockHash,
Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock,
@@ -975,6 +974,16 @@ impl GossipVerifiedBlock {
// Validate the block's execution_payload (if any).
validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?;
+ // Beacon API block_gossip events
+ if let Some(event_handler) = chain.event_handler.as_ref() {
+ if event_handler.has_block_gossip_subscribers() {
+ event_handler.register(EventKind::BlockGossip(Box::new(BlockGossip {
+ slot: block.slot(),
+ block: block_root,
+ })));
+ }
+ }
+
// Having checked the proposer index and the block root we can cache them.
let consensus_context = ConsensusContext::new(block.slot())
.set_current_block_root(block_root)
@@ -1335,6 +1344,13 @@ impl ExecutionPendingBlock {
// The specification declares that this should be run *inside* `per_block_processing`,
// however we run it here to keep `per_block_processing` pure (i.e., no calls to external
// servers).
+ if let Some(started_execution) = chain.slot_clock.now_duration() {
+ chain.block_times_cache.write().set_time_started_execution(
+ block_root,
+ block.slot(),
+ started_execution,
+ );
+ }
let payload_verification_status = payload_notifier.notify_new_payload().await?;
// If the payload did not validate or invalidate the block, check to see if this block is
@@ -2107,7 +2123,14 @@ pub fn verify_header_signature(
fn write_state(prefix: &str, state: &BeaconState, log: &Logger) {
if WRITE_BLOCK_PROCESSING_SSZ {
- let root = state.tree_hash_root();
+ let mut state = state.clone();
+ let Ok(root) = state.canonical_root() else {
+ error!(
+ log,
+ "Unable to hash state for writing";
+ );
+ return;
+ };
let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot(), root);
let mut path = std::env::temp_dir().join("lighthouse");
let _ = fs::create_dir_all(path.clone());
diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs
index 90461b8f03..7217f2c640 100644
--- a/beacon_node/beacon_chain/src/builder.rs
+++ b/beacon_node/beacon_chain/src/builder.rs
@@ -1195,7 +1195,7 @@ mod test {
let head = chain.head_snapshot();
- let state = &head.beacon_state;
+ let mut state = head.beacon_state.clone();
let block = &head.beacon_block;
assert_eq!(state.slot(), Slot::new(0), "should start from genesis");
@@ -1206,7 +1206,7 @@ mod test {
);
assert_eq!(
block.state_root(),
- state.canonical_root(),
+ state.canonical_root().unwrap(),
"block should have correct state root"
);
assert_eq!(
diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs
index a84cfab298..84e1544451 100644
--- a/beacon_node/beacon_chain/src/canonical_head.rs
+++ b/beacon_node/beacon_chain/src/canonical_head.rs
@@ -1385,6 +1385,15 @@ fn observe_head_block_delays(
.as_millis() as i64,
);
+ // The time it took to check the validity within Lighthouse
+ metrics::set_gauge(
+ &metrics::BEACON_BLOCK_DELAY_CONSENSUS_VERIFICATION_TIME,
+ block_delays
+ .consensus_verification_time
+ .unwrap_or_else(|| Duration::from_secs(0))
+ .as_millis() as i64,
+ );
+
// The time it took to check the validity with the EL
metrics::set_gauge(
&metrics::BEACON_BLOCK_DELAY_EXECUTION_TIME,
@@ -1447,6 +1456,7 @@ fn observe_head_block_delays(
"total_delay_ms" => block_delay_total.as_millis(),
"observed_delay_ms" => format_delay(&block_delays.observed),
"blob_delay_ms" => format_delay(&block_delays.all_blobs_observed),
+ "consensus_time_ms" => format_delay(&block_delays.consensus_verification_time),
"execution_time_ms" => format_delay(&block_delays.execution_time),
"available_delay_ms" => format_delay(&block_delays.available),
"attestable_delay_ms" => format_delay(&block_delays.attestable),
@@ -1463,6 +1473,7 @@ fn observe_head_block_delays(
"total_delay_ms" => block_delay_total.as_millis(),
"observed_delay_ms" => format_delay(&block_delays.observed),
"blob_delay_ms" => format_delay(&block_delays.all_blobs_observed),
+ "consensus_time_ms" => format_delay(&block_delays.consensus_verification_time),
"execution_time_ms" => format_delay(&block_delays.execution_time),
"available_delay_ms" => format_delay(&block_delays.available),
"attestable_delay_ms" => format_delay(&block_delays.attestable),
diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs
index e0347d81c3..2431769ddb 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -2,7 +2,7 @@ use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, Kzg
use crate::block_verification_types::{
AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock,
};
-use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache;
+use crate::data_availability_checker::overflow_lru_cache::DataAvailabilityCheckerInner;
use crate::{BeaconChain, BeaconChainTypes, BeaconStore};
use kzg::Kzg;
use slog::{debug, error, Logger};
@@ -33,12 +33,32 @@ pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(1024);
pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(2);
pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get();
-/// This includes a cache for any blocks or blobs that have been received over gossip or RPC
-/// and are awaiting more components before they can be imported. Additionally the
-/// `DataAvailabilityChecker` is responsible for KZG verification of block components as well as
-/// checking whether a "availability check" is required at all.
+/// Cache to hold fully valid data that can't be imported to fork-choice yet. After Dencun hard-fork
+/// blocks have a sidecar of data that is received separately from the network. We call the concept
+/// of a block "becoming available" when all of its import dependencies are inserted into this
+/// cache.
+///
+/// Usually a block becomes available on its slot within a second of receiving its first component
+/// over gossip. However, a block may never become available if a malicious proposer does not
+/// publish its data, or there are network issues that prevent us from receiving it. If the block
+/// does not become available after some time we can safely forget about it. Consider these two
+/// cases:
+///
+/// - Global unavailability: If nobody has received the block components it's likely that the
+/// proposer never made the block available. So we can safely forget about the block as it will
+/// never become available.
+/// - Local unavailability: Some fraction of the network has received all block components, but not us.
+/// Some of our peers will eventually attest to a descendant of that block and lookup sync will
+/// fetch its components. Therefore it's not strictly necessary to hold on to the partially available
+/// block for too long as we can recover from other peers.
+///
+/// Even in periods of non-finality, the proposer is expected to publish the block's data
+/// immediately. Because this cache only holds fully valid data, its capacity is bound to 1 block
+/// per slot and fork: before inserting into this cache we check the proposer signature and correct
+/// proposer. Having a capacity > 1 is an optimization to prevent sync lookup from having to re-fetch
+/// data during moments of unstable network conditions.
pub struct DataAvailabilityChecker {
- availability_cache: Arc>,
+ availability_cache: Arc>,
slot_clock: T::SlotClock,
kzg: Option>,
log: Logger,
@@ -74,7 +94,8 @@ impl DataAvailabilityChecker {
log: &Logger,
spec: ChainSpec,
) -> Result {
- let overflow_cache = OverflowLRUCache::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?;
+ let overflow_cache =
+ DataAvailabilityCheckerInner::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?;
Ok(Self {
availability_cache: Arc::new(overflow_cache),
slot_clock,
@@ -329,15 +350,9 @@ impl DataAvailabilityChecker {
})
}
- /// Persist all in memory components to disk
- pub fn persist_all(&self) -> Result<(), AvailabilityCheckError> {
- self.availability_cache.write_all_to_disk()
- }
-
/// Collects metrics from the data availability checker.
pub fn metrics(&self) -> DataAvailabilityCheckerMetrics {
DataAvailabilityCheckerMetrics {
- num_store_entries: self.availability_cache.num_store_entries(),
state_cache_size: self.availability_cache.state_cache_size(),
block_cache_size: self.availability_cache.block_cache_size(),
}
@@ -346,7 +361,6 @@ impl DataAvailabilityChecker {
/// Helper struct to group data availability checker metrics.
pub struct DataAvailabilityCheckerMetrics {
- pub num_store_entries: usize,
pub state_cache_size: usize,
pub block_cache_size: usize,
}
@@ -372,7 +386,7 @@ pub fn start_availability_cache_maintenance_service(
async fn availability_cache_maintenance_service(
chain: Arc>,
- overflow_cache: Arc>,
+ overflow_cache: Arc>,
) {
let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32;
loop {
diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
index adc1a1e202..5e0513c8d3 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
@@ -1,32 +1,3 @@
-//! This module implements a LRU cache for storing partially available blocks and blobs.
-//! When the cache overflows, the least recently used items are persisted to the database.
-//! This prevents lighthouse from using too much memory storing unfinalized blocks and blobs
-//! if the chain were to lose finality.
-//!
-//! ## Deadlock safety
-//!
-//! The main object in this module is the `OverflowLruCache`. It contains two locks:
-//!
-//! - `self.critical` is an `RwLock` that protects content stored in memory.
-//! - `self.maintenance_lock` is held when moving data between memory and disk.
-//!
-//! You mostly need to ensure that you don't try to hold the critical lock more than once
-//!
-//! ## Basic Algorithm
-//!
-//! As blocks and blobs come in from the network, their components are stored in memory in
-//! this cache. When a block becomes fully available, it is removed from the cache and
-//! imported into fork-choice. Blocks/blobs that remain unavailable will linger in the
-//! cache until they are older than the finalized epoch or older than the data availability
-//! cutoff. In the event the chain is not finalizing, the cache will eventually overflow and
-//! the least recently used items will be persisted to disk. When this happens, we will still
-//! store the hash of the block in memory so we always know we have data for that block
-//! without needing to check the database.
-//!
-//! When the client is shut down, all pending components are persisted in the database.
-//! On startup, the keys of these components are stored in memory and will be loaded in
-//! the cache when they are accessed.
-
use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache};
use crate::beacon_chain::BeaconStore;
use crate::blob_verification::KzgVerifiedBlob;
@@ -34,15 +5,13 @@ use crate::block_verification_types::{
AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock,
};
use crate::data_availability_checker::{Availability, AvailabilityCheckError};
-use crate::store::{DBColumn, KeyValueStore};
use crate::BeaconChainTypes;
use lru::LruCache;
-use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard};
-use ssz::{Decode, Encode};
+use parking_lot::RwLock;
use ssz_derive::{Decode, Encode};
use ssz_types::{FixedVector, VariableList};
use std::num::NonZeroUsize;
-use std::{collections::HashSet, sync::Arc};
+use std::sync::Arc;
use types::blob_sidecar::BlobIdentifier;
use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock};
@@ -243,318 +212,27 @@ impl PendingComponents {
AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome),
)))
}
-
- /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob.
- pub fn epoch(&self) -> Option {
- self.executed_block
- .as_ref()
- .map(|pending_block| pending_block.as_block().epoch())
- .or_else(|| {
- for maybe_blob in self.verified_blobs.iter() {
- if maybe_blob.is_some() {
- return maybe_blob.as_ref().map(|kzg_verified_blob| {
- kzg_verified_blob
- .as_blob()
- .slot()
- .epoch(E::slots_per_epoch())
- });
- }
- }
- None
- })
- }
-}
-
-/// Blocks and blobs are stored in the database sequentially so that it's
-/// fast to iterate over all the data for a particular block.
-#[derive(Debug, PartialEq)]
-enum OverflowKey {
- Block(Hash256),
- Blob(Hash256, u8),
-}
-
-impl OverflowKey {
- pub fn from_block_root(block_root: Hash256) -> Self {
- Self::Block(block_root)
- }
-
- pub fn from_blob_id(
- blob_id: BlobIdentifier,
- ) -> Result {
- if blob_id.index > E::max_blobs_per_block() as u64 || blob_id.index > u8::MAX as u64 {
- return Err(AvailabilityCheckError::BlobIndexInvalid(blob_id.index));
- }
- Ok(Self::Blob(blob_id.block_root, blob_id.index as u8))
- }
-
- pub fn root(&self) -> &Hash256 {
- match self {
- Self::Block(root) => root,
- Self::Blob(root, _) => root,
- }
- }
-}
-
-/// A wrapper around BeaconStore that implements various
-/// methods used for saving and retrieving blocks / blobs
-/// from the store (for organization)
-struct OverflowStore(BeaconStore);
-
-impl OverflowStore {
- /// Store pending components in the database
- pub fn persist_pending_components(
- &self,
- block_root: Hash256,
- mut pending_components: PendingComponents,
- ) -> Result<(), AvailabilityCheckError> {
- let col = DBColumn::OverflowLRUCache;
-
- if let Some(block) = pending_components.executed_block.take() {
- let key = OverflowKey::from_block_root(block_root);
- self.0
- .hot_db
- .put_bytes(col.as_str(), &key.as_ssz_bytes(), &block.as_ssz_bytes())?
- }
-
- for blob in Vec::from(pending_components.verified_blobs)
- .into_iter()
- .flatten()
- {
- let key = OverflowKey::from_blob_id::(BlobIdentifier {
- block_root,
- index: blob.blob_index(),
- })?;
-
- self.0
- .hot_db
- .put_bytes(col.as_str(), &key.as_ssz_bytes(), &blob.as_ssz_bytes())?
- }
-
- Ok(())
- }
-
- /// Load the pending components that we have in the database for a given block root
- pub fn load_pending_components(
- &self,
- block_root: Hash256,
- ) -> Result