Merge branch 'unstable' into peerdas-devnet-7

This commit is contained in:
Jimmy Chen
2025-05-26 14:42:18 +10:00
53 changed files with 1084 additions and 567 deletions

View File

@@ -11,9 +11,6 @@ edition = { workspace = true }
name = "beacon_node"
path = "src/lib.rs"
[dev-dependencies]
node_test_rig = { path = "../testing/node_test_rig" }
[features]
write_ssz_files = [
"beacon_chain/write_ssz_files",
@@ -45,3 +42,6 @@ task_executor = { workspace = true }
tracing = { workspace = true }
types = { workspace = true }
unused_port = { workspace = true }
[dev-dependencies]
node_test_rig = { path = "../testing/node_test_rig" }

View File

@@ -1,3 +1,4 @@
[package]
name = "beacon_chain"
version = "0.2.0"
@@ -5,10 +6,6 @@ authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com
edition = { workspace = true }
autotests = false # using a single test binary compiles faster
[[bench]]
name = "benches"
harness = false
[features]
default = ["participation_metrics"]
write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing.
@@ -17,11 +14,6 @@ fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env va
portable = ["bls/supranational-portable"]
test_backfill = []
[dev-dependencies]
criterion = { workspace = true }
maplit = { workspace = true }
serde_json = { workspace = true }
[dependencies]
alloy-primitives = { workspace = true }
bitvec = { workspace = true }
@@ -75,6 +67,17 @@ tree_hash = { workspace = true }
tree_hash_derive = { workspace = true }
types = { workspace = true }
[dev-dependencies]
criterion = { workspace = true }
maplit = { workspace = true }
mockall = { workspace = true }
mockall_double = { workspace = true }
serde_json = { workspace = true }
[[bench]]
name = "benches"
harness = false
[[test]]
name = "beacon_chain_tests"
path = "tests/main.rs"

View File

@@ -0,0 +1,95 @@
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
use crate::fetch_blobs::{EngineGetBlobsOutput, FetchEngineBlobError};
use crate::observed_data_sidecars::DoNotObserve;
use crate::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes};
use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2};
use kzg::Kzg;
#[cfg(test)]
use mockall::automock;
use std::sync::Arc;
use task_executor::TaskExecutor;
use types::{BlobSidecar, ChainSpec, Hash256, Slot};
/// An adapter to the `BeaconChain` functionalities to remove `BeaconChain` from direct dependency to enable testing fetch blobs logic.
///
/// Under `cfg(test)` the `automock` attribute on the `impl` block generates a
/// `MockFetchBlobsBeaconAdapter` so fetch-blobs logic can be exercised without
/// a real chain.
pub(crate) struct FetchBlobsBeaconAdapter<T: BeaconChainTypes> {
// Wrapped chain; every method below delegates to it.
chain: Arc<BeaconChain<T>>,
// Clone of `chain.spec` taken at construction, served by `spec()`.
spec: Arc<ChainSpec>,
}
#[cfg_attr(test, automock, allow(dead_code))]
impl<T: BeaconChainTypes> FetchBlobsBeaconAdapter<T> {
/// Wraps `chain`, caching a clone of its `spec`.
pub(crate) fn new(chain: Arc<BeaconChain<T>>) -> Self {
let spec = chain.spec.clone();
Self { chain, spec }
}
/// The chain spec cached at construction time.
pub(crate) fn spec(&self) -> &Arc<ChainSpec> {
&self.spec
}
/// The chain's KZG context.
pub(crate) fn kzg(&self) -> &Arc<Kzg> {
&self.chain.kzg
}
/// The chain's task executor (used by callers to spawn blocking work).
pub(crate) fn executor(&self) -> &TaskExecutor {
&self.chain.task_executor
}
/// Fetches blobs and proofs for `versioned_hashes` via the execution
/// layer's `get_blobs_v1`.
///
/// # Errors
/// - `ExecutionLayerMissing` if the chain has no execution layer.
/// - `RequestFailed` if the engine call fails.
pub(crate) async fn get_blobs_v1(
&self,
versioned_hashes: Vec<Hash256>,
) -> Result<Vec<Option<BlobAndProofV1<T::EthSpec>>>, FetchEngineBlobError> {
let execution_layer = self
.chain
.execution_layer
.as_ref()
.ok_or(FetchEngineBlobError::ExecutionLayerMissing)?;
execution_layer
.get_blobs_v1(versioned_hashes)
.await
.map_err(FetchEngineBlobError::RequestFailed)
}
/// Fetches blobs and cell proofs for `versioned_hashes` via the execution
/// layer's `get_blobs_v2`. Returns `Ok(None)` when the EL has no response
/// for the request (see `get_blobs_v2`'s `Option` return).
///
/// # Errors
/// - `ExecutionLayerMissing` if the chain has no execution layer.
/// - `RequestFailed` if the engine call fails.
pub(crate) async fn get_blobs_v2(
&self,
versioned_hashes: Vec<Hash256>,
) -> Result<Option<Vec<BlobAndProofV2<T::EthSpec>>>, FetchEngineBlobError> {
let execution_layer = self
.chain
.execution_layer
.as_ref()
.ok_or(FetchEngineBlobError::ExecutionLayerMissing)?;
execution_layer
.get_blobs_v2(versioned_hashes)
.await
.map_err(FetchEngineBlobError::RequestFailed)
}
/// Gossip-verifies `blob` without marking it observed (`DoNotObserve`), so
/// the caller can decide whether to publish it.
pub(crate) fn verify_blob_for_gossip(
&self,
blob: &Arc<BlobSidecar<T::EthSpec>>,
) -> Result<GossipVerifiedBlob<T, DoNotObserve>, GossipBlobError> {
GossipVerifiedBlob::<T, DoNotObserve>::new(blob.clone(), blob.index, &self.chain)
}
/// Delegates to `BeaconChain::process_engine_blobs`, mapping any error to
/// `FetchEngineBlobError::BlobProcessingError`.
pub(crate) async fn process_engine_blobs(
&self,
slot: Slot,
block_root: Hash256,
blobs: EngineGetBlobsOutput<T::EthSpec>,
) -> Result<AvailabilityProcessingStatus, FetchEngineBlobError> {
self.chain
.process_engine_blobs(slot, block_root, blobs)
.await
.map_err(FetchEngineBlobError::BlobProcessingError)
}
/// Whether fork choice currently contains `block_root` (i.e. the block has
/// already been imported).
pub(crate) fn fork_choice_contains_block(&self, block_root: &Hash256) -> bool {
self.chain
.canonical_head
.fork_choice_read_lock()
.contains_block(block_root)
}
}

View File

@@ -8,7 +8,13 @@
//! broadcasting blobs requires a much higher bandwidth, and is only done by high capacity
//! supernodes.
mod fetch_blobs_beacon_adapter;
#[cfg(test)]
mod tests;
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
#[cfg_attr(test, double)]
use crate::fetch_blobs::fetch_blobs_beacon_adapter::FetchBlobsBeaconAdapter;
use crate::kzg_utils::blobs_to_data_column_sidecars;
use crate::observed_data_sidecars::DoNotObserve;
use crate::{
@@ -18,11 +24,13 @@ use crate::{
use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2};
use execution_layer::Error as ExecutionLayerError;
use metrics::{inc_counter, TryExt};
#[cfg(test)]
use mockall_double::double;
use ssz_types::FixedVector;
use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash;
use std::collections::HashSet;
use std::sync::Arc;
use tracing::debug;
use tracing::{debug, warn};
use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList};
use types::data_column_sidecar::DataColumnSidecarError;
use types::{
@@ -58,6 +66,7 @@ pub enum FetchEngineBlobError {
GossipBlob(GossipBlobError),
RequestFailed(ExecutionLayerError),
RuntimeShutdown,
TokioJoin(tokio::task::JoinError),
}
/// Fetches blobs from the EL mempool and processes them. It also broadcasts unseen blobs or
@@ -68,6 +77,25 @@ pub async fn fetch_and_process_engine_blobs<T: BeaconChainTypes>(
block: Arc<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>>,
custody_columns: HashSet<ColumnIndex>,
publish_fn: impl Fn(BlobsOrDataColumns<T>) + Send + 'static,
) -> Result<Option<AvailabilityProcessingStatus>, FetchEngineBlobError> {
fetch_and_process_engine_blobs_inner(
FetchBlobsBeaconAdapter::new(chain),
block_root,
block,
custody_columns,
publish_fn,
)
.await
}
/// Internal implementation of fetch blobs, which uses `FetchBlobsBeaconAdapter` instead of
/// `BeaconChain` for better testability.
async fn fetch_and_process_engine_blobs_inner<T: BeaconChainTypes>(
chain_adapter: FetchBlobsBeaconAdapter<T>,
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>>,
custody_columns: HashSet<ColumnIndex>,
publish_fn: impl Fn(BlobsOrDataColumns<T>) + Send + 'static,
) -> Result<Option<AvailabilityProcessingStatus>, FetchEngineBlobError> {
let versioned_hashes = if let Some(kzg_commitments) = block
.message()
@@ -90,9 +118,12 @@ pub async fn fetch_and_process_engine_blobs<T: BeaconChainTypes>(
"Fetching blobs from the EL"
);
if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
if chain_adapter
.spec()
.is_peer_das_enabled_for_epoch(block.epoch())
{
fetch_and_process_blobs_v2(
chain,
chain_adapter,
block_root,
block,
versioned_hashes,
@@ -101,32 +132,33 @@ pub async fn fetch_and_process_engine_blobs<T: BeaconChainTypes>(
)
.await
} else {
fetch_and_process_blobs_v1(chain, block_root, block, versioned_hashes, publish_fn).await
fetch_and_process_blobs_v1(
chain_adapter,
block_root,
block,
versioned_hashes,
publish_fn,
)
.await
}
}
async fn fetch_and_process_blobs_v1<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
chain_adapter: FetchBlobsBeaconAdapter<T>,
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
versioned_hashes: Vec<VersionedHash>,
publish_fn: impl Fn(BlobsOrDataColumns<T>) + Send + Sized,
) -> Result<Option<AvailabilityProcessingStatus>, FetchEngineBlobError> {
let num_expected_blobs = versioned_hashes.len();
let execution_layer = chain
.execution_layer
.as_ref()
.ok_or(FetchEngineBlobError::ExecutionLayerMissing)?;
metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64);
debug!(num_expected_blobs, "Fetching blobs from the EL");
let response = execution_layer
let response = chain_adapter
.get_blobs_v1(versioned_hashes)
.await
.inspect_err(|_| {
inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL);
})
.map_err(FetchEngineBlobError::RequestFailed)?;
})?;
let num_fetched_blobs = response.iter().filter(|opt| opt.is_some()).count();
metrics::observe(&metrics::BLOBS_FROM_EL_RECEIVED, num_fetched_blobs as f64);
@@ -148,7 +180,7 @@ async fn fetch_and_process_blobs_v1<T: BeaconChainTypes>(
response,
signed_block_header,
&kzg_commitments_proof,
&chain.spec,
chain_adapter.spec(),
)?;
// Gossip verify blobs before publishing. This prevents blobs with invalid KZG proofs from
@@ -160,7 +192,7 @@ async fn fetch_and_process_blobs_v1<T: BeaconChainTypes>(
.iter()
.filter_map(|opt_blob| {
let blob = opt_blob.as_ref()?;
match GossipVerifiedBlob::<T, DoNotObserve>::new(blob.clone(), blob.index, &chain) {
match chain_adapter.verify_blob_for_gossip(blob) {
Ok(verified) => Some(Ok(verified)),
// Ignore already seen blobs.
Err(GossipBlobError::RepeatBlob { .. }) => None,
@@ -176,20 +208,19 @@ async fn fetch_and_process_blobs_v1<T: BeaconChainTypes>(
debug!(num_fetched_blobs, "Processing engine blobs");
let availability_processing_status = chain
let availability_processing_status = chain_adapter
.process_engine_blobs(
block.slot(),
block_root,
EngineGetBlobsOutput::Blobs(fixed_blob_sidecar_list.clone()),
)
.await
.map_err(FetchEngineBlobError::BlobProcessingError)?;
.await?;
Ok(Some(availability_processing_status))
}
async fn fetch_and_process_blobs_v2<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
chain_adapter: FetchBlobsBeaconAdapter<T>,
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
versioned_hashes: Vec<VersionedHash>,
@@ -197,52 +228,49 @@ async fn fetch_and_process_blobs_v2<T: BeaconChainTypes>(
publish_fn: impl Fn(BlobsOrDataColumns<T>) + Send + 'static,
) -> Result<Option<AvailabilityProcessingStatus>, FetchEngineBlobError> {
let num_expected_blobs = versioned_hashes.len();
let execution_layer = chain
.execution_layer
.as_ref()
.ok_or(FetchEngineBlobError::ExecutionLayerMissing)?;
metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64);
debug!(num_expected_blobs, "Fetching blobs from the EL");
let response = execution_layer
let response = chain_adapter
.get_blobs_v2(versioned_hashes)
.await
.inspect_err(|_| {
inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL);
})
.map_err(FetchEngineBlobError::RequestFailed)?;
})?;
let (blobs, proofs): (Vec<_>, Vec<_>) = response
let Some(blobs_and_proofs) = response else {
debug!(num_expected_blobs, "No blobs fetched from the EL");
inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL);
return Ok(None);
};
let (blobs, proofs): (Vec<_>, Vec<_>) = blobs_and_proofs
.into_iter()
.filter_map(|blob_and_proof_opt| {
blob_and_proof_opt.map(|blob_and_proof| {
let BlobAndProofV2 { blob, proofs } = blob_and_proof;
(blob, proofs)
})
.map(|blob_and_proof| {
let BlobAndProofV2 { blob, proofs } = blob_and_proof;
(blob, proofs)
})
.unzip();
let num_fetched_blobs = blobs.len();
metrics::observe(&metrics::BLOBS_FROM_EL_RECEIVED, num_fetched_blobs as f64);
// Partial blobs response isn't useful for PeerDAS, so we don't bother building and publishing data columns.
if num_fetched_blobs != num_expected_blobs {
debug!(
info = "Unable to compute data columns",
num_fetched_blobs, num_expected_blobs, "Not all blobs fetched from the EL"
// This scenario is not supposed to happen if the EL is spec compliant.
// It should either return all requested blobs or none, but NOT partial responses.
// If we attempt to compute columns with partial blobs, we'd end up with invalid columns.
warn!(
num_fetched_blobs,
num_expected_blobs, "The EL did not return all requested blobs"
);
inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL);
return Ok(None);
} else {
inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL);
}
if chain
.canonical_head
.fork_choice_read_lock()
.contains_block(&block_root)
{
// Avoid computing columns if block has already been imported.
inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL);
if chain_adapter.fork_choice_contains_block(&block_root) {
// Avoid computing columns if the block has already been imported.
debug!(
info = "block has already been imported",
"Ignoring EL blobs response"
@@ -251,7 +279,7 @@ async fn fetch_and_process_blobs_v2<T: BeaconChainTypes>(
}
let custody_columns = compute_and_publish_data_columns(
&chain,
&chain_adapter,
block.clone(),
blobs,
proofs,
@@ -262,29 +290,30 @@ async fn fetch_and_process_blobs_v2<T: BeaconChainTypes>(
debug!(num_fetched_blobs, "Processing engine blobs");
let availability_processing_status = chain
let availability_processing_status = chain_adapter
.process_engine_blobs(
block.slot(),
block_root,
EngineGetBlobsOutput::CustodyColumns(custody_columns),
)
.await
.map_err(FetchEngineBlobError::BlobProcessingError)?;
.await?;
Ok(Some(availability_processing_status))
}
/// Offload the data column computation to a blocking task to avoid holding up the async runtime.
async fn compute_and_publish_data_columns<T: BeaconChainTypes>(
chain: &Arc<BeaconChain<T>>,
chain_adapter: &FetchBlobsBeaconAdapter<T>,
block: Arc<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>>,
blobs: Vec<Blob<T::EthSpec>>,
proofs: Vec<KzgProofs<T::EthSpec>>,
custody_columns_indices: HashSet<ColumnIndex>,
publish_fn: impl Fn(BlobsOrDataColumns<T>) + Send + 'static,
) -> Result<DataColumnSidecarList<T::EthSpec>, FetchEngineBlobError> {
let chain_cloned = chain.clone();
chain
let kzg = chain_adapter.kzg().clone();
let spec = chain_adapter.spec().clone();
chain_adapter
.executor()
.spawn_blocking_handle(
move || {
let mut timer = metrics::start_timer_vec(
@@ -294,14 +323,9 @@ async fn compute_and_publish_data_columns<T: BeaconChainTypes>(
let blob_refs = blobs.iter().collect::<Vec<_>>();
let cell_proofs = proofs.into_iter().flatten().collect();
let data_columns_result = blobs_to_data_column_sidecars(
&blob_refs,
cell_proofs,
&block,
&chain_cloned.kzg,
&chain_cloned.spec,
)
.discard_timer_on_break(&mut timer);
let data_columns_result =
blobs_to_data_column_sidecars(&blob_refs, cell_proofs, &block, &kzg, &spec)
.discard_timer_on_break(&mut timer);
drop(timer);
// This filtering ensures we only import and publish the custody columns.
@@ -319,9 +343,9 @@ async fn compute_and_publish_data_columns<T: BeaconChainTypes>(
},
"compute_and_publish_data_columns",
)
.ok_or(FetchEngineBlobError::RuntimeShutdown)?
.await
.map_err(|e| FetchEngineBlobError::BeaconChainError(Box::new(e)))
.and_then(|r| r)
.map_err(FetchEngineBlobError::TokioJoin)?
}
fn build_blob_sidecars<E: EthSpec>(

View File

@@ -0,0 +1,278 @@
use crate::fetch_blobs::fetch_blobs_beacon_adapter::MockFetchBlobsBeaconAdapter;
use crate::fetch_blobs::{
fetch_and_process_engine_blobs_inner, BlobsOrDataColumns, FetchEngineBlobError,
};
use crate::test_utils::{get_kzg, EphemeralHarnessType};
use crate::AvailabilityProcessingStatus;
use bls::Signature;
use eth2::types::BlobsBundle;
use execution_layer::json_structures::BlobAndProofV2;
use execution_layer::test_utils::generate_blobs;
use maplit::hashset;
use std::sync::{Arc, Mutex};
use task_executor::test_utils::TestRuntime;
use types::{
BeaconBlockFulu, EmptyBlock, EthSpec, ForkName, Hash256, MainnetEthSpec, SignedBeaconBlock,
SignedBeaconBlockFulu,
};
type E = MainnetEthSpec;
type T = EphemeralHarnessType<E>;
/// A Fulu block with no KZG commitments must not trigger an engine
/// `getBlobsV2` call or any blob processing; the fetch resolves to `None`.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fetch_blobs_v2_no_blobs_in_block() {
let mut mock_adapter = mock_beacon_adapter();
let (publish_fn, _s) = mock_publish_fn();
// An empty Fulu block carries an empty `blob_kzg_commitments` list.
let block = SignedBeaconBlock::<E>::Fulu(SignedBeaconBlockFulu {
message: BeaconBlockFulu::empty(mock_adapter.spec()),
signature: Signature::empty(),
});
let block_root = block.canonical_root();
// Expectations: engine fetch blobs should not be triggered
mock_adapter.expect_get_blobs_v2().times(0);
mock_adapter.expect_process_engine_blobs().times(0);
let custody_columns = hashset![0, 1, 2];
let processing_status = fetch_and_process_engine_blobs_inner(
mock_adapter,
block_root,
Arc::new(block),
custody_columns.clone(),
publish_fn,
)
.await
.expect("fetch blobs should succeed");
assert_eq!(processing_status, None);
}
/// When the EL responds to `getBlobsV2` with `None` (no blobs available),
/// the fetch resolves to `None` without processing anything.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fetch_blobs_v2_no_blobs_returned() {
let mut mock_adapter = mock_beacon_adapter();
let (publish_fn, _) = mock_publish_fn();
let (block, _blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter);
let block_root = block.canonical_root();
// No blobs in EL response
mock_get_blobs_v2_response(&mut mock_adapter, None);
// Trigger fetch blobs on the block
let custody_columns = hashset![0, 1, 2];
let processing_status = fetch_and_process_engine_blobs_inner(
mock_adapter,
block_root,
block,
custody_columns.clone(),
publish_fn,
)
.await
.expect("fetch blobs should succeed");
assert_eq!(processing_status, None);
}
/// When the EL returns fewer blobs than the block commits to, nothing is
/// processed or published — a partial response cannot produce valid data
/// columns, so the fetch resolves to `None`.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fetch_blobs_v2_partial_blobs_returned() {
let mut mock_adapter = mock_beacon_adapter();
let (publish_fn, publish_fn_args) = mock_publish_fn();
let (block, mut blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter);
let block_root = block.canonical_root();
// Missing blob in EL response
blobs_and_proofs.pop();
mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs));
// No blobs should be processed
mock_adapter.expect_process_engine_blobs().times(0);
// Trigger fetch blobs on the block
let custody_columns = hashset![0, 1, 2];
let processing_status = fetch_and_process_engine_blobs_inner(
mock_adapter,
block_root,
block,
custody_columns.clone(),
publish_fn,
)
.await
.expect("fetch blobs should succeed");
assert_eq!(processing_status, None);
assert_eq!(
publish_fn_args.lock().unwrap().len(),
0,
"no columns should be published"
);
}
/// When fork choice already contains the block by the time the EL responds,
/// column computation, publishing and processing are all skipped.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fetch_blobs_v2_block_imported_after_el_response() {
let mut mock_adapter = mock_beacon_adapter();
let (publish_fn, publish_fn_args) = mock_publish_fn();
let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter);
let block_root = block.canonical_root();
// All blobs returned, but fork choice already imported the block
mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs));
mock_fork_choice_contains_block(&mut mock_adapter, vec![block.canonical_root()]);
// No blobs should be processed
mock_adapter.expect_process_engine_blobs().times(0);
// Trigger fetch blobs on the block
let custody_columns = hashset![0, 1, 2];
let processing_status = fetch_and_process_engine_blobs_inner(
mock_adapter,
block_root,
block,
custody_columns.clone(),
publish_fn,
)
.await
.expect("fetch blobs should succeed");
assert_eq!(processing_status, None);
assert_eq!(
publish_fn_args.lock().unwrap().len(),
0,
"no columns should be published"
);
}
/// Happy path: the EL returns all blobs and the block is not yet in fork
/// choice — custody data columns are computed, published once via
/// `publish_fn`, and processing reports the block as `Imported`.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fetch_blobs_v2_success() {
let mut mock_adapter = mock_beacon_adapter();
let (publish_fn, publish_fn_args) = mock_publish_fn();
let (block, blobs_and_proofs) = create_test_block_and_blobs(&mock_adapter);
let block_root = block.canonical_root();
// All blobs returned, fork choice doesn't contain block
mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs));
mock_fork_choice_contains_block(&mut mock_adapter, vec![]);
mock_process_engine_blobs_result(
&mut mock_adapter,
Ok(AvailabilityProcessingStatus::Imported(block_root)),
);
// Trigger fetch blobs on the block
let custody_columns = hashset![0, 1, 2];
let processing_status = fetch_and_process_engine_blobs_inner(
mock_adapter,
block_root,
block,
custody_columns.clone(),
publish_fn,
)
.await
.expect("fetch blobs should succeed");
assert_eq!(
processing_status,
Some(AvailabilityProcessingStatus::Imported(block_root))
);
// Exactly one publish call is expected, carrying only the custody columns.
let published_columns = extract_published_blobs(publish_fn_args);
assert!(
matches!(
published_columns,
BlobsOrDataColumns::DataColumns (columns) if columns.len() == custody_columns.len()
),
"should publish custody columns"
);
}
/// Extract the `BlobsOrDataColumns` passed to the `publish_fn`.
///
/// Panics if `publish_fn` was called any number of times other than exactly
/// once.
fn extract_published_blobs(
publish_fn_args: Arc<Mutex<Vec<BlobsOrDataColumns<T>>>>,
) -> BlobsOrDataColumns<T> {
let mut calls = publish_fn_args.lock().unwrap();
assert_eq!(calls.len(), 1);
calls.pop().unwrap()
}
/// Stub `process_engine_blobs` on the mock to return `result` (consumed on
/// first call, hence `return_once`).
fn mock_process_engine_blobs_result(
mock_adapter: &mut MockFetchBlobsBeaconAdapter<T>,
result: Result<AvailabilityProcessingStatus, FetchEngineBlobError>,
) {
mock_adapter
.expect_process_engine_blobs()
.return_once(move |_, _, _| result);
}
/// Stub `fork_choice_contains_block` so it reports `true` only for roots in
/// `block_roots`.
fn mock_fork_choice_contains_block(
mock_adapter: &mut MockFetchBlobsBeaconAdapter<T>,
block_roots: Vec<Hash256>,
) {
mock_adapter
.expect_fork_choice_contains_block()
.returning(move |block_root| block_roots.contains(block_root));
}
/// Stub `get_blobs_v2` on the mock to return `blobs_and_proofs_opt` once,
/// regardless of the requested versioned hashes.
fn mock_get_blobs_v2_response(
mock_adapter: &mut MockFetchBlobsBeaconAdapter<T>,
blobs_and_proofs_opt: Option<Vec<BlobAndProofV2<E>>>,
) {
mock_adapter
.expect_get_blobs_v2()
.return_once(move |_| Ok(blobs_and_proofs_opt));
}
/// Build an empty Fulu block whose `blob_kzg_commitments` are replaced with 2
/// generated commitments, plus the matching `BlobAndProofV2` list for those
/// blobs.
fn create_test_block_and_blobs(
mock_adapter: &MockFetchBlobsBeaconAdapter<T>,
) -> (Arc<SignedBeaconBlock<E>>, Vec<BlobAndProofV2<E>>) {
let mut block = SignedBeaconBlock::Fulu(SignedBeaconBlockFulu {
message: BeaconBlockFulu::empty(mock_adapter.spec()),
signature: Signature::empty(),
});
// Generate 2 blobs with commitments and proofs for the block's fork.
let (blobs_bundle, _tx) = generate_blobs::<E>(2, block.fork_name_unchecked()).unwrap();
let BlobsBundle {
commitments,
proofs,
blobs,
} = blobs_bundle;
*block
.message_mut()
.body_mut()
.blob_kzg_commitments_mut()
.unwrap() = commitments;
// The bundle's proof list is flat; split it into equal per-blob chunks.
// (Safe: `generate_blobs(2, …)` guarantees `blobs` is non-empty.)
let proofs_len = proofs.len() / blobs.len();
let blob_and_proofs: Vec<BlobAndProofV2<E>> = blobs
.into_iter()
.zip(proofs.chunks(proofs_len))
.map(|(blob, proofs)| BlobAndProofV2 {
blob,
proofs: proofs.to_vec().into(),
})
.collect();
(Arc::new(block), blob_and_proofs)
}
/// Build a stub `publish_fn` together with a shared vec that records every
/// value it is invoked with, so tests can assert on what would have been
/// published.
#[allow(clippy::type_complexity)]
fn mock_publish_fn() -> (
    impl Fn(BlobsOrDataColumns<T>) + Send + 'static,
    Arc<Mutex<Vec<BlobsOrDataColumns<T>>>>,
) {
    let recorded = Arc::new(Mutex::new(Vec::new()));
    let sink = Arc::clone(&recorded);
    (
        move |args| sink.lock().unwrap().push(args),
        recorded,
    )
}
/// Build a `MockFetchBlobsBeaconAdapter` with stubbed `spec`, `kzg` and
/// `executor` accessors for a Fulu genesis spec.
///
/// NOTE(review): `test_runtime` is dropped when this function returns while a
/// clone of its `task_executor` lives on inside the mock — presumably the
/// executor remains usable on the surrounding tokio test runtime; confirm
/// against `TestRuntime`'s drop behaviour.
fn mock_beacon_adapter() -> MockFetchBlobsBeaconAdapter<T> {
let test_runtime = TestRuntime::default();
let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec()));
let kzg = get_kzg(&spec);
let mut mock_adapter = MockFetchBlobsBeaconAdapter::default();
mock_adapter.expect_spec().return_const(spec.clone());
mock_adapter.expect_kzg().return_const(kzg.clone());
mock_adapter
.expect_executor()
.return_const(test_runtime.task_executor.clone());
mock_adapter
}

View File

@@ -69,8 +69,6 @@ use types::{typenum::U4294967296, *};
pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;
// Environment variable to read if `fork_from_env` feature is enabled.
pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
// Environment variable to read if `ci_logger` feature is enabled.
pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR";
// Pre-computed data column sidecar using a single static blob from:
// `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz`
@@ -2674,10 +2672,7 @@ where
mut latest_block_hash: Option<SignedBeaconBlockHash>,
sync_committee_strategy: SyncCommitteeStrategy,
) -> AddBlocksResult<E> {
assert!(
slots.windows(2).all(|w| w[0] <= w[1]),
"Slots have to be sorted"
); // slice.is_sorted() isn't stabilized at the moment of writing this
assert!(slots.is_sorted(), "Slots have to be in ascending order");
let mut block_hash_from_slot: HashMap<Slot, SignedBeaconBlockHash> = HashMap::new();
let mut state_hash_from_slot: HashMap<Slot, BeaconStateHash> = HashMap::new();
for slot in slots {
@@ -2717,10 +2712,7 @@ where
mut latest_block_hash: Option<SignedBeaconBlockHash>,
sync_committee_strategy: SyncCommitteeStrategy,
) -> AddBlocksResult<E> {
assert!(
slots.windows(2).all(|w| w[0] <= w[1]),
"Slots have to be sorted"
); // slice.is_sorted() isn't stabilized at the moment of writing this
assert!(slots.is_sorted(), "Slots have to be in ascending order");
let mut block_hash_from_slot: HashMap<Slot, SignedBeaconBlockHash> = HashMap::new();
let mut state_hash_from_slot: HashMap<Slot, BeaconStateHash> = HashMap::new();
for slot in slots {

View File

@@ -4,12 +4,6 @@ version = "0.2.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = { workspace = true }
[dev-dependencies]
operation_pool = { workspace = true }
serde_yaml = { workspace = true }
state_processing = { workspace = true }
tokio = { workspace = true }
[dependencies]
beacon_chain = { workspace = true }
beacon_processor = { workspace = true }
@@ -46,3 +40,9 @@ tokio = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
types = { workspace = true }
[dev-dependencies]
operation_pool = { workspace = true }
serde_yaml = { workspace = true }
state_processing = { workspace = true }
tokio = { workspace = true }

View File

@@ -4,11 +4,6 @@ version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = { workspace = true }
[dev-dependencies]
environment = { workspace = true }
eth1_test_rig = { workspace = true }
serde_yaml = { workspace = true }
[dependencies]
eth2 = { workspace = true }
ethereum_ssz = { workspace = true }
@@ -28,3 +23,8 @@ tokio = { workspace = true }
tracing = { workspace = true }
tree_hash = { workspace = true }
types = { workspace = true }
[dev-dependencies]
environment = { workspace = true }
eth1_test_rig = { workspace = true }
serde_yaml = { workspace = true }

View File

@@ -727,7 +727,7 @@ impl HttpJsonRpc {
pub async fn get_blobs_v2<E: EthSpec>(
&self,
versioned_hashes: Vec<Hash256>,
) -> Result<Vec<Option<BlobAndProofV2<E>>>, Error> {
) -> Result<Option<Vec<BlobAndProofV2<E>>>, Error> {
let params = json!([versioned_hashes]);
self.rpc_request(
@@ -1242,6 +1242,10 @@ impl HttpJsonRpc {
} else {
let engine_version = self.get_client_version_v1().await?;
*lock = Some(CachedResponse::new(engine_version.clone()));
if !engine_version.is_empty() {
// reset metric gauge when there's a fresh fetch
crate::metrics::reset_execution_layer_info_gauge();
}
Ok(engine_version)
}
}

View File

@@ -129,8 +129,7 @@ impl<E: EthSpec> TryFrom<BuilderBid<E>> for ProvenancedPayload<BlockProposalCont
block_value: builder_bid.value,
kzg_commitments: builder_bid.blob_kzg_commitments,
blobs_and_proofs: None,
// TODO(fulu): update this with builder api returning the requests
requests: None,
requests: Some(builder_bid.execution_requests),
},
};
Ok(ProvenancedPayload::Builder(
@@ -1555,10 +1554,14 @@ impl<E: EthSpec> ExecutionLayer<E> {
&self,
age_limit: Option<Duration>,
) -> Result<Vec<ClientVersionV1>, Error> {
self.engine()
let versions = self
.engine()
.request(|engine| engine.get_engine_version(age_limit))
.await
.map_err(Into::into)
.map_err(Into::<Error>::into)?;
metrics::expose_execution_layer_info(&versions);
Ok(versions)
}
/// Used during block production to determine if the merge has been triggered.
@@ -1861,7 +1864,7 @@ impl<E: EthSpec> ExecutionLayer<E> {
pub async fn get_blobs_v2(
&self,
query: Vec<Hash256>,
) -> Result<Vec<Option<BlobAndProofV2<E>>>, Error> {
) -> Result<Option<Vec<BlobAndProofV2<E>>>, Error> {
let capabilities = self.get_engine_capabilities(None).await?;
if capabilities.get_blobs_v2 {

View File

@@ -116,3 +116,29 @@ pub static EXECUTION_LAYER_PAYLOAD_BIDS: LazyLock<Result<IntGaugeVec>> = LazyLoc
&["source"]
)
});
// Gauge with one labelled series per connected execution client; each series
// is set to 1 by `expose_execution_layer_info` and the whole gauge is cleared
// by `reset_execution_layer_info_gauge` on a fresh fetch.
pub static EXECUTION_LAYER_INFO: LazyLock<Result<IntGaugeVec>> = LazyLock::new(|| {
try_create_int_gauge_vec(
"execution_layer_info",
"The build of the execution layer connected to lighthouse",
&["code", "name", "version", "commit"],
)
});
/// Clears every labelled series from the `execution_layer_info` gauge so that
/// stale execution-client build info is not reported after a fresh fetch.
/// If the gauge failed to register, this is a no-op.
pub fn reset_execution_layer_info_gauge() {
    if let Ok(gauge) = EXECUTION_LAYER_INFO.as_ref() {
        gauge.reset();
    }
}
/// Reports each connected execution client's build information on the
/// `execution_layer_info` gauge: one labelled series
/// (`code`/`name`/`version`/`commit`) per client, set to 1.
///
/// Takes `&[ClientVersionV1]` rather than `&Vec<…>` (clippy `ptr_arg`);
/// existing callers passing `&versions` still compile via deref coercion.
pub fn expose_execution_layer_info(els: &[crate::ClientVersionV1]) {
    for el in els {
        set_gauge_vec(
            &EXECUTION_LAYER_INFO,
            &[
                &el.code.to_string(),
                &el.name,
                &el.version,
                &el.commit.to_string(),
            ],
            1,
        );
    }
}

View File

@@ -4,11 +4,6 @@ version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = { workspace = true }
[dev-dependencies]
eth1_test_rig = { workspace = true }
logging = { workspace = true }
sensitive_url = { workspace = true }
[dependencies]
environment = { workspace = true }
eth1 = { workspace = true }
@@ -23,3 +18,8 @@ tokio = { workspace = true }
tracing = { workspace = true }
tree_hash = { workspace = true }
types = { workspace = true }
[dev-dependencies]
eth1_test_rig = { workspace = true }
logging = { workspace = true }
sensitive_url = { workspace = true }

View File

@@ -4,6 +4,9 @@ version = "0.2.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = { workspace = true }
[features]
libp2p-websocket = []
[dependencies]
alloy-primitives = { workspace = true }
alloy-rlp = { workspace = true }
@@ -53,7 +56,21 @@ unused_port = { workspace = true }
[dependencies.libp2p]
version = "0.55"
default-features = false
features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"]
features = [
"identify",
"yamux",
"noise",
"dns",
"tcp",
"tokio",
"plaintext",
"secp256k1",
"macros",
"ecdsa",
"metrics",
"quic",
"upnp",
]
[dev-dependencies]
async-channel = { workspace = true }
@@ -61,6 +78,3 @@ logging = { workspace = true }
quickcheck = { workspace = true }
quickcheck_macros = { workspace = true }
tempfile = { workspace = true }
[features]
libp2p-websocket = []

View File

@@ -12,7 +12,7 @@ use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::runtime::Runtime;
use tokio::time::sleep;
use tracing::{debug, error, warn};
use tracing::{debug, error, info_span, warn, Instrument};
use types::{
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec,
EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MinimalEthSpec,
@@ -55,7 +55,7 @@ fn bellatrix_block_large(spec: &ChainSpec) -> BeaconBlock<E> {
fn test_tcp_status_rpc() {
// Set up the logging.
let log_level = "debug";
let enable_logging = false;
let enable_logging = true;
build_tracing_subscriber(log_level, enable_logging);
let rt = Arc::new(Runtime::new().unwrap());
@@ -117,7 +117,8 @@ fn test_tcp_status_rpc() {
_ => {}
}
}
};
}
.instrument(info_span!("Sender"));
// build the receiver future
let receiver_future = async {
@@ -141,7 +142,8 @@ fn test_tcp_status_rpc() {
_ => {} // Ignore other events
}
}
};
}
.instrument(info_span!("Receiver"));
tokio::select! {
_ = sender_future => {}
@@ -159,7 +161,7 @@ fn test_tcp_status_rpc() {
fn test_tcp_blocks_by_range_chunked_rpc() {
// Set up the logging.
let log_level = "debug";
let enable_logging = false;
let enable_logging = true;
build_tracing_subscriber(log_level, enable_logging);
let messages_to_send = 6;
@@ -245,7 +247,8 @@ fn test_tcp_blocks_by_range_chunked_rpc() {
_ => {} // Ignore other behaviour events
}
}
};
}
.instrument(info_span!("Sender"));
// build the receiver future
let receiver_future = async {
@@ -286,7 +289,8 @@ fn test_tcp_blocks_by_range_chunked_rpc() {
_ => {} // Ignore other events
}
}
};
}
.instrument(info_span!("Receiver"));
tokio::select! {
_ = sender_future => {}
@@ -304,7 +308,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() {
fn test_blobs_by_range_chunked_rpc() {
// Set up the logging.
let log_level = "debug";
let enable_logging = false;
let enable_logging = true;
build_tracing_subscriber(log_level, enable_logging);
let slot_count = 32;
@@ -373,7 +377,8 @@ fn test_blobs_by_range_chunked_rpc() {
_ => {} // Ignore other behaviour events
}
}
};
}
.instrument(info_span!("Sender"));
// build the receiver future
let receiver_future = async {
@@ -407,7 +412,8 @@ fn test_blobs_by_range_chunked_rpc() {
_ => {} // Ignore other events
}
}
};
}
.instrument(info_span!("Receiver"));
tokio::select! {
_ = sender_future => {}
@@ -425,7 +431,7 @@ fn test_blobs_by_range_chunked_rpc() {
fn test_tcp_blocks_by_range_over_limit() {
// Set up the logging.
let log_level = "debug";
let enable_logging = false;
let enable_logging = true;
build_tracing_subscriber(log_level, enable_logging);
let messages_to_send = 5;
@@ -479,7 +485,8 @@ fn test_tcp_blocks_by_range_over_limit() {
_ => {} // Ignore other behaviour events
}
}
};
}
.instrument(info_span!("Sender"));
// build the receiver future
let receiver_future = async {
@@ -512,7 +519,8 @@ fn test_tcp_blocks_by_range_over_limit() {
_ => {} // Ignore other events
}
}
};
}
.instrument(info_span!("Receiver"));
tokio::select! {
_ = sender_future => {}
@@ -529,7 +537,7 @@ fn test_tcp_blocks_by_range_over_limit() {
fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() {
// Set up the logging.
let log_level = "debug";
let enable_logging = false;
let enable_logging = true;
build_tracing_subscriber(log_level, enable_logging);
let messages_to_send = 10;
@@ -601,7 +609,8 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() {
_ => {} // Ignore other behaviour events
}
}
};
}
.instrument(info_span!("Sender"));
// determine messages to send (PeerId, RequestId). If some, indicates we still need to send
// messages
@@ -648,7 +657,8 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() {
}
}
}
};
}
.instrument(info_span!("Receiver"));
tokio::select! {
_ = sender_future => {}
@@ -666,7 +676,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() {
fn test_tcp_blocks_by_range_single_empty_rpc() {
// Set up the logging.
let log_level = "trace";
let enable_logging = false;
let enable_logging = true;
build_tracing_subscriber(log_level, enable_logging);
let rt = Arc::new(Runtime::new().unwrap());
@@ -734,7 +744,8 @@ fn test_tcp_blocks_by_range_single_empty_rpc() {
_ => {} // Ignore other behaviour events
}
}
};
}
.instrument(info_span!("Sender"));
// build the receiver future
let receiver_future = async {
@@ -767,7 +778,8 @@ fn test_tcp_blocks_by_range_single_empty_rpc() {
_ => {} // Ignore other events
}
}
};
}
.instrument(info_span!("Receiver"));
tokio::select! {
_ = sender_future => {}
_ = receiver_future => {}
@@ -787,7 +799,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() {
fn test_tcp_blocks_by_root_chunked_rpc() {
// Set up the logging.
let log_level = "debug";
let enable_logging = false;
let enable_logging = true;
build_tracing_subscriber(log_level, enable_logging);
let messages_to_send = 6;
@@ -877,7 +889,8 @@ fn test_tcp_blocks_by_root_chunked_rpc() {
_ => {} // Ignore other behaviour events
}
}
};
}
.instrument(info_span!("Sender"));
// build the receiver future
let receiver_future = async {
@@ -916,7 +929,8 @@ fn test_tcp_blocks_by_root_chunked_rpc() {
_ => {} // Ignore other events
}
}
};
}
.instrument(info_span!("Receiver"));
tokio::select! {
_ = sender_future => {}
_ = receiver_future => {}
@@ -932,7 +946,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() {
fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() {
// Set up the logging.
let log_level = "debug";
let enable_logging = false;
let enable_logging = true;
build_tracing_subscriber(log_level, enable_logging);
let messages_to_send: u64 = 10;
@@ -1015,7 +1029,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() {
_ => {} // Ignore other behaviour events
}
}
};
}
.instrument(info_span!("Sender"));
// determine messages to send (PeerId, RequestId). If some, indicates we still need to send
// messages
@@ -1062,7 +1077,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() {
}
}
}
};
}
.instrument(info_span!("Receiver"));
tokio::select! {
_ = sender_future => {}
@@ -1115,7 +1131,8 @@ fn goodbye_test(log_level: &str, enable_logging: bool, protocol: Protocol) {
_ => {} // Ignore other RPC messages
}
}
};
}
.instrument(info_span!("Sender"));
// build the receiver future
let receiver_future = async {
@@ -1125,7 +1142,8 @@ fn goodbye_test(log_level: &str, enable_logging: bool, protocol: Protocol) {
return;
}
}
};
}
.instrument(info_span!("Receiver"));
let total_future = futures::future::join(sender_future, receiver_future);
@@ -1143,7 +1161,7 @@ fn goodbye_test(log_level: &str, enable_logging: bool, protocol: Protocol) {
#[allow(clippy::single_match)]
fn tcp_test_goodbye_rpc() {
let log_level = "debug";
let enabled_logging = false;
let enabled_logging = true;
goodbye_test(log_level, enabled_logging, Protocol::Tcp);
}
@@ -1152,13 +1170,15 @@ fn tcp_test_goodbye_rpc() {
#[allow(clippy::single_match)]
fn quic_test_goodbye_rpc() {
let log_level = "debug";
let enabled_logging = false;
let enabled_logging = true;
goodbye_test(log_level, enabled_logging, Protocol::Quic);
}
// Test that the receiver delays the responses during response rate-limiting.
#[test]
fn test_delayed_rpc_response() {
// Set up the logging.
build_tracing_subscriber("debug", true);
let rt = Arc::new(Runtime::new().unwrap());
let spec = Arc::new(E::default_spec());
@@ -1214,7 +1234,7 @@ fn test_delayed_rpc_response() {
app_request_id: _,
response,
} => {
debug!(%request_id, "Sender received");
debug!(%request_id, elapsed = ?request_sent_at.elapsed(), "Sender received response");
assert_eq!(response, rpc_response);
match request_id {
@@ -1289,6 +1309,8 @@ fn test_delayed_rpc_response() {
// once, thanks to the self-limiter on the sender side.
#[test]
fn test_active_requests() {
// Set up the logging.
build_tracing_subscriber("debug", true);
let rt = Arc::new(Runtime::new().unwrap());
let spec = Arc::new(E::default_spec());

View File

@@ -4,17 +4,12 @@ version = "0.2.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = { workspace = true }
[dev-dependencies]
bls = { workspace = true }
eth2 = { workspace = true }
eth2_network_config = { workspace = true }
genesis = { workspace = true }
gossipsub = { workspace = true }
k256 = "0.13.4"
kzg = { workspace = true }
matches = "0.1.8"
rand_chacha = "0.3.1"
serde_json = { workspace = true }
[features]
# NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill
disable-backfill = []
fork_from_env = ["beacon_chain/fork_from_env"]
portable = ["beacon_chain/portable"]
test_logger = []
[dependencies]
alloy-primitives = { workspace = true }
@@ -51,10 +46,14 @@ tracing = { workspace = true }
tracing-subscriber = { workspace = true }
types = { workspace = true }
[features]
# NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill
disable-backfill = []
fork_from_env = ["beacon_chain/fork_from_env"]
portable = ["beacon_chain/portable"]
test_logger = []
ci_logger = []
[dev-dependencies]
bls = { workspace = true }
eth2 = { workspace = true }
eth2_network_config = { workspace = true }
genesis = { workspace = true }
gossipsub = { workspace = true }
k256 = "0.13.4"
kzg = { workspace = true }
matches = "0.1.8"
rand_chacha = "0.3.1"
serde_json = { workspace = true }

View File

@@ -107,6 +107,8 @@ impl TestRig {
// deterministic seed
let rng = ChaCha20Rng::from_seed([0u8; 32]);
init_tracing();
TestRig {
beacon_processor_rx,
beacon_processor_rx_queue: vec![],

View File

@@ -9,9 +9,14 @@ use beacon_processor::WorkEvent;
use lighthouse_network::NetworkGlobals;
use rand_chacha::ChaCha20Rng;
use slot_clock::ManualSlotClock;
use std::sync::Arc;
use std::fs::OpenOptions;
use std::io::Write;
use std::sync::{Arc, Once};
use store::MemoryStore;
use tokio::sync::mpsc;
use tracing_subscriber::fmt::MakeWriter;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use types::{ChainSpec, ForkName, MinimalEthSpec as E};
mod lookups;
@@ -65,3 +70,55 @@ struct TestRig {
fork_name: ForkName,
spec: Arc<ChainSpec>,
}
// Environment variable to read if `fork_from_env` feature is enabled.
pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
// Environment variable specifying the log output directory in CI.
pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR";
// Guards against registering the global subscriber more than once per process.
static INIT_TRACING: Once = Once::new();
/// Install a global tracing subscriber that writes a separate log file per
/// test (and per fork) via [`CILogWriter`].
///
/// This is a no-op unless `CI_LOGGER_DIR` is set in the environment, so local
/// test runs are unaffected. Safe to call from every test: the `Once` guard
/// ensures the subscriber is only initialised a single time.
pub fn init_tracing() {
    INIT_TRACING.call_once(|| {
        // Only enable file logging when a CI log directory is configured.
        if std::env::var(CI_LOGGER_DIR_ENV_VAR).is_err() {
            return;
        }
        let file_layer = tracing_subscriber::fmt::layer()
            .with_ansi(false)
            .with_writer(CILogWriter);
        tracing_subscriber::registry().with(file_layer).init();
    });
}
/// `CILogWriter` writes logs to a separate file for each test and each fork.
///
/// The output directory is taken from the `CI_LOGGER_DIR` environment
/// variable; the file name is `<fork>_<test_name>.log` (the fork prefix is
/// omitted when `FORK_NAME` is unset).
struct CILogWriter;
impl<'a> MakeWriter<'a> for CILogWriter {
    type Writer = Box<dyn Write + Send>;

    // fmt::Layer calls this method each time an event is recorded.
    fn make_writer(&'a self) -> Self::Writer {
        // Invariant: `init_tracing` only installs this writer when
        // CI_LOGGER_DIR is set, so the variable must be present here.
        let log_dir = std::env::var(CI_LOGGER_DIR_ENV_VAR)
            .expect("CI_LOGGER_DIR must be set when CILogWriter is installed");
        // Prefix the file name with the fork, if one is specified.
        let fork_name = std::env::var(FORK_NAME_ENV_VAR)
            .map(|s| format!("{s}_"))
            .unwrap_or_default();
        // The current test name can be obtained via the thread name; sanitise
        // it so it is always safe to use as a file name.
        let test_name = std::thread::current()
            .name()
            .unwrap_or("unnamed")
            .replace(|c: char| !c.is_alphanumeric(), "_");
        let file_path = format!("{log_dir}/{fork_name}{test_name}.log");
        // Append so repeated events from the same test accumulate in one file.
        // NOTE: the file is re-opened for every log event; acceptable for CI
        // debug logging, but not a high-throughput writer.
        let file = OpenOptions::new()
            .append(true)
            .create(true)
            .open(&file_path)
            .expect("failed to open a log file");
        Box::new(file)
    }
}

View File

@@ -4,6 +4,9 @@ version = "0.2.0"
authors = ["Michael Sproul <michael@sigmaprime.io>"]
edition = { workspace = true }
[features]
portable = ["beacon_chain/portable"]
[dependencies]
bitvec = { workspace = true }
derivative = { workspace = true }
@@ -23,6 +26,3 @@ types = { workspace = true }
beacon_chain = { workspace = true }
maplit = { workspace = true }
tokio = { workspace = true }
[features]
portable = ["beacon_chain/portable"]

View File

@@ -9,12 +9,6 @@ default = ["leveldb"]
leveldb = ["dep:leveldb"]
redb = ["dep:redb"]
[dev-dependencies]
beacon_chain = { workspace = true }
criterion = { workspace = true }
rand = { workspace = true, features = ["small_rng"] }
tempfile = { workspace = true }
[dependencies]
bls = { workspace = true }
db-key = "0.0.5"
@@ -40,6 +34,12 @@ types = { workspace = true }
xdelta3 = { workspace = true }
zstd = { workspace = true }
[dev-dependencies]
beacon_chain = { workspace = true }
criterion = { workspace = true }
rand = { workspace = true, features = ["small_rng"] }
tempfile = { workspace = true }
[[bench]]
name = "hdiff"
harness = false