From 64031b6cbb7d20d90bc72064b6778f840bbb1e4a Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 1 Dec 2025 11:19:49 +1100 Subject: [PATCH 01/26] Add tracing spans to validator client duty cycles (#8482) Co-Authored-By: Jimmy Chen Co-Authored-By: Jimmy Chen --- .../src/attestation_service.rs | 28 ++++++++++++++++- .../validator_services/src/block_service.rs | 11 ++++++- .../src/sync_committee_service.rs | 30 ++++++++++++++++--- 3 files changed, 63 insertions(+), 6 deletions(-) diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index da6e8f3588..a6ce67fae9 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -8,7 +8,7 @@ use std::ops::Deref; use std::sync::Arc; use task_executor::TaskExecutor; use tokio::time::{Duration, Instant, sleep, sleep_until}; -use tracing::{debug, error, info, trace, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; @@ -243,6 +243,11 @@ impl AttestationService AttestationService AttestationService AttestationService, Vec<_>) = join_all(signing_futures) + .instrument(info_span!( + "sign_attestations", + count = validator_duties.len() + )) .await .into_iter() .flatten() @@ -487,6 +498,10 @@ impl AttestationService(single_attestations, fork_name) .await }) + .instrument(info_span!( + "publish_attestations", + count = attestations.len() + )) .await { Ok(()) => info!( @@ -523,6 +538,7 @@ impl AttestationService AttestationService AttestationService AttestationService { diff --git a/validator_client/validator_services/src/block_service.rs b/validator_client/validator_services/src/block_service.rs index c111b1f22e..5ffabd22ec 100644 --- 
a/validator_client/validator_services/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; -use tracing::{debug, error, info, trace, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; use types::{BlockType, ChainSpec, EthSpec, Graffiti, PublicKeyBytes, Slot}; use validator_store::{Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore}; @@ -320,6 +320,7 @@ impl BlockService { } #[allow(clippy::too_many_arguments)] + #[instrument(skip_all, fields(%slot, ?validator_pubkey))] async fn sign_and_publish_block( &self, proposer_fallback: ProposerFallback, @@ -333,6 +334,7 @@ impl BlockService { let res = self .validator_store .sign_block(*validator_pubkey, unsigned_block, slot) + .instrument(info_span!("sign_block")) .await; let signed_block = match res { @@ -389,6 +391,11 @@ impl BlockService { Ok(()) } + #[instrument( + name = "block_proposal_duty_cycle", + skip_all, + fields(%slot, ?validator_pubkey) + )] async fn publish_block( self, slot: Slot, @@ -483,6 +490,7 @@ impl BlockService { Ok(()) } + #[instrument(skip_all)] async fn publish_signed_block_contents( &self, signed_block: &SignedBlock, @@ -518,6 +526,7 @@ impl BlockService { Ok::<_, BlockError>(()) } + #[instrument(skip_all, fields(%slot))] async fn get_validator_block( beacon_node: &BeaconNodeHttpClient, slot: Slot, diff --git a/validator_client/validator_services/src/sync_committee_service.rs b/validator_client/validator_services/src/sync_committee_service.rs index 02f9f24c8a..5f6b1cb710 100644 --- a/validator_client/validator_services/src/sync_committee_service.rs +++ b/validator_client/validator_services/src/sync_committee_service.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; use task_executor::TaskExecutor; use tokio::time::{Duration, Instant, sleep, 
sleep_until}; -use tracing::{debug, error, info, trace, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; use types::{ ChainSpec, EthSpec, Hash256, PublicKeyBytes, Slot, SyncCommitteeSubscription, SyncContributionData, SyncDuty, SyncSelectionProof, SyncSubnetId, @@ -208,7 +208,8 @@ impl SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService Date: Mon, 1 Dec 2025 13:56:50 +0800 Subject: [PATCH 02/26] Refactor get_validator_blocks_v3 fallback (#8186) #7727 introduced a bug in the logging, where as long as the node failed the SSZ `get_validator_blocks_v3` endpoint, it would log as `Beacon node does not support...`. However, the failure can be due to other reasons, such as a timed out error as found by @jimmygchen: `WARN Beacon node does not support SSZ in block production, falling back to JSON slot: 5283379, error: HttpClient(url: https://ho-h-bn-cowl.spesi.io:15052/, kind: timeout, detail: operation timed out` This PR made the error log more generic, so there is less confusion. Additionally, suggested by @michaelsproul, this PR refactors the `get_validator_blocks_v3` calls by trying all beacon nodes using the SSZ endpoint first, and if all beacon node fails the SSZ endpoint, only then fallback to JSON. It changes the logic from: "SSZ -> JSON for primary beacon node, followed by SSZ -> JSON for second beacon node and so on" to "SSZ for all beacon nodes -> JSON for all beacon nodes" This has the advantage that if the primary beacon node is having issues and failed the SSZ, we avoid retrying the primary beacon node again on JSON (as it could be that the primary beacon node fail again); rather, we switch to the second beacon node. 
Co-Authored-By: Tan Chee Keong Co-Authored-By: chonghe <44791194+chong-he@users.noreply.github.com> --- .../validator_services/src/block_service.rs | 155 ++++++++---------- 1 file changed, 68 insertions(+), 87 deletions(-) diff --git a/validator_client/validator_services/src/block_service.rs b/validator_client/validator_services/src/block_service.rs index 5ffabd22ec..8ec53d3f40 100644 --- a/validator_client/validator_services/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -1,5 +1,4 @@ use beacon_node_fallback::{ApiTopic, BeaconNodeFallback, Error as FallbackError, Errors}; -use bls::SignatureBytes; use eth2::{BeaconNodeHttpClient, StatusCode}; use graffiti_file::{GraffitiFile, determine_graffiti}; use logging::crit; @@ -298,7 +297,7 @@ impl BlockService { self.inner.executor.spawn( async move { let result = service - .publish_block(slot, validator_pubkey, builder_boost_factor) + .get_validator_block_and_publish_block(slot, validator_pubkey, builder_boost_factor) .await; match result { @@ -396,7 +395,7 @@ impl BlockService { skip_all, fields(%slot, ?validator_pubkey) )] - async fn publish_block( + async fn get_validator_block_and_publish_block( self, slot: Slot, validator_pubkey: PublicKeyBytes, @@ -449,33 +448,80 @@ impl BlockService { info!(slot = slot.as_u64(), "Requesting unsigned block"); - // Request block from first responsive beacon node. + // Request an SSZ block from all beacon nodes in order, returning on the first successful response. + // If all nodes fail, run a second pass falling back to JSON. // - // Try the proposer nodes last, since it's likely that they don't have a + // Proposer nodes will always be tried last during each pass since it's likely that they don't have a // great view of attestations on the network. 
- let unsigned_block = proposer_fallback + let ssz_block_response = proposer_fallback .request_proposers_last(|beacon_node| async move { let _get_timer = validator_metrics::start_timer_vec( &validator_metrics::BLOCK_SERVICE_TIMES, &[validator_metrics::BEACON_BLOCK_HTTP_GET], ); - Self::get_validator_block( - &beacon_node, - slot, - randao_reveal_ref, - graffiti, - proposer_index, - builder_boost_factor, - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - }) + beacon_node + .get_validator_blocks_v3_ssz::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + ) + .await }) - .await?; + .await; + + let block_response = match ssz_block_response { + Ok((ssz_block_response, _metadata)) => ssz_block_response, + Err(e) => { + warn!( + slot = slot.as_u64(), + error = %e, + "SSZ block production failed, falling back to JSON" + ); + + proposer_fallback + .request_proposers_last(|beacon_node| async move { + let _get_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK_HTTP_GET], + ); + let (json_block_response, _metadata) = beacon_node + .get_validator_blocks_v3::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })?; + + Ok(json_block_response.data) + }) + .await + .map_err(BlockError::from)? 
+ } + }; + + let (block_proposer, unsigned_block) = match block_response { + eth2::types::ProduceBlockV3Response::Full(block) => { + (block.block().proposer_index(), UnsignedBlock::Full(block)) + } + eth2::types::ProduceBlockV3Response::Blinded(block) => { + (block.proposer_index(), UnsignedBlock::Blinded(block)) + } + }; + + info!(slot = slot.as_u64(), "Received unsigned block"); + if proposer_index != Some(block_proposer) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), + )); + } self_ref .sign_and_publish_block( @@ -525,71 +571,6 @@ impl BlockService { } Ok::<_, BlockError>(()) } - - #[instrument(skip_all, fields(%slot))] - async fn get_validator_block( - beacon_node: &BeaconNodeHttpClient, - slot: Slot, - randao_reveal_ref: &SignatureBytes, - graffiti: Option, - proposer_index: Option, - builder_boost_factor: Option, - ) -> Result, BlockError> { - let block_response = match beacon_node - .get_validator_blocks_v3_ssz::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - builder_boost_factor, - ) - .await - { - Ok((ssz_block_response, _)) => ssz_block_response, - Err(e) => { - warn!( - slot = slot.as_u64(), - error = %e, - "Beacon node does not support SSZ in block production, falling back to JSON" - ); - - let (json_block_response, _) = beacon_node - .get_validator_blocks_v3::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - builder_boost_factor, - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })?; - - // Extract ProduceBlockV3Response (data field of the struct ForkVersionedResponse) - json_block_response.data - } - }; - - let (block_proposer, unsigned_block) = match block_response { - eth2::types::ProduceBlockV3Response::Full(block) => { - (block.block().proposer_index(), UnsignedBlock::Full(block)) - } - eth2::types::ProduceBlockV3Response::Blinded(block) => { - (block.proposer_index(), 
UnsignedBlock::Blinded(block)) - } - }; - - info!(slot = slot.as_u64(), "Received unsigned block"); - if proposer_index != Some(block_proposer) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), - )); - } - - Ok::<_, BlockError>(unsigned_block) - } } /// Wrapper for values we want to log about a block we signed, for easy extraction from the possible From f42b14ac589b29013cbb971bb8fa733bd13a6537 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 2 Dec 2025 09:09:08 +1100 Subject: [PATCH 03/26] Update local testnet scripts for the fulu fork (#8489) * Remove `fulu-devnet-3` testing on CI * Delete `scripts/local_testnet/network_params_das.yaml` and consolidate it into the main `network_params.yaml` file we use on CI * Delete enclave before building image, so it doesn't cause slow image building. Co-Authored-By: Jimmy Chen --- .github/workflows/local-testnet.yml | 2 +- scripts/local_testnet/README.md | 2 +- scripts/local_testnet/network_params.yaml | 32 +++++++++++---- scripts/local_testnet/network_params_das.yaml | 41 ------------------- scripts/local_testnet/start_local_testnet.sh | 10 ++--- .../tests/checkpoint-sync-config-devnet.yaml | 24 ----------- 6 files changed, 32 insertions(+), 79 deletions(-) delete mode 100644 scripts/local_testnet/network_params_das.yaml delete mode 100644 scripts/tests/checkpoint-sync-config-devnet.yaml diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index c129c0ec95..9992273e0a 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -179,7 +179,7 @@ jobs: continue-on-error: true strategy: matrix: - network: [sepolia, devnet] + network: [sepolia] steps: - uses: actions/checkout@v5 diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 9d9844c4c4..6260f91019 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -21,7 +21,7 @@ cd 
./scripts/local_testnet ``` It will build a Lighthouse docker image from the root of the directory and will take an approximately 12 minutes to complete. Once built, the testing will be started automatically. You will see a list of services running and "Started!" at the end. -You can also select your own Lighthouse docker image to use by specifying it in `network_params.yml` under the `cl_image` key. +You can also select your own Lighthouse docker image to use by specifying it in `network_params.yaml` under the `cl_image` key. Full configuration reference for Kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). To view all running services: diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index cdfacbced4..a048674e63 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -1,19 +1,37 @@ # Full configuration reference [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). 
participants: - - el_type: geth - el_image: ethereum/client-go:latest - cl_type: lighthouse + - cl_type: lighthouse cl_image: lighthouse:local + el_type: geth + el_image: ethereum/client-go:latest + supernode: true cl_extra_params: - --target-peers=3 - count: 4 + count: 2 + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth + el_image: ethereum/client-go:latest + supernode: false + cl_extra_params: + - --target-peers=3 + count: 2 network_params: - electra_fork_epoch: 0 - seconds_per_slot: 3 -global_log_level: debug + fulu_fork_epoch: 0 + seconds_per_slot: 6 snooper_enabled: false +global_log_level: debug additional_services: - dora - spamoor - prometheus_grafana - tempo +spamoor_params: + image: ethpandaops/spamoor:master + spammers: + - scenario: eoatx + config: + throughput: 200 + - scenario: blobs + config: + throughput: 20 \ No newline at end of file diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml deleted file mode 100644 index e3bc513153..0000000000 --- a/scripts/local_testnet/network_params_das.yaml +++ /dev/null @@ -1,41 +0,0 @@ -participants: - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - supernode: true - cl_extra_params: - # Note: useful for testing range sync (only produce block if the node is in sync to prevent forking) - - --sync-tolerance-epochs=0 - - --target-peers=3 - count: 2 - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - supernode: false - cl_extra_params: - # Note: useful for testing range sync (only produce block if the node is in sync to prevent forking) - - --sync-tolerance-epochs=0 - - --target-peers=3 - count: 2 -network_params: - electra_fork_epoch: 0 - fulu_fork_epoch: 1 - seconds_per_slot: 6 -snooper_enabled: false -global_log_level: debug -additional_services: - - dora - - spamoor - - prometheus_grafana - - tempo -spamoor_params: - image: 
ethpandaops/spamoor:master - spammers: - - scenario: eoatx - config: - throughput: 200 - - scenario: blobs - config: - throughput: 20 \ No newline at end of file diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 442e6fd98d..8d8b33526d 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -78,6 +78,11 @@ if [ "$RUN_ASSERTOOR_TESTS" = true ]; then echo "Assertoor has been added to $NETWORK_PARAMS_FILE." fi +if [ "$KEEP_ENCLAVE" = false ]; then + # Stop local testnet + kurtosis enclave rm -f $ENCLAVE_NAME 2>/dev/null || true +fi + if [ "$BUILD_IMAGE" = true ]; then echo "Building Lighthouse Docker image." ROOT_DIR="$SCRIPT_DIR/../.." @@ -86,11 +91,6 @@ else echo "Not rebuilding Lighthouse Docker image." fi -if [ "$KEEP_ENCLAVE" = false ]; then - # Stop local testnet - kurtosis enclave rm -f $ENCLAVE_NAME 2>/dev/null || true -fi - kurtosis run --enclave $ENCLAVE_NAME github.com/ethpandaops/ethereum-package@$ETHEREUM_PKG_VERSION --args-file $NETWORK_PARAMS_FILE echo "Started!" diff --git a/scripts/tests/checkpoint-sync-config-devnet.yaml b/scripts/tests/checkpoint-sync-config-devnet.yaml deleted file mode 100644 index 2392011ed3..0000000000 --- a/scripts/tests/checkpoint-sync-config-devnet.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Kurtosis config file to checkpoint sync to a running devnet supported by ethPandaOps and `ethereum-package`. 
-participants: - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - cl_extra_params: - - --disable-backfill-rate-limiting - supernode: true - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - cl_extra_params: - - --disable-backfill-rate-limiting - supernode: false - -checkpoint_sync_enabled: true -checkpoint_sync_url: "https://checkpoint-sync.fusaka-devnet-3.ethpandaops.io" - -global_log_level: debug - -network_params: - network: fusaka-devnet-3 From 4fbe5174915a7d98998e83512ed1f1bacdf433b2 Mon Sep 17 00:00:00 2001 From: 0xMushow <105550256+0xMushow@users.noreply.github.com> Date: Tue, 2 Dec 2025 04:06:29 +0100 Subject: [PATCH 04/26] Fix data columns sorting when reconstructing blobs (#8510) Closes https://github.com/sigp/lighthouse/issues/8509 Co-Authored-By: Antoine James --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/kzg_utils.rs | 36 +++++++++++++++++--- beacon_node/http_api/src/block_id.rs | 2 +- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 494346e7ff..00c5ab415c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1248,7 +1248,7 @@ impl BeaconChain { let num_required_columns = T::EthSpec::number_of_columns() / 2; let reconstruction_possible = columns.len() >= num_required_columns; if reconstruction_possible { - reconstruct_blobs(&self.kzg, &columns, None, &block, &self.spec) + reconstruct_blobs(&self.kzg, columns, None, &block, &self.spec) .map(Some) .map_err(Error::FailedToReconstructBlobs) } else { diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 200774ebe4..334124419b 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -308,12 
+308,14 @@ pub(crate) fn build_data_column_sidecars( /// and it will be slow if the node needs to reconstruct the blobs pub fn reconstruct_blobs( kzg: &Kzg, - data_columns: &[Arc>], + mut data_columns: Vec>>, blob_indices_opt: Option>, signed_block: &SignedBlindedBeaconBlock, spec: &ChainSpec, ) -> Result, String> { - // The data columns are from the database, so we assume their correctness. + // Sort data columns by index to ensure ascending order for KZG operations + data_columns.sort_unstable_by_key(|dc| dc.index); + let first_data_column = data_columns .first() .ok_or("data_columns should have at least one element".to_string())?; @@ -331,7 +333,7 @@ pub fn reconstruct_blobs( .map(|row_index| { let mut cells: Vec = vec![]; let mut cell_ids: Vec = vec![]; - for data_column in data_columns { + for data_column in &data_columns { let cell = data_column .column .get(row_index) @@ -463,6 +465,7 @@ mod test { test_reconstruct_data_columns(&kzg, &spec); test_reconstruct_data_columns_unordered(&kzg, &spec); test_reconstruct_blobs_from_data_columns(&kzg, &spec); + test_reconstruct_blobs_from_data_columns_unordered(&kzg, &spec); test_validate_data_columns(&kzg, &spec); } @@ -595,7 +598,7 @@ mod test { let blob_indices = vec![1, 2]; let reconstructed_blobs = reconstruct_blobs( kzg, - &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + column_sidecars[0..column_sidecars.len() / 2].to_vec(), Some(blob_indices.clone()), &signed_blinded_block, spec, @@ -613,6 +616,31 @@ mod test { } } + #[track_caller] + fn test_reconstruct_blobs_from_data_columns_unordered(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 2; + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); + let blob_refs = blobs.iter().collect::>(); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); + + // Test reconstruction with columns in reverse order (non-ascending) + let mut 
subset_columns: Vec<_> = + column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2].to_vec(); + subset_columns.reverse(); // This would fail without proper sorting in reconstruct_blobs + + let signed_blinded_block = signed_block.into(); + let reconstructed_blobs = + reconstruct_blobs(kzg, subset_columns, None, &signed_blinded_block, spec).unwrap(); + + for (i, original_blob) in blobs.iter().enumerate() { + let reconstructed_blob = &reconstructed_blobs.get(i).unwrap().blob; + assert_eq!(reconstructed_blob, original_blob, "{i}"); + } + } + fn get_kzg() -> Kzg { Kzg::new_from_trusted_setup(&get_trusted_setup()).expect("should create kzg") } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 778067c32b..e088005f20 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -474,7 +474,7 @@ impl BlockId { ) .collect::, _>>()?; - reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block, &chain.spec).map_err( + reconstruct_blobs(&chain.kzg, data_columns, blob_indices, block, &chain.spec).map_err( |e| { warp_utils::reject::custom_server_error(format!( "Error reconstructing data columns: {e:?}" From 7ef9501ff66f0edb88b7680b4a24d65d3309b363 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 2 Dec 2025 16:17:13 +1100 Subject: [PATCH 05/26] Instrument attestation signing. (#8508) We noticed attestation signing taking 2+ seconds on some of our hoodi nodes and the current traces doesn't provide enough details. This PR adds a few more spans to the `attestation_duty_cycle` code path in the VC. 
Before: image After: image Co-Authored-By: Jimmy Chen --- Cargo.lock | 1 + .../lighthouse_validator_store/src/lib.rs | 4 +- validator_client/signing_method/Cargo.toml | 1 + validator_client/signing_method/src/lib.rs | 2 + .../src/slashing_database.rs | 2 + .../src/attestation_service.rs | 133 +++++++++--------- 6 files changed, 77 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e3730f132b..7ddcad7239 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8336,6 +8336,7 @@ dependencies = [ "reqwest", "serde", "task_executor", + "tracing", "types", "url", "validator_metrics", diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index d10fecb32e..dc8fb07b65 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -15,7 +15,7 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; -use tracing::{error, info, warn}; +use tracing::{error, info, instrument, warn}; use types::{ AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, @@ -242,6 +242,7 @@ impl LighthouseValidatorStore { /// Returns a `SigningMethod` for `validator_pubkey` *only if* that validator is considered safe /// by doppelganger protection. 
+ #[instrument(skip_all, level = "debug")] fn doppelganger_checked_signing_method( &self, validator_pubkey: PublicKeyBytes, @@ -745,6 +746,7 @@ impl ValidatorStore for LighthouseValidatorS } } + #[instrument(skip_all)] async fn sign_attestation( &self, validator_pubkey: PublicKeyBytes, diff --git a/validator_client/signing_method/Cargo.toml b/validator_client/signing_method/Cargo.toml index 3e1a48142f..2defd25caa 100644 --- a/validator_client/signing_method/Cargo.toml +++ b/validator_client/signing_method/Cargo.toml @@ -12,6 +12,7 @@ parking_lot = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } task_executor = { workspace = true } +tracing = { workspace = true } types = { workspace = true } url = { workspace = true } validator_metrics = { workspace = true } diff --git a/validator_client/signing_method/src/lib.rs b/validator_client/signing_method/src/lib.rs index c535415b1e..7e0f2c02f7 100644 --- a/validator_client/signing_method/src/lib.rs +++ b/validator_client/signing_method/src/lib.rs @@ -10,6 +10,7 @@ use reqwest::{Client, header::ACCEPT}; use std::path::PathBuf; use std::sync::Arc; use task_executor::TaskExecutor; +use tracing::instrument; use types::*; use url::Url; use web3signer::{ForkInfo, MessageType, SigningRequest, SigningResponse}; @@ -131,6 +132,7 @@ impl SigningMethod { } /// Return the signature of `signable_message`, with respect to the `signing_context`. 
+ #[instrument(skip_all, level = "debug")] pub async fn get_signature>( &self, signable_message: SignableMessage<'_, E, Payload>, diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index ce32299a51..00677212a3 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -11,6 +11,7 @@ use rusqlite::{OptionalExtension, Transaction, TransactionBehavior, params}; use std::fs::File; use std::path::Path; use std::time::Duration; +use tracing::instrument; use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, PublicKeyBytes, SignedRoot, Slot}; type Pool = r2d2::Pool; @@ -639,6 +640,7 @@ impl SlashingDatabase { /// to prevent concurrent checks and inserts from resulting in slashable data being inserted. /// /// This is the safe, externally-callable interface for checking attestations. + #[instrument(skip_all, level = "debug")] pub fn check_and_insert_attestation( &self, validator_pubkey: &PublicKeyBytes, diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index a6ce67fae9..8211fb11f3 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -8,7 +8,7 @@ use std::ops::Deref; use std::sync::Arc; use task_executor::TaskExecutor; use tokio::time::{Duration, Instant, sleep, sleep_until}; -use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; +use tracing::{Instrument, Span, debug, error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; @@ -369,79 +369,82 @@ impl AttestationService(attestation_data, &self.chain_spec) { - 
crit!( - validator = ?duty.pubkey, - duty_slot = %duty.slot, - attestation_slot = %attestation_data.slot, - duty_index = duty.committee_index, - attestation_index = attestation_data.index, - "Inconsistent validator duties during signing" - ); - return None; - } - - let mut attestation = match Attestation::empty_for_signing( - duty.committee_index, - duty.committee_length as usize, - attestation_data.slot, - attestation_data.beacon_block_root, - attestation_data.source, - attestation_data.target, - &self.chain_spec, - ) { - Ok(attestation) => attestation, - Err(err) => { + // Ensure that the attestation matches the duties. + if !duty.match_attestation_data::(attestation_data, &self.chain_spec) { crit!( validator = ?duty.pubkey, - ?duty, - ?err, - "Invalid validator duties during signing" + duty_slot = %duty.slot, + attestation_slot = %attestation_data.slot, + duty_index = duty.committee_index, + attestation_index = attestation_data.index, + "Inconsistent validator duties during signing" ); return None; } - }; - match self - .validator_store - .sign_attestation( - duty.pubkey, - duty.validator_committee_index as usize, - &mut attestation, - current_epoch, - ) - .await - { - Ok(()) => Some((attestation, duty.validator_index)), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently - // removed via the API. 
- warn!( - info = "a validator may have recently been removed from this VC", - pubkey = ?pubkey, - validator = ?duty.pubkey, - committee_index = committee_index, - slot = slot.as_u64(), - "Missing pubkey for attestation" - ); - None - } - Err(e) => { - crit!( - error = ?e, - validator = ?duty.pubkey, - committee_index, - slot = slot.as_u64(), - "Failed to sign attestation" - ); - None + let mut attestation = match Attestation::empty_for_signing( + duty.committee_index, + duty.committee_length as usize, + attestation_data.slot, + attestation_data.beacon_block_root, + attestation_data.source, + attestation_data.target, + &self.chain_spec, + ) { + Ok(attestation) => attestation, + Err(err) => { + crit!( + validator = ?duty.pubkey, + ?duty, + ?err, + "Invalid validator duties during signing" + ); + return None; + } + }; + + match self + .validator_store + .sign_attestation( + duty.pubkey, + duty.validator_committee_index as usize, + &mut attestation, + current_epoch, + ) + .await + { + Ok(()) => Some((attestation, duty.validator_index)), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. + warn!( + info = "a validator may have recently been removed from this VC", + pubkey = ?pubkey, + validator = ?duty.pubkey, + committee_index = committee_index, + slot = slot.as_u64(), + "Missing pubkey for attestation" + ); + None + } + Err(e) => { + crit!( + error = ?e, + validator = ?duty.pubkey, + committee_index, + slot = slot.as_u64(), + "Failed to sign attestation" + ); + None + } } } + .instrument(Span::current()) }); // Execute all the futures in parallel, collecting any successful results. 
From 0bccc7090c9dc27b57e860f7ab9aaeb83eb305e1 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Wed, 3 Dec 2025 09:45:47 +0800 Subject: [PATCH 06/26] Always use committee index 0 when getting attestation data (#8171) * #8046 Split the function `publish_attestations_and_aggregates` into `publish_attestations` and `handle_aggregates`, so that for attestations, only 1 task is spawned. Co-Authored-By: Tan Chee Keong Co-Authored-By: chonghe <44791194+chong-he@users.noreply.github.com> Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul --- .../src/attestation_service.rs | 240 +++++++++--------- 1 file changed, 126 insertions(+), 114 deletions(-) diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index 8211fb11f3..b2b8bc81e2 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -180,8 +180,9 @@ impl AttestationService Result<(), String> { let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?; let duration_to_next_slot = self @@ -189,6 +190,53 @@ impl AttestationService = self.duties_service.attesters(slot).into_iter().collect(); + let attestation_service = self.clone(); + + let attestation_data_handle = self + .inner + .executor + .spawn_handle( + async move { + let attestation_data = attestation_service + .beacon_nodes + .first_success(|beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS_HTTP_GET], + ); + beacon_node + .get_validator_attestation_data(slot, 0) + .await + .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) + .map(|result| result.data) + }) + .await + .map_err(|e| e.to_string())?; + + attestation_service + .sign_and_publish_attestations( + slot, + &attestation_duties, + 
attestation_data.clone(), + ) + .await + .map_err(|e| { + crit!( + error = format!("{:?}", e), + slot = slot.as_u64(), + "Error during attestation routine" + ); + e + })?; + Ok::(attestation_data) + }, + "unaggregated attestation production", + ) + .ok_or("Failed to spawn attestation data task")?; + // If a validator needs to publish an aggregate attestation, they must do so at 2/3 // through the slot. This delay triggers at this time let aggregate_production_instant = Instant::now() @@ -196,7 +244,7 @@ impl AttestationService> = self + let aggregate_duties_by_committee_index: HashMap> = self .duties_service .attesters(slot) .into_iter() @@ -207,24 +255,45 @@ impl AttestationService data, + Ok(Some(Err(err))) => { + error!(?err, "Attestation production failed"); + return; + } + Ok(None) | Err(_) => { + info!("Aborting attestation production due to shutdown"); + return; + } + }; + + // For each committee index for this slot: + // Create and publish `SignedAggregateAndProof` for all aggregating validators. + aggregate_duties_by_committee_index.into_iter().for_each( + |(committee_index, validator_duties)| { + let attestation_service = attestation_service_clone.clone(); + let attestation_data = attestation_data.clone(); + executor.spawn_ignoring_error( + attestation_service.handle_aggregates( + slot, + committee_index, + validator_duties, + aggregate_production_instant, + attestation_data, + ), + "aggregate publish", + ); + }, + ) + }, + "attestation and aggregate publish", + ); // Schedule pruning of the slashing protection database once all unaggregated // attestations have (hopefully) been signed, i.e. 
at the same time as aggregate @@ -234,114 +303,76 @@ impl AttestationService, aggregate_production_instant: Instant, + attestation_data: AttestationData, ) -> Result<(), ()> { - let attestations_timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::ATTESTATIONS], - ); - - // There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have + // There's not need to produce `SignedAggregateAndProof` if we do not have // any validators for the given `slot` and `committee_index`. if validator_duties.is_empty() { return Ok(()); } - // Step 1. - // - // Download, sign and publish an `Attestation` for each validator. - let attestation_opt = self - .produce_and_publish_attestations(slot, committee_index, &validator_duties) + // Wait until the `aggregation_production_instant` (2/3rds + // of the way though the slot). As verified in the + // `delay_triggers_when_in_the_past` test, this code will still run + // even if the instant has already elapsed. + sleep_until(aggregate_production_instant).await; + + // Start the metrics timer *after* we've done the delay. + let _aggregates_timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::AGGREGATES], + ); + + // Download, sign and publish a `SignedAggregateAndProof` for each + // validator that is elected to aggregate for this `slot` and + // `committee_index`. + self.produce_and_publish_aggregates(&attestation_data, committee_index, &validator_duties) .await .map_err(move |e| { crit!( error = format!("{:?}", e), committee_index, slot = slot.as_u64(), - "Error during attestation routine" + "Error during aggregate attestation routine" ) })?; - drop(attestations_timer); - - // Step 2. - // - // If an attestation was produced, make an aggregate. 
- if let Some(attestation_data) = attestation_opt { - // First, wait until the `aggregation_production_instant` (2/3rds - // of the way though the slot). As verified in the - // `delay_triggers_when_in_the_past` test, this code will still run - // even if the instant has already elapsed. - sleep_until(aggregate_production_instant).await; - - // Start the metrics timer *after* we've done the delay. - let _aggregates_timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::AGGREGATES], - ); - - // Then download, sign and publish a `SignedAggregateAndProof` for each - // validator that is elected to aggregate for this `slot` and - // `committee_index`. - self.produce_and_publish_aggregates( - &attestation_data, - committee_index, - &validator_duties, - ) - .await - .map_err(move |e| { - crit!( - error = format!("{:?}", e), - committee_index, - slot = slot.as_u64(), - "Error during attestation routine" - ) - })?; - } - Ok(()) } - /// Performs the first step of the attesting process: downloading `Attestation` objects, - /// signing them and returning them to the validator. + /// Performs the main steps of the attesting process: signing and publishing to the BN. /// - /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#attesting + /// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/validator.md#attesting /// /// ## Detail /// /// The given `validator_duties` should already be filtered to only contain those that match - /// `slot` and `committee_index`. Critical errors will be logged if this is not the case. - /// - /// Only one `Attestation` is downloaded from the BN. It is then cloned and signed by each - /// validator and the list of individually-signed `Attestation` objects is returned to the BN. - #[instrument(skip_all, fields(%slot, %committee_index))] - async fn produce_and_publish_attestations( + /// `slot`. 
Critical errors will be logged if this is not the case. + #[instrument(skip_all, fields(%slot, %attestation_data.beacon_block_root))] + async fn sign_and_publish_attestations( &self, slot: Slot, - committee_index: CommitteeIndex, validator_duties: &[DutyAndProof], - ) -> Result, String> { + attestation_data: AttestationData, + ) -> Result<(), String> { + let _attestations_timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS], + ); + if validator_duties.is_empty() { - return Ok(None); + return Ok(()); } let current_epoch = self @@ -350,23 +381,6 @@ impl AttestationService AttestationService AttestationService AttestationService AttestationService Date: Wed, 3 Dec 2025 15:06:28 +1100 Subject: [PATCH 07/26] Move deposit contract artifacts to /target (#8518) Alternative to: - https://github.com/sigp/lighthouse/pull/8488 Refactors deposit_contract crate to comply with Rust build conventions by placing generated artifacts in the build output directory. Co-Authored-By: Michael Sproul --- common/deposit_contract/build.rs | 9 ++++----- common/deposit_contract/src/lib.rs | 22 ++++++++++++++++------ 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/common/deposit_contract/build.rs b/common/deposit_contract/build.rs index cae1d480c8..2061d13c24 100644 --- a/common/deposit_contract/build.rs +++ b/common/deposit_contract/build.rs @@ -153,14 +153,13 @@ fn verify_checksum(bytes: &[u8], expected_checksum: &str) { /// Returns the directory that will be used to store the deposit contract ABI. 
fn abi_dir() -> PathBuf { - let base = env::var("CARGO_MANIFEST_DIR") - .expect("should know manifest dir") + let base = env::var("OUT_DIR") + .expect("should know out dir") .parse::() - .expect("should parse manifest dir as path") - .join("contracts"); + .expect("should parse out dir as path"); std::fs::create_dir_all(base.clone()) - .expect("should be able to create abi directory in manifest"); + .expect("should be able to create abi directory in out dir"); base } diff --git a/common/deposit_contract/src/lib.rs b/common/deposit_contract/src/lib.rs index 12c3bdaa89..e5f11bb89c 100644 --- a/common/deposit_contract/src/lib.rs +++ b/common/deposit_contract/src/lib.rs @@ -44,15 +44,25 @@ impl From for Error { pub const CONTRACT_DEPLOY_GAS: usize = 4_000_000; pub const DEPOSIT_GAS: usize = 400_000; -pub const ABI: &[u8] = include_bytes!("../contracts/v0.12.1_validator_registration.json"); -pub const BYTECODE: &[u8] = include_bytes!("../contracts/v0.12.1_validator_registration.bytecode"); +pub const ABI: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_validator_registration.json" +)); +pub const BYTECODE: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_validator_registration.bytecode" +)); pub const DEPOSIT_DATA_LEN: usize = 420; // lol pub mod testnet { - pub const ABI: &[u8] = - include_bytes!("../contracts/v0.12.1_testnet_validator_registration.json"); - pub const BYTECODE: &[u8] = - include_bytes!("../contracts/v0.12.1_testnet_validator_registration.bytecode"); + pub const ABI: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_testnet_validator_registration.json" + )); + pub const BYTECODE: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_testnet_validator_registration.bytecode" + )); } pub fn encode_eth1_tx_data(deposit_data: &DepositData) -> Result, Error> { From 51d033602098d75da1e921025bb7665df07e5ae3 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 4 Dec 2025 17:58:57 +1100 Subject: [PATCH 08/26] Move 
beacon state endpoints to a separate module. (#8529) Part of the http api refactor to move endpoint handlers to separate modules. This should improve code maintainability, incremental compilation time and rust analyzer performance. Co-Authored-By: Jimmy Chen --- beacon_node/http_api/src/beacon/mod.rs | 1 + beacon_node/http_api/src/beacon/states.rs | 787 ++++++++++++++++++++++ beacon_node/http_api/src/lib.rs | 705 ++----------------- beacon_node/http_api/src/utils.rs | 3 + 4 files changed, 830 insertions(+), 666 deletions(-) create mode 100644 beacon_node/http_api/src/beacon/mod.rs create mode 100644 beacon_node/http_api/src/beacon/states.rs create mode 100644 beacon_node/http_api/src/utils.rs diff --git a/beacon_node/http_api/src/beacon/mod.rs b/beacon_node/http_api/src/beacon/mod.rs new file mode 100644 index 0000000000..20394784ae --- /dev/null +++ b/beacon_node/http_api/src/beacon/mod.rs @@ -0,0 +1 @@ +pub mod states; diff --git a/beacon_node/http_api/src/beacon/states.rs b/beacon_node/http_api/src/beacon/states.rs new file mode 100644 index 0000000000..6d06bcc77d --- /dev/null +++ b/beacon_node/http_api/src/beacon/states.rs @@ -0,0 +1,787 @@ +use crate::StateId; +use crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::ResponseFilter; +use crate::validator::pubkey_to_validator_index; +use crate::version::{ + ResponseIncludesVersion, add_consensus_version_header, + execution_optimistic_finalized_beacon_response, +}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::types::{ + ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, + ValidatorsRequestBody, +}; +use std::sync::Arc; +use types::{ + AttestationShufflingId, CommitteeCache, Error as BeaconStateError, EthSpec, RelativeEpoch, +}; +use warp::filters::BoxedFilter; +use warp::{Filter, Reply}; +use warp_utils::query::multi_key_query; + +type BeaconStatesPath = BoxedFilter<( + StateId, + TaskSpawner<::EthSpec>, + Arc>, +)>; + +// 
GET beacon/states/{state_id}/pending_consolidations +pub fn get_beacon_state_pending_consolidations( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_consolidations")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(consolidations) = state.pending_consolidations() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending consolidations not found".to_string(), + )); + }; + + Ok(( + consolidations.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/pending_partial_withdrawals +pub fn get_beacon_state_pending_partial_withdrawals( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_partial_withdrawals")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(withdrawals) = state.pending_partial_withdrawals() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending withdrawals not found".to_string(), + )); + }; + + Ok(( + withdrawals.clone(), + 
execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/pending_deposits +pub fn get_beacon_state_pending_deposits( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_deposits")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(deposits) = state.pending_deposits() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending deposits not found".to_string(), + )); + }; + + Ok(( + deposits.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/randao?epoch +pub fn get_beacon_state_randao( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("randao")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::RandaoQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (randao, execution_optimistic, finalized) = state_id + 
.map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); + let randao = *state.get_randao_mix(epoch).map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "epoch out of range: {e:?}" + )) + })?; + Ok((randao, execution_optimistic, finalized)) + }, + )?; + + Ok( + eth2::types::GenericResponse::from(eth2::types::RandaoMix { randao }) + .add_execution_optimistic_finalized(execution_optimistic, finalized), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/sync_committees?epoch +pub fn get_beacon_state_sync_committees( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("sync_committees")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::SyncCommitteesQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (sync_committee, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + Ok(( + state + .get_built_sync_committee(epoch, &chain.spec) + .cloned() + .map_err(|e| match e { + BeaconStateError::SyncCommitteeNotKnown { .. 
} => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} has no \ + sync committee for epoch {}", + current_epoch, epoch + )) + } + BeaconStateError::IncorrectStateVariant => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} is not activated for Altair", + current_epoch, + )) + } + e => warp_utils::reject::beacon_state_error(e), + })?, + execution_optimistic, + finalized, + )) + }, + )?; + + let validators = chain + .validator_indices(sync_committee.pubkeys.iter()) + .map_err(warp_utils::reject::unhandled_error)?; + + let validator_aggregates = validators + .chunks_exact(T::EthSpec::sync_subcommittee_size()) + .map(|indices| eth2::types::SyncSubcommittee { + indices: indices.to_vec(), + }) + .collect(); + + let response = eth2::types::SyncCommitteeByValidatorIndices { + validators, + validator_aggregates, + }; + + Ok(eth2::types::GenericResponse::from(response) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/committees?slot,index,epoch +pub fn get_beacon_state_committees( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("committees")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::CommitteesQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + + // Attempt to obtain the committee_cache from the beacon chain + let decision_slot = (epoch.saturating_sub(2u64)) + .end_slot(T::EthSpec::slots_per_epoch()); + // Find the decision block and skip to another method on any kind + // of failure + let shuffling_id = 
if let Ok(Some(shuffling_decision_block)) = + chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) + { + Some(AttestationShufflingId { + shuffling_epoch: epoch, + shuffling_decision_block, + }) + } else { + None + }; + + // Attempt to read from the chain cache if there exists a + // shuffling_id + let maybe_cached_shuffling = if let Some(shuffling_id) = + shuffling_id.as_ref() + { + chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + .and_then(|mut cache_write| cache_write.get(shuffling_id)) + .and_then(|cache_item| cache_item.wait().ok()) + } else { + None + }; + + let committee_cache = + if let Some(shuffling) = maybe_cached_shuffling { + shuffling + } else { + let possibly_built_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) + if state.committee_cache_is_initialized( + relative_epoch, + ) => + { + state.committee_cache(relative_epoch).cloned() + } + _ => CommitteeCache::initialized( + state, + epoch, + &chain.spec, + ), + } + .map_err( + |e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() + as u64; + let first_subsequent_restore_point_slot = + ((epoch.start_slot( + T::EthSpec::slots_per_epoch(), + ) / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request( + format!( + "epoch out of bounds, \ + try state at slot {}", + first_subsequent_restore_point_slot, + ), + ) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, \ + too far in future" + .into(), + ) + } + } + _ => warp_utils::reject::unhandled_error( + BeaconChainError::from(e), + ), + }, + )?; + + // Attempt to write to the beacon cache (only if the cache + // size is not the default value). 
+ if chain.config.shuffling_cache_size + != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE + && let Some(shuffling_id) = shuffling_id + && let Some(mut cache_write) = chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + { + cache_write.insert_committee_cache( + shuffling_id, + &possibly_built_cache, + ); + } + + possibly_built_cache + }; + + // Use either the supplied slot or all slots in the epoch. + let slots = + query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); + + // Use either the supplied committee index or all available indices. + let indices = + query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); + + let mut response = Vec::with_capacity(slots.len() * indices.len()); + + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. + if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request( + format!("{} is not in epoch {}", slot, epoch), + )); + } + + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; + + response.push(eth2::types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } + } + + Ok((response, execution_optimistic, finalized)) + }, + )?; + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validators/{validator_id} +pub fn get_beacon_state_validators_id( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + 
.and(warp::path("validators")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid validator ID".to_string(), + )) + })) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + validator_id: ValidatorId| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => pubkey_to_validator_index( + &chain, state, pubkey, + ) + .map_err(|e| { + warp_utils::reject::custom_not_found(format!( + "unable to access pubkey cache: {e:?}", + )) + })?, + ValidatorId::Index(index) => Some(*index as usize), + }; + + Ok(( + index_opt + .and_then(|index| { + let validator = state.validators().get(index)?; + let balance = *state.balances().get(index)?; + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; + + Some(eth2::types::ValidatorData { + index: index as u64, + balance, + status: + eth2::types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "unknown validator: {}", + validator_id + )) + })?, + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validators +pub fn post_beacon_state_validators( + 
beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorsRequestBody| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.ids, + &query.statuses, + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validators?id,status +pub fn get_beacon_state_validators( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(multi_key_query::()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query_res: Result| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. 
+ let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let query = query_res?; + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.id, + &query.status, + ) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validator_identities +pub fn post_beacon_state_validator_identities( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validator_identities")) + .and(warp::path::end()) + .and(warp_utils::json::json_no_body()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorIdentitiesRequestBody| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + crate::validators::get_beacon_state_validator_identities( + state_id, + chain, + Some(&query.ids), + ) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validator_balances +pub fn post_beacon_state_validator_balances( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validator_balances")) + .and(warp::path::end()) + .and(warp_utils::json::json_no_body()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorBalancesRequestBody| { + task_spawner.blocking_json_task(Priority::P1, move || { + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + Some(&query.ids), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validator_balances?id +pub fn get_beacon_state_validator_balances( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + 
.clone() + .and(warp::path("validator_balances")) + .and(warp::path::end()) + .and(multi_key_query::()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query_res: Result| { + task_spawner.blocking_json_task(Priority::P1, move || { + let query = query_res?; + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + query.id.as_deref(), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/finality_checkpoints +pub fn get_beacon_state_finality_checkpoints( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("finality_checkpoints")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + eth2::types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/fork +pub fn get_beacon_state_fork( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("fork")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (fork, execution_optimistic, finalized) = + state_id.fork_and_execution_optimistic_and_finalized(&chain)?; + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data: fork, + execution_optimistic: 
Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/root +pub fn get_beacon_state_root( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .and(warp::path("root")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (root, execution_optimistic, finalized) = state_id.root(&chain)?; + Ok(eth2::types::GenericResponse::from( + eth2::types::RootData::from(root), + )) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) + }) + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6389b34961..ccd0698161 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -8,6 +8,7 @@ mod aggregate_attestation; mod attestation_performance; mod attester_duties; +mod beacon; mod block_id; mod block_packing_efficiency; mod block_rewards; @@ -29,13 +30,16 @@ mod sync_committees; mod task_spawner; pub mod test_utils; mod ui; +mod utils; mod validator; mod validator_inclusion; mod validators; mod version; + use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::beacon_response; +use beacon::states; use beacon_chain::{ AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, attestation_verification::VerifiedAttestation, @@ -50,8 +54,7 @@ use eth2::StatusCode; use eth2::types::{ self as api_types, BroadcastValidation, ContextDeserialize, EndpointVersion, ForkChoice, ForkChoiceExtraData, ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, - StateId as CoreStateId, ValidatorBalancesRequestBody, ValidatorId, - ValidatorIdentitiesRequestBody, ValidatorStatus, 
ValidatorsRequestBody, + StateId as CoreStateId, ValidatorId, ValidatorStatus, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; @@ -90,14 +93,12 @@ use tokio_stream::{ }; use tracing::{debug, error, info, warn}; use types::{ - Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, - ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, - ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedValidatorRegistrationData, SignedVoluntaryExit, SingleAttestation, Slot, - SyncCommitteeMessage, SyncContributionData, + Attestation, AttestationData, AttesterSlashing, BeaconStateError, ChainSpec, Checkpoint, + ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, ProposerPreparationData, ProposerSlashing, + SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, + SingleAttestation, Slot, SyncCommitteeMessage, SyncContributionData, }; -use validator::pubkey_to_validator_index; use version::{ ResponseIncludesVersion, V1, V2, V3, add_consensus_version_header, add_ssz_content_type_header, execution_optimistic_finalized_beacon_response, inconsistent_fork_rejection, @@ -583,693 +584,65 @@ pub fn serve( )) })) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); + .and(chain_filter.clone()) + .boxed(); // GET beacon/states/{state_id}/root - let get_beacon_state_root = beacon_states_path - .clone() - .and(warp::path("root")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (root, execution_optimistic, finalized) = state_id.root(&chain)?; - 
Ok(api_types::GenericResponse::from(api_types::RootData::from( - root, - ))) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }, - ); + let get_beacon_state_root = states::get_beacon_state_root(beacon_states_path.clone()); // GET beacon/states/{state_id}/fork - let get_beacon_state_fork = beacon_states_path - .clone() - .and(warp::path("fork")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (fork, execution_optimistic, finalized) = - state_id.fork_and_execution_optimistic_and_finalized(&chain)?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data: fork, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) - }) - }, - ); + let get_beacon_state_fork = states::get_beacon_state_fork(beacon_states_path.clone()); // GET beacon/states/{state_id}/finality_checkpoints - let get_beacon_state_finality_checkpoints = beacon_states_path - .clone() - .and(warp::path("finality_checkpoints")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - Ok(( - api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }, - execution_optimistic, - finalized, - )) - }, - )?; - - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) - }) - }, - ); + let get_beacon_state_finality_checkpoints = + states::get_beacon_state_finality_checkpoints(beacon_states_path.clone()); // GET 
beacon/states/{state_id}/validator_balances?id - let get_beacon_state_validator_balances = beacon_states_path - .clone() - .and(warp::path("validator_balances")) - .and(warp::path::end()) - .and(multi_key_query::()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query_res: Result| { - task_spawner.blocking_json_task(Priority::P1, move || { - let query = query_res?; - crate::validators::get_beacon_state_validator_balances( - state_id, - chain, - query.id.as_deref(), - ) - }) - }, - ); + let get_beacon_state_validator_balances = + states::get_beacon_state_validator_balances(beacon_states_path.clone()); // POST beacon/states/{state_id}/validator_balances - let post_beacon_state_validator_balances = beacon_states_path - .clone() - .and(warp::path("validator_balances")) - .and(warp::path::end()) - .and(warp_utils::json::json_no_body()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorBalancesRequestBody| { - task_spawner.blocking_json_task(Priority::P1, move || { - crate::validators::get_beacon_state_validator_balances( - state_id, - chain, - Some(&query.ids), - ) - }) - }, - ); + let post_beacon_state_validator_balances = + states::post_beacon_state_validator_balances(beacon_states_path.clone()); // POST beacon/states/{state_id}/validator_identities - let post_beacon_state_validator_identities = beacon_states_path - .clone() - .and(warp::path("validator_identities")) - .and(warp::path::end()) - .and(warp_utils::json::json_no_body()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorIdentitiesRequestBody| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. 
- let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - crate::validators::get_beacon_state_validator_identities( - state_id, - chain, - Some(&query.ids), - ) - }) - }, - ); + let post_beacon_state_validator_identities = + states::post_beacon_state_validator_identities(beacon_states_path.clone()); // GET beacon/states/{state_id}/validators?id,status - let get_beacon_state_validators = beacon_states_path - .clone() - .and(warp::path("validators")) - .and(warp::path::end()) - .and(multi_key_query::()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query_res: Result| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. - let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let query = query_res?; - crate::validators::get_beacon_state_validators( - state_id, - chain, - &query.id, - &query.status, - ) - }) - }, - ); + let get_beacon_state_validators = + states::get_beacon_state_validators(beacon_states_path.clone()); // POST beacon/states/{state_id}/validators - let post_beacon_state_validators = beacon_states_path - .clone() - .and(warp::path("validators")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorsRequestBody| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. 
- let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - crate::validators::get_beacon_state_validators( - state_id, - chain, - &query.ids, - &query.statuses, - ) - }) - }, - ); + let post_beacon_state_validators = + states::post_beacon_state_validators(beacon_states_path.clone()); // GET beacon/states/{state_id}/validators/{validator_id} - let get_beacon_state_validators_id = beacon_states_path - .clone() - .and(warp::path("validators")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid validator ID".to_string(), - )) - })) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - validator_id: ValidatorId| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. - let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let index_opt = match &validator_id { - ValidatorId::PublicKey(pubkey) => pubkey_to_validator_index( - &chain, state, pubkey, - ) - .map_err(|e| { - warp_utils::reject::custom_not_found(format!( - "unable to access pubkey cache: {e:?}", - )) - })?, - ValidatorId::Index(index) => Some(*index as usize), - }; - - Ok(( - index_opt - .and_then(|index| { - let validator = state.validators().get(index)?; - let balance = *state.balances().get(index)?; - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; - - Some(api_types::ValidatorData { - index: index as u64, - balance, - status: api_types::ValidatorStatus::from_validator( - validator, - epoch, 
- far_future_epoch, - ), - validator: validator.clone(), - }) - }) - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "unknown validator: {}", - validator_id - )) - })?, - execution_optimistic, - finalized, - )) - }, - )?; - - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) - }) - }, - ); + let get_beacon_state_validators_id = + states::get_beacon_state_validators_id(beacon_states_path.clone()); // GET beacon/states/{state_id}/committees?slot,index,epoch - let get_beacon_state_committees = beacon_states_path - .clone() - .and(warp::path("committees")) - .and(warp::query::()) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::CommitteesQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - - // Attempt to obtain the committee_cache from the beacon chain - let decision_slot = (epoch.saturating_sub(2u64)) - .end_slot(T::EthSpec::slots_per_epoch()); - // Find the decision block and skip to another method on any kind - // of failure - let shuffling_id = if let Ok(Some(shuffling_decision_block)) = - chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) - { - Some(AttestationShufflingId { - shuffling_epoch: epoch, - shuffling_decision_block, - }) - } else { - None - }; - - // Attempt to read from the chain cache if there exists a - // shuffling_id - let maybe_cached_shuffling = if let Some(shuffling_id) = - shuffling_id.as_ref() - { - chain - .shuffling_cache - .try_write_for(std::time::Duration::from_secs(1)) - .and_then(|mut cache_write| cache_write.get(shuffling_id)) - .and_then(|cache_item| 
cache_item.wait().ok()) - } else { - None - }; - - let committee_cache = - if let Some(shuffling) = maybe_cached_shuffling { - shuffling - } else { - let possibly_built_cache = - match RelativeEpoch::from_epoch(current_epoch, epoch) { - Ok(relative_epoch) - if state.committee_cache_is_initialized( - relative_epoch, - ) => - { - state.committee_cache(relative_epoch).cloned() - } - _ => CommitteeCache::initialized( - state, - epoch, - &chain.spec, - ), - } - .map_err( - |e| match e { - BeaconStateError::EpochOutOfBounds => { - let max_sprp = - T::EthSpec::slots_per_historical_root() - as u64; - let first_subsequent_restore_point_slot = - ((epoch.start_slot( - T::EthSpec::slots_per_epoch(), - ) / max_sprp) - + 1) - * max_sprp; - if epoch < current_epoch { - warp_utils::reject::custom_bad_request( - format!( - "epoch out of bounds, \ - try state at slot {}", - first_subsequent_restore_point_slot, - ), - ) - } else { - warp_utils::reject::custom_bad_request( - "epoch out of bounds, \ - too far in future" - .into(), - ) - } - } - _ => warp_utils::reject::unhandled_error( - BeaconChainError::from(e), - ), - }, - )?; - - // Attempt to write to the beacon cache (only if the cache - // size is not the default value). - if chain.config.shuffling_cache_size - != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE - && let Some(shuffling_id) = shuffling_id - && let Some(mut cache_write) = chain - .shuffling_cache - .try_write_for(std::time::Duration::from_secs(1)) - { - cache_write.insert_committee_cache( - shuffling_id, - &possibly_built_cache, - ); - } - - possibly_built_cache - }; - - // Use either the supplied slot or all slots in the epoch. - let slots = - query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { - epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() - }); - - // Use either the supplied committee index or all available indices. 
- let indices = - query.index.map(|index| vec![index]).unwrap_or_else(|| { - (0..committee_cache.committees_per_slot()).collect() - }); - - let mut response = Vec::with_capacity(slots.len() * indices.len()); - - for slot in slots { - // It is not acceptable to query with a slot that is not within the - // specified epoch. - if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { - return Err(warp_utils::reject::custom_bad_request( - format!("{} is not in epoch {}", slot, epoch), - )); - } - - for &index in &indices { - let committee = committee_cache - .get_beacon_committee(slot, index) - .ok_or_else(|| { - warp_utils::reject::custom_bad_request(format!( - "committee index {} does not exist in epoch {}", - index, epoch - )) - })?; - - response.push(api_types::CommitteeData { - index, - slot, - validators: committee - .committee - .iter() - .map(|i| *i as u64) - .collect(), - }); - } - } - - Ok((response, execution_optimistic, finalized)) - }, - )?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) - }) - }, - ); + let get_beacon_state_committees = + states::get_beacon_state_committees(beacon_states_path.clone()); // GET beacon/states/{state_id}/sync_committees?epoch - let get_beacon_state_sync_committees = beacon_states_path - .clone() - .and(warp::path("sync_committees")) - .and(warp::query::()) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::SyncCommitteesQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (sync_committee, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - Ok(( - state - .get_built_sync_committee(epoch, &chain.spec) - .cloned() - .map_err(|e| match e { - 
BeaconStateError::SyncCommitteeNotKnown { .. } => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} has no \ - sync committee for epoch {}", - current_epoch, epoch - )) - } - BeaconStateError::IncorrectStateVariant => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} is not activated for Altair", - current_epoch, - )) - } - e => warp_utils::reject::beacon_state_error(e), - })?, - execution_optimistic, - finalized, - )) - }, - )?; - - let validators = chain - .validator_indices(sync_committee.pubkeys.iter()) - .map_err(warp_utils::reject::unhandled_error)?; - - let validator_aggregates = validators - .chunks_exact(T::EthSpec::sync_subcommittee_size()) - .map(|indices| api_types::SyncSubcommittee { - indices: indices.to_vec(), - }) - .collect(); - - let response = api_types::SyncCommitteeByValidatorIndices { - validators, - validator_aggregates, - }; - - Ok(api_types::GenericResponse::from(response) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) - }) - }, - ); + let get_beacon_state_sync_committees = + states::get_beacon_state_sync_committees(beacon_states_path.clone()); // GET beacon/states/{state_id}/randao?epoch - let get_beacon_state_randao = beacon_states_path - .clone() - .and(warp::path("randao")) - .and(warp::query::()) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::RandaoQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (randao, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); - let randao = *state.get_randao_mix(epoch).map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "epoch out of range: {e:?}" - )) - })?; - Ok((randao, execution_optimistic, finalized)) - }, - )?; - - Ok( - 
api_types::GenericResponse::from(api_types::RandaoMix { randao }) - .add_execution_optimistic_finalized(execution_optimistic, finalized), - ) - }) - }, - ); + let get_beacon_state_randao = states::get_beacon_state_randao(beacon_states_path.clone()); // GET beacon/states/{state_id}/pending_deposits - let get_beacon_state_pending_deposits = beacon_states_path - .clone() - .and(warp::path("pending_deposits")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(deposits) = state.pending_deposits() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending deposits not found".to_string(), - )); - }; - - Ok(( - deposits.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), - )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); + let get_beacon_state_pending_deposits = + states::get_beacon_state_pending_deposits(beacon_states_path.clone()); // GET beacon/states/{state_id}/pending_partial_withdrawals - let get_beacon_state_pending_partial_withdrawals = beacon_states_path - .clone() - .and(warp::path("pending_partial_withdrawals")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(withdrawals) = 
state.pending_partial_withdrawals() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending withdrawals not found".to_string(), - )); - }; - - Ok(( - withdrawals.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), - )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); + let get_beacon_state_pending_partial_withdrawals = + states::get_beacon_state_pending_partial_withdrawals(beacon_states_path.clone()); // GET beacon/states/{state_id}/pending_consolidations - let get_beacon_state_pending_consolidations = beacon_states_path - .clone() - .and(warp::path("pending_consolidations")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(consolidations) = state.pending_consolidations() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending consolidations not found".to_string(), - )); - }; - - Ok(( - consolidations.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), - )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); + let get_beacon_state_pending_consolidations = + states::get_beacon_state_pending_consolidations(beacon_states_path.clone()); // GET beacon/headers // diff --git a/beacon_node/http_api/src/utils.rs 
b/beacon_node/http_api/src/utils.rs new file mode 100644 index 0000000000..cf61fa481c --- /dev/null +++ b/beacon_node/http_api/src/utils.rs @@ -0,0 +1,3 @@ +use warp::filters::BoxedFilter; + +pub type ResponseFilter = BoxedFilter<(warp::reply::Response,)>; From 4e958a92d333c8f8e816db645508cb9b2082309d Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 4 Dec 2025 13:28:52 +0400 Subject: [PATCH 09/26] Refactor `consensus/types` (#7827) Organize and categorize `consensus/types` into modules based on their relation to key consensus structures/concepts. This is a precursor to a sensible public interface. While this refactor is very opinionated, I am open to suggestions on module names, or type groupings if my current ones are inappropriate. Co-Authored-By: Mac L --- Cargo.lock | 2 + Cargo.toml | 2 +- .../beacon_chain/src/attester_cache.rs | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 1 + .../overflow_lru_cache.rs | 4 +- .../src/sync_committee_verification.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 1 + .../src/test_utils/mock_builder.rs | 7 +- beacon_node/http_api/src/block_id.rs | 4 +- beacon_node/http_api/src/light_client.rs | 3 +- beacon_node/http_api/src/produce_block.rs | 1 + beacon_node/http_api/src/version.rs | 12 +- .../lighthouse_network/src/rpc/codec.rs | 10 +- .../lighthouse_network/src/rpc/methods.rs | 4 +- .../lighthouse_network/tests/rpc_tests.rs | 4 +- .../src/network_beacon_processor/tests.rs | 5 +- .../src/sync/block_sidecar_coupling.rs | 3 +- beacon_node/network/src/sync/tests/lookups.rs | 2 +- common/eth2/Cargo.toml | 1 + .../eth2}/src/beacon_response.rs | 8 +- common/eth2/src/lib.rs | 7 +- common/eth2/src/lighthouse_vc/types.rs | 1 - common/eth2/src/types.rs | 7 +- consensus/types/Cargo.toml | 3 + .../{ => attestation}/aggregate_and_proof.rs | 19 +- .../src/{ => attestation}/attestation.rs | 29 +- .../src/{ => attestation}/attestation_data.rs | 11 +- .../src/{ => attestation}/attestation_duty.rs | 3 +- .../src/{ => 
attestation}/beacon_committee.rs | 2 +- .../types/src/{ => attestation}/checkpoint.rs | 8 +- .../{ => attestation}/indexed_attestation.rs | 21 +- consensus/types/src/attestation/mod.rs | 39 ++ .../{ => attestation}/participation_flags.rs | 6 +- .../{ => attestation}/pending_attestation.rs | 7 +- .../src/{ => attestation}/selection_proof.rs | 12 +- .../src/{ => attestation}/shuffling_id.rs | 9 +- .../signed_aggregate_and_proof.rs | 21 +- .../types/src/{ => attestation}/subnet_id.rs | 14 +- .../types/src/{ => block}/beacon_block.rs | 40 +- .../src/{ => block}/beacon_block_body.rs | 95 +++- .../src/{ => block}/beacon_block_header.rs | 11 +- consensus/types/src/block/mod.rs | 26 + .../src/{ => block}/signed_beacon_block.rs | 38 +- .../{ => block}/signed_beacon_block_header.rs | 14 +- .../types/src/{ => builder}/builder_bid.rs | 22 +- consensus/types/src/builder/mod.rs | 6 + .../consolidation_request.rs | 10 +- consensus/types/src/consolidation/mod.rs | 5 + .../pending_consolidation.rs | 6 +- .../src/{ => core}/application_domain.rs | 0 consensus/types/src/{ => core}/chain_spec.rs | 23 +- .../types/src/{ => core}/config_and_preset.rs | 15 +- consensus/types/src/{ => core}/consts.rs | 2 +- consensus/types/src/{ => core}/enr_fork_id.rs | 5 +- consensus/types/src/{ => core}/eth_spec.rs | 20 +- consensus/types/src/{ => core}/graffiti.rs | 11 +- consensus/types/src/core/mod.rs | 44 ++ .../types/src/{ => core}/non_zero_usize.rs | 0 consensus/types/src/{ => core}/preset.rs | 4 +- .../types/src/{ => core}/relative_epoch.rs | 3 +- .../types/src/{ => core}/signing_data.rs | 7 +- consensus/types/src/{ => core}/slot_data.rs | 2 +- consensus/types/src/{ => core}/slot_epoch.rs | 10 +- .../types/src/{ => core}/slot_epoch_macros.rs | 0 consensus/types/src/{ => core}/sqlite.rs | 3 +- .../types/src/{ => data}/blob_sidecar.rs | 28 +- .../{ => data}/data_column_custody_group.rs | 10 +- .../src/{ => data}/data_column_sidecar.rs | 21 +- .../src/{ => data}/data_column_subnet_id.rs | 25 +- 
consensus/types/src/data/mod.rs | 23 + consensus/types/src/{ => deposit}/deposit.rs | 8 +- .../types/src/{ => deposit}/deposit_data.rs | 11 +- .../src/{ => deposit}/deposit_message.rs | 11 +- .../src/{ => deposit}/deposit_request.rs | 8 +- .../{ => deposit}/deposit_tree_snapshot.rs | 5 +- consensus/types/src/deposit/mod.rs | 13 + .../src/{ => deposit}/pending_deposit.rs | 10 +- .../bls_to_execution_change.rs | 11 +- .../types/src/{ => execution}/eth1_data.rs | 7 +- .../{ => execution}/execution_block_hash.rs | 9 +- .../{ => execution}/execution_block_header.rs | 7 +- .../src/{ => execution}/execution_payload.rs | 26 +- .../execution_payload_header.rs | 27 +- .../src/{ => execution}/execution_requests.rs | 13 +- consensus/types/src/execution/mod.rs | 36 ++ .../types/src/{ => execution}/payload.rs | 122 ++-- .../signed_bls_to_execution_change.rs | 6 +- consensus/types/src/exit/mod.rs | 5 + .../src/{ => exit}/signed_voluntary_exit.rs | 6 +- .../types/src/{ => exit}/voluntary_exit.rs | 15 +- consensus/types/src/{ => fork}/fork.rs | 5 +- .../types/src/{ => fork}/fork_context.rs | 11 +- consensus/types/src/{ => fork}/fork_data.rs | 9 +- consensus/types/src/fork/fork_macros.rs | 60 ++ consensus/types/src/{ => fork}/fork_name.rs | 71 +-- .../types/src/fork/fork_version_decode.rs | 6 + consensus/types/src/fork/mod.rs | 15 + consensus/types/src/kzg_ext/consts.rs | 3 + consensus/types/src/kzg_ext/mod.rs | 27 + consensus/types/src/lib.rs | 418 +++++--------- consensus/types/src/light_client/consts.rs | 21 + consensus/types/src/light_client/error.rs | 41 ++ .../light_client_bootstrap.rs | 63 ++- .../light_client_finality_update.rs | 54 +- .../{ => light_client}/light_client_header.rs | 55 +- .../light_client_optimistic_update.rs | 31 +- .../{ => light_client}/light_client_update.rs | 152 ++--- consensus/types/src/light_client/mod.rs | 37 ++ consensus/types/src/runtime_fixed_vector.rs | 90 --- consensus/types/src/runtime_var_list.rs | 387 ------------- .../src/{ => 
slashing}/attester_slashing.rs | 14 +- consensus/types/src/slashing/mod.rs | 8 + .../src/{ => slashing}/proposer_slashing.rs | 7 +- .../types/src/{ => state}/activation_queue.rs | 6 +- .../src/{beacon_state => state}/balance.rs | 0 .../types/src/{ => state}/beacon_state.rs | 530 ++++++++++-------- .../committee_cache.rs | 38 +- .../types/src/{ => state}/epoch_cache.rs | 9 +- .../src/{beacon_state => state}/exit_cache.rs | 10 +- .../types/src/{ => state}/historical_batch.rs | 12 +- .../src/{ => state}/historical_summary.rs | 11 +- .../types/src/{beacon_state => state}/iter.rs | 7 +- consensus/types/src/state/mod.rs | 35 ++ .../progressive_balances_cache.rs | 18 +- .../{beacon_state => state}/pubkey_cache.rs | 2 +- .../slashings_cache.rs | 3 +- .../contribution_and_proof.rs | 15 +- consensus/types/src/sync_committee/mod.rs | 25 + .../signed_contribution_and_proof.rs | 15 +- .../{ => sync_committee}/sync_aggregate.rs | 14 +- .../sync_aggregator_selection_data.rs | 10 +- .../{ => sync_committee}/sync_committee.rs | 10 +- .../sync_committee_contribution.rs | 14 +- .../sync_committee_message.rs | 14 +- .../sync_committee_subscription.rs | 3 +- .../src/{ => sync_committee}/sync_duty.rs | 9 +- .../sync_selection_proof.rs | 20 +- .../{ => sync_committee}/sync_subnet_id.rs | 15 +- .../generate_deterministic_keypairs.rs | 5 +- .../generate_random_block_and_blobs.rs | 18 +- consensus/types/src/test_utils/mod.rs | 24 +- .../src/test_utils/test_random/address.rs | 4 +- .../test_random/aggregate_signature.rs | 6 +- .../src/test_utils/test_random/bitfield.rs | 8 +- .../src/test_utils/test_random/hash256.rs | 4 +- .../test_utils/test_random/kzg_commitment.rs | 4 +- .../src/test_utils/test_random/kzg_proof.rs | 7 +- .../types/src/test_utils/test_random/mod.rs | 15 + .../src/test_utils/test_random/public_key.rs | 6 +- .../test_random/public_key_bytes.rs | 6 +- .../src/test_utils/test_random/secret_key.rs | 6 +- .../src/test_utils/test_random/signature.rs | 6 +- 
.../test_utils/test_random/signature_bytes.rs | 6 +- .../{ => test_random}/test_random.rs | 22 +- .../src/test_utils/test_random/uint256.rs | 4 +- consensus/types/src/validator/mod.rs | 9 + .../proposer_preparation_data.rs | 3 +- .../types/src/{ => validator}/validator.rs | 16 +- .../validator_registration_data.rs | 4 +- .../{ => validator}/validator_subscription.rs | 3 +- consensus/types/src/withdrawal/mod.rs | 9 + .../pending_partial_withdrawal.rs | 6 +- .../types/src/{ => withdrawal}/withdrawal.rs | 12 +- .../withdrawal_credentials.rs | 7 +- .../{ => withdrawal}/withdrawal_request.rs | 7 +- .../tests.rs => tests/committee_cache.rs} | 11 +- .../beacon_state/tests.rs => tests/state.rs} | 18 +- 167 files changed, 2117 insertions(+), 1751 deletions(-) rename {consensus/types => common/eth2}/src/beacon_response.rs (97%) rename consensus/types/src/{ => attestation}/aggregate_and_proof.rs (93%) rename consensus/types/src/{ => attestation}/attestation.rs (97%) rename consensus/types/src/{ => attestation}/attestation_data.rs (87%) rename consensus/types/src/{ => attestation}/attestation_duty.rs (92%) rename consensus/types/src/{ => attestation}/beacon_committee.rs (92%) rename consensus/types/src/{ => attestation}/checkpoint.rs (88%) rename consensus/types/src/{ => attestation}/indexed_attestation.rs (96%) create mode 100644 consensus/types/src/attestation/mod.rs rename consensus/types/src/{ => attestation}/participation_flags.rs (96%) rename consensus/types/src/{ => attestation}/pending_attestation.rs (84%) rename consensus/types/src/{ => attestation}/selection_proof.rs (95%) rename consensus/types/src/{ => attestation}/shuffling_id.rs (93%) rename consensus/types/src/{ => attestation}/signed_aggregate_and_proof.rs (90%) rename consensus/types/src/{ => attestation}/subnet_id.rs (97%) rename consensus/types/src/{ => block}/beacon_block.rs (97%) rename consensus/types/src/{ => block}/beacon_block_body.rs (93%) rename consensus/types/src/{ => block}/beacon_block_header.rs 
(90%) create mode 100644 consensus/types/src/block/mod.rs rename consensus/types/src/{ => block}/signed_beacon_block.rs (95%) rename consensus/types/src/{ => block}/signed_beacon_block_header.rs (84%) rename consensus/types/src/{ => builder}/builder_bid.rs (93%) create mode 100644 consensus/types/src/builder/mod.rs rename consensus/types/src/{ => consolidation}/consolidation_request.rs (84%) create mode 100644 consensus/types/src/consolidation/mod.rs rename consensus/types/src/{ => consolidation}/pending_consolidation.rs (86%) rename consensus/types/src/{ => core}/application_domain.rs (100%) rename consensus/types/src/{ => core}/chain_spec.rs (99%) rename consensus/types/src/{ => core}/config_and_preset.rs (95%) rename consensus/types/src/{ => core}/consts.rs (94%) rename consensus/types/src/{ => core}/enr_fork_id.rs (95%) rename consensus/types/src/{ => core}/eth_spec.rs (98%) rename consensus/types/src/{ => core}/graffiti.rs (98%) create mode 100644 consensus/types/src/core/mod.rs rename consensus/types/src/{ => core}/non_zero_usize.rs (100%) rename consensus/types/src/{ => core}/preset.rs (99%) rename consensus/types/src/{ => core}/relative_epoch.rs (99%) rename consensus/types/src/{ => core}/signing_data.rs (85%) rename consensus/types/src/{ => core}/slot_data.rs (92%) rename consensus/types/src/{ => core}/slot_epoch.rs (98%) rename consensus/types/src/{ => core}/slot_epoch_macros.rs (100%) rename consensus/types/src/{ => core}/sqlite.rs (96%) rename consensus/types/src/{ => data}/blob_sidecar.rs (94%) rename consensus/types/src/{ => data}/data_column_custody_group.rs (98%) rename consensus/types/src/{ => data}/data_column_sidecar.rs (94%) rename consensus/types/src/{ => data}/data_column_subnet_id.rs (80%) create mode 100644 consensus/types/src/data/mod.rs rename consensus/types/src/{ => deposit}/deposit.rs (78%) rename consensus/types/src/{ => deposit}/deposit_data.rs (86%) rename consensus/types/src/{ => deposit}/deposit_message.rs (81%) rename 
consensus/types/src/{ => deposit}/deposit_request.rs (86%) rename consensus/types/src/{ => deposit}/deposit_tree_snapshot.rs (95%) create mode 100644 consensus/types/src/deposit/mod.rs rename consensus/types/src/{ => deposit}/pending_deposit.rs (78%) rename consensus/types/src/{ => execution}/bls_to_execution_change.rs (83%) rename consensus/types/src/{ => execution}/eth1_data.rs (86%) rename consensus/types/src/{ => execution}/execution_block_hash.rs (96%) rename consensus/types/src/{ => execution}/execution_block_header.rs (98%) rename consensus/types/src/{ => execution}/execution_payload.rs (92%) rename consensus/types/src/{ => execution}/execution_payload_header.rs (96%) rename consensus/types/src/{ => execution}/execution_requests.rs (93%) create mode 100644 consensus/types/src/execution/mod.rs rename consensus/types/src/{ => execution}/payload.rs (91%) rename consensus/types/src/{ => execution}/signed_bls_to_execution_change.rs (78%) create mode 100644 consensus/types/src/exit/mod.rs rename consensus/types/src/{ => exit}/signed_voluntary_exit.rs (84%) rename consensus/types/src/{ => exit}/voluntary_exit.rs (90%) rename consensus/types/src/{ => fork}/fork.rs (96%) rename consensus/types/src/{ => fork}/fork_context.rs (98%) rename consensus/types/src/{ => fork}/fork_data.rs (88%) create mode 100644 consensus/types/src/fork/fork_macros.rs rename consensus/types/src/{ => fork}/fork_name.rs (84%) create mode 100644 consensus/types/src/fork/fork_version_decode.rs create mode 100644 consensus/types/src/fork/mod.rs create mode 100644 consensus/types/src/kzg_ext/consts.rs create mode 100644 consensus/types/src/kzg_ext/mod.rs create mode 100644 consensus/types/src/light_client/consts.rs create mode 100644 consensus/types/src/light_client/error.rs rename consensus/types/src/{ => light_client}/light_client_bootstrap.rs (88%) rename consensus/types/src/{ => light_client}/light_client_finality_update.rs (89%) rename consensus/types/src/{ => 
light_client}/light_client_header.rs (91%) rename consensus/types/src/{ => light_client}/light_client_optimistic_update.rs (94%) rename consensus/types/src/{ => light_client}/light_client_update.rs (87%) create mode 100644 consensus/types/src/light_client/mod.rs delete mode 100644 consensus/types/src/runtime_fixed_vector.rs delete mode 100644 consensus/types/src/runtime_var_list.rs rename consensus/types/src/{ => slashing}/attester_slashing.rs (96%) create mode 100644 consensus/types/src/slashing/mod.rs rename consensus/types/src/{ => slashing}/proposer_slashing.rs (86%) rename consensus/types/src/{ => state}/activation_queue.rs (95%) rename consensus/types/src/{beacon_state => state}/balance.rs (100%) rename consensus/types/src/{ => state}/beacon_state.rs (88%) rename consensus/types/src/{beacon_state => state}/committee_cache.rs (93%) rename consensus/types/src/{ => state}/epoch_cache.rs (97%) rename consensus/types/src/{beacon_state => state}/exit_cache.rs (97%) rename consensus/types/src/{ => state}/historical_batch.rs (81%) rename consensus/types/src/{ => state}/historical_summary.rs (87%) rename consensus/types/src/{beacon_state => state}/iter.rs (96%) create mode 100644 consensus/types/src/state/mod.rs rename consensus/types/src/{beacon_state => state}/progressive_balances_cache.rs (98%) rename consensus/types/src/{beacon_state => state}/pubkey_cache.rs (98%) rename consensus/types/src/{beacon_state => state}/slashings_cache.rs (96%) rename consensus/types/src/{ => sync_committee}/contribution_and_proof.rs (88%) create mode 100644 consensus/types/src/sync_committee/mod.rs rename consensus/types/src/{ => sync_committee}/signed_contribution_and_proof.rs (87%) rename consensus/types/src/{ => sync_committee}/sync_aggregate.rs (91%) rename consensus/types/src/{ => sync_committee}/sync_aggregator_selection_data.rs (82%) rename consensus/types/src/{ => sync_committee}/sync_committee.rs (95%) rename consensus/types/src/{ => 
sync_committee}/sync_committee_contribution.rs (93%) rename consensus/types/src/{ => sync_committee}/sync_committee_message.rs (88%) rename consensus/types/src/{ => sync_committee}/sync_committee_subscription.rs (96%) rename consensus/types/src/{ => sync_committee}/sync_duty.rs (96%) rename consensus/types/src/{ => sync_committee}/sync_selection_proof.rs (92%) rename consensus/types/src/{ => sync_committee}/sync_subnet_id.rs (92%) create mode 100644 consensus/types/src/test_utils/test_random/mod.rs rename consensus/types/src/test_utils/{ => test_random}/test_random.rs (90%) create mode 100644 consensus/types/src/validator/mod.rs rename consensus/types/src/{ => validator}/proposer_preparation_data.rs (95%) rename consensus/types/src/{ => validator}/validator.rs (97%) rename consensus/types/src/{ => validator}/validator_registration_data.rs (93%) rename consensus/types/src/{ => validator}/validator_subscription.rs (93%) create mode 100644 consensus/types/src/withdrawal/mod.rs rename consensus/types/src/{ => withdrawal}/pending_partial_withdrawal.rs (85%) rename consensus/types/src/{ => withdrawal}/withdrawal.rs (73%) rename consensus/types/src/{ => withdrawal}/withdrawal_credentials.rs (91%) rename consensus/types/src/{ => withdrawal}/withdrawal_request.rs (87%) rename consensus/types/{src/beacon_state/committee_cache/tests.rs => tests/committee_cache.rs} (97%) rename consensus/types/{src/beacon_state/tests.rs => tests/state.rs} (97%) diff --git a/Cargo.lock b/Cargo.lock index 7ddcad7239..481808b41f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3125,6 +3125,7 @@ dependencies = [ name = "eth2" version = "0.1.0" dependencies = [ + "context_deserialize", "educe", "eip_3076", "either", @@ -8547,6 +8548,7 @@ checksum = "1fc20a89bab2dabeee65e9c9eb96892dc222c23254b401e1319b85efd852fa31" dependencies = [ "arbitrary", "context_deserialize", + "educe", "ethereum_serde_utils", "ethereum_ssz", "itertools 0.14.0", diff --git a/Cargo.toml b/Cargo.toml index 6ccf429b6c..21cf551c48 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -227,7 +227,7 @@ slashing_protection = { path = "validator_client/slashing_protection" } slot_clock = { path = "common/slot_clock" } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" -ssz_types = { version = "0.14.0", features = ["context_deserialize"] } +ssz_types = { version = "0.14.0", features = ["context_deserialize", "runtime_types"] } state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" } strum = { version = "0.24", features = ["derive"] } diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index f879adfb49..beaa1e581c 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -17,7 +17,7 @@ use std::ops::Range; use types::{ BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, Hash256, RelativeEpoch, Slot, - attestation::Error as AttestationError, + attestation::AttestationError, beacon_state::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, }, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 00c5ab415c..adc400b1c1 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -74,6 +74,7 @@ use crate::{ AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead, metrics, }; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{ EventKind, SseBlobSidecar, SseBlock, SseDataColumnSidecar, SseExtendedPayloadAttributes, }; diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index c383c20f9f..e7c536c0d8 100644 --- 
a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -12,6 +12,7 @@ use crate::{BeaconChainTypes, BlockProcessStatus}; use lighthouse_tracing::SPAN_PENDING_COMPONENTS; use lru::LruCache; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use ssz_types::{RuntimeFixedVector, RuntimeVariableList}; use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; @@ -20,8 +21,7 @@ use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobIdentifier; use types::{ BlobSidecar, BlockImportSource, ChainSpec, ColumnIndex, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, + DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, }; #[derive(Clone)] diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index e72e9a6b21..88b040a6e5 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -49,7 +49,7 @@ use tree_hash_derive::TreeHash; use types::ChainSpec; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; -use types::sync_committee::Error as SyncCommitteeError; +use types::sync_committee::SyncCommitteeError; use types::{ AggregateSignature, BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 05d67e4504..759b7e9bd7 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -46,6 +46,7 @@ use rand::seq::SliceRandom; use rayon::prelude::*; use sensitive_url::SensitiveUrl; 
use slot_clock::{SlotClock, TestingSlotClock}; +use ssz_types::RuntimeVariableList; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 9add136919..589b29193c 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,6 +1,7 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; use bytes::Bytes; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::PublishBlockRequest; use eth2::types::{ BlobsBundle, BlockId, BroadcastValidation, EndpointVersion, EventKind, EventTopic, @@ -31,9 +32,9 @@ use types::builder_bid::{ }; use types::{ Address, BeaconState, ChainSpec, Epoch, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, - ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, - SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, + ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, Hash256, + PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, + SignedValidatorRegistrationData, Slot, Uint256, }; use types::{ExecutionBlockHash, SecretKey}; use warp::reply::{self, Reply}; diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index e088005f20..64f5451560 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -2,6 +2,7 @@ use crate::version::inconsistent_fork_rejection; use crate::{ExecutionOptimistic, state_id::checkpoint_slot_and_execution_optimistic}; use beacon_chain::kzg_utils::reconstruct_blobs; use 
beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::beacon_response::{ExecutionOptimisticFinalizedMetadata, UnversionedResponse}; use eth2::types::BlockId as CoreBlockId; use eth2::types::DataColumnIndicesQuery; use eth2::types::{BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery}; @@ -10,8 +11,7 @@ use std::str::FromStr; use std::sync::Arc; use types::{ BlobSidecarList, DataColumnSidecarList, EthSpec, FixedBytesExtended, ForkName, Hash256, - SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, UnversionedResponse, - beacon_response::ExecutionOptimisticFinalizedMetadata, + SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, }; use warp::Rejection; diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index ca9b86990c..86eef03218 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -3,13 +3,14 @@ use crate::version::{ beacon_response, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::beacon_response::BeaconResponse; use eth2::types::{ self as api_types, LightClientUpdate, LightClientUpdateResponseChunk, LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, }; use ssz::Encode; use std::sync::Arc; -use types::{BeaconResponse, EthSpec, ForkName, Hash256, LightClientBootstrap}; +use types::{EthSpec, ForkName, Hash256, LightClientBootstrap}; use warp::{ Rejection, hyper::{Body, Response}, diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 367e09969b..472ec0b65e 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -9,6 +9,7 @@ use crate::{ use beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, }; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{self as api_types, ProduceBlockV3Metadata, SkipRandaoVerification}; use 
lighthouse_tracing::{SPAN_PRODUCE_BLOCK_V2, SPAN_PRODUCE_BLOCK_V3}; use ssz::Encode; diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 871a10e7d4..371064c886 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,16 +1,14 @@ use crate::api_types::EndpointVersion; +use eth2::beacon_response::{ + BeaconResponse, ExecutionOptimisticFinalizedBeaconResponse, + ExecutionOptimisticFinalizedMetadata, ForkVersionedResponse, UnversionedResponse, +}; use eth2::{ CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, SSZ_CONTENT_TYPE_HEADER, }; use serde::Serialize; -use types::{ - BeaconResponse, ForkName, ForkVersionedResponse, InconsistentFork, Uint256, - UnversionedResponse, - beacon_response::{ - ExecutionOptimisticFinalizedBeaconResponse, ExecutionOptimisticFinalizedMetadata, - }, -}; +use types::{ForkName, InconsistentFork, Uint256}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 77d2a34e16..5b3574d48a 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -8,7 +8,7 @@ use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; use snap::write::FrameEncoder; use ssz::{Decode, Encode}; -use ssz_types::VariableList; +use ssz_types::{RuntimeVariableList, VariableList}; use std::io::Cursor; use std::io::ErrorKind; use std::io::{Read, Write}; @@ -18,10 +18,10 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, - 
SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockFulu, SignedBeaconBlockGloas, + LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, + SignedBeaconBlockGloas, }; use unsigned_varint::codec::Uvi; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9aab079952..a9b4aa2fba 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -5,7 +5,7 @@ use regex::bytes::Regex; use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::{VariableList, typenum::U256}; +use ssz_types::{RuntimeVariableList, VariableList, typenum::U256}; use std::fmt::Display; use std::marker::PhantomData; use std::ops::Deref; @@ -17,7 +17,7 @@ use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, + LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, Slot, blob_sidecar::BlobSidecar, }; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 60e3e3da97..8613edf5f5 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -7,7 +7,7 @@ use lighthouse_network::rpc::{RequestType, methods::*}; use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{NetworkEvent, ReportSource, Response}; use 
ssz::Encode; -use ssz_types::VariableList; +use ssz_types::{RuntimeVariableList, VariableList}; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::Runtime; @@ -17,7 +17,7 @@ use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, - RuntimeVariableList, Signature, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + Signature, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; type E = MinimalEthSpec; diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index d83059ad27..841a8679cf 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -33,6 +33,7 @@ use lighthouse_network::{ }; use matches::assert_matches; use slot_clock::SlotClock; +use ssz_types::RuntimeVariableList; use std::collections::HashSet; use std::iter::Iterator; use std::sync::Arc; @@ -42,8 +43,8 @@ use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; use types::{ AttesterSlashing, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecarList, DataColumnSubnetId, Epoch, EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, - RuntimeVariableList, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, - SingleAttestation, Slot, SubnetId, + SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, SingleAttestation, Slot, + SubnetId, }; type E = MainnetEthSpec; diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 01929cbf90..ed9a11a03d 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -7,11 +7,12 @@ use 
lighthouse_network::{ BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, }, }; +use ssz_types::RuntimeVariableList; use std::{collections::HashMap, sync::Arc}; use tracing::{Span, debug}; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - Hash256, RuntimeVariableList, SignedBeaconBlock, + Hash256, SignedBeaconBlock, }; use crate::sync::network_context::MAX_COLUMN_RETRIES; diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 63bcd176f5..ef52f89678 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1929,8 +1929,8 @@ mod deneb_only { block_verification_types::{AsBlock, RpcBlock}, data_availability_checker::AvailabilityCheckError, }; + use ssz_types::RuntimeVariableList; use std::collections::VecDeque; - use types::RuntimeVariableList; struct DenebTester { rig: TestRig, diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 7a75bdc80a..f7e6cde210 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -9,6 +9,7 @@ default = ["lighthouse"] lighthouse = [] [dependencies] +context_deserialize = { workspace = true } educe = { workspace = true } eip_3076 = { workspace = true } either = { workspace = true } diff --git a/consensus/types/src/beacon_response.rs b/common/eth2/src/beacon_response.rs similarity index 97% rename from consensus/types/src/beacon_response.rs rename to common/eth2/src/beacon_response.rs index fc59fc9432..d58734997c 100644 --- a/consensus/types/src/beacon_response.rs +++ b/common/eth2/src/beacon_response.rs @@ -1,12 +1,8 @@ -use crate::{ContextDeserialize, ForkName}; +use context_deserialize::ContextDeserialize; use serde::de::DeserializeOwned; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::value::Value; - -pub trait ForkVersionDecode: Sized { - /// SSZ decode with explicit fork variant. 
- fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; -} +use types::ForkName; /// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than /// version. If you *do* care about adding other fields you can mix in any type that implements diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index bcd979daca..4e832a11df 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -7,6 +7,7 @@ //! Eventually it would be ideal to publish this crate on crates.io, however we have some local //! dependencies preventing this presently. +pub mod beacon_response; pub mod error; #[cfg(feature = "lighthouse")] pub mod lighthouse; @@ -15,10 +16,14 @@ pub mod lighthouse_vc; pub mod mixin; pub mod types; +pub use beacon_response::{ + BeaconResponse, EmptyMetadata, ExecutionOptimisticFinalizedBeaconResponse, + ExecutionOptimisticFinalizedMetadata, ForkVersionedResponse, UnversionedResponse, +}; + pub use self::error::{Error, ok_or_error, success_or_error}; use self::mixin::{RequestAccept, ResponseOptional}; use self::types::*; -use ::types::beacon_response::ExecutionOptimisticFinalizedBeaconResponse; use educe::Educe; use futures::Stream; use futures_util::StreamExt; diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 4407e30e43..8e1d90f8f9 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -2,7 +2,6 @@ pub use crate::lighthouse::Health; pub use crate::lighthouse_vc::std_types::*; pub use crate::types::{GenericResponse, VersionData}; use eth2_keystore::Keystore; -use graffiti::GraffitiString; use serde::{Deserialize, Serialize}; use std::path::PathBuf; pub use types::*; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 6aad00301a..cbdaa004d0 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -19,10 +19,15 @@ use std::str::FromStr; use std::sync::Arc; use 
std::time::Duration; use test_random_derive::TestRandom; -use types::beacon_block_body::KzgCommitments; use types::test_utils::TestRandom; pub use types::*; +// TODO(mac): Temporary module and re-export hack to expose old `consensus/types` via `eth2/types`. +pub use crate::beacon_response::*; +pub mod beacon_response { + pub use crate::beacon_response::*; +} + #[cfg(feature = "lighthouse")] use crate::lighthouse::BlockReward; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 1f527c0de8..559a181948 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -73,6 +73,9 @@ paste = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } +[lints.clippy] +module_inception = "allow" + [[bench]] name = "benches" harness = false diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/attestation/aggregate_and_proof.rs similarity index 93% rename from consensus/types/src/aggregate_and_proof.rs rename to consensus/types/src/attestation/aggregate_and_proof.rs index e76ba48bf4..4c6e775e56 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/attestation/aggregate_and_proof.rs @@ -1,17 +1,20 @@ -use super::{AttestationBase, AttestationElectra, AttestationRef}; -use super::{ - ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, SecretKey, SelectionProof, - Signature, SignedRoot, -}; -use crate::Attestation; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{PublicKey, SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + Attestation, AttestationBase, AttestationElectra, AttestationRef, SelectionProof, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + 
test_utils::TestRandom, +}; + #[superstruct( variants(Base, Electra), variant_attributes( diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation/attestation.rs similarity index 97% rename from consensus/types/src/attestation.rs rename to consensus/types/src/attestation/attestation.rs index 1430582658..693b5889f5 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation/attestation.rs @@ -1,23 +1,28 @@ -use super::{ - AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, - Signature, SignedRoot, +use std::{ + collections::HashSet, + hash::{Hash, Hasher}, }; -use crate::slot_data::SlotData; -use crate::{ - Checkpoint, ContextDeserialize, ForkName, IndexedAttestationBase, IndexedAttestationElectra, -}; -use crate::{Hash256, Slot, test_utils::TestRandom}; -use crate::{IndexedAttestation, context_deserialize}; + +use bls::{AggregateSignature, SecretKey, Signature}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::BitVector; -use std::collections::HashSet; -use std::hash::{Hash, Hasher}; +use ssz_types::{BitList, BitVector}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + AttestationData, Checkpoint, IndexedAttestation, IndexedAttestationBase, + IndexedAttestationElectra, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation/attestation_data.rs similarity index 87% rename from consensus/types/src/attestation_data.rs rename to consensus/types/src/attestation/attestation_data.rs index a4643e5474..f3fceb9b70 100644 
--- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation/attestation_data.rs @@ -1,11 +1,16 @@ -use crate::slot_data::SlotData; -use crate::test_utils::TestRandom; -use crate::{Checkpoint, ForkName, Hash256, SignedRoot, Slot}; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; + +use crate::{ + attestation::Checkpoint, + core::{Hash256, SignedRoot, Slot, SlotData}, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data upon which an attestation is based. /// /// Spec v0.12.1 diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation/attestation_duty.rs similarity index 92% rename from consensus/types/src/attestation_duty.rs rename to consensus/types/src/attestation/attestation_duty.rs index 70c7c5c170..fe3da79a2b 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation/attestation_duty.rs @@ -1,6 +1,7 @@ -use crate::*; use serde::{Deserialize, Serialize}; +use crate::{attestation::CommitteeIndex, core::Slot}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/attestation/beacon_committee.rs similarity index 92% rename from consensus/types/src/beacon_committee.rs rename to consensus/types/src/attestation/beacon_committee.rs index 04fe763a11..2dba30bad3 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/attestation/beacon_committee.rs @@ -1,4 +1,4 @@ -use crate::*; +use crate::{attestation::CommitteeIndex, core::Slot}; #[derive(Default, Clone, Debug, PartialEq)] pub struct BeaconCommittee<'a> { diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/attestation/checkpoint.rs similarity index 88% rename from 
consensus/types/src/checkpoint.rs rename to consensus/types/src/attestation/checkpoint.rs index 545af59985..f5a95f0ad9 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/attestation/checkpoint.rs @@ -1,11 +1,15 @@ -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName, Hash256}; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Epoch, Hash256}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Casper FFG checkpoint, used in attestations. /// /// Spec v0.12.1 diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/attestation/indexed_attestation.rs similarity index 96% rename from consensus/types/src/indexed_attestation.rs rename to consensus/types/src/attestation/indexed_attestation.rs index dc32884217..272b015d90 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/attestation/indexed_attestation.rs @@ -1,17 +1,21 @@ -use crate::context_deserialize; -use crate::{ - AggregateSignature, AttestationData, EthSpec, ForkName, VariableList, test_utils::TestRandom, +use std::{ + hash::{Hash, Hasher}, + slice::Iter, }; -use core::slice::Iter; + +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use educe::Educe; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::hash::{Hash, Hasher}; +use ssz_types::VariableList; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{attestation::AttestationData, core::EthSpec, fork::ForkName, test_utils::TestRandom}; + /// Details an attestation that can be slashable. /// /// To be included in an `AttesterSlashing`. 
@@ -208,9 +212,10 @@ impl Hash for IndexedAttestation { #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; - use crate::slot_epoch::Epoch; - use crate::test_utils::{SeedableRng, XorShiftRng}; + use crate::{ + core::{Epoch, MainnetEthSpec}, + test_utils::{SeedableRng, XorShiftRng}, + }; #[test] pub fn test_is_double_vote_true() { diff --git a/consensus/types/src/attestation/mod.rs b/consensus/types/src/attestation/mod.rs new file mode 100644 index 0000000000..2d2bf74e49 --- /dev/null +++ b/consensus/types/src/attestation/mod.rs @@ -0,0 +1,39 @@ +mod aggregate_and_proof; +mod attestation; +mod attestation_data; +mod attestation_duty; +mod beacon_committee; +mod checkpoint; +mod indexed_attestation; +mod participation_flags; +mod pending_attestation; +mod selection_proof; +mod shuffling_id; +mod signed_aggregate_and_proof; +mod subnet_id; + +pub use aggregate_and_proof::{ + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, +}; +pub use attestation::{ + Attestation, AttestationBase, AttestationElectra, AttestationOnDisk, AttestationRef, + AttestationRefMut, AttestationRefOnDisk, Error as AttestationError, SingleAttestation, +}; +pub use attestation_data::AttestationData; +pub use attestation_duty::AttestationDuty; +pub use beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; +pub use checkpoint::Checkpoint; +pub use indexed_attestation::{ + IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, +}; +pub use participation_flags::ParticipationFlags; +pub use pending_attestation::PendingAttestation; +pub use selection_proof::SelectionProof; +pub use shuffling_id::AttestationShufflingId; +pub use signed_aggregate_and_proof::{ + SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, + SignedAggregateAndProofRefMut, +}; +pub use subnet_id::SubnetId; + +pub type CommitteeIndex = u64; diff --git a/consensus/types/src/participation_flags.rs 
b/consensus/types/src/attestation/participation_flags.rs similarity index 96% rename from consensus/types/src/participation_flags.rs rename to consensus/types/src/attestation/participation_flags.rs index e59efc5170..66831abfac 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/attestation/participation_flags.rs @@ -1,10 +1,14 @@ -use crate::{Hash256, consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use test_random_derive::TestRandom; use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; +use crate::{ + core::{Hash256, consts::altair::NUM_FLAG_INDICES}, + test_utils::TestRandom, +}; + #[derive(Debug, Default, Clone, Copy, PartialEq, Deserialize, Serialize, TestRandom)] #[serde(transparent)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/attestation/pending_attestation.rs similarity index 84% rename from consensus/types/src/pending_attestation.rs rename to consensus/types/src/attestation/pending_attestation.rs index 4a00a0495a..84353ac118 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/attestation/pending_attestation.rs @@ -1,11 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{AttestationData, BitList, EthSpec, ForkName}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{attestation::AttestationData, core::EthSpec, fork::ForkName, test_utils::TestRandom}; + /// An attestation that has been included in the state but not yet fully processed. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/attestation/selection_proof.rs similarity index 95% rename from consensus/types/src/selection_proof.rs rename to consensus/types/src/attestation/selection_proof.rs index aa8c0c5658..b4c48d0078 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/attestation/selection_proof.rs @@ -1,11 +1,15 @@ -use crate::{ - ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, -}; +use std::cmp; + +use bls::{PublicKey, SecretKey, Signature}; use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; -use std::cmp; + +use crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot}, + fork::Fork, +}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/attestation/shuffling_id.rs similarity index 93% rename from consensus/types/src/shuffling_id.rs rename to consensus/types/src/attestation/shuffling_id.rs index df16f605ed..25217288f6 100644 --- a/consensus/types/src/shuffling_id.rs +++ b/consensus/types/src/attestation/shuffling_id.rs @@ -1,7 +1,12 @@ -use crate::*; +use std::hash::Hash; + use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::hash::Hash; + +use crate::{ + core::{Epoch, EthSpec, Hash256, RelativeEpoch}, + state::{BeaconState, BeaconStateError}, +}; /// Can be used to key (ID) the shuffling in some chain, in some epoch. 
/// diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/attestation/signed_aggregate_and_proof.rs similarity index 90% rename from consensus/types/src/signed_aggregate_and_proof.rs rename to consensus/types/src/attestation/signed_aggregate_and_proof.rs index 758ac2734b..48c3f4c567 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/attestation/signed_aggregate_and_proof.rs @@ -1,18 +1,21 @@ -use super::{ - AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, -}; -use super::{ - Attestation, AttestationRef, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, - SelectionProof, Signature, SignedRoot, -}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, + Attestation, AttestationRef, SelectionProof, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A Validators signed aggregate proof to publish on the `beacon_aggregate_and_proof` /// gossipsub topic. /// diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/attestation/subnet_id.rs similarity index 97% rename from consensus/types/src/subnet_id.rs rename to consensus/types/src/attestation/subnet_id.rs index 6ec8ca4a27..9585d077b5 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/attestation/subnet_id.rs @@ -1,11 +1,17 @@ //! Identifies each shard by an integer identifier. 
-use crate::SingleAttestation; -use crate::{AttestationRef, ChainSpec, CommitteeIndex, EthSpec, Slot}; +use std::{ + ops::{Deref, DerefMut}, + sync::LazyLock, +}; + use alloy_primitives::{U256, bytes::Buf}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; -use std::ops::{Deref, DerefMut}; -use std::sync::LazyLock; + +use crate::{ + attestation::{AttestationRef, CommitteeIndex, SingleAttestation}, + core::{ChainSpec, EthSpec, Slot}, +}; const MAX_SUBNET_ID: usize = 64; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/block/beacon_block.rs similarity index 97% rename from consensus/types/src/beacon_block.rs rename to consensus/types/src/block/beacon_block.rs index 060709d655..c2f361eb4b 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/block/beacon_block.rs @@ -1,18 +1,39 @@ -use crate::attestation::AttestationBase; -use crate::test_utils::TestRandom; -use crate::*; +use std::{fmt, marker::PhantomData}; + +use bls::{AggregateSignature, PublicKeyBytes, SecretKey, Signature, SignatureBytes}; +use context_deserialize::ContextDeserialize; use educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; -use std::fmt; -use std::marker::PhantomData; +use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum::Unsigned}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use self::indexed_attestation::IndexedAttestationBase; +use crate::{ + attestation::{AttestationBase, AttestationData, IndexedAttestationBase}, + block::{ + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, + BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, BeaconBlockHeader, + SignedBeaconBlock, 
SignedBeaconBlockHeader, + }, + core::{ChainSpec, Domain, Epoch, EthSpec, Graffiti, Hash256, SignedRoot, Slot}, + deposit::{Deposit, DepositData}, + execution::{ + AbstractExecPayload, BlindedPayload, Eth1Data, ExecutionPayload, ExecutionRequests, + FullPayload, + }, + exit::{SignedVoluntaryExit, VoluntaryExit}, + fork::{Fork, ForkName, InconsistentFork, map_fork_name}, + slashing::{AttesterSlashingBase, ProposerSlashing}, + state::BeaconStateError, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; /// A block of the `BeaconChain`. #[superstruct( @@ -283,7 +304,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, E, Payl /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { self.body().execution_payload() } } @@ -865,7 +886,10 @@ impl fmt::Display for BlockImportSource { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, XorShiftRng, test_ssz_tree_hash_pair_with}; + use crate::{ + core::MainnetEthSpec, + test_utils::{SeedableRng, XorShiftRng, test_ssz_tree_hash_pair_with}, + }; use ssz::Encode; type BeaconBlock = super::BeaconBlock; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/block/beacon_block_body.rs similarity index 93% rename from consensus/types/src/beacon_block_body.rs rename to consensus/types/src/block/beacon_block_body.rs index ced8fea4a9..f85dd8909e 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/block/beacon_block_body.rs @@ -1,18 +1,42 @@ -use crate::test_utils::TestRandom; -use crate::*; +use std::marker::PhantomData; + +use bls::Signature; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use merkle_proof::{MerkleTree, MerkleTreeError}; use metastruct::metastruct; use serde::{Deserialize, Deserializer, Serialize}; 
use ssz_derive::{Decode, Encode}; -use std::marker::PhantomData; +use ssz_types::{FixedVector, VariableList}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::{BYTES_PER_CHUNK, TreeHash}; use tree_hash_derive::TreeHash; -pub type KzgCommitments = - VariableList::MaxBlobCommitmentsPerBlock>; +use crate::{ + attestation::{AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut}, + core::{EthSpec, Graffiti, Hash256}, + deposit::Deposit, + execution::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadGloas, + Eth1Data, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadGloas, ExecutionRequests, FullPayload, FullPayloadBellatrix, + FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, + FullPayloadGloas, SignedBlsToExecutionChange, + }, + exit::SignedVoluntaryExit, + fork::{ForkName, map_fork_name}, + kzg_ext::KzgCommitments, + light_client::consts::{EXECUTION_PAYLOAD_INDEX, EXECUTION_PAYLOAD_PROOF_LEN}, + slashing::{ + AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingRef, ProposerSlashing, + }, + state::BeaconStateError, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; /// The number of leaves (including padding) on the `BeaconBlockBody` Merkle tree. 
/// @@ -63,8 +87,14 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; Fulu(metastruct(mappings(beacon_block_body_fulu_fields(groups(fields))))), Gloas(metastruct(mappings(beacon_block_body_gloas_fields(groups(fields))))), ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", @@ -147,7 +177,7 @@ pub struct BeaconBlockBody = FullPay } impl> BeaconBlockBody { - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { self.to_ref().execution_payload() } @@ -158,9 +188,9 @@ impl> BeaconBlockBody { } impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Payload> { - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { match self { - Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant), + Self::Base(_) | Self::Altair(_) => Err(BeaconStateError::IncorrectStateVariant), Self::Bellatrix(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), @@ -216,7 +246,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, pub fn kzg_commitment_merkle_proof( &self, index: usize, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let kzg_commitments_proof = self.kzg_commitments_merkle_proof()?; let proof = self.complete_kzg_commitment_merkle_proof(index, &kzg_commitments_proof)?; Ok(proof) @@ -228,10 +258,10 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, &self, index: usize, 
kzg_commitments_proof: &[Hash256], - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { Self::Base(_) | Self::Altair(_) | Self::Bellatrix(_) | Self::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } Self::Deneb(_) | Self::Electra(_) | Self::Fulu(_) | Self::Gloas(_) => { // We compute the branches by generating 2 merkle trees: @@ -253,7 +283,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, let tree = MerkleTree::create(&blob_leaves, depth as usize); let (_, mut proof) = tree .generate_proof(index, depth as usize) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; // Add the branch corresponding to the length mix-in. let length = blob_leaves.len(); @@ -261,7 +291,9 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, let mut length_bytes = [0; BYTES_PER_CHUNK]; length_bytes .get_mut(0..usize_len) - .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? + .ok_or(BeaconStateError::MerkleTreeError( + MerkleTreeError::PleaseNotifyTheDevs, + ))? .copy_from_slice(&length.to_le_bytes()); let length_root = Hash256::from_slice(length_bytes.as_slice()); proof.push(length_root); @@ -279,32 +311,41 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, /// Produces the proof of inclusion for `self.blob_kzg_commitments`. pub fn kzg_commitments_merkle_proof( &self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let body_leaves = self.body_merkle_leaves(); let beacon_block_body_depth = body_leaves.len().next_power_of_two().ilog2() as usize; let tree = MerkleTree::create(&body_leaves, beacon_block_body_depth); let (_, proof) = tree .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; Ok(FixedVector::new(proof)?) 
} - pub fn block_body_merkle_proof(&self, generalized_index: usize) -> Result, Error> { + pub fn block_body_merkle_proof( + &self, + generalized_index: usize, + ) -> Result, BeaconStateError> { let field_index = match generalized_index { - light_client_update::EXECUTION_PAYLOAD_INDEX => { + EXECUTION_PAYLOAD_INDEX => { // Execution payload is a top-level field, subtract off the generalized indices // for the internal nodes. Result should be 9, the field offset of the execution // payload in the `BeaconBlockBody`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#beaconblockbody generalized_index .checked_sub(NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES) - .ok_or(Error::GeneralizedIndexNotSupported(generalized_index))? + .ok_or(BeaconStateError::GeneralizedIndexNotSupported( + generalized_index, + ))? + } + _ => { + return Err(BeaconStateError::GeneralizedIndexNotSupported( + generalized_index, + )); } - _ => return Err(Error::GeneralizedIndexNotSupported(generalized_index)), }; let leaves = self.body_merkle_leaves(); - let depth = light_client_update::EXECUTION_PAYLOAD_PROOF_LEN; + let depth = EXECUTION_PAYLOAD_PROOF_LEN; let tree = merkle_proof::MerkleTree::create(&leaves, depth); let (_, proof) = tree.generate_proof(field_index, depth)?; @@ -1100,22 +1141,16 @@ impl<'de, E: EthSpec, Payload: AbstractExecPayload> ContextDeserialize<'de, F } } -/// Util method helpful for logging. 
-pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { - let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); - let commitments_joined = commitment_strings.join(", "); - let surrounded_commitments = format!("[{}]", commitments_joined); - surrounded_commitments -} - #[cfg(test)] mod tests { mod base { use super::super::*; + use crate::core::MainnetEthSpec; ssz_and_tree_hash_tests!(BeaconBlockBodyBase); } mod altair { use super::super::*; + use crate::core::MainnetEthSpec; ssz_and_tree_hash_tests!(BeaconBlockBodyAltair); } } diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/block/beacon_block_header.rs similarity index 90% rename from consensus/types/src/beacon_block_header.rs rename to consensus/types/src/block/beacon_block_header.rs index e14a9fc8af..06e1023d91 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/block/beacon_block_header.rs @@ -1,6 +1,4 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use bls::SecretKey; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -8,6 +6,13 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBeaconBlockHeader, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A header of a `BeaconBlock`. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/block/mod.rs b/consensus/types/src/block/mod.rs new file mode 100644 index 0000000000..81c8ffbd63 --- /dev/null +++ b/consensus/types/src/block/mod.rs @@ -0,0 +1,26 @@ +mod beacon_block; +mod beacon_block_body; +mod beacon_block_header; +mod signed_beacon_block; +mod signed_beacon_block_header; + +pub use beacon_block::{ + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, + BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockRef, + BeaconBlockRefMut, BlindedBeaconBlock, BlockImportSource, EmptyBlock, +}; +pub use beacon_block_body::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, + BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, + BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, + NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES, +}; +pub use beacon_block_header::BeaconBlockHeader; + +pub use signed_beacon_block::{ + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockFulu, SignedBeaconBlockGloas, SignedBeaconBlockHash, SignedBlindedBeaconBlock, + ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, +}; +pub use signed_beacon_block_header::SignedBeaconBlockHeader; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs similarity index 95% rename from consensus/types/src/signed_beacon_block.rs rename to consensus/types/src/block/signed_beacon_block.rs index 7b04cc5771..e8927ee765 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -1,17 +1,42 @@ -use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, format_kzg_commitments}; -use 
crate::test_utils::TestRandom; -use crate::*; +use std::fmt; + +use bls::{PublicKey, Signature}; +use context_deserialize::ContextDeserialize; use educe::Educe; use merkle_proof::MerkleTree; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use std::fmt; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, + BeaconBlockBellatrix, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, + BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, BeaconBlockBodyGloas, + BeaconBlockCapella, BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, + BeaconBlockGloas, BeaconBlockHeader, BeaconBlockRef, BeaconBlockRefMut, + SignedBeaconBlockHeader, + }, + core::{ChainSpec, Domain, Epoch, EthSpec, Hash256, SignedRoot, SigningData, Slot}, + execution::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadGloas, + ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadGloas, FullPayload, FullPayloadBellatrix, FullPayloadCapella, + FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, FullPayloadGloas, + }, + fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, + kzg_ext::format_kzg_commitments, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Eq, Hash, Clone, Copy)] pub struct SignedBeaconBlockHash(Hash256); @@ -272,7 +297,7 @@ impl> SignedBeaconBlock SignedBeaconBlockHeader, FixedVector, ), - Error, + BeaconStateError, > { // Create the block body merkle tree let body_leaves = 
self.message().body().body_merkle_leaves(); @@ -282,7 +307,7 @@ impl> SignedBeaconBlock // Compute the KZG commitments inclusion proof let (_, proof) = body_merkle_tree .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; let kzg_commitments_inclusion_proof = FixedVector::new(proof)?; let block_header = BeaconBlockHeader { @@ -919,6 +944,7 @@ pub mod ssz_tagged_signed_beacon_block_arc { #[cfg(test)] mod test { use super::*; + use crate::{block::EmptyBlock, core::MainnetEthSpec}; #[test] fn add_remove_payload_roundtrip() { diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/block/signed_beacon_block_header.rs similarity index 84% rename from consensus/types/src/signed_beacon_block_header.rs rename to consensus/types/src/block/signed_beacon_block_header.rs index 4a5ff2ec1a..2fcd8a705f 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/block/signed_beacon_block_header.rs @@ -1,13 +1,17 @@ -use crate::context_deserialize; -use crate::{ - BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, Signature, - SignedRoot, test_utils::TestRandom, -}; +use bls::{PublicKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::BeaconBlockHeader, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A signed header of a `BeaconBlock`. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder/builder_bid.rs similarity index 93% rename from consensus/types/src/builder_bid.rs rename to consensus/types/src/builder/builder_bid.rs index 3fb7af35ca..be9bb28155 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder/builder_bid.rs @@ -1,13 +1,6 @@ -use crate::beacon_block_body::KzgCommitments; -use crate::{ - ChainSpec, ContextDeserialize, EthSpec, ExecutionPayloadHeaderBellatrix, - ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, - ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, - ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, SignedRoot, - Uint256, test_utils::TestRandom, -}; use bls::PublicKeyBytes; use bls::Signature; +use context_deserialize::ContextDeserialize; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; @@ -15,6 +8,19 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, EthSpec, SignedRoot, Uint256}, + execution::{ + ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + ExecutionRequests, + }, + fork::{ForkName, ForkVersionDecode}, + kzg_ext::KzgCommitments, + test_utils::TestRandom, +}; + #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( diff --git a/consensus/types/src/builder/mod.rs b/consensus/types/src/builder/mod.rs new file mode 100644 index 0000000000..88a8e6a01a --- /dev/null +++ b/consensus/types/src/builder/mod.rs @@ -0,0 +1,6 @@ +mod builder_bid; + +pub use builder_bid::{ + BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, 
BuilderBidElectra, + BuilderBidFulu, BuilderBidGloas, SignedBuilderBid, +}; diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation/consolidation_request.rs similarity index 84% rename from consensus/types/src/consolidation_request.rs rename to consensus/types/src/consolidation/consolidation_request.rs index 2af3426b68..3f09517a90 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation/consolidation_request.rs @@ -1,11 +1,17 @@ -use crate::context_deserialize; -use crate::{Address, ForkName, PublicKeyBytes, SignedRoot, test_utils::TestRandom}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/consolidation/mod.rs b/consensus/types/src/consolidation/mod.rs new file mode 100644 index 0000000000..a6a2f4a331 --- /dev/null +++ b/consensus/types/src/consolidation/mod.rs @@ -0,0 +1,5 @@ +mod consolidation_request; +mod pending_consolidation; + +pub use consolidation_request::ConsolidationRequest; +pub use pending_consolidation::PendingConsolidation; diff --git a/consensus/types/src/pending_consolidation.rs b/consensus/types/src/consolidation/pending_consolidation.rs similarity index 86% rename from consensus/types/src/pending_consolidation.rs rename to consensus/types/src/consolidation/pending_consolidation.rs index 9fb8c3566d..fcd76e43b6 100644 --- a/consensus/types/src/pending_consolidation.rs +++ b/consensus/types/src/consolidation/pending_consolidation.rs @@ -1,11 +1,11 @@ -use crate::ForkName; -use 
crate::context_deserialize; -use crate::test_utils::TestRandom; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/application_domain.rs b/consensus/types/src/core/application_domain.rs similarity index 100% rename from consensus/types/src/application_domain.rs rename to consensus/types/src/core/application_domain.rs diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/core/chain_spec.rs similarity index 99% rename from consensus/types/src/chain_spec.rs rename to consensus/types/src/core/chain_spec.rs index a66080ada6..c8052b502b 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1,19 +1,27 @@ -use crate::application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; -use crate::blob_sidecar::BlobIdentifier; -use crate::data_column_sidecar::DataColumnsByRootIdentifier; -use crate::*; +use std::{fs::File, path::Path, time::Duration}; + use educe::Educe; use ethereum_hashing::hash; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_utils::quoted_u64::MaybeQuoted; use ssz::Encode; -use std::fs::File; -use std::path::Path; -use std::time::Duration; +use ssz_types::{RuntimeVariableList, VariableList}; use tree_hash::TreeHash; +use crate::{ + core::{ + APPLICATION_DOMAIN_BUILDER, Address, ApplicationDomain, EnrForkId, Epoch, EthSpec, + EthSpecId, Hash256, MainnetEthSpec, Slot, Uint256, + }, + data::{BlobIdentifier, DataColumnSubnetId, DataColumnsByRootIdentifier}, + 
execution::ExecutionBlockHash, + fork::{Fork, ForkData, ForkName}, + state::BeaconState, +}; + /// Each of the BLS signature domains. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Domain { @@ -2581,6 +2589,7 @@ mod tests { #[cfg(test)] mod yaml_tests { use super::*; + use crate::core::MinimalEthSpec; use paste::paste; use std::sync::Arc; use tempfile::NamedTempFile; diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/core/config_and_preset.rs similarity index 95% rename from consensus/types/src/config_and_preset.rs rename to consensus/types/src/core/config_and_preset.rs index 16b09c9c08..08141c7731 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/core/config_and_preset.rs @@ -1,13 +1,14 @@ -use crate::{ - AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, DenebPreset, - ElectraPreset, EthSpec, FuluPreset, GloasPreset, consts::altair, consts::deneb, -}; use maplit::hashmap; use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use superstruct::superstruct; +use crate::core::{ + AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, DenebPreset, + ElectraPreset, EthSpec, FuluPreset, GloasPreset, consts, +}; + /// Fusion of a runtime-config with the compile-time preset values. /// /// Mostly useful for the API. 
@@ -131,11 +132,11 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "domain_sync_committee_selection_proof".to_uppercase() => u32_hex(spec.domain_sync_committee_selection_proof), "sync_committee_subnet_count".to_uppercase() => - altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), "target_aggregators_per_sync_subcommittee".to_uppercase() => - altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), + consts::altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), // Deneb - "versioned_hash_version_kzg".to_uppercase() => deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), + "versioned_hash_version_kzg".to_uppercase() => consts::deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), // Electra "compounding_withdrawal_prefix".to_uppercase() => u8_hex(spec.compounding_withdrawal_prefix_byte), "unset_deposit_requests_start_index".to_uppercase() => spec.unset_deposit_requests_start_index.to_string().into(), diff --git a/consensus/types/src/consts.rs b/consensus/types/src/core/consts.rs similarity index 94% rename from consensus/types/src/consts.rs rename to consensus/types/src/core/consts.rs index c20d5fe8f3..b6d63c47a8 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/core/consts.rs @@ -23,5 +23,5 @@ pub mod bellatrix { pub const INTERVALS_PER_SLOT: u64 = 3; } pub mod deneb { - pub use crate::VERSIONED_HASH_VERSION_KZG; + pub use kzg::VERSIONED_HASH_VERSION_KZG; } diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/core/enr_fork_id.rs similarity index 95% rename from consensus/types/src/enr_fork_id.rs rename to consensus/types/src/core/enr_fork_id.rs index e22672aeb6..c3b400cd13 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/core/enr_fork_id.rs @@ -1,11 +1,10 @@ -use crate::Epoch; -use crate::test_utils::TestRandom; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use 
test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, test_utils::TestRandom}; + /// Specifies a fork which allows nodes to identify each other on the network. This fork is used in /// a nodes local ENR. /// diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/core/eth_spec.rs similarity index 98% rename from consensus/types/src/eth_spec.rs rename to consensus/types/src/core/eth_spec.rs index 47d32ad9e4..11857e678c 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/core/eth_spec.rs @@ -1,16 +1,22 @@ -use crate::*; +use std::{ + fmt::{self, Debug}, + str::FromStr, +}; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ U0, U1, U2, U4, U8, U16, U17, U32, U64, U128, U256, U512, U625, U1024, U2048, U4096, U8192, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, U1073741824, - U1099511627776, UInt, bit::B0, + U1099511627776, UInt, Unsigned, bit::B0, }; -use std::fmt::{self, Debug}; -use std::str::FromStr; -pub type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 +use crate::{ + core::{ChainSpec, Epoch}, + state::BeaconStateError, +}; + +type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 const MAINNET: &str = "mainnet"; const MINIMAL: &str = "minimal"; @@ -182,7 +188,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn get_committee_count_per_slot( active_validator_count: usize, spec: &ChainSpec, - ) -> Result { + ) -> Result { Self::get_committee_count_per_slot_with( active_validator_count, spec.max_committees_per_slot, @@ -194,7 +200,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + active_validator_count: usize, max_committees_per_slot: usize, target_committee_size: usize, - ) -> Result { + ) -> Result { let slots_per_epoch = Self::SlotsPerEpoch::to_usize(); Ok(std::cmp::max( diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/core/graffiti.rs similarity index 98% 
rename from consensus/types/src/graffiti.rs rename to consensus/types/src/core/graffiti.rs index 31cc4187a6..d0e0e1b1a8 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/core/graffiti.rs @@ -1,14 +1,13 @@ -use crate::{ - Hash256, - test_utils::{RngCore, TestRandom}, -}; +use std::{fmt, str::FromStr}; + +use rand::RngCore; use regex::bytes::Regex; use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; -use std::str::FromStr; use tree_hash::{PackedEncoding, TreeHash}; +use crate::{core::Hash256, test_utils::TestRandom}; + pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. diff --git a/consensus/types/src/core/mod.rs b/consensus/types/src/core/mod.rs new file mode 100644 index 0000000000..bb50bb1856 --- /dev/null +++ b/consensus/types/src/core/mod.rs @@ -0,0 +1,44 @@ +pub mod consts; + +mod application_domain; +mod chain_spec; +mod config_and_preset; +mod enr_fork_id; +mod eth_spec; +mod graffiti; +mod non_zero_usize; +mod preset; +mod relative_epoch; +mod signing_data; +mod slot_data; +#[macro_use] +mod slot_epoch_macros; +mod slot_epoch; +#[cfg(feature = "sqlite")] +mod sqlite; + +pub use application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; +pub use chain_spec::{BlobParameters, BlobSchedule, ChainSpec, Config, Domain}; +pub use config_and_preset::{ + ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, + ConfigAndPresetGloas, get_extra_fields, +}; +pub use enr_fork_id::EnrForkId; +pub use eth_spec::{EthSpec, EthSpecId, GNOSIS, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; +pub use graffiti::{GRAFFITI_BYTES_LEN, Graffiti, GraffitiString}; +pub use non_zero_usize::new_non_zero_usize; +pub use preset::{ + AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, + FuluPreset, GloasPreset, +}; +pub use relative_epoch::{Error as RelativeEpochError, 
RelativeEpoch}; +pub use signing_data::{SignedRoot, SigningData}; +pub use slot_data::SlotData; +pub use slot_epoch::{Epoch, Slot}; + +pub type Hash256 = alloy_primitives::B256; +pub type Uint256 = alloy_primitives::U256; +pub type Hash64 = alloy_primitives::B64; +pub type Address = alloy_primitives::Address; +pub type VersionedHash = Hash256; +pub type MerkleProof = Vec; diff --git a/consensus/types/src/non_zero_usize.rs b/consensus/types/src/core/non_zero_usize.rs similarity index 100% rename from consensus/types/src/non_zero_usize.rs rename to consensus/types/src/core/non_zero_usize.rs diff --git a/consensus/types/src/preset.rs b/consensus/types/src/core/preset.rs similarity index 99% rename from consensus/types/src/preset.rs rename to consensus/types/src/core/preset.rs index ab54c0345f..b436fafd3a 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/core/preset.rs @@ -1,5 +1,7 @@ -use crate::{ChainSpec, Epoch, EthSpec, Unsigned}; use serde::{Deserialize, Serialize}; +use ssz_types::typenum::Unsigned; + +use crate::core::{ChainSpec, Epoch, EthSpec}; /// Value-level representation of an Ethereum consensus "preset". 
/// diff --git a/consensus/types/src/relative_epoch.rs b/consensus/types/src/core/relative_epoch.rs similarity index 99% rename from consensus/types/src/relative_epoch.rs rename to consensus/types/src/core/relative_epoch.rs index 2fa0ae41bd..d1ee7ecc7c 100644 --- a/consensus/types/src/relative_epoch.rs +++ b/consensus/types/src/core/relative_epoch.rs @@ -1,6 +1,7 @@ -use crate::*; use safe_arith::{ArithError, SafeArith}; +use crate::core::{Epoch, Slot}; + #[derive(Debug, PartialEq, Clone, Copy)] pub enum Error { EpochTooLow { base: Epoch, other: Epoch }, diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/core/signing_data.rs similarity index 85% rename from consensus/types/src/signing_data.rs rename to consensus/types/src/core/signing_data.rs index 69b7dabfe5..907f03fac7 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/core/signing_data.rs @@ -1,13 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256}; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[context_deserialize(ForkName)] diff --git a/consensus/types/src/slot_data.rs b/consensus/types/src/core/slot_data.rs similarity index 92% rename from consensus/types/src/slot_data.rs rename to consensus/types/src/core/slot_data.rs index 19775913b9..f0bd01814f 100644 --- a/consensus/types/src/slot_data.rs +++ b/consensus/types/src/core/slot_data.rs @@ -1,4 +1,4 @@ -use crate::Slot; +use crate::core::Slot; /// A trait providing a `Slot` getter for messages that are related to a single slot. 
Useful in /// making parts of attestation and sync committee processing generic. diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/core/slot_epoch.rs similarity index 98% rename from consensus/types/src/slot_epoch.rs rename to consensus/types/src/core/slot_epoch.rs index 05af9c5232..97457701b1 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/core/slot_epoch.rs @@ -10,15 +10,17 @@ //! implement `Into`, however this would allow operations between `Slots` and `Epochs` which //! may lead to programming errors which are not detected by the compiler. -use crate::test_utils::TestRandom; -use crate::{ChainSpec, SignedRoot}; +use std::{fmt, hash::Hash}; use rand::RngCore; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; -use std::hash::Hash; + +use crate::{ + core::{ChainSpec, SignedRoot}, + test_utils::TestRandom, +}; #[cfg(feature = "legacy-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/core/slot_epoch_macros.rs similarity index 100% rename from consensus/types/src/slot_epoch_macros.rs rename to consensus/types/src/core/slot_epoch_macros.rs diff --git a/consensus/types/src/sqlite.rs b/consensus/types/src/core/sqlite.rs similarity index 96% rename from consensus/types/src/sqlite.rs rename to consensus/types/src/core/sqlite.rs index b6318dc4ce..de892b4e98 100644 --- a/consensus/types/src/sqlite.rs +++ b/consensus/types/src/core/sqlite.rs @@ -1,10 +1,11 @@ //! Implementations of SQLite compatibility traits. -use crate::{Epoch, Slot}; use rusqlite::{ Error, types::{FromSql, FromSqlError, ToSql, ToSqlOutput, ValueRef}, }; +use crate::core::{Epoch, Slot}; + macro_rules! 
impl_to_from_sql { ($type:ty) => { impl ToSql for $type { diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/data/blob_sidecar.rs similarity index 94% rename from consensus/types/src/blob_sidecar.rs rename to consensus/types/src/data/blob_sidecar.rs index d2c7331a57..709e556933 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/data/blob_sidecar.rs @@ -1,12 +1,7 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - AbstractExecPayload, BeaconBlockHeader, BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, - FixedVector, ForkName, Hash256, KzgProofs, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, VariableList, - beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, -}; +use std::{fmt::Debug, hash::Hash, sync::Arc}; + use bls::Signature; +use context_deserialize::context_deserialize; use educe::Educe; use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Blob as KzgBlob, Kzg, KzgCommitment, KzgProof}; use merkle_proof::{MerkleTreeError, merkle_root_from_branch, verify_merkle_proof}; @@ -15,13 +10,24 @@ use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::fmt::Debug; -use std::hash::Hash; -use std::sync::Arc; +use ssz_types::{FixedVector, RuntimeFixedVector, RuntimeVariableList, VariableList}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlock, SignedBeaconBlockHeader, + }, + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + data::Blob, + execution::AbstractExecPayload, + fork::ForkName, + kzg_ext::KzgProofs, + state::BeaconStateError, + test_utils::TestRandom, +}; + /// Container of the data that identifies an individual blob. 
#[derive( Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash, diff --git a/consensus/types/src/data_column_custody_group.rs b/consensus/types/src/data/data_column_custody_group.rs similarity index 98% rename from consensus/types/src/data_column_custody_group.rs rename to consensus/types/src/data/data_column_custody_group.rs index 7ecabab0ab..d96d13cfff 100644 --- a/consensus/types/src/data_column_custody_group.rs +++ b/consensus/types/src/data/data_column_custody_group.rs @@ -1,8 +1,14 @@ -use crate::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec}; +use std::collections::HashSet; + use alloy_primitives::U256; use itertools::Itertools; use safe_arith::{ArithError, SafeArith}; -use std::collections::HashSet; + +use crate::{ + EthSpec, + core::ChainSpec, + data::{ColumnIndex, DataColumnSubnetId}, +}; pub type CustodyIndex = u64; diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data/data_column_sidecar.rs similarity index 94% rename from consensus/types/src/data_column_sidecar.rs rename to consensus/types/src/data/data_column_sidecar.rs index 62ce4467df..71d821f83e 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data/data_column_sidecar.rs @@ -1,13 +1,8 @@ -use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, KzgCommitments}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - BeaconBlockHeader, BeaconStateError, Epoch, EthSpec, ForkName, Hash256, - SignedBeaconBlockHeader, Slot, -}; +use std::sync::Arc; + use bls::Signature; +use context_deserialize::context_deserialize; use educe::Educe; -use kzg::Error as KzgError; use kzg::{KzgCommitment, KzgProof}; use merkle_proof::verify_merkle_proof; use safe_arith::ArithError; @@ -16,11 +11,19 @@ use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::Error as SszError; use ssz_types::{FixedVector, VariableList}; -use std::sync::Arc; use test_random_derive::TestRandom; use 
tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlockHeader}, + core::{Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, + kzg_ext::{KzgCommitments, KzgError}, + state::BeaconStateError, + test_utils::TestRandom, +}; + pub type ColumnIndex = u64; pub type Cell = FixedVector::BytesPerCell>; pub type DataColumn = VariableList, ::MaxBlobCommitmentsPerBlock>; diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data/data_column_subnet_id.rs similarity index 80% rename from consensus/types/src/data_column_subnet_id.rs rename to consensus/types/src/data/data_column_subnet_id.rs index c6b8846c78..c30ebbba20 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ b/consensus/types/src/data/data_column_subnet_id.rs @@ -1,10 +1,13 @@ //! Identifies each data column subnet by an integer identifier. -use crate::ChainSpec; -use crate::data_column_sidecar::ColumnIndex; -use safe_arith::{ArithError, SafeArith}; +use std::{ + fmt::{self, Display}, + ops::{Deref, DerefMut}, +}; + +use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; -use std::fmt::{self, Display}; -use std::ops::{Deref, DerefMut}; + +use crate::{core::ChainSpec, data::ColumnIndex}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] @@ -69,15 +72,3 @@ impl From<&DataColumnSubnetId> for u64 { val.0 } } - -#[derive(Debug)] -pub enum Error { - ArithError(ArithError), - InvalidCustodySubnetCount(u64), -} - -impl From for Error { - fn from(e: ArithError) -> Self { - Error::ArithError(e) - } -} diff --git a/consensus/types/src/data/mod.rs b/consensus/types/src/data/mod.rs new file mode 100644 index 0000000000..10d062bada --- /dev/null +++ b/consensus/types/src/data/mod.rs @@ -0,0 +1,23 @@ +mod blob_sidecar; +mod data_column_custody_group; +mod data_column_sidecar; +mod data_column_subnet_id; + +pub use 
blob_sidecar::{ + BlobIdentifier, BlobSidecar, BlobSidecarError, BlobSidecarList, BlobsList, FixedBlobSidecarList, +}; +pub use data_column_custody_group::{ + CustodyIndex, DataColumnCustodyGroupError, compute_columns_for_custody_group, + compute_ordered_custody_column_indices, compute_subnets_for_node, + compute_subnets_from_custody_group, get_custody_groups, +}; +pub use data_column_sidecar::{ + Cell, ColumnIndex, DataColumn, DataColumnSidecar, DataColumnSidecarError, + DataColumnSidecarList, DataColumnsByRootIdentifier, +}; +pub use data_column_subnet_id::DataColumnSubnetId; + +use crate::core::EthSpec; +use ssz_types::FixedVector; + +pub type Blob = FixedVector::BytesPerBlob>; diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit/deposit.rs similarity index 78% rename from consensus/types/src/deposit.rs rename to consensus/types/src/deposit/deposit.rs index 724f3de2f0..67f8572def 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit/deposit.rs @@ -1,12 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::*; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::U33; +use ssz_types::{FixedVector, typenum::U33}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, deposit::DepositData, fork::ForkName, test_utils::TestRandom}; + pub const DEPOSIT_TREE_DEPTH: usize = 32; /// A deposit to potentially become a beacon chain validator. 
diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit/deposit_data.rs similarity index 86% rename from consensus/types/src/deposit_data.rs rename to consensus/types/src/deposit/deposit_data.rs index 3d9ae12808..51697f5d1a 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit/deposit_data.rs @@ -1,10 +1,17 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::{PublicKeyBytes, SecretKey, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Hash256, SignedRoot}, + deposit::DepositMessage, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data supplied by the user to the deposit contract. /// /// Spec v0.12.1 diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit/deposit_message.rs similarity index 81% rename from consensus/types/src/deposit_message.rs rename to consensus/types/src/deposit/deposit_message.rs index 9fe3b87885..4495a5c023 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit/deposit_message.rs @@ -1,11 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data supplied by the user to the deposit contract. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit/deposit_request.rs similarity index 86% rename from consensus/types/src/deposit_request.rs rename to consensus/types/src/deposit/deposit_request.rs index 16acfb3b44..8d3c6e88ba 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit/deposit_request.rs @@ -1,13 +1,13 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256, PublicKeyBytes}; -use bls::SignatureBytes; +use bls::{PublicKeyBytes, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit/deposit_tree_snapshot.rs similarity index 95% rename from consensus/types/src/deposit_tree_snapshot.rs rename to consensus/types/src/deposit/deposit_tree_snapshot.rs index 400fca217d..24f41397a0 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit/deposit_tree_snapshot.rs @@ -1,10 +1,11 @@ -use crate::*; use ethereum_hashing::{ZERO_HASHES, hash32_concat}; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes32; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; -use test_utils::TestRandom; + +use crate::{core::Hash256, deposit::DEPOSIT_TREE_DEPTH, test_utils::TestRandom}; #[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)] pub struct FinalizedExecutionBlock { diff --git a/consensus/types/src/deposit/mod.rs 
b/consensus/types/src/deposit/mod.rs new file mode 100644 index 0000000000..ff80f65cdb --- /dev/null +++ b/consensus/types/src/deposit/mod.rs @@ -0,0 +1,13 @@ +mod deposit; +mod deposit_data; +mod deposit_message; +mod deposit_request; +mod deposit_tree_snapshot; +mod pending_deposit; + +pub use deposit::{DEPOSIT_TREE_DEPTH, Deposit}; +pub use deposit_data::DepositData; +pub use deposit_message::DepositMessage; +pub use deposit_request::DepositRequest; +pub use deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; +pub use pending_deposit::PendingDeposit; diff --git a/consensus/types/src/pending_deposit.rs b/consensus/types/src/deposit/pending_deposit.rs similarity index 78% rename from consensus/types/src/pending_deposit.rs rename to consensus/types/src/deposit/pending_deposit.rs index 4a921edd54..4c039af39c 100644 --- a/consensus/types/src/pending_deposit.rs +++ b/consensus/types/src/deposit/pending_deposit.rs @@ -1,10 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::{PublicKeyBytes, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, Slot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/execution/bls_to_execution_change.rs similarity index 83% rename from consensus/types/src/bls_to_execution_change.rs rename to consensus/types/src/execution/bls_to_execution_change.rs index 72d737ac71..de14f1b4c5 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/execution/bls_to_execution_change.rs @@ -1,10 +1,17 @@ -use crate::test_utils::TestRandom; -use crate::*; +use 
bls::{PublicKeyBytes, SecretKey}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, ChainSpec, Domain, Hash256, SignedRoot}, + execution::SignedBlsToExecutionChange, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/execution/eth1_data.rs similarity index 86% rename from consensus/types/src/eth1_data.rs rename to consensus/types/src/execution/eth1_data.rs index 800f3e25f9..89a4e634a6 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/execution/eth1_data.rs @@ -1,12 +1,11 @@ -use super::Hash256; -use crate::ForkName; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + /// Contains data obtained from the Eth1 chain. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution/execution_block_hash.rs similarity index 96% rename from consensus/types/src/execution_block_hash.rs rename to consensus/types/src/execution/execution_block_hash.rs index 31905d64df..91c019ce04 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution/execution_block_hash.rs @@ -1,10 +1,11 @@ -use crate::FixedBytesExtended; -use crate::Hash256; -use crate::test_utils::TestRandom; +use std::fmt; + +use fixed_bytes::FixedBytesExtended; use rand::RngCore; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; + +use crate::{core::Hash256, test_utils::TestRandom}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution/execution_block_header.rs similarity index 98% rename from consensus/types/src/execution_block_header.rs rename to consensus/types/src/execution/execution_block_header.rs index 02152adbf7..e596ba1831 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution/execution_block_header.rs @@ -17,10 +17,15 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -use crate::{Address, EthSpec, ExecutionPayloadRef, Hash64, Hash256, Uint256}; use alloy_rlp::RlpEncodable; +use fixed_bytes::Uint256; use metastruct::metastruct; +use crate::{ + core::{Address, EthSpec, Hash64, Hash256}, + execution::ExecutionPayloadRef, +}; + /// Execution block header as used for RLP encoding and Keccak hashing. /// /// Credit to Reth for the type definition. 
diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution/execution_payload.rs similarity index 92% rename from consensus/types/src/execution_payload.rs rename to consensus/types/src/execution/execution_payload.rs index 3548f67db2..7973b7efdc 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution/execution_payload.rs @@ -1,19 +1,29 @@ -use crate::{test_utils::TestRandom, *}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; +use fixed_bytes::Uint256; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256}, + execution::ExecutionBlockHash, + fork::{ForkName, ForkVersionDecode}, + state::BeaconStateError, + test_utils::TestRandom, + withdrawal::Withdrawals, +}; + pub type Transaction = VariableList; pub type Transactions = VariableList< Transaction<::MaxBytesPerTransaction>, ::MaxTransactionsPerPayload, >; -pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; - #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -38,8 +48,14 @@ pub type Withdrawals = VariableList::MaxWithdrawal arbitrary(bound = "E: EthSpec"), ), ), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_into(FullPayload, BlindedPayload), map_ref_into(ExecutionPayloadHeader) )] diff --git a/consensus/types/src/execution_payload_header.rs 
b/consensus/types/src/execution/execution_payload_header.rs similarity index 96% rename from consensus/types/src/execution_payload_header.rs rename to consensus/types/src/execution/execution_payload_header.rs index 241ecb4ce6..bd91a6471b 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution/execution_payload_header.rs @@ -1,12 +1,27 @@ -use crate::{test_utils::TestRandom, *}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256, Uint256}, + execution::{ + ExecutionBlockHash, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadGloas, ExecutionPayloadRef, Transactions, + }, + fork::ForkName, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -35,8 +50,14 @@ use tree_hash_derive::TreeHash; derive(PartialEq, TreeHash, Debug), tree_hash(enum_behaviour = "transparent") ), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_ref_into(ExecutionPayloadHeader) )] #[cfg_attr( diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution/execution_requests.rs similarity index 93% rename from 
consensus/types/src/execution_requests.rs rename to consensus/types/src/execution/execution_requests.rs index 67396af71d..92d717778e 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution/execution_requests.rs @@ -1,7 +1,5 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ConsolidationRequest, DepositRequest, EthSpec, ForkName, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; +use context_deserialize::context_deserialize; use educe::Educe; use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; @@ -11,6 +9,15 @@ use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + consolidation::ConsolidationRequest, + core::{EthSpec, Hash256}, + deposit::DepositRequest, + fork::ForkName, + test_utils::TestRandom, + withdrawal::WithdrawalRequest, +}; + pub type DepositRequests = VariableList::MaxDepositRequestsPerPayload>; pub type WithdrawalRequests = diff --git a/consensus/types/src/execution/mod.rs b/consensus/types/src/execution/mod.rs new file mode 100644 index 0000000000..0708bc5d96 --- /dev/null +++ b/consensus/types/src/execution/mod.rs @@ -0,0 +1,36 @@ +mod eth1_data; +mod execution_block_hash; +mod execution_block_header; +#[macro_use] +mod execution_payload; +mod bls_to_execution_change; +mod execution_payload_header; +mod execution_requests; +mod payload; +mod signed_bls_to_execution_change; + +pub use bls_to_execution_change::BlsToExecutionChange; +pub use eth1_data::Eth1Data; +pub use execution_block_hash::ExecutionBlockHash; +pub use execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; +pub use execution_payload::{ + ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadRef, + Transaction, Transactions, +}; +pub use execution_payload_header::{ 
+ ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, +}; +pub use execution_requests::{ + ConsolidationRequests, DepositRequests, ExecutionRequests, RequestType, WithdrawalRequests, +}; +pub use payload::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadGloas, + BlindedPayloadRef, BlockProductionVersion, BlockType, ExecPayload, FullPayload, + FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, + FullPayloadFulu, FullPayloadGloas, FullPayloadRef, OwnedExecPayload, +}; +pub use signed_bls_to_execution_change::SignedBlsToExecutionChange; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/execution/payload.rs similarity index 91% rename from consensus/types/src/payload.rs rename to consensus/types/src/execution/payload.rs index 370c73ad0a..c1cc6c4eb6 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/execution/payload.rs @@ -1,16 +1,29 @@ -use crate::{test_utils::TestRandom, *}; use educe::Educe; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::borrow::Cow; -use std::fmt::Debug; -use std::hash::Hash; +use ssz_types::VariableList; +use std::{borrow::Cow, fmt::Debug, hash::Hash}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256}, + execution::{ + ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadGloas, ExecutionPayloadHeader, 
ExecutionPayloadHeaderBellatrix, + ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, + ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, ExecutionPayloadRef, Transactions, + }, + fork::ForkName, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum BlockType { Blinded, @@ -38,8 +51,8 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn gas_limit(&self) -> u64; fn transactions(&self) -> Option<&Transactions>; /// fork-specific fields - fn withdrawals_root(&self) -> Result; - fn blob_gas_used(&self) -> Result; + fn withdrawals_root(&self) -> Result; + fn blob_gas_used(&self) -> Result; /// Is this a default payload with 0x0 roots for transactions and withdrawals? fn is_default_with_zero_roots(&self) -> bool; @@ -179,8 +192,14 @@ pub trait AbstractExecPayload: ), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", @@ -311,9 +330,9 @@ impl ExecPayload for FullPayload { }) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - FullPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), + FullPayload::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), FullPayload::Capella(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), FullPayload::Deneb(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), FullPayload::Electra(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), @@ -322,10 +341,10 @@ impl ExecPayload for FullPayload { } } - fn 
blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { FullPayload::Bellatrix(_) | FullPayload::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } FullPayload::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayload::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), @@ -354,9 +373,9 @@ impl FullPayload { }) } - pub fn default_at_fork(fork_name: ForkName) -> Result { + pub fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Base | ForkName::Altair => Err(BeaconStateError::IncorrectStateVariant), ForkName::Bellatrix => Ok(FullPayloadBellatrix::default().into()), ForkName::Capella => Ok(FullPayloadCapella::default().into()), ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), @@ -450,9 +469,9 @@ impl ExecPayload for FullPayloadRef<'_, E> { }) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - FullPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), FullPayloadRef::Capella(inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } @@ -469,10 +488,10 @@ impl ExecPayload for FullPayloadRef<'_, E> { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { FullPayloadRef::Bellatrix(_) | FullPayloadRef::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } FullPayloadRef::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayloadRef::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), @@ -548,8 +567,14 @@ impl TryFrom> for FullPayload { tree_hash(enum_behaviour = "transparent"), ), map_into(ExecutionPayloadHeader), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - 
partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", @@ -658,9 +683,9 @@ impl ExecPayload for BlindedPayload { None } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - BlindedPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), + BlindedPayload::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), BlindedPayload::Capella(inner) => Ok(inner.execution_payload_header.withdrawals_root), BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.withdrawals_root), BlindedPayload::Electra(inner) => Ok(inner.execution_payload_header.withdrawals_root), @@ -669,10 +694,10 @@ impl ExecPayload for BlindedPayload { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { BlindedPayload::Bellatrix(_) | BlindedPayload::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayload::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), @@ -766,9 +791,9 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { None } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - BlindedPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), + BlindedPayloadRef::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), BlindedPayloadRef::Capella(inner) => { Ok(inner.execution_payload_header.withdrawals_root) } @@ -781,10 +806,10 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { 
BlindedPayloadRef::Bellatrix(_) | BlindedPayloadRef::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayloadRef::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), @@ -877,12 +902,12 @@ macro_rules! impl_exec_payload_common { f(self) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { let g = $g; g(self) } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { let h = $h; h(self) } @@ -917,15 +942,16 @@ macro_rules! impl_exec_payload_for_fork { }, { |_| { None } }, { - let c: for<'a> fn(&'a $wrapper_type_header) -> Result = - |payload: &$wrapper_type_header| { - let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); - wrapper_ref_type.withdrawals_root() - }; + let c: for<'a> fn( + &'a $wrapper_type_header, + ) -> Result = |payload: &$wrapper_type_header| { + let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; c }, { - let c: for<'a> fn(&'a $wrapper_type_header) -> Result = + let c: for<'a> fn(&'a $wrapper_type_header) -> Result = |payload: &$wrapper_type_header| { let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); wrapper_ref_type.blob_gas_used() @@ -935,12 +961,12 @@ macro_rules! impl_exec_payload_for_fork { ); impl TryInto<$wrapper_type_header> for BlindedPayload { - type Error = Error; + type Error = BeaconStateError; fn try_into(self) -> Result<$wrapper_type_header, Self::Error> { match self { BlindedPayload::$fork_variant(payload) => Ok(payload), - _ => Err(Error::IncorrectStateVariant), + _ => Err(BeaconStateError::IncorrectStateVariant), } } } @@ -963,13 +989,13 @@ macro_rules! 
impl_exec_payload_for_fork { } impl TryFrom> for $wrapper_type_header { - type Error = Error; + type Error = BeaconStateError; fn try_from(header: ExecutionPayloadHeader) -> Result { match header { ExecutionPayloadHeader::$fork_variant(execution_payload_header) => { Ok(execution_payload_header.into()) } - _ => Err(Error::PayloadConversionLogicFlaw), + _ => Err(BeaconStateError::PayloadConversionLogicFlaw), } } } @@ -1004,7 +1030,7 @@ macro_rules! impl_exec_payload_for_fork { c }, { - let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = |payload: &$wrapper_type_full| { let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); wrapper_ref_type.withdrawals_root() @@ -1012,7 +1038,7 @@ macro_rules! impl_exec_payload_for_fork { c }, { - let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = |payload: &$wrapper_type_full| { let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); wrapper_ref_type.blob_gas_used() @@ -1039,26 +1065,26 @@ macro_rules! 
impl_exec_payload_for_fork { } impl TryFrom> for $wrapper_type_full { - type Error = Error; + type Error = BeaconStateError; fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(Error::PayloadConversionLogicFlaw) + Err(BeaconStateError::PayloadConversionLogicFlaw) } } impl TryFrom<$wrapped_type_header> for $wrapper_type_full { - type Error = Error; + type Error = BeaconStateError; fn try_from(_: $wrapped_type_header) -> Result { - Err(Error::PayloadConversionLogicFlaw) + Err(BeaconStateError::PayloadConversionLogicFlaw) } } impl TryInto<$wrapper_type_full> for FullPayload { - type Error = Error; + type Error = BeaconStateError; fn try_into(self) -> Result<$wrapper_type_full, Self::Error> { match self { FullPayload::$fork_variant(payload) => Ok(payload), - _ => Err(Error::PayloadConversionLogicFlaw), + _ => Err(BeaconStateError::PayloadConversionLogicFlaw), } } } diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/execution/signed_bls_to_execution_change.rs similarity index 78% rename from consensus/types/src/signed_bls_to_execution_change.rs rename to consensus/types/src/execution/signed_bls_to_execution_change.rs index 910c4c7d7e..535960fb3f 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/execution/signed_bls_to_execution_change.rs @@ -1,10 +1,12 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::Signature; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{execution::BlsToExecutionChange, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/exit/mod.rs b/consensus/types/src/exit/mod.rs new file mode 100644 index 
0000000000..cb066d1d7a --- /dev/null +++ b/consensus/types/src/exit/mod.rs @@ -0,0 +1,5 @@ +mod signed_voluntary_exit; +mod voluntary_exit; + +pub use signed_voluntary_exit::SignedVoluntaryExit; +pub use voluntary_exit::VoluntaryExit; diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/exit/signed_voluntary_exit.rs similarity index 84% rename from consensus/types/src/signed_voluntary_exit.rs rename to consensus/types/src/exit/signed_voluntary_exit.rs index 0beffa1e04..b49401a721 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/exit/signed_voluntary_exit.rs @@ -1,12 +1,12 @@ -use crate::context_deserialize; -use crate::{ForkName, VoluntaryExit, test_utils::TestRandom}; use bls::Signature; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{exit::VoluntaryExit, fork::ForkName, test_utils::TestRandom}; + /// An exit voluntarily submitted a validator who wishes to withdraw. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/exit/voluntary_exit.rs similarity index 90% rename from consensus/types/src/voluntary_exit.rs rename to consensus/types/src/exit/voluntary_exit.rs index 42d792a814..30c6a97c4d 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/exit/voluntary_exit.rs @@ -1,14 +1,17 @@ -use crate::context_deserialize; -use crate::{ - ChainSpec, Domain, Epoch, ForkName, Hash256, SecretKey, SignedRoot, SignedVoluntaryExit, - test_utils::TestRandom, -}; - +use bls::SecretKey; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Domain, Epoch, Hash256, SignedRoot}, + exit::SignedVoluntaryExit, + fork::ForkName, + test_utils::TestRandom, +}; + /// An exit voluntarily submitted a validator who wishes to withdraw. /// /// Spec v0.12.1 diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork/fork.rs similarity index 96% rename from consensus/types/src/fork.rs rename to consensus/types/src/fork/fork.rs index 5c5bd7ffd1..371b11e05c 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork/fork.rs @@ -1,12 +1,11 @@ -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName}; use context_deserialize::context_deserialize; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, fork::ForkName, test_utils::TestRandom}; + /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork/fork_context.rs similarity index 98% rename from consensus/types/src/fork_context.rs rename to consensus/types/src/fork/fork_context.rs index 66617326e1..aec7276124 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork/fork_context.rs @@ -1,7 +1,11 @@ +use std::collections::BTreeMap; + use parking_lot::RwLock; -use crate::{ChainSpec, Epoch, EthSpec, ForkName, Hash256, Slot}; -use std::collections::BTreeMap; +use crate::{ + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, +}; /// Represents a hard fork in the consensus protocol. /// @@ -152,8 +156,7 @@ impl ForkContext { #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; - use crate::chain_spec::{BlobParameters, BlobSchedule}; + use crate::core::{BlobParameters, BlobSchedule, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork/fork_data.rs similarity index 88% rename from consensus/types/src/fork_data.rs rename to consensus/types/src/fork/fork_data.rs index 2d5e905efb..1b9c8bad9f 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork/fork_data.rs @@ -1,12 +1,15 @@ -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256, SignedRoot}; use context_deserialize::context_deserialize; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/fork/fork_macros.rs b/consensus/types/src/fork/fork_macros.rs new file mode 100644 index 0000000000..0c7f382ffc --- /dev/null +++ b/consensus/types/src/fork/fork_macros.rs @@ -0,0 +1,60 @@ +/// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. +/// +/// The `$body` expression is where the magic happens. The macro allows us to achieve polymorphism +/// in the return type, which is not usually possible in Rust without trait objects. +/// +/// E.g. you could call `map_fork_name!(fork, BeaconBlock, serde_json::from_str(s))` to decode +/// different `BeaconBlock` variants depending on the value of `fork`. Note how the type of the body +/// will change between `BeaconBlockBase` and `BeaconBlockAltair` depending on which branch is +/// taken, the important thing is that they are re-unified by injecting them back into the +/// `BeaconBlock` parent enum. +/// +/// If you would also like to extract additional data alongside the superstruct type, use +/// the more flexible `map_fork_name_with` macro. +#[macro_export] +macro_rules! map_fork_name { + ($fork_name:expr, $t:tt, $body:expr) => { + $crate::map_fork_name_with!($fork_name, $t, { ($body, ()) }).0 + }; +} + +/// Map a fork name into a tuple of `(t, extra)` where `t` is a superstruct type. +#[macro_export] +macro_rules! 
map_fork_name_with { + ($fork_name:expr, $t:tt, $body:block) => { + match $fork_name { + $crate::fork::ForkName::Base => { + let (value, extra_data) = $body; + ($t::Base(value), extra_data) + } + $crate::fork::ForkName::Altair => { + let (value, extra_data) = $body; + ($t::Altair(value), extra_data) + } + $crate::fork::ForkName::Bellatrix => { + let (value, extra_data) = $body; + ($t::Bellatrix(value), extra_data) + } + $crate::fork::ForkName::Capella => { + let (value, extra_data) = $body; + ($t::Capella(value), extra_data) + } + $crate::fork::ForkName::Deneb => { + let (value, extra_data) = $body; + ($t::Deneb(value), extra_data) + } + $crate::fork::ForkName::Electra => { + let (value, extra_data) = $body; + ($t::Electra(value), extra_data) + } + $crate::fork::ForkName::Fulu => { + let (value, extra_data) = $body; + ($t::Fulu(value), extra_data) + } + $crate::fork::ForkName::Gloas => { + let (value, extra_data) = $body; + ($t::Gloas(value), extra_data) + } + } + }; +} diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork/fork_name.rs similarity index 84% rename from consensus/types/src/fork_name.rs rename to consensus/types/src/fork/fork_name.rs index 1d7bf3795b..e9ec5fbe41 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork/fork_name.rs @@ -1,8 +1,12 @@ -use crate::{ChainSpec, Epoch}; +use std::{ + fmt::{self, Display, Formatter}, + str::FromStr, +}; + use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::fmt::{self, Display, Formatter}; -use std::str::FromStr; + +use crate::core::{ChainSpec, Epoch}; #[derive( Debug, Clone, Copy, Decode, Encode, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, @@ -243,67 +247,6 @@ impl ForkName { } } -/// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. -/// -/// The `$body` expression is where the magic happens. 
The macro allows us to achieve polymorphism -/// in the return type, which is not usually possible in Rust without trait objects. -/// -/// E.g. you could call `map_fork_name!(fork, BeaconBlock, serde_json::from_str(s))` to decode -/// different `BeaconBlock` variants depending on the value of `fork`. Note how the type of the body -/// will change between `BeaconBlockBase` and `BeaconBlockAltair` depending on which branch is -/// taken, the important thing is that they are re-unified by injecting them back into the -/// `BeaconBlock` parent enum. -/// -/// If you would also like to extract additional data alongside the superstruct type, use -/// the more flexible `map_fork_name_with` macro. -#[macro_export] -macro_rules! map_fork_name { - ($fork_name:expr, $t:tt, $body:expr) => { - map_fork_name_with!($fork_name, $t, { ($body, ()) }).0 - }; -} - -/// Map a fork name into a tuple of `(t, extra)` where `t` is a superstruct type. -#[macro_export] -macro_rules! map_fork_name_with { - ($fork_name:expr, $t:tt, $body:block) => { - match $fork_name { - ForkName::Base => { - let (value, extra_data) = $body; - ($t::Base(value), extra_data) - } - ForkName::Altair => { - let (value, extra_data) = $body; - ($t::Altair(value), extra_data) - } - ForkName::Bellatrix => { - let (value, extra_data) = $body; - ($t::Bellatrix(value), extra_data) - } - ForkName::Capella => { - let (value, extra_data) = $body; - ($t::Capella(value), extra_data) - } - ForkName::Deneb => { - let (value, extra_data) = $body; - ($t::Deneb(value), extra_data) - } - ForkName::Electra => { - let (value, extra_data) = $body; - ($t::Electra(value), extra_data) - } - ForkName::Fulu => { - let (value, extra_data) = $body; - ($t::Fulu(value), extra_data) - } - ForkName::Gloas => { - let (value, extra_data) = $body; - ($t::Gloas(value), extra_data) - } - } - }; -} - impl FromStr for ForkName { type Err = String; diff --git a/consensus/types/src/fork/fork_version_decode.rs 
b/consensus/types/src/fork/fork_version_decode.rs new file mode 100644 index 0000000000..4349efb21f --- /dev/null +++ b/consensus/types/src/fork/fork_version_decode.rs @@ -0,0 +1,6 @@ +use crate::fork::ForkName; + +pub trait ForkVersionDecode: Sized { + /// SSZ decode with explicit fork variant. + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; +} diff --git a/consensus/types/src/fork/mod.rs b/consensus/types/src/fork/mod.rs new file mode 100644 index 0000000000..1ad1c7cb62 --- /dev/null +++ b/consensus/types/src/fork/mod.rs @@ -0,0 +1,15 @@ +mod fork; +mod fork_context; +mod fork_data; +mod fork_macros; +mod fork_name; +mod fork_version_decode; + +pub use crate::{map_fork_name, map_fork_name_with}; +pub use fork::Fork; +pub use fork_context::{ForkContext, HardFork}; +pub use fork_data::ForkData; +pub use fork_name::{ForkName, InconsistentFork}; +pub use fork_version_decode::ForkVersionDecode; + +pub type ForkVersion = [u8; 4]; diff --git a/consensus/types/src/kzg_ext/consts.rs b/consensus/types/src/kzg_ext/consts.rs new file mode 100644 index 0000000000..06c9f9c749 --- /dev/null +++ b/consensus/types/src/kzg_ext/consts.rs @@ -0,0 +1,3 @@ +pub use kzg::{ + BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, VERSIONED_HASH_VERSION_KZG, +}; diff --git a/consensus/types/src/kzg_ext/mod.rs b/consensus/types/src/kzg_ext/mod.rs new file mode 100644 index 0000000000..63533ec71f --- /dev/null +++ b/consensus/types/src/kzg_ext/mod.rs @@ -0,0 +1,27 @@ +pub mod consts; + +pub use kzg::{Blob as KzgBlob, Error as KzgError, Kzg, KzgCommitment, KzgProof}; + +use ssz_types::VariableList; + +use crate::core::EthSpec; + +// Note on List limit: +// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` +// - Fulu: `MaxCellsPerBlock` +// We choose to use a single type (with the larger value from Fulu as `N`) instead of having to +// introduce a new type for Fulu. 
This is to avoid messy conversions and having to add extra types +with no gains - as `N` does not impact serialisation at all, and only affects merkleization, +which we don't currently do on `KzgProofs` anyway. +pub type KzgProofs = VariableList::MaxCellsPerBlock>; + +pub type KzgCommitments = + VariableList::MaxBlobCommitmentsPerBlock>; + +/// Util method helpful for logging. +pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { + let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); + let commitments_joined = commitment_strings.join(", "); + let surrounded_commitments = format!("[{}]", commitments_joined); + surrounded_commitments +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 8e83fed1d9..a8a78f8cfb 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -1,4 +1,4 @@ -//! Ethereum 2.0 types +//! Ethereum Consensus types // Clippy lint set up #![cfg_attr( not(test), @@ -12,287 +12,167 @@ #[macro_use] pub mod test_utils; -pub mod aggregate_and_proof; -pub mod application_domain; pub mod attestation; -pub mod attestation_data; -pub mod attestation_duty; -pub mod attester_slashing; -pub mod beacon_block; -pub mod beacon_block_body; -pub mod beacon_block_header; -pub mod beacon_committee; -pub mod beacon_response; -pub mod beacon_state; -pub mod bls_to_execution_change; -pub mod builder_bid; -pub mod chain_spec; -pub mod checkpoint; -pub mod consolidation_request; -pub mod consts; -pub mod contribution_and_proof; +pub mod block; +pub mod builder; +pub mod consolidation; +pub mod core; +pub mod data; pub mod deposit; -pub mod deposit_data; -pub mod deposit_message; -pub mod deposit_request; -pub mod deposit_tree_snapshot; -pub mod enr_fork_id; -pub mod eth1_data; -pub mod eth_spec; -pub mod execution_block_hash; -pub mod execution_payload; -pub mod execution_payload_header; +pub mod execution; +pub mod exit; pub mod fork; -pub mod fork_data; -pub mod fork_name; -pub 
mod graffiti; -pub mod historical_batch; -pub mod historical_summary; -pub mod indexed_attestation; -pub mod light_client_bootstrap; -pub mod light_client_finality_update; -pub mod light_client_optimistic_update; -pub mod light_client_update; -pub mod pending_attestation; -pub mod pending_consolidation; -pub mod pending_deposit; -pub mod pending_partial_withdrawal; -pub mod proposer_preparation_data; -pub mod proposer_slashing; -pub mod relative_epoch; -pub mod selection_proof; -pub mod shuffling_id; -pub mod signed_aggregate_and_proof; -pub mod signed_beacon_block; -pub mod signed_beacon_block_header; -pub mod signed_bls_to_execution_change; -pub mod signed_contribution_and_proof; -pub mod signed_voluntary_exit; -pub mod signing_data; -pub mod sync_committee_subscription; -pub mod sync_duty; -pub mod validator; -pub mod validator_subscription; -pub mod voluntary_exit; -pub mod withdrawal_credentials; -pub mod withdrawal_request; -#[macro_use] -pub mod slot_epoch_macros; -pub mod activation_queue; -pub mod config_and_preset; -pub mod execution_block_header; -pub mod execution_requests; -pub mod fork_context; -pub mod participation_flags; -pub mod payload; -pub mod preset; -pub mod slot_epoch; -pub mod subnet_id; -pub mod sync_aggregate; -pub mod sync_aggregator_selection_data; +pub mod kzg_ext; +pub mod light_client; +pub mod slashing; +pub mod state; pub mod sync_committee; -pub mod sync_committee_contribution; -pub mod sync_committee_message; -pub mod sync_selection_proof; -pub mod sync_subnet_id; -pub mod validator_registration_data; +pub mod validator; pub mod withdrawal; -pub mod epoch_cache; -pub mod slot_data; -#[cfg(feature = "sqlite")] -pub mod sqlite; +// Temporary root level exports to maintain backwards compatibility for Lighthouse. 
+pub use attestation::*; +pub use block::*; +pub use builder::*; +pub use consolidation::*; +pub use core::{consts, *}; +pub use data::*; +pub use deposit::*; +pub use execution::*; +pub use exit::*; +pub use fork::*; +pub use kzg_ext::*; +pub use light_client::*; +pub use slashing::*; +pub use state::*; +pub use sync_committee::*; +pub use validator::*; +pub use withdrawal::*; -pub mod blob_sidecar; -pub mod data_column_custody_group; -pub mod data_column_sidecar; -pub mod data_column_subnet_id; -pub mod light_client_header; -pub mod non_zero_usize; -pub mod runtime_fixed_vector; -pub mod runtime_var_list; +// Temporary facade modules to maintain backwards compatibility for Lighthouse. +pub mod eth_spec { + pub use crate::core::EthSpec; +} -pub use crate::activation_queue::ActivationQueue; -pub use crate::aggregate_and_proof::{ - AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, -}; -pub use crate::attestation::{ - Attestation, AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut, - Error as AttestationError, SingleAttestation, -}; -pub use crate::attestation_data::AttestationData; -pub use crate::attestation_duty::AttestationDuty; -pub use crate::attester_slashing::{ - AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingOnDisk, - AttesterSlashingRef, AttesterSlashingRefOnDisk, -}; -pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, - BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockRef, - BeaconBlockRefMut, BlindedBeaconBlock, BlockImportSource, EmptyBlock, -}; -pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, - BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, - BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, -}; -pub use 
crate::beacon_block_header::BeaconBlockHeader; -pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; -pub use crate::beacon_response::{ - BeaconResponse, ForkVersionDecode, ForkVersionedResponse, UnversionedResponse, -}; -pub use crate::beacon_state::{Error as BeaconStateError, *}; -pub use crate::blob_sidecar::{BlobIdentifier, BlobSidecar, BlobSidecarList, BlobsList}; -pub use crate::bls_to_execution_change::BlsToExecutionChange; -pub use crate::chain_spec::{ChainSpec, Config, Domain}; -pub use crate::checkpoint::Checkpoint; -pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, - ConfigAndPresetGloas, -}; -pub use crate::consolidation_request::ConsolidationRequest; -pub use crate::contribution_and_proof::ContributionAndProof; -pub use crate::data_column_sidecar::{ - ColumnIndex, DataColumnSidecar, DataColumnSidecarList, DataColumnsByRootIdentifier, -}; -pub use crate::data_column_subnet_id::DataColumnSubnetId; -pub use crate::deposit::{DEPOSIT_TREE_DEPTH, Deposit}; -pub use crate::deposit_data::DepositData; -pub use crate::deposit_message::DepositMessage; -pub use crate::deposit_request::DepositRequest; -pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; -pub use crate::enr_fork_id::EnrForkId; -pub use crate::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; -pub use crate::eth_spec::EthSpecId; -pub use crate::eth1_data::Eth1Data; -pub use crate::execution_block_hash::ExecutionBlockHash; -pub use crate::execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; -pub use crate::execution_payload::{ - ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadRef, - Transaction, Transactions, Withdrawals, -}; -pub use crate::execution_payload_header::{ - ExecutionPayloadHeader, 
ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, -}; -pub use crate::execution_requests::{ExecutionRequests, RequestType}; -pub use crate::fork::Fork; -pub use crate::fork_context::ForkContext; -pub use crate::fork_data::ForkData; -pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::graffiti::{GRAFFITI_BYTES_LEN, Graffiti}; -pub use crate::historical_batch::HistoricalBatch; -pub use crate::indexed_attestation::{ - IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, -}; -pub use crate::light_client_bootstrap::{ - LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, - LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, - LightClientBootstrapGloas, -}; -pub use crate::light_client_finality_update::{ - LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, - LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, - LightClientFinalityUpdateFulu, LightClientFinalityUpdateGloas, -}; -pub use crate::light_client_header::{ - LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, -}; -pub use crate::light_client_optimistic_update::{ - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, - LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, - LightClientOptimisticUpdateElectra, LightClientOptimisticUpdateFulu, - LightClientOptimisticUpdateGloas, -}; -pub use crate::light_client_update::{ - Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, - LightClientUpdateFulu, 
LightClientUpdateGloas, MerkleProof, -}; -pub use crate::participation_flags::ParticipationFlags; -pub use crate::payload::{ - AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, - BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadGloas, - BlindedPayloadRef, BlockType, ExecPayload, FullPayload, FullPayloadBellatrix, - FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, FullPayloadGloas, - FullPayloadRef, OwnedExecPayload, -}; -pub use crate::pending_attestation::PendingAttestation; -pub use crate::pending_consolidation::PendingConsolidation; -pub use crate::pending_deposit::PendingDeposit; -pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; -pub use crate::preset::{ - AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, - FuluPreset, GloasPreset, -}; -pub use crate::proposer_preparation_data::ProposerPreparationData; -pub use crate::proposer_slashing::ProposerSlashing; -pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; -pub use crate::runtime_fixed_vector::RuntimeFixedVector; -pub use crate::runtime_var_list::RuntimeVariableList; -pub use crate::selection_proof::SelectionProof; -pub use crate::shuffling_id::AttestationShufflingId; -pub use crate::signed_aggregate_and_proof::{ - SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, -}; -pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockFulu, SignedBeaconBlockGloas, SignedBeaconBlockHash, SignedBlindedBeaconBlock, - ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, -}; -pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; -pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; -pub use 
crate::signed_contribution_and_proof::SignedContributionAndProof; -pub use crate::signed_voluntary_exit::SignedVoluntaryExit; -pub use crate::signing_data::{SignedRoot, SigningData}; -pub use crate::slot_epoch::{Epoch, Slot}; -pub use crate::subnet_id::SubnetId; -pub use crate::sync_aggregate::SyncAggregate; -pub use crate::sync_aggregator_selection_data::SyncAggregatorSelectionData; -pub use crate::sync_committee::SyncCommittee; -pub use crate::sync_committee_contribution::{SyncCommitteeContribution, SyncContributionData}; -pub use crate::sync_committee_message::SyncCommitteeMessage; -pub use crate::sync_committee_subscription::SyncCommitteeSubscription; -pub use crate::sync_duty::SyncDuty; -pub use crate::sync_selection_proof::SyncSelectionProof; -pub use crate::sync_subnet_id::SyncSubnetId; -pub use crate::validator::Validator; -pub use crate::validator_registration_data::*; -pub use crate::validator_subscription::ValidatorSubscription; -pub use crate::voluntary_exit::VoluntaryExit; -pub use crate::withdrawal::Withdrawal; -pub use crate::withdrawal_credentials::WithdrawalCredentials; -pub use crate::withdrawal_request::WithdrawalRequest; -pub use fixed_bytes::FixedBytesExtended; +pub mod chain_spec { + pub use crate::core::ChainSpec; +} -pub type CommitteeIndex = u64; -pub type Hash256 = fixed_bytes::Hash256; -pub type Uint256 = fixed_bytes::Uint256; -pub type Address = fixed_bytes::Address; -pub type ForkVersion = [u8; 4]; -pub type BLSFieldElement = Uint256; -pub type Blob = FixedVector::BytesPerBlob>; -// Note on List limit: -// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` -// - Fulu: `MaxCellsPerBlock` -// We choose to use a single type (with the larger value from Fulu as `N`) instead of having to -// introduce a new type for Fulu. This is to avoid messy conversions and having to add extra types -// with no gains - as `N` does not impact serialisation at all, and only affects merkleization, -// which we don't current do on `KzgProofs` anyway. 
-pub type KzgProofs = VariableList::MaxCellsPerBlock>; -pub type VersionedHash = Hash256; -pub type Hash64 = alloy_primitives::B64; +pub mod beacon_block { + pub use crate::block::{BlindedBeaconBlock, BlockImportSource}; +} + +pub mod beacon_block_body { + pub use crate::kzg_ext::{KzgCommitments, format_kzg_commitments}; +} + +pub mod beacon_state { + pub use crate::state::{ + BeaconState, BeaconStateBase, CommitteeCache, compute_committee_index_in_epoch, + compute_committee_range_in_epoch, epoch_committee_count, + }; +} + +pub mod graffiti { + pub use crate::core::GraffitiString; +} + +pub mod indexed_attestation { + pub use crate::attestation::{IndexedAttestationBase, IndexedAttestationElectra}; +} + +pub mod historical_summary { + pub use crate::state::HistoricalSummary; +} + +pub mod participation_flags { + pub use crate::attestation::ParticipationFlags; +} + +pub mod epoch_cache { + pub use crate::state::{EpochCache, EpochCacheError, EpochCacheKey}; +} + +pub mod non_zero_usize { + pub use crate::core::new_non_zero_usize; +} + +pub mod data_column_sidecar { + pub use crate::data::{ + Cell, ColumnIndex, DataColumn, DataColumnSidecar, DataColumnSidecarError, + DataColumnSidecarList, + }; +} + +pub mod builder_bid { + pub use crate::builder::*; +} + +pub mod blob_sidecar { + pub use crate::data::{ + BlobIdentifier, BlobSidecar, BlobSidecarError, BlobsList, FixedBlobSidecarList, + }; +} + +pub mod payload { + pub use crate::execution::BlockProductionVersion; +} + +pub mod execution_requests { + pub use crate::execution::{ + ConsolidationRequests, DepositRequests, ExecutionRequests, RequestType, WithdrawalRequests, + }; +} + +pub mod data_column_custody_group { + pub use crate::data::{ + CustodyIndex, compute_columns_for_custody_group, compute_ordered_custody_column_indices, + compute_subnets_for_node, compute_subnets_from_custody_group, get_custody_groups, + }; +} + +pub mod sync_aggregate { + pub use crate::sync_committee::SyncAggregateError as Error; +} + +pub 
mod light_client_update { + pub use crate::light_client::consts::{ + CURRENT_SYNC_COMMITTEE_INDEX, CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA, FINALIZED_ROOT_INDEX, + FINALIZED_ROOT_INDEX_ELECTRA, MAX_REQUEST_LIGHT_CLIENT_UPDATES, NEXT_SYNC_COMMITTEE_INDEX, + NEXT_SYNC_COMMITTEE_INDEX_ELECTRA, + }; +} + +pub mod sync_committee_contribution { + pub use crate::sync_committee::{ + SyncCommitteeContributionError as Error, SyncContributionData, + }; +} + +pub mod slot_data { + pub use crate::core::SlotData; +} + +pub mod signed_aggregate_and_proof { + pub use crate::attestation::SignedAggregateAndProofRefMut; +} + +pub mod application_domain { + pub use crate::core::ApplicationDomain; +} + +// Temporary re-exports to maintain backwards compatibility for Lighthouse. +pub use crate::kzg_ext::consts::VERSIONED_HASH_VERSION_KZG; +pub use crate::light_client::LightClientError as LightClientUpdateError; +pub use crate::state::BeaconStateError as Error; pub use bls::{ - AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, - Signature, SignatureBytes, + AggregatePublicKey, AggregateSignature, Error as BlsError, Keypair, PUBLIC_KEY_BYTES_LEN, + PublicKey, PublicKeyBytes, SIGNATURE_BYTES_LEN, SecretKey, Signature, SignatureBytes, + get_withdrawal_credentials, }; pub use context_deserialize::{ContextDeserialize, context_deserialize}; -pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; +pub use fixed_bytes::FixedBytesExtended; pub use milhouse::{self, List, Vector}; pub use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum, typenum::Unsigned}; pub use superstruct::superstruct; diff --git a/consensus/types/src/light_client/consts.rs b/consensus/types/src/light_client/consts.rs new file mode 100644 index 0000000000..0092e75e87 --- /dev/null +++ b/consensus/types/src/light_client/consts.rs @@ -0,0 +1,21 @@ +pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const 
NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; + +pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; + +pub const FINALIZED_ROOT_INDEX: usize = 105; +pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; +pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; +pub const EXECUTION_PAYLOAD_INDEX: usize = 25; + +pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; +pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; +pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; + +// Max light client updates by range request limits +// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration +pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; diff --git a/consensus/types/src/light_client/error.rs b/consensus/types/src/light_client/error.rs new file mode 100644 index 0000000000..c492cfcbde --- /dev/null +++ b/consensus/types/src/light_client/error.rs @@ -0,0 +1,41 @@ +use safe_arith::ArithError; + +use crate::state::BeaconStateError; + +#[derive(Debug, PartialEq, Clone)] +pub enum LightClientError { + SszTypesError(ssz_types::Error), + MilhouseError(milhouse::Error), + BeaconStateError(BeaconStateError), + ArithError(ArithError), + AltairForkNotActive, + NotEnoughSyncCommitteeParticipants, + MismatchingPeriods, + InvalidFinalizedBlock, + BeaconBlockBodyError, + InconsistentFork, +} + +impl From for LightClientError { + fn from(e: ssz_types::Error) -> LightClientError { + LightClientError::SszTypesError(e) + } +} + +impl From for LightClientError { + fn from(e: BeaconStateError) -> LightClientError { + LightClientError::BeaconStateError(e) + } +} + +impl From for LightClientError { + fn from(e: ArithError) -> LightClientError { + LightClientError::ArithError(e) + } +} + +impl From for LightClientError { + fn from(e: milhouse::Error) -> 
LightClientError { + LightClientError::MilhouseError(e) + } +} diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client/light_client_bootstrap.rs similarity index 88% rename from consensus/types/src/light_client_bootstrap.rs rename to consensus/types/src/light_client/light_client_bootstrap.rs index 80d5bbacf9..847b2a2a96 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client/light_client_bootstrap.rs @@ -1,19 +1,30 @@ -use crate::context_deserialize; -use crate::{ - BeaconState, ChainSpec, ContextDeserialize, EthSpec, FixedVector, ForkName, Hash256, - LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, - SignedBlindedBeaconBlock, Slot, SyncCommittee, light_client_update::*, test_utils::TestRandom, -}; +use std::sync::Arc; + +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::sync::Arc; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + CurrentSyncCommitteeProofLen, CurrentSyncCommitteeProofLenElectra, LightClientError, + LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + LightClientHeaderGloas, + }, + state::BeaconState, + sync_committee::SyncCommittee, + test_utils::TestRandom, +}; + /// A LightClientBootstrap is the initializer we send over to light_client nodes /// that are trying to generate their basic storage when booting up. 
#[superstruct( @@ -142,53 +153,53 @@ impl LightClientBootstrap { current_sync_committee: Arc>, current_sync_committee_branch: Vec, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let light_client_bootstrap = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - 
.map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Gloas => Self::Gloas(LightClientBootstrapGloas { header: LightClientHeaderGloas::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), }; @@ -199,56 +210,56 @@ impl LightClientBootstrap { beacon_state: &mut BeaconState, block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = beacon_state.current_sync_committee()?.clone(); let light_client_bootstrap = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), 
ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Gloas => Self::Gloas(LightClientBootstrapGloas { header: LightClientHeaderGloas::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), }; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client/light_client_finality_update.rs similarity index 89% rename from consensus/types/src/light_client_finality_update.rs rename to consensus/types/src/light_client/light_client_finality_update.rs index e58d7f4d72..04374edcd9 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client/light_client_finality_update.rs @@ -1,21 +1,27 @@ -use super::{EthSpec, FixedVector, Hash256, LightClientHeader, Slot, SyncAggregate}; -use crate::ChainSpec; -use crate::context_deserialize; -use crate::{ - ContextDeserialize, ForkName, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, - LightClientHeaderGloas, SignedBlindedBeaconBlock, light_client_update::*, - test_utils::TestRandom, -}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; 
use ssz_derive::Decode; use ssz_derive::Encode; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + FinalizedRootProofLen, FinalizedRootProofLenElectra, LightClientError, LightClientHeader, + LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, + }, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; + #[superstruct( variants(Altair, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -103,10 +109,10 @@ impl LightClientFinalityUpdate { sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let finality_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? 
{ ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientFinalityUpdateAltair { @@ -116,7 +122,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }) @@ -128,7 +136,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -139,7 +149,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -150,7 +162,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -161,7 +175,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderFulu::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -172,12 +188,14 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderGloas::block_to_light_client_header( 
finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), }; Ok(finality_update) diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client/light_client_header.rs similarity index 91% rename from consensus/types/src/light_client_header.rs rename to consensus/types/src/light_client/light_client_header.rs index 5820efcc91..a7ecd3b7fb 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client/light_client_header.rs @@ -1,22 +1,27 @@ -use crate::ChainSpec; -use crate::context_deserialize; -use crate::{BeaconBlockBody, light_client_update::*}; -use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; -use crate::{ContextDeserialize, ForkName}; -use crate::{ - EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, - FixedVector, Hash256, SignedBlindedBeaconBlock, test_utils::TestRandom, -}; +use std::marker::PhantomData; + +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; -use std::marker::PhantomData; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::{BeaconBlockBody, BeaconBlockHeader, SignedBlindedBeaconBlock}, + core::{ChainSpec, EthSpec, Hash256}, + execution::{ + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, + }, + fork::ForkName, + 
light_client::{ExecutionPayloadProofLen, LightClientError, consts::EXECUTION_PAYLOAD_INDEX}, + test_utils::TestRandom, +}; + #[superstruct( variants(Altair, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -85,12 +90,12 @@ impl LightClientHeader { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let header = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => LightClientHeader::Altair( LightClientHeaderAltair::block_to_light_client_header(block)?, ), @@ -163,7 +168,7 @@ impl LightClientHeader { impl LightClientHeaderAltair { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { Ok(LightClientHeaderAltair { beacon: block.message().block_header(), _phantom_data: PhantomData, @@ -183,7 +188,7 @@ impl Default for LightClientHeaderAltair { impl LightClientHeaderCapella { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -194,7 +199,7 @@ impl LightClientHeaderCapella { block .message() .body_capella() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -225,7 +230,7 @@ impl Default for LightClientHeaderCapella { impl LightClientHeaderDeneb { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let header = block .message() .execution_payload()? @@ -236,7 +241,7 @@ impl LightClientHeaderDeneb { block .message() .body_deneb() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? 
.to_owned(), ); @@ -267,7 +272,7 @@ impl Default for LightClientHeaderDeneb { impl LightClientHeaderElectra { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -278,7 +283,7 @@ impl LightClientHeaderElectra { block .message() .body_electra() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -309,7 +314,7 @@ impl Default for LightClientHeaderElectra { impl LightClientHeaderFulu { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -320,7 +325,7 @@ impl LightClientHeaderFulu { block .message() .body_fulu() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -351,7 +356,7 @@ impl Default for LightClientHeaderFulu { impl LightClientHeaderGloas { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -362,7 +367,7 @@ impl LightClientHeaderGloas { block .message() .body_gloas() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? 
.to_owned(), ); diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client/light_client_optimistic_update.rs similarity index 94% rename from consensus/types/src/light_client_optimistic_update.rs rename to consensus/types/src/light_client/light_client_optimistic_update.rs index ca9957331f..9266ce647a 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client/light_client_optimistic_update.rs @@ -1,21 +1,26 @@ -use super::{ContextDeserialize, EthSpec, ForkName, LightClientHeader, Slot, SyncAggregate}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, - SignedBlindedBeaconBlock, light_client_update::*, -}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; -use ssz_derive::Decode; -use ssz_derive::Encode; +use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::Hash256; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Slot}, + fork::ForkName, + light_client::{ + LightClientError, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + LightClientHeaderGloas, + }, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; + /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the current unfinalized epoch is verified only against BLS signature. 
#[superstruct( @@ -79,10 +84,10 @@ impl LightClientOptimisticUpdate { sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let optimistic_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientOptimisticUpdateAltair { @@ -128,7 +133,7 @@ impl LightClientOptimisticUpdate { sync_aggregate, signature_slot, }), - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), }; Ok(optimistic_update) diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client/light_client_update.rs similarity index 87% rename from consensus/types/src/light_client_update.rs rename to consensus/types/src/light_client/light_client_update.rs index ede9436c50..7fc2c36239 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client/light_client_update.rs @@ -1,12 +1,6 @@ -use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; -use crate::LightClientHeader; -use crate::context_deserialize; -use crate::light_client_header::LightClientHeaderElectra; -use crate::{ - ChainSpec, ContextDeserialize, Epoch, ForkName, LightClientHeaderAltair, - LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderFulu, - LightClientHeaderGloas, SignedBlindedBeaconBlock, beacon_state, test_utils::TestRandom, -}; +use std::sync::Arc; + +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use safe_arith::ArithError; use safe_arith::SafeArith; @@ -14,20 +8,24 @@ use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; +use ssz_types::FixedVector; use ssz_types::typenum::{U4, U5, U6, U7}; -use std::sync::Arc; use superstruct::superstruct; use 
test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -pub const FINALIZED_ROOT_INDEX: usize = 105; -pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; -pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; -pub const EXECUTION_PAYLOAD_INDEX: usize = 25; - -pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; -pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; -pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + LightClientError, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + LightClientHeaderGloas, + }, + sync_committee::{SyncAggregate, SyncCommittee}, + test_utils::TestRandom, +}; pub type FinalizedRootProofLen = U6; pub type CurrentSyncCommitteeProofLen = U5; @@ -38,64 +36,12 @@ pub type FinalizedRootProofLenElectra = U7; pub type CurrentSyncCommitteeProofLenElectra = U6; pub type NextSyncCommitteeProofLenElectra = U6; -pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; -pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; -pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; -pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; - -pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; -pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; -pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; - -pub type MerkleProof = Vec; -// Max light client updates by range request limits -// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration -pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; - type FinalityBranch = FixedVector; type FinalityBranchElectra = FixedVector; type NextSyncCommitteeBranch = FixedVector; type NextSyncCommitteeBranchElectra = FixedVector; -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - 
SszTypesError(ssz_types::Error), - MilhouseError(milhouse::Error), - BeaconStateError(beacon_state::Error), - ArithError(ArithError), - AltairForkNotActive, - NotEnoughSyncCommitteeParticipants, - MismatchingPeriods, - InvalidFinalizedBlock, - BeaconBlockBodyError, - InconsistentFork, -} - -impl From for Error { - fn from(e: ssz_types::Error) -> Error { - Error::SszTypesError(e) - } -} - -impl From for Error { - fn from(e: beacon_state::Error) -> Error { - Error::BeaconStateError(e) - } -} - -impl From for Error { - fn from(e: ArithError) -> Error { - Error::ArithError(e) - } -} - -impl From for Error { - fn from(e: milhouse::Error) -> Error { - Error::MilhouseError(e) - } -} - /// A LightClientUpdate is the update we request solely to either complete the bootstrapping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. @@ -238,12 +184,12 @@ impl LightClientUpdate { attested_block: &SignedBlindedBeaconBlock, finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let light_client_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? 
{ - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), fork_name @ ForkName::Altair | fork_name @ ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; @@ -263,9 +209,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -289,9 +237,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -315,9 +265,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -341,9 +293,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: 
finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -367,9 +321,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -393,9 +349,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -452,23 +410,32 @@ impl LightClientUpdate { fn attested_header_sync_committee_period( &self, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { compute_sync_committee_period_at_slot::(self.attested_header_slot(), chain_spec) - .map_err(Error::ArithError) + .map_err(LightClientError::ArithError) } - fn signature_slot_sync_committee_period(&self, chain_spec: &ChainSpec) -> Result { + fn signature_slot_sync_committee_period( + &self, + chain_spec: &ChainSpec, + ) -> Result { compute_sync_committee_period_at_slot::(*self.signature_slot(), chain_spec) - .map_err(Error::ArithError) + .map_err(LightClientError::ArithError) } - pub fn is_sync_committee_update(&self, chain_spec: &ChainSpec) -> Result { + pub fn is_sync_committee_update( + &self, + chain_spec: &ChainSpec, + ) -> Result { 
Ok(!self.is_next_sync_committee_branch_empty() && (self.attested_header_sync_committee_period(chain_spec)? == self.signature_slot_sync_committee_period(chain_spec)?)) } - pub fn has_sync_committee_finality(&self, chain_spec: &ChainSpec) -> Result { + pub fn has_sync_committee_finality( + &self, + chain_spec: &ChainSpec, + ) -> Result { Ok( compute_sync_committee_period_at_slot::(self.finalized_header_slot(), chain_spec)? == self.attested_header_sync_committee_period(chain_spec)?, @@ -482,7 +449,7 @@ impl LightClientUpdate { &self, new: &Self, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { // Compare super majority (> 2/3) sync committee participation let max_active_participants = new.sync_aggregate().sync_committee_bits.len(); @@ -606,6 +573,7 @@ fn compute_sync_committee_period_at_slot( #[cfg(test)] mod tests { use super::*; + use crate::light_client::consts::*; use ssz_types::typenum::Unsigned; // `ssz_tests!` can only be defined once per namespace diff --git a/consensus/types/src/light_client/mod.rs b/consensus/types/src/light_client/mod.rs new file mode 100644 index 0000000000..4e287c2294 --- /dev/null +++ b/consensus/types/src/light_client/mod.rs @@ -0,0 +1,37 @@ +mod error; +mod light_client_bootstrap; +mod light_client_finality_update; +mod light_client_header; +mod light_client_optimistic_update; +mod light_client_update; + +pub mod consts; + +pub use error::LightClientError; +pub use light_client_bootstrap::{ + LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, + LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, + LightClientBootstrapGloas, +}; +pub use light_client_finality_update::{ + LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, + LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, + LightClientFinalityUpdateFulu, LightClientFinalityUpdateGloas, +}; +pub use light_client_header::{ + LightClientHeader, 
LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, +}; +pub use light_client_optimistic_update::{ + LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, + LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, + LightClientOptimisticUpdateElectra, LightClientOptimisticUpdateFulu, + LightClientOptimisticUpdateGloas, +}; +pub use light_client_update::{ + CurrentSyncCommitteeProofLen, CurrentSyncCommitteeProofLenElectra, ExecutionPayloadProofLen, + FinalizedRootProofLen, FinalizedRootProofLenElectra, LightClientUpdate, + LightClientUpdateAltair, LightClientUpdateCapella, LightClientUpdateDeneb, + LightClientUpdateElectra, LightClientUpdateFulu, LightClientUpdateGloas, + NextSyncCommitteeProofLen, NextSyncCommitteeProofLenElectra, +}; diff --git a/consensus/types/src/runtime_fixed_vector.rs b/consensus/types/src/runtime_fixed_vector.rs deleted file mode 100644 index f562322a3d..0000000000 --- a/consensus/types/src/runtime_fixed_vector.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! Emulates a fixed size array but with the length set at runtime. -//! -//! The length of the list cannot be changed once it is set. 
- -use std::fmt; -use std::fmt::Debug; - -#[derive(Clone)] -pub struct RuntimeFixedVector { - vec: Vec, - len: usize, -} - -impl Debug for RuntimeFixedVector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?} (len={})", self.vec, self.len) - } -} - -impl RuntimeFixedVector { - pub fn new(vec: Vec) -> Self { - let len = vec.len(); - Self { vec, len } - } - - pub fn to_vec(&self) -> Vec { - self.vec.clone() - } - - pub fn as_slice(&self) -> &[T] { - self.vec.as_slice() - } - - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - self.len - } - - pub fn into_vec(self) -> Vec { - self.vec - } - - pub fn default(max_len: usize) -> Self { - Self { - vec: vec![T::default(); max_len], - len: max_len, - } - } - - pub fn take(&mut self) -> Self { - let new = std::mem::take(&mut self.vec); - *self = Self::new(vec![T::default(); self.len]); - Self { - vec: new, - len: self.len, - } - } -} - -impl std::ops::Deref for RuntimeFixedVector { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -impl std::ops::DerefMut for RuntimeFixedVector { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - -impl IntoIterator for RuntimeFixedVector { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl<'a, T> IntoIterator for &'a RuntimeFixedVector { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.vec.iter() - } -} diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs deleted file mode 100644 index e7b846029e..0000000000 --- a/consensus/types/src/runtime_var_list.rs +++ /dev/null @@ -1,387 +0,0 @@ -use crate::ContextDeserialize; -use educe::Educe; -use serde::de::Error as DeError; -use serde::{Deserialize, Deserializer, Serialize}; -use ssz::Decode; -use ssz_types::Error; -use std::fmt; -use std::fmt::Debug; -use std::ops::{Deref, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, TreeHashType}; - -/// Emulates a SSZ `List`. -/// -/// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than -/// `max_len` values. -/// -/// To ensure there are no inconsistent states, we do not allow any mutating operation if `max_len` is not set. -/// -/// ## Example -/// -/// ``` -/// use types::{RuntimeVariableList}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `RuntimeVariableList` from a `Vec` that has the expected length. -/// let exact: RuntimeVariableList<_> = RuntimeVariableList::new(base.clone(), 4).unwrap(); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `RuntimeVariableList` from a `Vec` that is too long you'll get an error. -/// let err = RuntimeVariableList::new(base.clone(), 3).unwrap_err(); -/// assert_eq!(err, ssz_types::Error::OutOfBounds { i: 4, len: 3 }); -/// -/// // Create a `RuntimeVariableList` from a `Vec` that is shorter than the maximum. 
-/// let mut long: RuntimeVariableList<_> = RuntimeVariableList::new(base, 5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4]); -/// -/// // Push a value to if it does not exceed the maximum -/// long.push(5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 5]); -/// -/// // Push a value to if it _does_ exceed the maximum. -/// assert!(long.push(6).is_err()); -/// -/// ``` -#[derive(Clone, Serialize, Deserialize, Educe)] -#[educe(PartialEq, Eq, Hash(bound(T: std::hash::Hash)))] -#[serde(transparent)] -pub struct RuntimeVariableList { - vec: Vec, - #[serde(skip)] - max_len: usize, -} - -impl Debug for RuntimeVariableList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?} (max_len={})", self.vec, self.max_len) - } -} - -impl RuntimeVariableList { - /// Returns `Ok` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `Err(OutOfBounds { .. })`. - pub fn new(vec: Vec, max_len: usize) -> Result { - if vec.len() <= max_len { - Ok(Self { vec, max_len }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: max_len, - }) - } - } - - /// Create an empty list with the given `max_len`. - pub fn empty(max_len: usize) -> Self { - Self { - vec: vec![], - max_len, - } - } - - pub fn as_slice(&self) -> &[T] { - self.vec.as_slice() - } - - pub fn as_mut_slice(&mut self) -> &mut [T] { - self.vec.as_mut_slice() - } - - /// Returns the number of values presently in `self`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if `self` does not contain any values. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level maximum length. - /// - /// Returns `None` if self is uninitialized with a max_len. - pub fn max_len(&self) -> usize { - self.max_len - } - - /// Appends `value` to the back of `self`. - /// - /// Returns `Err(())` when appending `value` would exceed the maximum length. 
- pub fn push(&mut self, value: T) -> Result<(), Error> { - if self.vec.len() < self.max_len { - self.vec.push(value); - Ok(()) - } else { - Err(Error::OutOfBounds { - i: self.vec.len().saturating_add(1), - len: self.max_len, - }) - } - } -} - -impl RuntimeVariableList { - pub fn from_ssz_bytes(bytes: &[u8], max_len: usize) -> Result { - let vec = if bytes.is_empty() { - vec![] - } else if ::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items > max_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "RuntimeVariableList of {} items exceeds maximum of {}", - num_items, max_len - ))); - } - - bytes.chunks(::ssz_fixed_len()).try_fold( - Vec::with_capacity(num_items), - |mut vec, chunk| { - vec.push(::from_ssz_bytes(chunk)?); - Ok(vec) - }, - )? - } else { - ssz::decode_list_of_variable_length_items(bytes, Some(max_len))? - }; - Ok(Self { vec, max_len }) - } -} - -impl From> for Vec { - fn from(list: RuntimeVariableList) -> Vec { - list.vec - } -} - -impl> Index for RuntimeVariableList { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for RuntimeVariableList { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for RuntimeVariableList { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] 
- } -} - -impl<'a, T> IntoIterator for &'a RuntimeVariableList { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for RuntimeVariableList { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl ssz::Encode for RuntimeVariableList -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.vec.ssz_append(buf) - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } -} - -impl<'de, C, T> ContextDeserialize<'de, (C, usize)> for RuntimeVariableList -where - T: ContextDeserialize<'de, C>, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: (C, usize)) -> Result - where - D: Deserializer<'de>, - { - // first parse out a Vec using the Vec impl you already have - let vec: Vec = Vec::context_deserialize(deserializer, context.0)?; - let vec_len = vec.len(); - RuntimeVariableList::new(vec, context.1).map_err(|e| { - DeError::custom(format!( - "RuntimeVariableList length {} exceeds max_len {}: {e:?}", - vec_len, context.1, - )) - }) - } -} - -impl TreeHash for RuntimeVariableList { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let root = runtime_vec_tree_hash_root::(&self.vec, self.max_len); - - tree_hash::mix_in_length(&root, self.len()) - } -} - -// We can delete this once the upstream `vec_tree_hash_root` is modified to use a runtime max len. 
-pub fn runtime_vec_tree_hash_root(vec: &[T], max_len: usize) -> Hash256 -where - T: TreeHash, -{ - match T::tree_hash_type() { - TreeHashType::Basic => { - let mut hasher = - MerkleHasher::with_leaves(max_len.div_ceil(T::tree_hash_packing_factor())); - - for item in vec { - hasher - .write(&item.tree_hash_packed_encoding()) - .expect("ssz_types variable vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types variable vec should not have a remaining buffer") - } - TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { - let mut hasher = MerkleHasher::with_leaves(max_len); - - for item in vec { - hasher - .write(item.tree_hash_root().as_slice()) - .expect("ssz_types vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types vec should not have a remaining buffer") - } - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use std::fmt::Debug; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_ok()); - - let vec = vec![42; 4]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: RuntimeVariableList = - RuntimeVariableList::new(vec.clone(), 8192).unwrap(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!(fixed[..].len(), 2); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - // Too long. 
- let vec = vec![42; 5]; - let err = RuntimeVariableList::::new(vec.clone(), 4).unwrap_err(); - assert_eq!(err, Error::OutOfBounds { i: 5, len: 4 }); - - let vec = vec![42; 3]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec.clone(), 4).unwrap(); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42][..]); - - let vec = vec![]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); - assert_eq!(&fixed[..], &[] as &[u64]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn encode() { - let vec: RuntimeVariableList = RuntimeVariableList::new(vec![0; 2], 2).unwrap(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn round_trip(item: RuntimeVariableList) { - let max_len = item.max_len(); - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!( - RuntimeVariableList::from_ssz_bytes(encoded, max_len), - Ok(item) - ); - } - - #[test] - fn u16_len_8() { - round_trip::(RuntimeVariableList::new(vec![42; 8], 8).unwrap()); - round_trip::(RuntimeVariableList::new(vec![0; 8], 8).unwrap()); - } -} diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/slashing/attester_slashing.rs similarity index 96% rename from consensus/types/src/attester_slashing.rs rename to consensus/types/src/slashing/attester_slashing.rs index 2bfb65653c..5c214b35f7 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/slashing/attester_slashing.rs @@ -1,9 +1,4 @@ -use crate::context_deserialize; -use crate::indexed_attestation::{ - IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, -}; -use crate::{ContextDeserialize, ForkName}; -use crate::{EthSpec, 
test_utils::TestRandom}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use rand::{Rng, RngCore}; use serde::{Deserialize, Deserializer, Serialize}; @@ -12,6 +7,13 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef}, + core::EthSpec, + fork::ForkName, + test_utils::TestRandom, +}; + #[superstruct( variants(Base, Electra), variant_attributes( diff --git a/consensus/types/src/slashing/mod.rs b/consensus/types/src/slashing/mod.rs new file mode 100644 index 0000000000..551b8e3137 --- /dev/null +++ b/consensus/types/src/slashing/mod.rs @@ -0,0 +1,8 @@ +mod attester_slashing; +mod proposer_slashing; + +pub use attester_slashing::{ + AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingOnDisk, + AttesterSlashingRef, AttesterSlashingRefOnDisk, +}; +pub use proposer_slashing::ProposerSlashing; diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/slashing/proposer_slashing.rs similarity index 86% rename from consensus/types/src/proposer_slashing.rs rename to consensus/types/src/slashing/proposer_slashing.rs index f4d914c1e5..697bd1a9aa 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/slashing/proposer_slashing.rs @@ -1,12 +1,11 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, SignedBeaconBlockHeader}; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{block::SignedBeaconBlockHeader, fork::ForkName, test_utils::TestRandom}; + /// Two conflicting proposals from the same proposer (validator). 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/activation_queue.rs b/consensus/types/src/state/activation_queue.rs similarity index 95% rename from consensus/types/src/activation_queue.rs rename to consensus/types/src/state/activation_queue.rs index dd3ce5f88c..0d920a20cf 100644 --- a/consensus/types/src/activation_queue.rs +++ b/consensus/types/src/state/activation_queue.rs @@ -1,6 +1,10 @@ -use crate::{ChainSpec, Epoch, Validator}; use std::collections::BTreeSet; +use crate::{ + core::{ChainSpec, Epoch}, + validator::Validator, +}; + /// Activation queue computed during epoch processing for use in the *next* epoch. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Eq, Default, Clone)] diff --git a/consensus/types/src/beacon_state/balance.rs b/consensus/types/src/state/balance.rs similarity index 100% rename from consensus/types/src/beacon_state/balance.rs rename to consensus/types/src/state/balance.rs diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/state/beacon_state.rs similarity index 88% rename from consensus/types/src/beacon_state.rs rename to consensus/types/src/state/beacon_state.rs index d13e223557..948899c98d 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -1,49 +1,55 @@ -use self::committee_cache::get_active_validator_indices; -use crate::ContextDeserialize; -use crate::FixedBytesExtended; -use crate::historical_summary::HistoricalSummary; -use crate::test_utils::TestRandom; -use crate::*; +use std::{fmt, hash::Hash, mem, sync::Arc}; + +use bls::{AggregatePublicKey, PublicKeyBytes, Signature}; use compare_fields::CompareFields; +use context_deserialize::ContextDeserialize; use educe::Educe; use ethereum_hashing::hash; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use metastruct::{NumFields, metastruct}; -pub use pubkey_cache::PubkeyCache; +use milhouse::{List, Vector}; use 
safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError, Encode, ssz_encode}; use ssz_derive::{Decode, Encode}; -use std::hash::Hash; -use std::{fmt, mem, sync::Arc}; +use ssz_types::{BitVector, FixedVector, typenum::Unsigned}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; +use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -pub use self::committee_cache::{ - CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, - epoch_committee_count, +use crate::{ + attestation::{ + AttestationDuty, BeaconCommittee, Checkpoint, CommitteeIndex, ParticipationFlags, + PendingAttestation, + }, + block::{BeaconBlock, BeaconBlockHeader, SignedBeaconBlockHash}, + consolidation::PendingConsolidation, + core::{ChainSpec, Domain, Epoch, EthSpec, Hash256, RelativeEpoch, RelativeEpochError, Slot}, + deposit::PendingDeposit, + execution::{ + Eth1Data, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + }, + fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, + light_client::consts::{ + CURRENT_SYNC_COMMITTEE_INDEX, CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA, FINALIZED_ROOT_INDEX, + FINALIZED_ROOT_INDEX_ELECTRA, NEXT_SYNC_COMMITTEE_INDEX, NEXT_SYNC_COMMITTEE_INDEX_ELECTRA, + }, + state::{ + BlockRootsIter, CommitteeCache, EpochCache, EpochCacheError, ExitCache, HistoricalBatch, + HistoricalSummary, ProgressiveBalancesCache, PubkeyCache, SlashingsCache, + get_active_validator_indices, + }, + sync_committee::{SyncCommittee, SyncDuty}, + test_utils::TestRandom, + validator::Validator, + withdrawal::PendingPartialWithdrawal, }; -pub use crate::beacon_state::balance::Balance; -pub use 
crate::beacon_state::exit_cache::ExitCache; -pub use crate::beacon_state::progressive_balances_cache::*; -pub use crate::beacon_state::slashings_cache::SlashingsCache; -pub use eth_spec::*; -pub use iter::BlockRootsIter; -pub use milhouse::{List, Vector, interface::Interface}; -use tracing::instrument; - -#[macro_use] -mod committee_cache; -mod balance; -mod exit_cache; -mod iter; -mod progressive_balances_cache; -mod pubkey_cache; -mod slashings_cache; -mod tests; pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; @@ -53,7 +59,7 @@ pub type Validators = List::ValidatorRegistryLimit> pub type Balances = List::ValidatorRegistryLimit>; #[derive(Debug, PartialEq, Clone)] -pub enum Error { +pub enum BeaconStateError { /// A state for a different hard-fork was required -- a severe logic error. IncorrectStateVariant, EpochOutOfBounds, @@ -197,7 +203,7 @@ enum AllowNextEpoch { } impl AllowNextEpoch { - fn upper_bound_of(self, current_epoch: Epoch) -> Result { + fn upper_bound_of(self, current_epoch: Epoch) -> Result { match self { AllowNextEpoch::True => Ok(current_epoch.safe_add(1)?), AllowNextEpoch::False => Ok(current_epoch), @@ -378,8 +384,14 @@ impl From for Hash256 { num_fields(all()), )) ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_ref_mut_into(BeaconStateRef) )] #[cfg_attr( @@ -740,11 +752,11 @@ impl BeaconState { } /// Returns the `tree_hash_root` of the state. 
- pub fn canonical_root(&mut self) -> Result { + pub fn canonical_root(&mut self) -> Result { self.update_tree_hash_cache() } - pub fn historical_batch(&mut self) -> Result, Error> { + pub fn historical_batch(&mut self) -> Result, BeaconStateError> { // Updating before cloning makes the clone cheap and saves repeated hashing. self.block_roots_mut().apply_updates()?; self.state_roots_mut().apply_updates()?; @@ -758,7 +770,10 @@ impl BeaconState { /// This method ensures the state's pubkey cache is fully up-to-date before checking if the validator /// exists in the registry. If a validator pubkey exists in the validator registry, returns `Some(i)`, /// otherwise returns `None`. - pub fn get_validator_index(&mut self, pubkey: &PublicKeyBytes) -> Result, Error> { + pub fn get_validator_index( + &mut self, + pubkey: &PublicKeyBytes, + ) -> Result, BeaconStateError> { self.update_pubkey_cache()?; Ok(self.pubkey_cache().get(pubkey)) } @@ -783,7 +798,7 @@ impl BeaconState { /// The epoch following `self.current_epoch()`. /// /// Spec v0.12.1 - pub fn next_epoch(&self) -> Result { + pub fn next_epoch(&self) -> Result { Ok(self.current_epoch().safe_add(1)?) } @@ -792,7 +807,7 @@ impl BeaconState { /// Makes use of the committee cache and will fail if no cache exists for the slot's epoch. /// /// Spec v0.12.1 - pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { + pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { let cache = self.committee_cache_at_slot(slot)?; Ok(cache.committees_per_slot()) } @@ -800,7 +815,10 @@ impl BeaconState { /// Compute the number of committees in an entire epoch. 
/// /// Spec v0.12.1 - pub fn get_epoch_committee_count(&self, relative_epoch: RelativeEpoch) -> Result { + pub fn get_epoch_committee_count( + &self, + relative_epoch: RelativeEpoch, + ) -> Result { let cache = self.committee_cache(relative_epoch)?; Ok(cache.epoch_committee_count() as u64) } @@ -813,7 +831,7 @@ impl BeaconState { pub fn get_cached_active_validator_indices( &self, relative_epoch: RelativeEpoch, - ) -> Result<&[usize], Error> { + ) -> Result<&[usize], BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.active_validator_indices()) @@ -826,7 +844,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { if epoch >= self.compute_activation_exit_epoch(self.current_epoch(), spec)? { Err(BeaconStateError::EpochOutOfBounds) } else { @@ -839,7 +857,10 @@ impl BeaconState { /// Note: the indices are shuffled (i.e., not in ascending order). /// /// Returns an error if that epoch is not cached, or the cache is not initialized. - pub fn get_shuffling(&self, relative_epoch: RelativeEpoch) -> Result<&[usize], Error> { + pub fn get_shuffling( + &self, + relative_epoch: RelativeEpoch, + ) -> Result<&[usize], BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.shuffling()) @@ -854,14 +875,14 @@ impl BeaconState { &self, slot: Slot, index: CommitteeIndex, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; let cache = self.committee_cache(relative_epoch)?; cache .get_beacon_committee(slot, index) - .ok_or(Error::NoCommittee { slot, index }) + .ok_or(BeaconStateError::NoCommittee { slot, index }) } /// Get all of the Beacon committees at a given slot. 
@@ -872,7 +893,7 @@ impl BeaconState { pub fn get_beacon_committees_at_slot( &self, slot: Slot, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let cache = self.committee_cache_at_slot(slot)?; cache.get_beacon_committees_at_slot(slot) } @@ -885,7 +906,7 @@ impl BeaconState { pub fn get_beacon_committees_at_epoch( &self, relative_epoch: RelativeEpoch, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; cache.get_all_beacon_committees() } @@ -901,7 +922,7 @@ impl BeaconState { epoch: Epoch, block_root: Hash256, spec: &ChainSpec, - ) -> Result { + ) -> Result { let decision_slot = spec.proposer_shuffling_decision_slot::(epoch); if self.slot() <= decision_slot { Ok(block_root) @@ -917,7 +938,7 @@ impl BeaconState { &self, epoch: Epoch, head_block_root: Hash256, - ) -> Result { + ) -> Result { let decision_slot = epoch.saturating_sub(1u64).end_slot(E::slots_per_epoch()); if self.slot() <= decision_slot { Ok(head_block_root) @@ -937,11 +958,14 @@ impl BeaconState { &self, block_root: Hash256, spec: &ChainSpec, - ) -> Result { + ) -> Result { self.proposer_shuffling_decision_root_at_epoch(self.current_epoch(), block_root, spec) } - pub fn epoch_cache_decision_root(&self, block_root: Hash256) -> Result { + pub fn epoch_cache_decision_root( + &self, + block_root: Hash256, + ) -> Result { // Epoch cache decision root for the current epoch (N) is the block root at the end of epoch // N - 1. This is the same as the root that determines the next epoch attester shuffling. 
self.attester_shuffling_decision_root(block_root, RelativeEpoch::Next) @@ -958,7 +982,7 @@ impl BeaconState { &self, block_root: Hash256, relative_epoch: RelativeEpoch, - ) -> Result { + ) -> Result { let decision_slot = self.attester_shuffling_decision_slot(relative_epoch); if self.slot() == decision_slot { Ok(block_root) @@ -985,9 +1009,9 @@ impl BeaconState { indices: &[usize], seed: &[u8], spec: &ChainSpec, - ) -> Result { + ) -> Result { if indices.is_empty() { - return Err(Error::InsufficientValidators); + return Err(BeaconStateError::InsufficientValidators); } let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); @@ -1005,10 +1029,10 @@ impl BeaconState { seed, spec.shuffle_round_count, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let candidate_index = *indices .get(shuffled_index) - .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(shuffled_index))?; let random_value = self.shuffling_random_value(i, seed)?; let effective_balance = self.get_effective_balance(candidate_index)?; if effective_balance.safe_mul(max_random_value)? @@ -1027,11 +1051,11 @@ impl BeaconState { seed: &[u8], indices: &[usize], spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // Regardless of fork, we never support computing proposer indices for past epochs. let current_epoch = self.current_epoch(); if epoch < current_epoch { - return Err(Error::ComputeProposerIndicesPastEpoch { + return Err(BeaconStateError::ComputeProposerIndicesPastEpoch { current_epoch, request_epoch: epoch, }); @@ -1050,17 +1074,19 @@ impl BeaconState { if self.fork_name_unchecked().fulu_enabled() && epoch < current_epoch.safe_add(spec.min_seed_lookahead)? 
{ - return Err(Error::ComputeProposerIndicesInsufficientLookahead { - current_epoch, - request_epoch: epoch, - }); + return Err( + BeaconStateError::ComputeProposerIndicesInsufficientLookahead { + current_epoch, + request_epoch: epoch, + }, + ); } } else { // Pre-Fulu the situation is reversed, we *should not* compute proposer indices using // too much lookahead. To do so would make us vulnerable to changes in the proposer // indices caused by effective balance changes. if epoch >= current_epoch.safe_add(spec.min_seed_lookahead)? { - return Err(Error::ComputeProposerIndicesExcessiveLookahead { + return Err(BeaconStateError::ComputeProposerIndicesExcessiveLookahead { current_epoch, request_epoch: epoch, }); @@ -1083,7 +1109,7 @@ impl BeaconState { /// In Electra and later, the random value is a 16-bit integer stored in a `u64`. /// /// Prior to Electra, the random value is an 8-bit integer stored in a `u64`. - fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { + fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { if self.fork_name_unchecked().electra_enabled() { Self::shuffling_random_u16_electra(i, seed).map(u64::from) } else { @@ -1094,37 +1120,39 @@ impl BeaconState { /// Get a random byte from the given `seed`. /// /// Used by the proposer & sync committee selection functions. - fn shuffling_random_byte(i: usize, seed: &[u8]) -> Result { + fn shuffling_random_byte(i: usize, seed: &[u8]) -> Result { let mut preimage = seed.to_vec(); preimage.append(&mut int_to_bytes8(i.safe_div(32)? as u64)); let index = i.safe_rem(32)?; hash(&preimage) .get(index) .copied() - .ok_or(Error::ShuffleIndexOutOfBounds(index)) + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(index)) } /// Get two random bytes from the given `seed`. /// /// This is used in place of `shuffling_random_byte` from Electra onwards. 
- fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { + fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { let mut preimage = seed.to_vec(); preimage.append(&mut int_to_bytes8(i.safe_div(16)? as u64)); let offset = i.safe_rem(16)?.safe_mul(2)?; hash(&preimage) .get(offset..offset.safe_add(2)?) - .ok_or(Error::ShuffleIndexOutOfBounds(offset))? + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(offset))? .try_into() .map(u16::from_le_bytes) - .map_err(|_| Error::ShuffleIndexOutOfBounds(offset)) + .map_err(|_| BeaconStateError::ShuffleIndexOutOfBounds(offset)) } /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. pub fn latest_execution_payload_header( &self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { - BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) | BeaconState::Altair(_) => { + Err(BeaconStateError::IncorrectStateVariant) + } BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRef::Bellatrix( &state.latest_execution_payload_header, )), @@ -1148,9 +1176,11 @@ impl BeaconState { pub fn latest_execution_payload_header_mut( &mut self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { - BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) | BeaconState::Altair(_) => { + Err(BeaconStateError::IncorrectStateVariant) + } BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRefMut::Bellatrix( &mut state.latest_execution_payload_header, )), @@ -1181,7 +1211,7 @@ impl BeaconState { index: CommitteeIndex, slot_signature: &Signature, spec: &ChainSpec, - ) -> Result { + ) -> Result { let committee = self.get_beacon_committee(slot, index)?; let modulo = std::cmp::max( 1, @@ -1192,7 +1222,7 @@ impl BeaconState { signature_hash .get(0..8) .and_then(|bytes| bytes.try_into().ok()) - .ok_or(Error::IsAggregatorOutOfBounds)?, + 
.ok_or(BeaconStateError::IsAggregatorOutOfBounds)?, ); Ok(signature_hash_int.safe_rem(modulo)? == 0) @@ -1201,13 +1231,17 @@ impl BeaconState { /// Returns the beacon proposer index for the `slot` in `self.current_epoch()`. /// /// Spec v1.6.0-alpha.1 - pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + pub fn get_beacon_proposer_index( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result { // Proposer indices are only known for the current epoch, due to the dependence on the // effective balances of validators, which change at every epoch transition. let epoch = slot.epoch(E::slots_per_epoch()); // TODO(EIP-7917): Explore allowing this function to be called with a slot one epoch in the future. if epoch != self.current_epoch() { - return Err(Error::SlotOutOfBounds); + return Err(BeaconStateError::SlotOutOfBounds); } if let Ok(proposer_lookahead) = self.proposer_lookahead() { @@ -1215,7 +1249,7 @@ impl BeaconState { let index = slot.as_usize().safe_rem(E::slots_per_epoch() as usize)?; proposer_lookahead .get(index) - .ok_or(Error::ProposerLookaheadOutOfBounds { i: index }) + .ok_or(BeaconStateError::ProposerLookaheadOutOfBounds { i: index }) .map(|index| *index as usize) } else { // Pre-Fulu @@ -1233,7 +1267,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // This isn't in the spec, but we remove the footgun that is requesting the current epoch // for a Fulu state. if let Ok(proposer_lookahead) = self.proposer_lookahead() @@ -1263,7 +1297,11 @@ impl BeaconState { /// Compute the seed to use for the beacon proposer selection at the given `slot`. 
/// /// Spec v0.12.1 - pub fn get_beacon_proposer_seed(&self, slot: Slot, spec: &ChainSpec) -> Result, Error> { + pub fn get_beacon_proposer_seed( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let mut preimage = self .get_seed(epoch, Domain::BeaconProposer, spec)? @@ -1278,7 +1316,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result<&Arc>, Error> { + ) -> Result<&Arc>, BeaconStateError> { let sync_committee_period = epoch.sync_committee_period(spec)?; let current_sync_committee_period = self.current_epoch().sync_committee_period(spec)?; let next_sync_committee_period = current_sync_committee_period.safe_add(1)?; @@ -1288,7 +1326,7 @@ impl BeaconState { } else if sync_committee_period == next_sync_committee_period { self.next_sync_committee() } else { - Err(Error::SyncCommitteeNotKnown { + Err(BeaconStateError::SyncCommitteeNotKnown { current_epoch: self.current_epoch(), epoch, }) @@ -1299,7 +1337,7 @@ impl BeaconState { pub fn get_sync_committee_indices( &mut self, sync_committee: &SyncCommittee, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { self.update_pubkey_cache()?; sync_committee .pubkeys @@ -1307,13 +1345,16 @@ impl BeaconState { .map(|pubkey| { self.pubkey_cache() .get(pubkey) - .ok_or(Error::PubkeyCacheInconsistent) + .ok_or(BeaconStateError::PubkeyCacheInconsistent) }) .collect() } /// Compute the sync committee indices for the next sync committee. 
- fn get_next_sync_committee_indices(&self, spec: &ChainSpec) -> Result, Error> { + fn get_next_sync_committee_indices( + &self, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let epoch = self.current_epoch().safe_add(1)?; let active_validator_indices = self.get_active_validator_indices(epoch, spec)?; @@ -1336,10 +1377,10 @@ impl BeaconState { seed.as_slice(), spec.shuffle_round_count, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let candidate_index = *active_validator_indices .get(shuffled_index) - .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(shuffled_index))?; let random_value = self.shuffling_random_value(i, seed.as_slice())?; let effective_balance = self.get_validator(candidate_index)?.effective_balance; if effective_balance.safe_mul(max_random_value)? @@ -1353,7 +1394,10 @@ impl BeaconState { } /// Compute the next sync committee. - pub fn get_next_sync_committee(&self, spec: &ChainSpec) -> Result, Error> { + pub fn get_next_sync_committee( + &self, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let sync_committee_indices = self.get_next_sync_committee_indices(spec)?; let pubkeys = sync_committee_indices @@ -1362,7 +1406,7 @@ impl BeaconState { self.validators() .get(index) .map(|v| v.pubkey) - .ok_or(Error::UnknownValidator(index)) + .ok_or(BeaconStateError::UnknownValidator(index)) }) .collect::, _>>()?; let decompressed_pubkeys = pubkeys @@ -1386,7 +1430,7 @@ impl BeaconState { epoch: Epoch, validator_indices: &[u64], spec: &ChainSpec, - ) -> Result, Error>>, Error> { + ) -> Result, BeaconStateError>>, BeaconStateError> { let sync_committee = self.get_built_sync_committee(epoch, spec)?; Ok(validator_indices @@ -1421,7 +1465,7 @@ impl BeaconState { /// Safely obtains the index for latest block roots, given some `slot`. 
/// /// Spec v0.12.1 - fn get_latest_block_roots_index(&self, slot: Slot) -> Result { + fn get_latest_block_roots_index(&self, slot: Slot) -> Result { if slot < self.slot() && self.slot() <= slot.safe_add(self.block_roots().len() as u64)? { Ok(slot.as_usize().safe_rem(self.block_roots().len())?) } else { @@ -1441,7 +1485,7 @@ impl BeaconState { let i = self.get_latest_block_roots_index(slot)?; self.block_roots() .get(i) - .ok_or(Error::BlockRootsOutOfBounds(i)) + .ok_or(BeaconStateError::BlockRootsOutOfBounds(i)) } /// Return the block root at a recent `epoch`. @@ -1461,12 +1505,12 @@ impl BeaconState { *self .block_roots_mut() .get_mut(i) - .ok_or(Error::BlockRootsOutOfBounds(i))? = block_root; + .ok_or(BeaconStateError::BlockRootsOutOfBounds(i))? = block_root; Ok(()) } /// Fill `randao_mixes` with - pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), Error> { + pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), BeaconStateError> { *self.randao_mixes_mut() = Vector::from_elem(index_root)?; Ok(()) } @@ -1478,7 +1522,7 @@ impl BeaconState { &self, epoch: Epoch, allow_next_epoch: AllowNextEpoch, - ) -> Result { + ) -> Result { let current_epoch = self.current_epoch(); let len = E::EpochsPerHistoricalVector::to_u64(); @@ -1487,7 +1531,7 @@ impl BeaconState { { Ok(epoch.as_usize().safe_rem(len as usize)?) } else { - Err(Error::EpochOutOfBounds) + Err(BeaconStateError::EpochOutOfBounds) } } @@ -1503,7 +1547,11 @@ impl BeaconState { /// # Errors: /// /// See `Self::get_randao_mix`. - pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> { + pub fn update_randao_mix( + &mut self, + epoch: Epoch, + signature: &Signature, + ) -> Result<(), BeaconStateError> { let i = epoch .as_usize() .safe_rem(E::EpochsPerHistoricalVector::to_usize())?; @@ -1513,36 +1561,36 @@ impl BeaconState { *self .randao_mixes_mut() .get_mut(i) - .ok_or(Error::RandaoMixesOutOfBounds(i))? 
= + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i))? = *self.get_randao_mix(epoch)? ^ signature_hash; Ok(()) } /// Return the randao mix at a recent ``epoch``. - pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> { + pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::False)?; self.randao_mixes() .get(i) - .ok_or(Error::RandaoMixesOutOfBounds(i)) + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i)) } /// Set the randao mix at a recent ``epoch``. /// /// Spec v0.12.1 - pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> { + pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), BeaconStateError> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::True)?; *self .randao_mixes_mut() .get_mut(i) - .ok_or(Error::RandaoMixesOutOfBounds(i))? = mix; + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i))? = mix; Ok(()) } /// Safely obtains the index for latest state roots, given some `slot`. /// /// Spec v0.12.1 - fn get_latest_state_roots_index(&self, slot: Slot) -> Result { + fn get_latest_state_roots_index(&self, slot: Slot) -> Result { if slot < self.slot() && self.slot() <= slot.safe_add(self.state_roots().len() as u64)? { Ok(slot.as_usize().safe_rem(self.state_roots().len())?) } else { @@ -1551,38 +1599,42 @@ impl BeaconState { } /// Gets the state root for some slot. - pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, Error> { + pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, BeaconStateError> { let i = self.get_latest_state_roots_index(slot)?; self.state_roots() .get(i) - .ok_or(Error::StateRootsOutOfBounds(i)) + .ok_or(BeaconStateError::StateRootsOutOfBounds(i)) } /// Gets the state root for the start slot of some epoch. 
- pub fn get_state_root_at_epoch_start(&self, epoch: Epoch) -> Result { + pub fn get_state_root_at_epoch_start(&self, epoch: Epoch) -> Result { self.get_state_root(epoch.start_slot(E::slots_per_epoch())) .copied() } /// Gets the oldest (earliest slot) state root. - pub fn get_oldest_state_root(&self) -> Result<&Hash256, Error> { + pub fn get_oldest_state_root(&self) -> Result<&Hash256, BeaconStateError> { let oldest_slot = self.slot().saturating_sub(self.state_roots().len()); self.get_state_root(oldest_slot) } /// Gets the oldest (earliest slot) block root. - pub fn get_oldest_block_root(&self) -> Result<&Hash256, Error> { + pub fn get_oldest_block_root(&self) -> Result<&Hash256, BeaconStateError> { let oldest_slot = self.slot().saturating_sub(self.block_roots().len()); self.get_block_root(oldest_slot) } /// Sets the latest state root for slot. - pub fn set_state_root(&mut self, slot: Slot, state_root: Hash256) -> Result<(), Error> { + pub fn set_state_root( + &mut self, + slot: Slot, + state_root: Hash256, + ) -> Result<(), BeaconStateError> { let i = self.get_latest_state_roots_index(slot)?; *self .state_roots_mut() .get_mut(i) - .ok_or(Error::StateRootsOutOfBounds(i))? = state_root; + .ok_or(BeaconStateError::StateRootsOutOfBounds(i))? = state_root; Ok(()) } @@ -1591,7 +1643,7 @@ impl BeaconState { &self, epoch: Epoch, allow_next_epoch: AllowNextEpoch, - ) -> Result { + ) -> Result { // We allow the slashings vector to be accessed at any cached epoch at or before // the current epoch, or the next epoch if `AllowNextEpoch::True` is passed. let current_epoch = self.current_epoch(); @@ -1602,7 +1654,7 @@ impl BeaconState { .as_usize() .safe_rem(E::EpochsPerSlashingsVector::to_usize())?) } else { - Err(Error::EpochOutOfBounds) + Err(BeaconStateError::EpochOutOfBounds) } } @@ -1612,21 +1664,21 @@ impl BeaconState { } /// Get the total slashed balances for some epoch. 
- pub fn get_slashings(&self, epoch: Epoch) -> Result { + pub fn get_slashings(&self, epoch: Epoch) -> Result { let i = self.get_slashings_index(epoch, AllowNextEpoch::False)?; self.slashings() .get(i) .copied() - .ok_or(Error::SlashingsOutOfBounds(i)) + .ok_or(BeaconStateError::SlashingsOutOfBounds(i)) } /// Set the total slashed balances for some epoch. - pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> { + pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), BeaconStateError> { let i = self.get_slashings_index(epoch, AllowNextEpoch::True)?; *self .slashings_mut() .get_mut(i) - .ok_or(Error::SlashingsOutOfBounds(i))? = value; + .ok_or(BeaconStateError::SlashingsOutOfBounds(i))? = value; Ok(()) } @@ -1666,10 +1718,10 @@ impl BeaconState { &mut ExitCache, &mut EpochCache, ), - Error, + BeaconStateError, > { match self { - BeaconState::Base(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(( &mut state.validators, &mut state.balances, @@ -1744,18 +1796,21 @@ impl BeaconState { } /// Get the balance of a single validator. - pub fn get_balance(&self, validator_index: usize) -> Result { + pub fn get_balance(&self, validator_index: usize) -> Result { self.balances() .get(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) + .ok_or(BeaconStateError::BalancesOutOfBounds(validator_index)) .copied() } /// Get a mutable reference to the balance of a single validator. - pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + pub fn get_balance_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut u64, BeaconStateError> { self.balances_mut() .get_mut(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) + .ok_or(BeaconStateError::BalancesOutOfBounds(validator_index)) } /// Generate a seed for the given `epoch`. 
@@ -1764,7 +1819,7 @@ impl BeaconState { epoch: Epoch, domain_type: Domain, spec: &ChainSpec, - ) -> Result { + ) -> Result { // Bypass the safe getter for RANDAO so we can gracefully handle the scenario where `epoch // == 0`. let mix = { @@ -1775,7 +1830,7 @@ impl BeaconState { let i_mod = i.as_usize().safe_rem(self.randao_mixes().len())?; self.randao_mixes() .get(i_mod) - .ok_or(Error::RandaoMixesOutOfBounds(i_mod))? + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i_mod))? }; let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type)); let epoch_bytes = int_to_bytes8(epoch.as_u64()); @@ -1794,17 +1849,20 @@ impl BeaconState { } /// Safe indexer for the `validators` list. - pub fn get_validator(&self, validator_index: usize) -> Result<&Validator, Error> { + pub fn get_validator(&self, validator_index: usize) -> Result<&Validator, BeaconStateError> { self.validators() .get(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Safe mutator for the `validators` list. - pub fn get_validator_mut(&mut self, validator_index: usize) -> Result<&mut Validator, Error> { + pub fn get_validator_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut Validator, BeaconStateError> { self.validators_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Add a validator to the registry and return the validator index that was allocated for it. 
@@ -1814,7 +1872,7 @@ impl BeaconState { withdrawal_credentials: Hash256, amount: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let index = self.validators().len(); let fork_name = self.fork_name_unchecked(); self.validators_mut().push(Validator::from_deposit( @@ -1846,7 +1904,7 @@ impl BeaconState { if pubkey_cache.len() == index { let success = pubkey_cache.insert(pubkey, index); if !success { - return Err(Error::PubkeyCacheInconsistent); + return Err(BeaconStateError::PubkeyCacheInconsistent); } } @@ -1857,14 +1915,14 @@ impl BeaconState { pub fn get_validator_cow( &mut self, validator_index: usize, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { self.validators_mut() .get_cow(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Return the effective balance for a validator with the given `validator_index`. - pub fn get_effective_balance(&self, validator_index: usize) -> Result { + pub fn get_effective_balance(&self, validator_index: usize) -> Result { self.get_validator(validator_index) .map(|v| v.effective_balance) } @@ -1872,20 +1930,27 @@ impl BeaconState { /// Get the inactivity score for a single validator. /// /// Will error if the state lacks an `inactivity_scores` field. - pub fn get_inactivity_score(&self, validator_index: usize) -> Result { + pub fn get_inactivity_score(&self, validator_index: usize) -> Result { self.inactivity_scores()? .get(validator_index) .copied() - .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + .ok_or(BeaconStateError::InactivityScoresOutOfBounds( + validator_index, + )) } /// Get a mutable reference to the inactivity score for a single validator. /// /// Will error if the state lacks an `inactivity_scores` field. 
- pub fn get_inactivity_score_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + pub fn get_inactivity_score_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut u64, BeaconStateError> { self.inactivity_scores_mut()? .get_mut(validator_index) - .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + .ok_or(BeaconStateError::InactivityScoresOutOfBounds( + validator_index, + )) } /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. @@ -1895,14 +1960,14 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result { + ) -> Result { Ok(spec.compute_activation_exit_epoch(epoch)?) } /// Return the churn limit for the current epoch (number of validators who can leave per epoch). /// /// Uses the current epoch committee cache, and will error if it isn't initialized. - pub fn get_validator_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_validator_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(std::cmp::max( spec.min_per_epoch_churn_limit, (self @@ -1915,7 +1980,7 @@ impl BeaconState { /// Return the activation churn limit for the current epoch (number of validators who can enter per epoch). /// /// Uses the current epoch committee cache, and will error if it isn't initialized. - pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(match self { BeaconState::Base(_) | BeaconState::Altair(_) @@ -1941,7 +2006,7 @@ impl BeaconState { &self, validator_index: usize, relative_epoch: RelativeEpoch, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.get_attestation_duties(validator_index)) @@ -1951,7 +2016,10 @@ impl BeaconState { /// /// This method should rarely be invoked because single-pass epoch processing keeps the total /// active balance cache up to date. 
- pub fn compute_total_active_balance_slow(&self, spec: &ChainSpec) -> Result { + pub fn compute_total_active_balance_slow( + &self, + spec: &ChainSpec, + ) -> Result { let current_epoch = self.current_epoch(); let mut total_active_balance = 0; @@ -1973,20 +2041,20 @@ impl BeaconState { /// the current committee cache is. /// /// Returns minimum `EFFECTIVE_BALANCE_INCREMENT`, to avoid div by 0. - pub fn get_total_active_balance(&self) -> Result { + pub fn get_total_active_balance(&self) -> Result { self.get_total_active_balance_at_epoch(self.current_epoch()) } /// Get the cached total active balance while checking that it is for the correct `epoch`. - pub fn get_total_active_balance_at_epoch(&self, epoch: Epoch) -> Result { + pub fn get_total_active_balance_at_epoch(&self, epoch: Epoch) -> Result { let (initialized_epoch, balance) = self .total_active_balance() - .ok_or(Error::TotalActiveBalanceCacheUninitialized)?; + .ok_or(BeaconStateError::TotalActiveBalanceCacheUninitialized)?; if initialized_epoch == epoch { Ok(balance) } else { - Err(Error::TotalActiveBalanceCacheInconsistent { + Err(BeaconStateError::TotalActiveBalanceCacheInconsistent { initialized_epoch, current_epoch: epoch, }) @@ -2006,7 +2074,10 @@ impl BeaconState { /// Build the total active balance cache for the current epoch if it is not already built. 
#[instrument(skip_all, level = "debug")] - pub fn build_total_active_balance_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_total_active_balance_cache( + &mut self, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { if self .get_total_active_balance_at_epoch(self.current_epoch()) .is_err() @@ -2020,7 +2091,7 @@ impl BeaconState { pub fn force_build_total_active_balance_cache( &mut self, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let total_active_balance = self.compute_total_active_balance_slow(spec)?; *self.total_active_balance_mut() = Some((self.current_epoch(), total_active_balance)); Ok(()) @@ -2037,7 +2108,7 @@ impl BeaconState { epoch: Epoch, previous_epoch: Epoch, current_epoch: Epoch, - ) -> Result<&mut List, Error> { + ) -> Result<&mut List, BeaconStateError> { if epoch == current_epoch { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), @@ -2067,7 +2138,7 @@ impl BeaconState { /// Build all caches (except the tree hash cache), if they need to be built. #[instrument(skip_all, level = "debug")] - pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { self.build_all_committee_caches(spec)?; self.update_pubkey_cache()?; self.build_exit_cache(spec)?; @@ -2078,7 +2149,7 @@ impl BeaconState { /// Build all committee caches, if they need to be built. #[instrument(skip_all, level = "debug")] - pub fn build_all_committee_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_all_committee_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { self.build_committee_cache(RelativeEpoch::Previous, spec)?; self.build_committee_cache(RelativeEpoch::Current, spec)?; self.build_committee_cache(RelativeEpoch::Next, spec)?; @@ -2087,7 +2158,7 @@ impl BeaconState { /// Build the exit cache, if it needs to be built. 
#[instrument(skip_all, level = "debug")] - pub fn build_exit_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_exit_cache(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { if self.exit_cache().check_initialized().is_err() { *self.exit_cache_mut() = ExitCache::new(self.validators(), spec)?; } @@ -2096,7 +2167,7 @@ impl BeaconState { /// Build the slashings cache if it needs to be built. #[instrument(skip_all, level = "debug")] - pub fn build_slashings_cache(&mut self) -> Result<(), Error> { + pub fn build_slashings_cache(&mut self) -> Result<(), BeaconStateError> { let latest_block_slot = self.latest_block_header().slot; if !self.slashings_cache().is_initialized(latest_block_slot) { *self.slashings_cache_mut() = SlashingsCache::new(latest_block_slot, self.validators()); @@ -2110,7 +2181,7 @@ impl BeaconState { } /// Drop all caches on the state. - pub fn drop_all_caches(&mut self) -> Result<(), Error> { + pub fn drop_all_caches(&mut self) -> Result<(), BeaconStateError> { self.drop_total_active_balance_cache(); self.drop_committee_cache(RelativeEpoch::Previous)?; self.drop_committee_cache(RelativeEpoch::Current)?; @@ -2138,7 +2209,7 @@ impl BeaconState { &mut self, relative_epoch: RelativeEpoch, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let i = Self::committee_cache_index(relative_epoch); let is_initialized = self .committee_cache_at_index(i)? 
@@ -2159,7 +2230,7 @@ impl BeaconState { &mut self, relative_epoch: RelativeEpoch, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let epoch = relative_epoch.into_epoch(self.current_epoch()); let i = Self::committee_cache_index(relative_epoch); @@ -2175,7 +2246,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { CommitteeCache::initialized(self, epoch, spec) } @@ -2185,7 +2256,7 @@ impl BeaconState { /// /// Note: this function will not build any new committee caches, nor will it update the total /// active balance cache. The total active balance cache must be updated separately. - pub fn advance_caches(&mut self) -> Result<(), Error> { + pub fn advance_caches(&mut self) -> Result<(), BeaconStateError> { self.committee_caches_mut().rotate_left(1); let next = Self::committee_cache_index(RelativeEpoch::Next); @@ -2204,27 +2275,33 @@ impl BeaconState { /// Get the committee cache for some `slot`. /// /// Return an error if the cache for the slot's epoch is not initialized. - fn committee_cache_at_slot(&self, slot: Slot) -> Result<&Arc, Error> { + fn committee_cache_at_slot( + &self, + slot: Slot, + ) -> Result<&Arc, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; self.committee_cache(relative_epoch) } /// Get the committee cache at a given index. - fn committee_cache_at_index(&self, index: usize) -> Result<&Arc, Error> { + fn committee_cache_at_index( + &self, + index: usize, + ) -> Result<&Arc, BeaconStateError> { self.committee_caches() .get(index) - .ok_or(Error::CommitteeCachesOutOfBounds(index)) + .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } /// Get a mutable reference to the committee cache at a given index. 
fn committee_cache_at_index_mut( &mut self, index: usize, - ) -> Result<&mut Arc, Error> { + ) -> Result<&mut Arc, BeaconStateError> { self.committee_caches_mut() .get_mut(index) - .ok_or(Error::CommitteeCachesOutOfBounds(index)) + .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been @@ -2232,19 +2309,24 @@ impl BeaconState { pub fn committee_cache( &self, relative_epoch: RelativeEpoch, - ) -> Result<&Arc, Error> { + ) -> Result<&Arc, BeaconStateError> { let i = Self::committee_cache_index(relative_epoch); let cache = self.committee_cache_at_index(i)?; if cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) { Ok(cache) } else { - Err(Error::CommitteeCacheUninitialized(Some(relative_epoch))) + Err(BeaconStateError::CommitteeCacheUninitialized(Some( + relative_epoch, + ))) } } /// Drops the cache, leaving it in an uninitialized state. - pub fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) -> Result<(), Error> { + pub fn drop_committee_cache( + &mut self, + relative_epoch: RelativeEpoch, + ) -> Result<(), BeaconStateError> { *self.committee_cache_at_index_mut(Self::committee_cache_index(relative_epoch))? = Arc::new(CommitteeCache::default()); Ok(()) @@ -2255,7 +2337,7 @@ impl BeaconState { /// Adds all `pubkeys` from the `validators` which are not already in the cache. Will /// never re-add a pubkey. 
#[instrument(skip_all, level = "debug")] - pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { + pub fn update_pubkey_cache(&mut self) -> Result<(), BeaconStateError> { let mut pubkey_cache = mem::take(self.pubkey_cache_mut()); let start_index = pubkey_cache.len(); @@ -2263,7 +2345,7 @@ impl BeaconState { let index = start_index.safe_add(i)?; let success = pubkey_cache.insert(validator.pubkey, index); if !success { - return Err(Error::PubkeyCacheInconsistent); + return Err(BeaconStateError::PubkeyCacheInconsistent); } } *self.pubkey_cache_mut() = pubkey_cache; @@ -2341,7 +2423,7 @@ impl BeaconState { /// /// Initialize the tree hash cache if it isn't already initialized. #[instrument(skip_all, level = "debug")] - pub fn update_tree_hash_cache<'a>(&'a mut self) -> Result { + pub fn update_tree_hash_cache<'a>(&'a mut self) -> Result { self.apply_pending_mutations()?; map_beacon_state_ref!(&'a _, self.to_ref(), |inner, cons| { let root = inner.tree_hash_root(); @@ -2353,7 +2435,7 @@ impl BeaconState { /// Compute the tree hash root of the validators using the tree hash cache. /// /// Initialize the tree hash cache if it isn't already initialized. - pub fn update_validators_tree_hash_cache(&mut self) -> Result { + pub fn update_validators_tree_hash_cache(&mut self) -> Result { self.validators_mut().apply_updates()?; Ok(self.validators().tree_hash_root()) } @@ -2364,7 +2446,7 @@ impl BeaconState { &self, previous_epoch: Epoch, val: &Validator, - ) -> Result { + ) -> Result { Ok(val.is_active_at(previous_epoch) || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch)) } @@ -2388,7 +2470,7 @@ impl BeaconState { pub fn get_sync_committee_for_next_slot( &self, spec: &ChainSpec, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let next_slot_epoch = self .slot() .saturating_add(Slot::new(1)) @@ -2414,7 +2496,7 @@ impl BeaconState { // ******* Electra accessors ******* /// Return the churn limit for the current epoch. 
- pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { let total_active_balance = self.get_total_active_balance()?; let churn = std::cmp::max( spec.min_per_epoch_churn_limit_electra, @@ -2425,20 +2507,26 @@ impl BeaconState { } /// Return the churn limit for the current epoch dedicated to activations and exits. - pub fn get_activation_exit_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_activation_exit_churn_limit( + &self, + spec: &ChainSpec, + ) -> Result { Ok(std::cmp::min( spec.max_per_epoch_activation_exit_churn_limit, self.get_balance_churn_limit(spec)?, )) } - pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { self.get_balance_churn_limit(spec)? .safe_sub(self.get_activation_exit_churn_limit(spec)?) .map_err(Into::into) } - pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result { + pub fn get_pending_balance_to_withdraw( + &self, + validator_index: usize, + ) -> Result { let mut pending_balance = 0; for withdrawal in self .pending_partial_withdrawals()? 
@@ -2456,11 +2544,11 @@ impl BeaconState { &mut self, validator_index: usize, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let balance = self .balances_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; if *balance > spec.min_activation_balance { let excess_balance = balance.safe_sub(spec.min_activation_balance)?; *balance = spec.min_activation_balance; @@ -2481,11 +2569,11 @@ impl BeaconState { &mut self, validator_index: usize, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let validator = self .validators_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = spec.compounding_withdrawal_prefix_byte; @@ -2497,7 +2585,7 @@ impl BeaconState { &mut self, exit_balance: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let mut earliest_exit_epoch = std::cmp::max( self.earliest_exit_epoch()?, self.compute_activation_exit_epoch(self.current_epoch(), spec)?, @@ -2527,7 +2615,7 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) - | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + | BeaconState::Deneb(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Electra(_) | BeaconState::Fulu(_) | BeaconState::Gloas(_) => { // Consume the balance and update state variables *self.exit_balance_to_consume_mut()? 
= @@ -2542,7 +2630,7 @@ impl BeaconState { &mut self, consolidation_balance: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let mut earliest_consolidation_epoch = std::cmp::max( self.earliest_consolidation_epoch()?, self.compute_activation_exit_epoch(self.current_epoch(), spec)?, @@ -2574,7 +2662,7 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) - | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + | BeaconState::Deneb(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Electra(_) | BeaconState::Fulu(_) | BeaconState::Gloas(_) => { // Consume the balance and update state variables. *self.consolidation_balance_to_consume_mut()? = @@ -2586,7 +2674,7 @@ impl BeaconState { } #[allow(clippy::arithmetic_side_effects)] - pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), BeaconStateError> { // Required for macros (which use type-hints internally). match (&mut *self, base) { @@ -2677,7 +2765,11 @@ impl BeaconState { Ok(()) } - pub fn rebase_caches_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + pub fn rebase_caches_on( + &mut self, + base: &Self, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { // Use pubkey cache from `base` if it contains superior information (likely if our cache is // uninitialized). Be careful not to use a cache which has *more* validators than expected, // as other code expects `self.pubkey_cache().len() <= self.validators.len()`. 
@@ -2766,7 +2858,7 @@ impl BeaconState { } #[allow(clippy::arithmetic_side_effects)] - pub fn apply_pending_mutations(&mut self) -> Result<(), Error> { + pub fn apply_pending_mutations(&mut self) -> Result<(), BeaconStateError> { match self { Self::Base(inner) => { map_beacon_state_base_tree_list_fields!(inner, |_, x| { x.apply_updates() }) @@ -2796,43 +2888,43 @@ impl BeaconState { Ok(()) } - pub fn compute_current_sync_committee_proof(&self) -> Result, Error> { + pub fn compute_current_sync_committee_proof(&self) -> Result, BeaconStateError> { // Sync committees are top-level fields, subtract off the generalized indices // for the internal nodes. Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate let field_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA } else { - light_client_update::CURRENT_SYNC_COMMITTEE_INDEX + CURRENT_SYNC_COMMITTEE_INDEX }; let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } - pub fn compute_next_sync_committee_proof(&self) -> Result, Error> { + pub fn compute_next_sync_committee_proof(&self) -> Result, BeaconStateError> { // Sync committees are top-level fields, subtract off the generalized indices // for the internal nodes. 
Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate let field_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + NEXT_SYNC_COMMITTEE_INDEX_ELECTRA } else { - light_client_update::NEXT_SYNC_COMMITTEE_INDEX + NEXT_SYNC_COMMITTEE_INDEX }; let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } - pub fn compute_finalized_root_proof(&self) -> Result, Error> { + pub fn compute_finalized_root_proof(&self) -> Result, BeaconStateError> { // Finalized root is the right child of `finalized_checkpoint`, divide by two to get // the generalized index of `state.finalized_checkpoint`. let checkpoint_root_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + FINALIZED_ROOT_INDEX_ELECTRA } else { - light_client_update::FINALIZED_ROOT_INDEX + FINALIZED_ROOT_INDEX }; let checkpoint_gindex = checkpoint_root_gindex / 2; @@ -2855,9 +2947,9 @@ impl BeaconState { &self, field_index: usize, leaves: &[Hash256], - ) -> Result, Error> { + ) -> Result, BeaconStateError> { if field_index >= leaves.len() { - return Err(Error::IndexNotSupported(field_index)); + return Err(BeaconStateError::IndexNotSupported(field_index)); } let depth = self.num_fields_pow2().ilog2() as usize; @@ -2916,45 +3008,45 @@ impl BeaconState { } } -impl From for Error { - fn from(e: RelativeEpochError) -> Error { - Error::RelativeEpochError(e) +impl From for BeaconStateError { + fn from(e: RelativeEpochError) -> BeaconStateError { + BeaconStateError::RelativeEpochError(e) } } -impl From for Error { - fn from(e: ssz_types::Error) -> Error { - Error::SszTypesError(e) +impl From for BeaconStateError { + fn from(e: ssz_types::Error) -> BeaconStateError { + 
BeaconStateError::SszTypesError(e) } } -impl From for Error { - fn from(e: bls::Error) -> Error { - Error::BlsError(e) +impl From for BeaconStateError { + fn from(e: bls::Error) -> BeaconStateError { + BeaconStateError::BlsError(e) } } -impl From for Error { - fn from(e: tree_hash::Error) -> Error { - Error::TreeHashError(e) +impl From for BeaconStateError { + fn from(e: tree_hash::Error) -> BeaconStateError { + BeaconStateError::TreeHashError(e) } } -impl From for Error { - fn from(e: merkle_proof::MerkleTreeError) -> Error { - Error::MerkleTreeError(e) +impl From for BeaconStateError { + fn from(e: merkle_proof::MerkleTreeError) -> BeaconStateError { + BeaconStateError::MerkleTreeError(e) } } -impl From for Error { - fn from(e: ArithError) -> Error { - Error::ArithError(e) +impl From for BeaconStateError { + fn from(e: ArithError) -> BeaconStateError { + BeaconStateError::ArithError(e) } } -impl From for Error { - fn from(e: milhouse::Error) -> Self { - Self::MilhouseError(e) +impl From for BeaconStateError { + fn from(e: milhouse::Error) -> BeaconStateError { + BeaconStateError::MilhouseError(e) } } diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/state/committee_cache.rs similarity index 93% rename from consensus/types/src/beacon_state/committee_cache.rs rename to consensus/types/src/state/committee_cache.rs index 408c269da5..15f6a4cd37 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/state/committee_cache.rs @@ -1,17 +1,20 @@ #![allow(clippy::arithmetic_side_effects)] -use crate::*; -use core::num::NonZeroUsize; +use std::{num::NonZeroUsize, ops::Range, sync::Arc}; + use educe::Educe; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; -use std::ops::Range; -use std::sync::Arc; use swap_or_not_shuffle::shuffle_list; -mod tests; +use crate::{ + attestation::{AttestationDuty, 
BeaconCommittee, CommitteeIndex}, + core::{ChainSpec, Domain, Epoch, EthSpec, Slot}, + state::{BeaconState, BeaconStateError}, + validator::Validator, +}; // Define "legacy" implementations of `Option`, `Option` which use four bytes // for encoding the union selector. @@ -66,7 +69,7 @@ impl CommitteeCache { state: &BeaconState, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // Check that the cache is being built for an in-range epoch. // // We allow caches to be constructed for historic epochs, per: @@ -77,23 +80,23 @@ impl CommitteeCache { .saturating_sub(1u64); if reqd_randao_epoch < state.min_randao_epoch() || epoch > state.current_epoch() + 1 { - return Err(Error::EpochOutOfBounds); + return Err(BeaconStateError::EpochOutOfBounds); } // May cause divide-by-zero errors. if E::slots_per_epoch() == 0 { - return Err(Error::ZeroSlotsPerEpoch); + return Err(BeaconStateError::ZeroSlotsPerEpoch); } // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. if state.validators().len() == usize::MAX { - return Err(Error::TooManyValidators); + return Err(BeaconStateError::TooManyValidators); } let active_validator_indices = get_active_validator_indices(state.validators(), epoch); if active_validator_indices.is_empty() { - return Err(Error::InsufficientValidators); + return Err(BeaconStateError::InsufficientValidators); } let committees_per_slot = @@ -107,13 +110,14 @@ impl CommitteeCache { &seed[..], false, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let mut shuffling_positions = vec![<_>::default(); state.validators().len()]; for (i, &v) in shuffling.iter().enumerate() { *shuffling_positions .get_mut(v) - .ok_or(Error::ShuffleIndexOutOfBounds(v))? = NonZeroUsize::new(i + 1).into(); + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(v))? 
= + NonZeroUsize::new(i + 1).into(); } Ok(Arc::new(CommitteeCache { @@ -188,24 +192,24 @@ impl CommitteeCache { pub fn get_beacon_committees_at_slot( &self, slot: Slot, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { if self.initialized_epoch.is_none() { - return Err(Error::CommitteeCacheUninitialized(None)); + return Err(BeaconStateError::CommitteeCacheUninitialized(None)); } (0..self.committees_per_slot()) .map(|index| { self.get_beacon_committee(slot, index) - .ok_or(Error::NoCommittee { slot, index }) + .ok_or(BeaconStateError::NoCommittee { slot, index }) }) .collect() } /// Returns all committees for `self.initialized_epoch`. - pub fn get_all_beacon_committees(&self) -> Result>, Error> { + pub fn get_all_beacon_committees(&self) -> Result>, BeaconStateError> { let initialized_epoch = self .initialized_epoch - .ok_or(Error::CommitteeCacheUninitialized(None))?; + .ok_or(BeaconStateError::CommitteeCacheUninitialized(None))?; initialized_epoch.slot_iter(self.slots_per_epoch).try_fold( Vec::with_capacity(self.epoch_committee_count()), diff --git a/consensus/types/src/epoch_cache.rs b/consensus/types/src/state/epoch_cache.rs similarity index 97% rename from consensus/types/src/epoch_cache.rs rename to consensus/types/src/state/epoch_cache.rs index 9956cb400a..cdea0d143d 100644 --- a/consensus/types/src/epoch_cache.rs +++ b/consensus/types/src/state/epoch_cache.rs @@ -1,7 +1,12 @@ -use crate::{ActivationQueue, BeaconStateError, ChainSpec, Epoch, Hash256, Slot}; -use safe_arith::{ArithError, SafeArith}; use std::sync::Arc; +use safe_arith::{ArithError, SafeArith}; + +use crate::{ + core::{ChainSpec, Epoch, Hash256, Slot}, + state::{ActivationQueue, BeaconStateError}, +}; + /// Cache of values which are uniquely determined at the start of an epoch. 
/// /// The values are fixed with respect to the last block of the _prior_ epoch, which we refer diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/state/exit_cache.rs similarity index 97% rename from consensus/types/src/beacon_state/exit_cache.rs rename to consensus/types/src/state/exit_cache.rs index 2828a6138c..43809d1af0 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/state/exit_cache.rs @@ -1,7 +1,13 @@ -use super::{BeaconStateError, ChainSpec, Epoch, Validator}; -use safe_arith::SafeArith; use std::cmp::Ordering; +use safe_arith::SafeArith; + +use crate::{ + core::{ChainSpec, Epoch}, + state::BeaconStateError, + validator::Validator, +}; + /// Map from exit epoch to the number of validators with that exit epoch. #[derive(Debug, Default, Clone, PartialEq)] pub struct ExitCache { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/state/historical_batch.rs similarity index 81% rename from consensus/types/src/historical_batch.rs rename to consensus/types/src/state/historical_batch.rs index 55377f2489..0167d64f62 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/state/historical_batch.rs @@ -1,11 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use context_deserialize::context_deserialize; +use milhouse::Vector; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Historical block and state roots. 
/// /// Spec v0.12.1 @@ -26,6 +31,7 @@ pub struct HistoricalBatch { #[cfg(test)] mod tests { use super::*; + use crate::core::MainnetEthSpec; pub type FoundationHistoricalBatch = HistoricalBatch; diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/state/historical_summary.rs similarity index 87% rename from consensus/types/src/historical_summary.rs rename to consensus/types/src/state/historical_summary.rs index dc147ad042..f520e46483 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/state/historical_summary.rs @@ -1,13 +1,18 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{BeaconState, EthSpec, ForkName, Hash256}; use compare_fields::CompareFields; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256}, + fork::ForkName, + state::BeaconState, + test_utils::TestRandom, +}; + /// `HistoricalSummary` matches the components of the phase0 `HistoricalBatch` /// making the two hash_tree_root-compatible. This struct is introduced into the beacon state /// in the Capella hard fork. diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/state/iter.rs similarity index 96% rename from consensus/types/src/beacon_state/iter.rs rename to consensus/types/src/state/iter.rs index d99c769e40..d761a6bd85 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/state/iter.rs @@ -1,4 +1,7 @@ -use crate::*; +use crate::{ + core::{EthSpec, Hash256, Slot}, + state::{BeaconState, BeaconStateError}, +}; /// Returns an iterator across the past block roots of `state` in descending slot-order. 
/// @@ -28,7 +31,7 @@ impl<'a, E: EthSpec> BlockRootsIter<'a, E> { } impl Iterator for BlockRootsIter<'_, E> { - type Item = Result<(Slot, Hash256), Error>; + type Item = Result<(Slot, Hash256), BeaconStateError>; fn next(&mut self) -> Option { if self.prev > self.genesis_slot diff --git a/consensus/types/src/state/mod.rs b/consensus/types/src/state/mod.rs new file mode 100644 index 0000000000..309796d359 --- /dev/null +++ b/consensus/types/src/state/mod.rs @@ -0,0 +1,35 @@ +mod activation_queue; +mod balance; +mod beacon_state; +#[macro_use] +mod committee_cache; +mod epoch_cache; +mod exit_cache; +mod historical_batch; +mod historical_summary; +mod iter; +mod progressive_balances_cache; +mod pubkey_cache; +mod slashings_cache; + +pub use activation_queue::ActivationQueue; +pub use balance::Balance; +pub use beacon_state::{ + BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateBellatrix, BeaconStateCapella, + BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateFulu, BeaconStateGloas, + BeaconStateHash, BeaconStateRef, CACHED_EPOCHS, +}; +pub use committee_cache::{ + CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, + epoch_committee_count, get_active_validator_indices, +}; +pub use epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; +pub use exit_cache::ExitCache; +pub use historical_batch::HistoricalBatch; +pub use historical_summary::HistoricalSummary; +pub use iter::BlockRootsIter; +pub use progressive_balances_cache::{ + EpochTotalBalances, ProgressiveBalancesCache, is_progressive_balances_enabled, +}; +pub use pubkey_cache::PubkeyCache; +pub use slashings_cache::SlashingsCache; diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/state/progressive_balances_cache.rs similarity index 98% rename from consensus/types/src/beacon_state/progressive_balances_cache.rs rename to consensus/types/src/state/progressive_balances_cache.rs index 67d1155dbf..1e4c311f9a 
100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/state/progressive_balances_cache.rs @@ -1,15 +1,17 @@ -use crate::beacon_state::balance::Balance; -use crate::{ - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, - consts::altair::{ - NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, - TIMELY_TARGET_FLAG_INDEX, - }, -}; #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use safe_arith::SafeArith; +use crate::{ + attestation::ParticipationFlags, + core::consts::altair::{ + NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, + TIMELY_TARGET_FLAG_INDEX, + }, + core::{ChainSpec, Epoch, EthSpec}, + state::{Balance, BeaconState, BeaconStateError}, +}; + /// This cache keeps track of the accumulated target attestation balance for the current & previous /// epochs. The cached values can be utilised by fork choice to calculate unrealized justification /// and finalization instead of converting epoch participation arrays to balances for each block we diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/state/pubkey_cache.rs similarity index 98% rename from consensus/types/src/beacon_state/pubkey_cache.rs rename to consensus/types/src/state/pubkey_cache.rs index 85ed00340d..e62fafb53a 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/state/pubkey_cache.rs @@ -1,4 +1,4 @@ -use crate::*; +use bls::PublicKeyBytes; use rpds::HashTrieMapSync as HashTrieMap; type ValidatorIndex = usize; diff --git a/consensus/types/src/beacon_state/slashings_cache.rs b/consensus/types/src/state/slashings_cache.rs similarity index 96% rename from consensus/types/src/beacon_state/slashings_cache.rs rename to consensus/types/src/state/slashings_cache.rs index 6530f795e9..b6ed583df8 100644 --- a/consensus/types/src/beacon_state/slashings_cache.rs +++ b/consensus/types/src/state/slashings_cache.rs @@ -1,8 +1,9 @@ -use 
crate::{BeaconStateError, Slot, Validator}; #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use rpds::HashTrieSetSync as HashTrieSet; +use crate::{core::Slot, state::BeaconStateError, validator::Validator}; + /// Persistent (cheap to clone) cache of all slashed validator indices. #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Default, Clone, PartialEq)] diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/sync_committee/contribution_and_proof.rs similarity index 88% rename from consensus/types/src/contribution_and_proof.rs rename to consensus/types/src/sync_committee/contribution_and_proof.rs index 4d70cd1f8a..2a344b89de 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/sync_committee/contribution_and_proof.rs @@ -1,14 +1,17 @@ -use super::{ - ChainSpec, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, - SyncCommitteeContribution, SyncSelectionProof, -}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + sync_committee::{SyncCommitteeContribution, SyncSelectionProof}, + test_utils::TestRandom, +}; + /// A Validators aggregate sync committee contribution and selection proof. 
#[cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/sync_committee/mod.rs b/consensus/types/src/sync_committee/mod.rs new file mode 100644 index 0000000000..5a75975fe0 --- /dev/null +++ b/consensus/types/src/sync_committee/mod.rs @@ -0,0 +1,25 @@ +mod contribution_and_proof; +mod signed_contribution_and_proof; +mod sync_aggregate; +mod sync_aggregator_selection_data; +mod sync_committee; +mod sync_committee_contribution; +mod sync_committee_message; +mod sync_committee_subscription; +mod sync_duty; +mod sync_selection_proof; +mod sync_subnet_id; + +pub use contribution_and_proof::ContributionAndProof; +pub use signed_contribution_and_proof::SignedContributionAndProof; +pub use sync_aggregate::{Error as SyncAggregateError, SyncAggregate}; +pub use sync_aggregator_selection_data::SyncAggregatorSelectionData; +pub use sync_committee::{Error as SyncCommitteeError, SyncCommittee}; +pub use sync_committee_contribution::{ + Error as SyncCommitteeContributionError, SyncCommitteeContribution, SyncContributionData, +}; +pub use sync_committee_message::SyncCommitteeMessage; +pub use sync_committee_subscription::SyncCommitteeSubscription; +pub use sync_duty::SyncDuty; +pub use sync_selection_proof::SyncSelectionProof; +pub use sync_subnet_id::{SyncSubnetId, sync_subnet_id_to_string}; diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/sync_committee/signed_contribution_and_proof.rs similarity index 87% rename from consensus/types/src/signed_contribution_and_proof.rs rename to consensus/types/src/sync_committee/signed_contribution_and_proof.rs index 51c453d32f..0027003b9f 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/sync_committee/signed_contribution_and_proof.rs @@ -1,14 +1,17 @@ -use super::{ - ChainSpec, ContributionAndProof, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, - Signature, SignedRoot, SyncCommitteeContribution, SyncSelectionProof, -}; -use 
crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + sync_committee::{ContributionAndProof, SyncCommitteeContribution, SyncSelectionProof}, + test_utils::TestRandom, +}; + /// A Validators signed contribution proof to publish on the `sync_committee_contribution_and_proof` /// gossipsub topic. #[cfg_attr( diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_committee/sync_aggregate.rs similarity index 91% rename from consensus/types/src/sync_aggregate.rs rename to consensus/types/src/sync_committee/sync_aggregate.rs index ba6d840a52..e5848aa22c 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_committee/sync_aggregate.rs @@ -1,14 +1,20 @@ -use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{AggregateSignature, BitVector, EthSpec, ForkName, SyncCommitteeContribution}; +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use educe::Educe; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT}, + fork::ForkName, + sync_committee::SyncCommitteeContribution, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_committee/sync_aggregator_selection_data.rs similarity index 82% rename from 
consensus/types/src/sync_aggregator_selection_data.rs rename to consensus/types/src/sync_committee/sync_aggregator_selection_data.rs index a280369fea..e905ca036b 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_committee/sync_aggregator_selection_data.rs @@ -1,11 +1,15 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, SignedRoot, Slot}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{SignedRoot, Slot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Clone, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee/sync_committee.rs similarity index 95% rename from consensus/types/src/sync_committee.rs rename to consensus/types/src/sync_committee/sync_committee.rs index a9fde42554..5448411800 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee/sync_committee.rs @@ -1,14 +1,16 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{EthSpec, FixedVector, ForkName, SyncSubnetId}; +use std::collections::HashMap; + use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; +use ssz_types::FixedVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::EthSpec, fork::ForkName, sync_committee::SyncSubnetId, test_utils::TestRandom}; + #[derive(Debug, PartialEq)] pub enum Error { ArithError(ArithError), diff --git a/consensus/types/src/sync_committee_contribution.rs 
b/consensus/types/src/sync_committee/sync_committee_contribution.rs similarity index 93% rename from consensus/types/src/sync_committee_contribution.rs rename to consensus/types/src/sync_committee/sync_committee_contribution.rs index db22a3bdbc..09376fbe5c 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee/sync_committee_contribution.rs @@ -1,12 +1,18 @@ -use super::{AggregateSignature, EthSpec, ForkName, SignedRoot}; -use crate::context_deserialize; -use crate::slot_data::SlotData; -use crate::{BitVector, Hash256, Slot, SyncCommitteeMessage, test_utils::TestRandom}; +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::ForkName, + sync_committee::SyncCommitteeMessage, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee/sync_committee_message.rs similarity index 88% rename from consensus/types/src/sync_committee_message.rs rename to consensus/types/src/sync_committee/sync_committee_message.rs index d5bb7250bb..ed42555c43 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee/sync_committee_message.rs @@ -1,14 +1,16 @@ -use crate::context_deserialize; -use crate::slot_data::SlotData; -use crate::test_utils::TestRandom; -use crate::{ - ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, Slot, -}; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use 
crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// The data upon which a `SyncCommitteeContribution` is based. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee/sync_committee_subscription.rs similarity index 96% rename from consensus/types/src/sync_committee_subscription.rs rename to consensus/types/src/sync_committee/sync_committee_subscription.rs index 8e040279d7..6365b015dd 100644 --- a/consensus/types/src/sync_committee_subscription.rs +++ b/consensus/types/src/sync_committee/sync_committee_subscription.rs @@ -1,7 +1,8 @@ -use crate::Epoch; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use crate::core::Epoch; + /// A sync committee subscription created when a validator subscribes to sync committee subnets to perform /// sync committee duties. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_committee/sync_duty.rs similarity index 96% rename from consensus/types/src/sync_duty.rs rename to consensus/types/src/sync_committee/sync_duty.rs index 59fbc960db..773cc008f9 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_committee/sync_duty.rs @@ -1,8 +1,13 @@ -use crate::{EthSpec, SyncCommittee, SyncSubnetId}; +use std::collections::HashSet; + use bls::PublicKeyBytes; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; -use std::collections::HashSet; + +use crate::{ + core::EthSpec, + sync_committee::{SyncCommittee, SyncSubnetId}, +}; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncDuty { diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_committee/sync_selection_proof.rs similarity index 92% rename from consensus/types/src/sync_selection_proof.rs rename to consensus/types/src/sync_committee/sync_selection_proof.rs index b1e9e8186f..7efc6c4c76 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_committee/sync_selection_proof.rs @@ -1,16 +1,20 @@ -use crate::consts::altair::{ - SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE, -}; -use crate::{ - ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, - SyncAggregatorSelectionData, -}; +use std::cmp; + +use bls::{PublicKey, SecretKey, Signature}; use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_types::typenum::Unsigned; -use std::cmp; + +use crate::{ + core::{ + ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, + consts::altair::{SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE}, + }, + fork::Fork, + sync_committee::SyncAggregatorSelectionData, +}; #[cfg_attr(feature = 
"arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_committee/sync_subnet_id.rs similarity index 92% rename from consensus/types/src/sync_subnet_id.rs rename to consensus/types/src/sync_committee/sync_subnet_id.rs index 3d0d853fca..fb58146178 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_committee/sync_subnet_id.rs @@ -1,13 +1,16 @@ //! Identifies each sync committee subnet by an integer identifier. -use crate::EthSpec; -use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; +use std::{ + collections::HashSet, + fmt::{self, Display}, + ops::{Deref, DerefMut}, + sync::LazyLock, +}; + use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_types::typenum::Unsigned; -use std::collections::HashSet; -use std::fmt::{self, Display}; -use std::ops::{Deref, DerefMut}; -use std::sync::LazyLock; + +use crate::core::{EthSpec, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT}; static SYNC_SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { let mut v = Vec::with_capacity(SYNC_COMMITTEE_SUBNET_COUNT as usize); diff --git a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs index f30afda257..5ccd748c25 100644 --- a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,7 +1,8 @@ -use crate::*; +use std::path::PathBuf; + +use bls::Keypair; use eth2_interop_keypairs::{keypair, keypairs_from_yaml_file}; use rayon::prelude::*; -use std::path::PathBuf; use tracing::debug; /// Generates `validator_count` keypairs where the secret key is derived solely from the index of diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs index 
8f4908291e..cf7b5df891 100644 --- a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -1,11 +1,16 @@ +use bls::Signature; +use kzg::{KzgCommitment, KzgProof}; use rand::Rng; -use kzg::{KzgCommitment, KzgProof}; - -use crate::beacon_block_body::KzgCommitments; -use crate::*; - -use super::*; +use crate::{ + block::{BeaconBlock, SignedBeaconBlock}, + core::{EthSpec, MainnetEthSpec}, + data::{Blob, BlobSidecar, BlobsList}, + execution::FullPayload, + fork::{ForkName, map_fork_name}, + kzg_ext::{KzgCommitments, KzgProofs}, + test_utils::TestRandom, +}; type BlobsBundle = (KzgCommitments, KzgProofs, BlobsList); @@ -73,6 +78,7 @@ pub fn generate_blobs(n_blobs: usize) -> Result, Stri mod test { use super::*; use rand::rng; + use ssz_types::FixedVector; #[test] fn test_verify_blob_inclusion_proof() { diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index 37d58d4342..c4409b4392 100644 --- a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -1,17 +1,5 @@ #![allow(clippy::arithmetic_side_effects)] -use std::fmt::Debug; - -pub use rand::{RngCore, SeedableRng}; -pub use rand_xorshift::XorShiftRng; - -pub use generate_deterministic_keypairs::generate_deterministic_keypair; -pub use generate_deterministic_keypairs::generate_deterministic_keypairs; -pub use generate_deterministic_keypairs::load_keypairs_from_yaml; -use ssz::{Decode, Encode, ssz_encode}; -pub use test_random::{TestRandom, test_random_instance}; -use tree_hash::TreeHash; - #[macro_use] mod macros; mod generate_deterministic_keypairs; @@ -19,6 +7,18 @@ mod generate_deterministic_keypairs; mod generate_random_block_and_blobs; mod test_random; +pub use generate_deterministic_keypairs::generate_deterministic_keypair; +pub use generate_deterministic_keypairs::generate_deterministic_keypairs; +pub use 
generate_deterministic_keypairs::load_keypairs_from_yaml; +pub use test_random::{TestRandom, test_random_instance}; + +pub use rand::{RngCore, SeedableRng}; +pub use rand_xorshift::XorShiftRng; + +use ssz::{Decode, Encode, ssz_encode}; +use std::fmt::Debug; +use tree_hash::TreeHash; + pub fn test_ssz_tree_hash_pair(v1: &T, v2: &U) where T: TreeHash + Encode + Decode + Debug + PartialEq, diff --git a/consensus/types/src/test_utils/test_random/address.rs b/consensus/types/src/test_utils/test_random/address.rs index 421801ce53..2f601cb91e 100644 --- a/consensus/types/src/test_utils/test_random/address.rs +++ b/consensus/types/src/test_utils/test_random/address.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Address, test_utils::TestRandom}; impl TestRandom for Address { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = vec![0; 20]; rng.fill_bytes(&mut key_bytes); Address::from_slice(&key_bytes[..]) diff --git a/consensus/types/src/test_utils/test_random/aggregate_signature.rs b/consensus/types/src/test_utils/test_random/aggregate_signature.rs index 772f284431..f9f3dd9567 100644 --- a/consensus/types/src/test_utils/test_random/aggregate_signature.rs +++ b/consensus/types/src/test_utils/test_random/aggregate_signature.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::{AggregateSignature, Signature}; + +use crate::test_utils::TestRandom; impl TestRandom for AggregateSignature { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let signature = Signature::random_for_test(rng); let mut aggregate_signature = AggregateSignature::infinity(); aggregate_signature.add_assign(&signature); diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index e335ac7fe8..3bc0d37c62 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ 
b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -1,8 +1,10 @@ -use super::*; use smallvec::smallvec; +use ssz_types::{BitList, BitVector, typenum::Unsigned}; + +use crate::test_utils::TestRandom; impl TestRandom for BitList { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let initial_len = std::cmp::max(1, N::to_usize().div_ceil(8)); let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); @@ -23,7 +25,7 @@ impl TestRandom for BitList { } impl TestRandom for BitVector { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut raw_bytes = smallvec![0; std::cmp::max(1, N::to_usize().div_ceil(8))]; rng.fill_bytes(&mut raw_bytes); // If N isn't divisible by 8 diff --git a/consensus/types/src/test_utils/test_random/hash256.rs b/consensus/types/src/test_utils/test_random/hash256.rs index 21d443c0e2..4d7570fb55 100644 --- a/consensus/types/src/test_utils/test_random/hash256.rs +++ b/consensus/types/src/test_utils/test_random/hash256.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Hash256, test_utils::TestRandom}; impl TestRandom for Hash256 { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = vec![0; 32]; rng.fill_bytes(&mut key_bytes); Hash256::from_slice(&key_bytes[..]) diff --git a/consensus/types/src/test_utils/test_random/kzg_commitment.rs b/consensus/types/src/test_utils/test_random/kzg_commitment.rs index a4030f2b6a..31e316a198 100644 --- a/consensus/types/src/test_utils/test_random/kzg_commitment.rs +++ b/consensus/types/src/test_utils/test_random/kzg_commitment.rs @@ -1,4 +1,6 @@ -use super::*; +use kzg::KzgCommitment; + +use crate::test_utils::TestRandom; impl TestRandom for KzgCommitment { fn random_for_test(rng: &mut impl rand::RngCore) -> Self { diff --git 
a/consensus/types/src/test_utils/test_random/kzg_proof.rs b/consensus/types/src/test_utils/test_random/kzg_proof.rs index 7e771ca566..4465d5ab39 100644 --- a/consensus/types/src/test_utils/test_random/kzg_proof.rs +++ b/consensus/types/src/test_utils/test_random/kzg_proof.rs @@ -1,8 +1,9 @@ -use super::*; -use kzg::BYTES_PER_COMMITMENT; +use kzg::{BYTES_PER_COMMITMENT, KzgProof}; + +use crate::test_utils::TestRandom; impl TestRandom for KzgProof { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut bytes = [0; BYTES_PER_COMMITMENT]; rng.fill_bytes(&mut bytes); Self(bytes) diff --git a/consensus/types/src/test_utils/test_random/mod.rs b/consensus/types/src/test_utils/test_random/mod.rs new file mode 100644 index 0000000000..41812593fa --- /dev/null +++ b/consensus/types/src/test_utils/test_random/mod.rs @@ -0,0 +1,15 @@ +mod address; +mod aggregate_signature; +mod bitfield; +mod hash256; +mod kzg_commitment; +mod kzg_proof; +mod public_key; +mod public_key_bytes; +mod secret_key; +mod signature; +mod signature_bytes; +mod test_random; +mod uint256; + +pub use test_random::{TestRandom, test_random_instance}; diff --git a/consensus/types/src/test_utils/test_random/public_key.rs b/consensus/types/src/test_utils/test_random/public_key.rs index d33e9ac704..9d287c23d7 100644 --- a/consensus/types/src/test_utils/test_random/public_key.rs +++ b/consensus/types/src/test_utils/test_random/public_key.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::{PublicKey, SecretKey}; + +use crate::test_utils::TestRandom; impl TestRandom for PublicKey { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { SecretKey::random_for_test(rng).public_key() } } diff --git a/consensus/types/src/test_utils/test_random/public_key_bytes.rs b/consensus/types/src/test_utils/test_random/public_key_bytes.rs index 6e5cafc4f0..587c3baf8f 100644 --- 
a/consensus/types/src/test_utils/test_random/public_key_bytes.rs +++ b/consensus/types/src/test_utils/test_random/public_key_bytes.rs @@ -1,9 +1,9 @@ -use bls::PUBLIC_KEY_BYTES_LEN; +use bls::{PUBLIC_KEY_BYTES_LEN, PublicKey, PublicKeyBytes}; -use super::*; +use crate::test_utils::TestRandom; impl TestRandom for PublicKeyBytes { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { //50-50 chance for signature to be "valid" or invalid if bool::random_for_test(rng) { //valid signature diff --git a/consensus/types/src/test_utils/test_random/secret_key.rs b/consensus/types/src/test_utils/test_random/secret_key.rs index da1614aa24..a8295d968a 100644 --- a/consensus/types/src/test_utils/test_random/secret_key.rs +++ b/consensus/types/src/test_utils/test_random/secret_key.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::SecretKey; + +use crate::test_utils::TestRandom; impl TestRandom for SecretKey { - fn random_for_test(_rng: &mut impl RngCore) -> Self { + fn random_for_test(_rng: &mut impl rand::RngCore) -> Self { // TODO: Not deterministic generation. Using `SecretKey::deserialize` results in // `BlstError(BLST_BAD_ENCODING)`, need to debug with blst source on what encoding expects. SecretKey::random() diff --git a/consensus/types/src/test_utils/test_random/signature.rs b/consensus/types/src/test_utils/test_random/signature.rs index 8bc0d71110..006aba9650 100644 --- a/consensus/types/src/test_utils/test_random/signature.rs +++ b/consensus/types/src/test_utils/test_random/signature.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::Signature; + +use crate::test_utils::TestRandom; impl TestRandom for Signature { - fn random_for_test(_rng: &mut impl RngCore) -> Self { + fn random_for_test(_rng: &mut impl rand::RngCore) -> Self { // TODO: `SecretKey::random_for_test` does not return a deterministic signature. 
Since this // signature will not pass verification we could just return the generator point or the // generator point multiplied by a random scalar if we want disctint signatures. diff --git a/consensus/types/src/test_utils/test_random/signature_bytes.rs b/consensus/types/src/test_utils/test_random/signature_bytes.rs index 2117a48232..6992e57467 100644 --- a/consensus/types/src/test_utils/test_random/signature_bytes.rs +++ b/consensus/types/src/test_utils/test_random/signature_bytes.rs @@ -1,9 +1,9 @@ -use bls::SIGNATURE_BYTES_LEN; +use bls::{SIGNATURE_BYTES_LEN, Signature, SignatureBytes}; -use super::*; +use crate::test_utils::TestRandom; impl TestRandom for SignatureBytes { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { //50-50 chance for signature to be "valid" or invalid if bool::random_for_test(rng) { //valid signature diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random/test_random.rs similarity index 90% rename from consensus/types/src/test_utils/test_random.rs rename to consensus/types/src/test_utils/test_random/test_random.rs index 7c8f86e14d..f31be97c03 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random/test_random.rs @@ -1,23 +1,9 @@ -use crate::*; -use rand::RngCore; -use rand::SeedableRng; +use std::{marker::PhantomData, sync::Arc}; + +use rand::{RngCore, SeedableRng}; use rand_xorshift::XorShiftRng; use smallvec::{SmallVec, smallvec}; -use std::marker::PhantomData; -use std::sync::Arc; - -mod address; -mod aggregate_signature; -mod bitfield; -mod hash256; -mod kzg_commitment; -mod kzg_proof; -mod public_key; -mod public_key_bytes; -mod secret_key; -mod signature; -mod signature_bytes; -mod uint256; +use ssz_types::{VariableList, typenum::Unsigned}; pub fn test_random_instance() -> T { let mut rng = XorShiftRng::from_seed([0x42; 16]); diff --git 
a/consensus/types/src/test_utils/test_random/uint256.rs b/consensus/types/src/test_utils/test_random/uint256.rs index 30077f0e0f..eccf476595 100644 --- a/consensus/types/src/test_utils/test_random/uint256.rs +++ b/consensus/types/src/test_utils/test_random/uint256.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Uint256, test_utils::TestRandom}; impl TestRandom for Uint256 { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = [0; 32]; rng.fill_bytes(&mut key_bytes); Self::from_le_slice(&key_bytes[..]) diff --git a/consensus/types/src/validator/mod.rs b/consensus/types/src/validator/mod.rs new file mode 100644 index 0000000000..8a67407821 --- /dev/null +++ b/consensus/types/src/validator/mod.rs @@ -0,0 +1,9 @@ +mod proposer_preparation_data; +mod validator; +mod validator_registration_data; +mod validator_subscription; + +pub use proposer_preparation_data::ProposerPreparationData; +pub use validator::{Validator, is_compounding_withdrawal_credential}; +pub use validator_registration_data::{SignedValidatorRegistrationData, ValidatorRegistrationData}; +pub use validator_subscription::ValidatorSubscription; diff --git a/consensus/types/src/proposer_preparation_data.rs b/consensus/types/src/validator/proposer_preparation_data.rs similarity index 95% rename from consensus/types/src/proposer_preparation_data.rs rename to consensus/types/src/validator/proposer_preparation_data.rs index 477fb3b9d1..8ef675de4f 100644 --- a/consensus/types/src/proposer_preparation_data.rs +++ b/consensus/types/src/validator/proposer_preparation_data.rs @@ -1,6 +1,7 @@ -use crate::*; use serde::{Deserialize, Serialize}; +use crate::core::Address; + /// A proposer preparation, created when a validator prepares the beacon node for potential proposers /// by supplying information required when proposing blocks for the given validators. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator/validator.rs similarity index 97% rename from consensus/types/src/validator.rs rename to consensus/types/src/validator/validator.rs index dec8bba627..7898ab9073 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator/validator.rs @@ -1,13 +1,19 @@ -use crate::context_deserialize; -use crate::{ - Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, ForkName, - Hash256, PublicKeyBytes, test_utils::TestRandom, -}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::Checkpoint, + core::{Address, ChainSpec, Epoch, EthSpec, Hash256}, + fork::ForkName, + state::BeaconState, + test_utils::TestRandom, +}; + /// Information about a `BeaconChain` validator. /// /// Spec v0.12.1 diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator/validator_registration_data.rs similarity index 93% rename from consensus/types/src/validator_registration_data.rs rename to consensus/types/src/validator/validator_registration_data.rs index 345771074c..a0a1df7dc5 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator/validator_registration_data.rs @@ -1,8 +1,10 @@ -use crate::*; +use bls::{PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use tree_hash_derive::TreeHash; +use crate::core::{Address, ChainSpec, SignedRoot}; + /// Validator registration, for use in interacting with servers implementing the builder API. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] pub struct SignedValidatorRegistrationData { diff --git a/consensus/types/src/validator_subscription.rs b/consensus/types/src/validator/validator_subscription.rs similarity index 93% rename from consensus/types/src/validator_subscription.rs rename to consensus/types/src/validator/validator_subscription.rs index 62932638ec..92fb200e10 100644 --- a/consensus/types/src/validator_subscription.rs +++ b/consensus/types/src/validator/validator_subscription.rs @@ -1,7 +1,8 @@ -use crate::*; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use crate::{attestation::CommitteeIndex, core::Slot}; + /// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation /// duties. #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, Eq, PartialOrd, Ord)] diff --git a/consensus/types/src/withdrawal/mod.rs b/consensus/types/src/withdrawal/mod.rs new file mode 100644 index 0000000000..bac80d00be --- /dev/null +++ b/consensus/types/src/withdrawal/mod.rs @@ -0,0 +1,9 @@ +mod pending_partial_withdrawal; +mod withdrawal; +mod withdrawal_credentials; +mod withdrawal_request; + +pub use pending_partial_withdrawal::PendingPartialWithdrawal; +pub use withdrawal::{Withdrawal, Withdrawals}; +pub use withdrawal_credentials::WithdrawalCredentials; +pub use withdrawal_request::WithdrawalRequest; diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/withdrawal/pending_partial_withdrawal.rs similarity index 85% rename from consensus/types/src/pending_partial_withdrawal.rs rename to consensus/types/src/withdrawal/pending_partial_withdrawal.rs index e9b10f79b5..cd866369a4 100644 --- a/consensus/types/src/pending_partial_withdrawal.rs +++ b/consensus/types/src/withdrawal/pending_partial_withdrawal.rs @@ -1,11 +1,11 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName}; 
+use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal/withdrawal.rs similarity index 73% rename from consensus/types/src/withdrawal.rs rename to consensus/types/src/withdrawal/withdrawal.rs index ef4a1f285d..d75bd4f501 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal/withdrawal.rs @@ -1,10 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, @@ -21,6 +27,8 @@ pub struct Withdrawal { pub amount: u64, } +pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/withdrawal_credentials.rs b/consensus/types/src/withdrawal/withdrawal_credentials.rs similarity index 91% rename from consensus/types/src/withdrawal_credentials.rs rename to consensus/types/src/withdrawal/withdrawal_credentials.rs index 52d51ed559..b732222ca1 100644 --- a/consensus/types/src/withdrawal_credentials.rs +++ b/consensus/types/src/withdrawal/withdrawal_credentials.rs @@ -1,5 +1,6 @@ -use crate::*; -use bls::get_withdrawal_credentials; +use bls::{PublicKey, 
get_withdrawal_credentials}; + +use crate::core::{Address, ChainSpec, Hash256}; pub struct WithdrawalCredentials(Hash256); @@ -27,7 +28,7 @@ impl From for Hash256 { #[cfg(test)] mod test { use super::*; - use crate::test_utils::generate_deterministic_keypair; + use crate::{EthSpec, MainnetEthSpec, test_utils::generate_deterministic_keypair}; use std::str::FromStr; #[test] diff --git a/consensus/types/src/withdrawal_request.rs b/consensus/types/src/withdrawal/withdrawal_request.rs similarity index 87% rename from consensus/types/src/withdrawal_request.rs rename to consensus/types/src/withdrawal/withdrawal_request.rs index c08921a68c..98a40016f9 100644 --- a/consensus/types/src/withdrawal_request.rs +++ b/consensus/types/src/withdrawal/withdrawal_request.rs @@ -1,12 +1,13 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{Address, ForkName, PublicKeyBytes}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Address, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/tests/committee_cache.rs similarity index 97% rename from consensus/types/src/beacon_state/committee_cache/tests.rs rename to consensus/types/tests/committee_cache.rs index 1d2ca4ccdb..751ef05d29 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/tests/committee_cache.rs @@ -1,9 +1,14 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use beacon_chain::types::*; use std::sync::LazyLock; + +use 
beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; +use milhouse::Vector; use swap_or_not_shuffle::shuffle_list; +use types::*; + +use crate::test_utils::generate_deterministic_keypairs; pub const VALIDATOR_COUNT: usize = 16; diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/tests/state.rs similarity index 97% rename from consensus/types/src/beacon_state/tests.rs rename to consensus/types/tests/state.rs index e5b05a4a5b..63ab3b8084 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/tests/state.rs @@ -1,15 +1,17 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use beacon_chain::types::{ - BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, ChainSpec, Domain, Epoch, - EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, - Slot, Vector, test_utils::TestRandom, -}; -use ssz::Encode; use std::ops::Mul; use std::sync::LazyLock; + +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; +use milhouse::Vector; +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use ssz::Encode; use swap_or_not_shuffle::compute_shuffled_index; +use types::test_utils::{TestRandom, generate_deterministic_keypairs}; +use types::*; pub const MAX_VALIDATOR_COUNT: usize = 129; pub const SLOT_OFFSET: Slot = Slot::new(1); From 7c789aaf4f6fb10a245b1e63a35ce4943b6790ea Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 5 Dec 2025 10:43:29 +1100 Subject: [PATCH 10/26] Merge `unstable` into `gloas-containers`. 
Co-authored-by: ethDreamer --- Cargo.lock | 2 + Cargo.toml | 2 +- .../beacon_chain/src/attester_cache.rs | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 1 + .../overflow_lru_cache.rs | 4 +- .../src/sync_committee_verification.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 1 + .../src/test_utils/mock_builder.rs | 7 +- beacon_node/http_api/src/beacon/mod.rs | 1 + beacon_node/http_api/src/beacon/states.rs | 787 ++++++++++++++++++ beacon_node/http_api/src/block_id.rs | 4 +- beacon_node/http_api/src/lib.rs | 705 +--------------- beacon_node/http_api/src/light_client.rs | 3 +- beacon_node/http_api/src/produce_block.rs | 1 + beacon_node/http_api/src/utils.rs | 3 + beacon_node/http_api/src/version.rs | 12 +- .../lighthouse_network/src/rpc/codec.rs | 10 +- .../lighthouse_network/src/rpc/methods.rs | 4 +- .../lighthouse_network/tests/rpc_tests.rs | 4 +- .../src/network_beacon_processor/tests.rs | 5 +- .../src/sync/block_sidecar_coupling.rs | 3 +- beacon_node/network/src/sync/tests/lookups.rs | 2 +- common/eth2/Cargo.toml | 1 + .../eth2}/src/beacon_response.rs | 8 +- common/eth2/src/lib.rs | 7 +- common/eth2/src/lighthouse_vc/types.rs | 1 - common/eth2/src/types.rs | 7 +- consensus/types/Cargo.toml | 3 + .../{ => attestation}/aggregate_and_proof.rs | 19 +- .../src/{ => attestation}/attestation.rs | 29 +- .../src/{ => attestation}/attestation_data.rs | 11 +- .../src/{ => attestation}/attestation_duty.rs | 3 +- .../src/{ => attestation}/beacon_committee.rs | 2 +- .../types/src/{ => attestation}/checkpoint.rs | 8 +- .../{ => attestation}/indexed_attestation.rs | 21 +- .../indexed_payload_attestation.rs | 0 consensus/types/src/attestation/mod.rs | 45 + .../{ => attestation}/participation_flags.rs | 6 +- .../{ => attestation}/payload_attestation.rs | 1 + .../payload_attestation_data.rs | 0 .../payload_attestation_message.rs | 4 +- .../{ => attestation}/pending_attestation.rs | 7 +- .../src/{ => attestation}/selection_proof.rs | 12 +- .../src/{ => 
attestation}/shuffling_id.rs | 9 +- .../signed_aggregate_and_proof.rs | 21 +- .../types/src/{ => attestation}/subnet_id.rs | 14 +- .../types/src/{ => block}/beacon_block.rs | 41 +- .../src/{ => block}/beacon_block_body.rs | 98 ++- .../src/{ => block}/beacon_block_header.rs | 11 +- consensus/types/src/block/mod.rs | 26 + .../src/{ => block}/signed_beacon_block.rs | 36 +- .../{ => block}/signed_beacon_block_header.rs | 14 +- .../types/src/{ => builder}/builder_bid.rs | 20 +- .../{ => builder}/builder_pending_payment.rs | 0 .../builder_pending_withdrawal.rs | 0 consensus/types/src/builder/mod.rs | 10 + .../consolidation_request.rs | 10 +- consensus/types/src/consolidation/mod.rs | 5 + .../pending_consolidation.rs | 6 +- .../src/{ => core}/application_domain.rs | 0 consensus/types/src/{ => core}/chain_spec.rs | 23 +- .../types/src/{ => core}/config_and_preset.rs | 15 +- consensus/types/src/{ => core}/consts.rs | 2 +- consensus/types/src/{ => core}/enr_fork_id.rs | 5 +- consensus/types/src/{ => core}/eth_spec.rs | 20 +- consensus/types/src/{ => core}/graffiti.rs | 11 +- consensus/types/src/core/mod.rs | 44 + .../types/src/{ => core}/non_zero_usize.rs | 0 consensus/types/src/{ => core}/preset.rs | 4 +- .../types/src/{ => core}/relative_epoch.rs | 3 +- .../types/src/{ => core}/signing_data.rs | 7 +- consensus/types/src/{ => core}/slot_data.rs | 2 +- consensus/types/src/{ => core}/slot_epoch.rs | 10 +- .../types/src/{ => core}/slot_epoch_macros.rs | 0 consensus/types/src/{ => core}/sqlite.rs | 3 +- .../types/src/{ => data}/blob_sidecar.rs | 28 +- .../{ => data}/data_column_custody_group.rs | 10 +- .../src/{ => data}/data_column_sidecar.rs | 21 +- .../src/{ => data}/data_column_subnet_id.rs | 25 +- consensus/types/src/data/mod.rs | 23 + consensus/types/src/{ => deposit}/deposit.rs | 8 +- .../types/src/{ => deposit}/deposit_data.rs | 11 +- .../src/{ => deposit}/deposit_message.rs | 11 +- .../src/{ => deposit}/deposit_request.rs | 8 +- .../{ => 
deposit}/deposit_tree_snapshot.rs | 5 +- consensus/types/src/deposit/mod.rs | 13 + .../src/{ => deposit}/pending_deposit.rs | 10 +- .../bls_to_execution_change.rs | 11 +- .../types/src/{ => execution}/dumb_macros.rs | 0 .../types/src/{ => execution}/eth1_data.rs | 7 +- .../{ => execution}/execution_block_hash.rs | 9 +- .../{ => execution}/execution_block_header.rs | 7 +- .../src/{ => execution}/execution_payload.rs | 26 +- .../{ => execution}/execution_payload_bid.rs | 0 .../execution_payload_envelope.rs | 0 .../execution_payload_header.rs | 28 +- .../src/{ => execution}/execution_requests.rs | 13 +- consensus/types/src/execution/mod.rs | 45 + .../types/src/{ => execution}/payload.rs | 125 +-- .../signed_bls_to_execution_change.rs | 6 +- .../signed_execution_payload_bid.rs | 1 + .../signed_execution_payload_envelope.rs | 1 + consensus/types/src/exit/mod.rs | 5 + .../src/{ => exit}/signed_voluntary_exit.rs | 6 +- .../types/src/{ => exit}/voluntary_exit.rs | 15 +- consensus/types/src/{ => fork}/fork.rs | 5 +- .../types/src/{ => fork}/fork_context.rs | 11 +- consensus/types/src/{ => fork}/fork_data.rs | 9 +- consensus/types/src/fork/fork_macros.rs | 60 ++ consensus/types/src/{ => fork}/fork_name.rs | 71 +- .../types/src/fork/fork_version_decode.rs | 6 + consensus/types/src/fork/mod.rs | 15 + consensus/types/src/kzg_ext/consts.rs | 3 + consensus/types/src/kzg_ext/mod.rs | 27 + consensus/types/src/lib.rs | 446 ++++------ consensus/types/src/light_client/consts.rs | 21 + consensus/types/src/light_client/error.rs | 42 + .../light_client_bootstrap.rs | 62 +- .../light_client_finality_update.rs | 51 +- .../{ => light_client}/light_client_header.rs | 53 +- .../light_client_optimistic_update.rs | 32 +- .../{ => light_client}/light_client_update.rs | 149 ++-- consensus/types/src/light_client/mod.rs | 35 + consensus/types/src/runtime_fixed_vector.rs | 90 -- consensus/types/src/runtime_var_list.rs | 387 --------- .../src/{ => slashing}/attester_slashing.rs | 14 +- 
consensus/types/src/slashing/mod.rs | 8 + .../src/{ => slashing}/proposer_slashing.rs | 7 +- .../types/src/{ => state}/activation_queue.rs | 6 +- .../src/{beacon_state => state}/balance.rs | 0 .../types/src/{ => state}/beacon_state.rs | 535 +++++++----- .../committee_cache.rs | 38 +- .../types/src/{ => state}/epoch_cache.rs | 9 +- .../src/{beacon_state => state}/exit_cache.rs | 10 +- .../types/src/{ => state}/historical_batch.rs | 12 +- .../src/{ => state}/historical_summary.rs | 11 +- .../types/src/{beacon_state => state}/iter.rs | 7 +- consensus/types/src/state/mod.rs | 35 + .../progressive_balances_cache.rs | 18 +- .../{beacon_state => state}/pubkey_cache.rs | 2 +- .../slashings_cache.rs | 3 +- .../contribution_and_proof.rs | 15 +- consensus/types/src/sync_committee/mod.rs | 25 + .../signed_contribution_and_proof.rs | 15 +- .../{ => sync_committee}/sync_aggregate.rs | 14 +- .../sync_aggregator_selection_data.rs | 10 +- .../{ => sync_committee}/sync_committee.rs | 10 +- .../sync_committee_contribution.rs | 14 +- .../sync_committee_message.rs | 14 +- .../sync_committee_subscription.rs | 3 +- .../src/{ => sync_committee}/sync_duty.rs | 9 +- .../sync_selection_proof.rs | 20 +- .../{ => sync_committee}/sync_subnet_id.rs | 15 +- .../generate_deterministic_keypairs.rs | 5 +- .../generate_random_block_and_blobs.rs | 18 +- consensus/types/src/test_utils/mod.rs | 24 +- .../src/test_utils/test_random/address.rs | 4 +- .../test_random/aggregate_signature.rs | 6 +- .../src/test_utils/test_random/bitfield.rs | 8 +- .../src/test_utils/test_random/hash256.rs | 4 +- .../test_utils/test_random/kzg_commitment.rs | 4 +- .../src/test_utils/test_random/kzg_proof.rs | 7 +- .../types/src/test_utils/test_random/mod.rs | 15 + .../src/test_utils/test_random/public_key.rs | 6 +- .../test_random/public_key_bytes.rs | 6 +- .../src/test_utils/test_random/secret_key.rs | 6 +- .../src/test_utils/test_random/signature.rs | 6 +- .../test_utils/test_random/signature_bytes.rs | 6 +- .../{ => 
test_random}/test_random.rs | 22 +- .../src/test_utils/test_random/uint256.rs | 4 +- consensus/types/src/validator/mod.rs | 9 + .../proposer_preparation_data.rs | 3 +- .../types/src/{ => validator}/validator.rs | 16 +- .../validator_registration_data.rs | 4 +- .../{ => validator}/validator_subscription.rs | 3 +- consensus/types/src/withdrawal/mod.rs | 9 + .../pending_partial_withdrawal.rs | 6 +- .../types/src/{ => withdrawal}/withdrawal.rs | 12 +- .../withdrawal_credentials.rs | 7 +- .../{ => withdrawal}/withdrawal_request.rs | 7 +- .../tests.rs => tests/committee_cache.rs} | 11 +- .../beacon_state/tests.rs => tests/state.rs} | 18 +- 182 files changed, 2979 insertions(+), 2439 deletions(-) create mode 100644 beacon_node/http_api/src/beacon/mod.rs create mode 100644 beacon_node/http_api/src/beacon/states.rs create mode 100644 beacon_node/http_api/src/utils.rs rename {consensus/types => common/eth2}/src/beacon_response.rs (97%) rename consensus/types/src/{ => attestation}/aggregate_and_proof.rs (93%) rename consensus/types/src/{ => attestation}/attestation.rs (97%) rename consensus/types/src/{ => attestation}/attestation_data.rs (87%) rename consensus/types/src/{ => attestation}/attestation_duty.rs (92%) rename consensus/types/src/{ => attestation}/beacon_committee.rs (92%) rename consensus/types/src/{ => attestation}/checkpoint.rs (88%) rename consensus/types/src/{ => attestation}/indexed_attestation.rs (96%) rename consensus/types/src/{ => attestation}/indexed_payload_attestation.rs (100%) create mode 100644 consensus/types/src/attestation/mod.rs rename consensus/types/src/{ => attestation}/participation_flags.rs (96%) rename consensus/types/src/{ => attestation}/payload_attestation.rs (91%) rename consensus/types/src/{ => attestation}/payload_attestation_data.rs (100%) rename consensus/types/src/{ => attestation}/payload_attestation_message.rs (82%) rename consensus/types/src/{ => attestation}/pending_attestation.rs (84%) rename consensus/types/src/{ => 
attestation}/selection_proof.rs (95%) rename consensus/types/src/{ => attestation}/shuffling_id.rs (93%) rename consensus/types/src/{ => attestation}/signed_aggregate_and_proof.rs (90%) rename consensus/types/src/{ => attestation}/subnet_id.rs (97%) rename consensus/types/src/{ => block}/beacon_block.rs (96%) rename consensus/types/src/{ => block}/beacon_block_body.rs (93%) rename consensus/types/src/{ => block}/beacon_block_header.rs (90%) create mode 100644 consensus/types/src/block/mod.rs rename consensus/types/src/{ => block}/signed_beacon_block.rs (96%) rename consensus/types/src/{ => block}/signed_beacon_block_header.rs (84%) rename consensus/types/src/{ => builder}/builder_bid.rs (93%) rename consensus/types/src/{ => builder}/builder_pending_payment.rs (100%) rename consensus/types/src/{ => builder}/builder_pending_withdrawal.rs (100%) create mode 100644 consensus/types/src/builder/mod.rs rename consensus/types/src/{ => consolidation}/consolidation_request.rs (84%) create mode 100644 consensus/types/src/consolidation/mod.rs rename consensus/types/src/{ => consolidation}/pending_consolidation.rs (86%) rename consensus/types/src/{ => core}/application_domain.rs (100%) rename consensus/types/src/{ => core}/chain_spec.rs (99%) rename consensus/types/src/{ => core}/config_and_preset.rs (95%) rename consensus/types/src/{ => core}/consts.rs (94%) rename consensus/types/src/{ => core}/enr_fork_id.rs (95%) rename consensus/types/src/{ => core}/eth_spec.rs (98%) rename consensus/types/src/{ => core}/graffiti.rs (98%) create mode 100644 consensus/types/src/core/mod.rs rename consensus/types/src/{ => core}/non_zero_usize.rs (100%) rename consensus/types/src/{ => core}/preset.rs (99%) rename consensus/types/src/{ => core}/relative_epoch.rs (99%) rename consensus/types/src/{ => core}/signing_data.rs (85%) rename consensus/types/src/{ => core}/slot_data.rs (92%) rename consensus/types/src/{ => core}/slot_epoch.rs (98%) rename consensus/types/src/{ => 
core}/slot_epoch_macros.rs (100%) rename consensus/types/src/{ => core}/sqlite.rs (96%) rename consensus/types/src/{ => data}/blob_sidecar.rs (94%) rename consensus/types/src/{ => data}/data_column_custody_group.rs (98%) rename consensus/types/src/{ => data}/data_column_sidecar.rs (94%) rename consensus/types/src/{ => data}/data_column_subnet_id.rs (80%) create mode 100644 consensus/types/src/data/mod.rs rename consensus/types/src/{ => deposit}/deposit.rs (78%) rename consensus/types/src/{ => deposit}/deposit_data.rs (86%) rename consensus/types/src/{ => deposit}/deposit_message.rs (81%) rename consensus/types/src/{ => deposit}/deposit_request.rs (86%) rename consensus/types/src/{ => deposit}/deposit_tree_snapshot.rs (95%) create mode 100644 consensus/types/src/deposit/mod.rs rename consensus/types/src/{ => deposit}/pending_deposit.rs (78%) rename consensus/types/src/{ => execution}/bls_to_execution_change.rs (83%) rename consensus/types/src/{ => execution}/dumb_macros.rs (100%) rename consensus/types/src/{ => execution}/eth1_data.rs (86%) rename consensus/types/src/{ => execution}/execution_block_hash.rs (96%) rename consensus/types/src/{ => execution}/execution_block_header.rs (98%) rename consensus/types/src/{ => execution}/execution_payload.rs (92%) rename consensus/types/src/{ => execution}/execution_payload_bid.rs (100%) rename consensus/types/src/{ => execution}/execution_payload_envelope.rs (100%) rename consensus/types/src/{ => execution}/execution_payload_header.rs (95%) rename consensus/types/src/{ => execution}/execution_requests.rs (93%) create mode 100644 consensus/types/src/execution/mod.rs rename consensus/types/src/{ => execution}/payload.rs (90%) rename consensus/types/src/{ => execution}/signed_bls_to_execution_change.rs (78%) rename consensus/types/src/{ => execution}/signed_execution_payload_bid.rs (93%) rename consensus/types/src/{ => execution}/signed_execution_payload_envelope.rs (89%) create mode 100644 consensus/types/src/exit/mod.rs 
rename consensus/types/src/{ => exit}/signed_voluntary_exit.rs (84%) rename consensus/types/src/{ => exit}/voluntary_exit.rs (90%) rename consensus/types/src/{ => fork}/fork.rs (96%) rename consensus/types/src/{ => fork}/fork_context.rs (98%) rename consensus/types/src/{ => fork}/fork_data.rs (88%) create mode 100644 consensus/types/src/fork/fork_macros.rs rename consensus/types/src/{ => fork}/fork_name.rs (84%) create mode 100644 consensus/types/src/fork/fork_version_decode.rs create mode 100644 consensus/types/src/fork/mod.rs create mode 100644 consensus/types/src/kzg_ext/consts.rs create mode 100644 consensus/types/src/kzg_ext/mod.rs create mode 100644 consensus/types/src/light_client/consts.rs create mode 100644 consensus/types/src/light_client/error.rs rename consensus/types/src/{ => light_client}/light_client_bootstrap.rs (87%) rename consensus/types/src/{ => light_client}/light_client_finality_update.rs (89%) rename consensus/types/src/{ => light_client}/light_client_header.rs (90%) rename consensus/types/src/{ => light_client}/light_client_optimistic_update.rs (93%) rename consensus/types/src/{ => light_client}/light_client_update.rs (86%) create mode 100644 consensus/types/src/light_client/mod.rs delete mode 100644 consensus/types/src/runtime_fixed_vector.rs delete mode 100644 consensus/types/src/runtime_var_list.rs rename consensus/types/src/{ => slashing}/attester_slashing.rs (96%) create mode 100644 consensus/types/src/slashing/mod.rs rename consensus/types/src/{ => slashing}/proposer_slashing.rs (86%) rename consensus/types/src/{ => state}/activation_queue.rs (95%) rename consensus/types/src/{beacon_state => state}/balance.rs (100%) rename consensus/types/src/{ => state}/beacon_state.rs (88%) rename consensus/types/src/{beacon_state => state}/committee_cache.rs (93%) rename consensus/types/src/{ => state}/epoch_cache.rs (97%) rename consensus/types/src/{beacon_state => state}/exit_cache.rs (97%) rename consensus/types/src/{ => 
state}/historical_batch.rs (81%) rename consensus/types/src/{ => state}/historical_summary.rs (87%) rename consensus/types/src/{beacon_state => state}/iter.rs (96%) create mode 100644 consensus/types/src/state/mod.rs rename consensus/types/src/{beacon_state => state}/progressive_balances_cache.rs (98%) rename consensus/types/src/{beacon_state => state}/pubkey_cache.rs (98%) rename consensus/types/src/{beacon_state => state}/slashings_cache.rs (96%) rename consensus/types/src/{ => sync_committee}/contribution_and_proof.rs (88%) create mode 100644 consensus/types/src/sync_committee/mod.rs rename consensus/types/src/{ => sync_committee}/signed_contribution_and_proof.rs (87%) rename consensus/types/src/{ => sync_committee}/sync_aggregate.rs (91%) rename consensus/types/src/{ => sync_committee}/sync_aggregator_selection_data.rs (82%) rename consensus/types/src/{ => sync_committee}/sync_committee.rs (95%) rename consensus/types/src/{ => sync_committee}/sync_committee_contribution.rs (93%) rename consensus/types/src/{ => sync_committee}/sync_committee_message.rs (88%) rename consensus/types/src/{ => sync_committee}/sync_committee_subscription.rs (96%) rename consensus/types/src/{ => sync_committee}/sync_duty.rs (96%) rename consensus/types/src/{ => sync_committee}/sync_selection_proof.rs (92%) rename consensus/types/src/{ => sync_committee}/sync_subnet_id.rs (92%) create mode 100644 consensus/types/src/test_utils/test_random/mod.rs rename consensus/types/src/test_utils/{ => test_random}/test_random.rs (90%) create mode 100644 consensus/types/src/validator/mod.rs rename consensus/types/src/{ => validator}/proposer_preparation_data.rs (95%) rename consensus/types/src/{ => validator}/validator.rs (97%) rename consensus/types/src/{ => validator}/validator_registration_data.rs (93%) rename consensus/types/src/{ => validator}/validator_subscription.rs (93%) create mode 100644 consensus/types/src/withdrawal/mod.rs rename consensus/types/src/{ => 
withdrawal}/pending_partial_withdrawal.rs (85%) rename consensus/types/src/{ => withdrawal}/withdrawal.rs (73%) rename consensus/types/src/{ => withdrawal}/withdrawal_credentials.rs (91%) rename consensus/types/src/{ => withdrawal}/withdrawal_request.rs (87%) rename consensus/types/{src/beacon_state/committee_cache/tests.rs => tests/committee_cache.rs} (97%) rename consensus/types/{src/beacon_state/tests.rs => tests/state.rs} (97%) diff --git a/Cargo.lock b/Cargo.lock index 7ddcad7239..481808b41f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3125,6 +3125,7 @@ dependencies = [ name = "eth2" version = "0.1.0" dependencies = [ + "context_deserialize", "educe", "eip_3076", "either", @@ -8547,6 +8548,7 @@ checksum = "1fc20a89bab2dabeee65e9c9eb96892dc222c23254b401e1319b85efd852fa31" dependencies = [ "arbitrary", "context_deserialize", + "educe", "ethereum_serde_utils", "ethereum_ssz", "itertools 0.14.0", diff --git a/Cargo.toml b/Cargo.toml index 6ccf429b6c..21cf551c48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -227,7 +227,7 @@ slashing_protection = { path = "validator_client/slashing_protection" } slot_clock = { path = "common/slot_clock" } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" -ssz_types = { version = "0.14.0", features = ["context_deserialize"] } +ssz_types = { version = "0.14.0", features = ["context_deserialize", "runtime_types"] } state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" } strum = { version = "0.24", features = ["derive"] } diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index f879adfb49..beaa1e581c 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -17,7 +17,7 @@ use std::ops::Range; use types::{ BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, Hash256, RelativeEpoch, Slot, - attestation::Error as 
AttestationError, + attestation::AttestationError, beacon_state::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, }, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index fc9acd850b..079fabd8a5 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -74,6 +74,7 @@ use crate::{ AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead, metrics, }; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{ EventKind, SseBlobSidecar, SseBlock, SseDataColumnSidecar, SseExtendedPayloadAttributes, }; diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index c383c20f9f..e7c536c0d8 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -12,6 +12,7 @@ use crate::{BeaconChainTypes, BlockProcessStatus}; use lighthouse_tracing::SPAN_PENDING_COMPONENTS; use lru::LruCache; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use ssz_types::{RuntimeFixedVector, RuntimeVariableList}; use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; @@ -20,8 +21,7 @@ use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobIdentifier; use types::{ BlobSidecar, BlockImportSource, ChainSpec, ColumnIndex, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, + DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, }; #[derive(Clone)] diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index e72e9a6b21..88b040a6e5 
100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -49,7 +49,7 @@ use tree_hash_derive::TreeHash; use types::ChainSpec; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; -use types::sync_committee::Error as SyncCommitteeError; +use types::sync_committee::SyncCommitteeError; use types::{ AggregateSignature, BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 322b30c2e9..e3a8eecdb9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -46,6 +46,7 @@ use rand::seq::SliceRandom; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; +use ssz_types::RuntimeVariableList; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 36e5ba7c7d..35140764b6 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,6 +1,7 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; use bytes::Bytes; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::PublishBlockRequest; use eth2::types::{ BlobsBundle, BlockId, BroadcastValidation, EndpointVersion, EventKind, EventTopic, @@ -31,9 +32,9 @@ use types::builder_bid::{ }; use types::{ Address, BeaconState, ChainSpec, Epoch, EthSpec, ExecPayload, ExecutionPayload, - 
ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, - ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, - SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, + ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, Hash256, + PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, + SignedValidatorRegistrationData, Slot, Uint256, }; use types::{ExecutionBlockHash, SecretKey}; use warp::reply::{self, Reply}; diff --git a/beacon_node/http_api/src/beacon/mod.rs b/beacon_node/http_api/src/beacon/mod.rs new file mode 100644 index 0000000000..20394784ae --- /dev/null +++ b/beacon_node/http_api/src/beacon/mod.rs @@ -0,0 +1 @@ +pub mod states; diff --git a/beacon_node/http_api/src/beacon/states.rs b/beacon_node/http_api/src/beacon/states.rs new file mode 100644 index 0000000000..6d06bcc77d --- /dev/null +++ b/beacon_node/http_api/src/beacon/states.rs @@ -0,0 +1,787 @@ +use crate::StateId; +use crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::ResponseFilter; +use crate::validator::pubkey_to_validator_index; +use crate::version::{ + ResponseIncludesVersion, add_consensus_version_header, + execution_optimistic_finalized_beacon_response, +}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::types::{ + ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, + ValidatorsRequestBody, +}; +use std::sync::Arc; +use types::{ + AttestationShufflingId, CommitteeCache, Error as BeaconStateError, EthSpec, RelativeEpoch, +}; +use warp::filters::BoxedFilter; +use warp::{Filter, Reply}; +use warp_utils::query::multi_key_query; + +type BeaconStatesPath = BoxedFilter<( + StateId, + TaskSpawner<::EthSpec>, + Arc>, +)>; + +// GET beacon/states/{state_id}/pending_consolidations +pub fn get_beacon_state_pending_consolidations( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + 
.clone() + .and(warp::path("pending_consolidations")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(consolidations) = state.pending_consolidations() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending consolidations not found".to_string(), + )); + }; + + Ok(( + consolidations.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/pending_partial_withdrawals +pub fn get_beacon_state_pending_partial_withdrawals( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_partial_withdrawals")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(withdrawals) = state.pending_partial_withdrawals() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending withdrawals not found".to_string(), + )); + }; + + Ok(( + withdrawals.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + 
finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/pending_deposits +pub fn get_beacon_state_pending_deposits( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_deposits")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(deposits) = state.pending_deposits() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending deposits not found".to_string(), + )); + }; + + Ok(( + deposits.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/randao?epoch +pub fn get_beacon_state_randao( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("randao")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::RandaoQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (randao, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); + let randao = 
*state.get_randao_mix(epoch).map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "epoch out of range: {e:?}" + )) + })?; + Ok((randao, execution_optimistic, finalized)) + }, + )?; + + Ok( + eth2::types::GenericResponse::from(eth2::types::RandaoMix { randao }) + .add_execution_optimistic_finalized(execution_optimistic, finalized), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/sync_committees?epoch +pub fn get_beacon_state_sync_committees( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("sync_committees")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::SyncCommitteesQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (sync_committee, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + Ok(( + state + .get_built_sync_committee(epoch, &chain.spec) + .cloned() + .map_err(|e| match e { + BeaconStateError::SyncCommitteeNotKnown { .. 
} => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} has no \ + sync committee for epoch {}", + current_epoch, epoch + )) + } + BeaconStateError::IncorrectStateVariant => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} is not activated for Altair", + current_epoch, + )) + } + e => warp_utils::reject::beacon_state_error(e), + })?, + execution_optimistic, + finalized, + )) + }, + )?; + + let validators = chain + .validator_indices(sync_committee.pubkeys.iter()) + .map_err(warp_utils::reject::unhandled_error)?; + + let validator_aggregates = validators + .chunks_exact(T::EthSpec::sync_subcommittee_size()) + .map(|indices| eth2::types::SyncSubcommittee { + indices: indices.to_vec(), + }) + .collect(); + + let response = eth2::types::SyncCommitteeByValidatorIndices { + validators, + validator_aggregates, + }; + + Ok(eth2::types::GenericResponse::from(response) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/committees?slot,index,epoch +pub fn get_beacon_state_committees( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("committees")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::CommitteesQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + + // Attempt to obtain the committee_cache from the beacon chain + let decision_slot = (epoch.saturating_sub(2u64)) + .end_slot(T::EthSpec::slots_per_epoch()); + // Find the decision block and skip to another method on any kind + // of failure + let shuffling_id = 
if let Ok(Some(shuffling_decision_block)) = + chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) + { + Some(AttestationShufflingId { + shuffling_epoch: epoch, + shuffling_decision_block, + }) + } else { + None + }; + + // Attempt to read from the chain cache if there exists a + // shuffling_id + let maybe_cached_shuffling = if let Some(shuffling_id) = + shuffling_id.as_ref() + { + chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + .and_then(|mut cache_write| cache_write.get(shuffling_id)) + .and_then(|cache_item| cache_item.wait().ok()) + } else { + None + }; + + let committee_cache = + if let Some(shuffling) = maybe_cached_shuffling { + shuffling + } else { + let possibly_built_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) + if state.committee_cache_is_initialized( + relative_epoch, + ) => + { + state.committee_cache(relative_epoch).cloned() + } + _ => CommitteeCache::initialized( + state, + epoch, + &chain.spec, + ), + } + .map_err( + |e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() + as u64; + let first_subsequent_restore_point_slot = + ((epoch.start_slot( + T::EthSpec::slots_per_epoch(), + ) / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request( + format!( + "epoch out of bounds, \ + try state at slot {}", + first_subsequent_restore_point_slot, + ), + ) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, \ + too far in future" + .into(), + ) + } + } + _ => warp_utils::reject::unhandled_error( + BeaconChainError::from(e), + ), + }, + )?; + + // Attempt to write to the beacon cache (only if the cache + // size is not the default value). 
+ if chain.config.shuffling_cache_size + != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE + && let Some(shuffling_id) = shuffling_id + && let Some(mut cache_write) = chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + { + cache_write.insert_committee_cache( + shuffling_id, + &possibly_built_cache, + ); + } + + possibly_built_cache + }; + + // Use either the supplied slot or all slots in the epoch. + let slots = + query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); + + // Use either the supplied committee index or all available indices. + let indices = + query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); + + let mut response = Vec::with_capacity(slots.len() * indices.len()); + + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. + if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request( + format!("{} is not in epoch {}", slot, epoch), + )); + } + + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; + + response.push(eth2::types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } + } + + Ok((response, execution_optimistic, finalized)) + }, + )?; + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validators/{validator_id} +pub fn get_beacon_state_validators_id( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + 
.and(warp::path("validators")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid validator ID".to_string(), + )) + })) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + validator_id: ValidatorId| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => pubkey_to_validator_index( + &chain, state, pubkey, + ) + .map_err(|e| { + warp_utils::reject::custom_not_found(format!( + "unable to access pubkey cache: {e:?}", + )) + })?, + ValidatorId::Index(index) => Some(*index as usize), + }; + + Ok(( + index_opt + .and_then(|index| { + let validator = state.validators().get(index)?; + let balance = *state.balances().get(index)?; + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; + + Some(eth2::types::ValidatorData { + index: index as u64, + balance, + status: + eth2::types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "unknown validator: {}", + validator_id + )) + })?, + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validators +pub fn post_beacon_state_validators( + 
beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorsRequestBody| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.ids, + &query.statuses, + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validators?id,status +pub fn get_beacon_state_validators( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(multi_key_query::()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query_res: Result| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. 
+ let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let query = query_res?; + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.id, + &query.status, + ) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validator_identities +pub fn post_beacon_state_validator_identities( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validator_identities")) + .and(warp::path::end()) + .and(warp_utils::json::json_no_body()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorIdentitiesRequestBody| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + crate::validators::get_beacon_state_validator_identities( + state_id, + chain, + Some(&query.ids), + ) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validator_balances +pub fn post_beacon_state_validator_balances( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validator_balances")) + .and(warp::path::end()) + .and(warp_utils::json::json_no_body()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorBalancesRequestBody| { + task_spawner.blocking_json_task(Priority::P1, move || { + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + Some(&query.ids), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validator_balances?id +pub fn get_beacon_state_validator_balances( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + 
.clone() + .and(warp::path("validator_balances")) + .and(warp::path::end()) + .and(multi_key_query::()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query_res: Result| { + task_spawner.blocking_json_task(Priority::P1, move || { + let query = query_res?; + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + query.id.as_deref(), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/finality_checkpoints +pub fn get_beacon_state_finality_checkpoints( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("finality_checkpoints")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + eth2::types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/fork +pub fn get_beacon_state_fork( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("fork")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (fork, execution_optimistic, finalized) = + state_id.fork_and_execution_optimistic_and_finalized(&chain)?; + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data: fork, + execution_optimistic: 
Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/root +pub fn get_beacon_state_root( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .and(warp::path("root")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (root, execution_optimistic, finalized) = state_id.root(&chain)?; + Ok(eth2::types::GenericResponse::from( + eth2::types::RootData::from(root), + )) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) + }) + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index e088005f20..64f5451560 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -2,6 +2,7 @@ use crate::version::inconsistent_fork_rejection; use crate::{ExecutionOptimistic, state_id::checkpoint_slot_and_execution_optimistic}; use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::beacon_response::{ExecutionOptimisticFinalizedMetadata, UnversionedResponse}; use eth2::types::BlockId as CoreBlockId; use eth2::types::DataColumnIndicesQuery; use eth2::types::{BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery}; @@ -10,8 +11,7 @@ use std::str::FromStr; use std::sync::Arc; use types::{ BlobSidecarList, DataColumnSidecarList, EthSpec, FixedBytesExtended, ForkName, Hash256, - SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, UnversionedResponse, - beacon_response::ExecutionOptimisticFinalizedMetadata, + SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, }; use warp::Rejection; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6389b34961..ccd0698161 100644 --- a/beacon_node/http_api/src/lib.rs +++ 
b/beacon_node/http_api/src/lib.rs @@ -8,6 +8,7 @@ mod aggregate_attestation; mod attestation_performance; mod attester_duties; +mod beacon; mod block_id; mod block_packing_efficiency; mod block_rewards; @@ -29,13 +30,16 @@ mod sync_committees; mod task_spawner; pub mod test_utils; mod ui; +mod utils; mod validator; mod validator_inclusion; mod validators; mod version; + use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::beacon_response; +use beacon::states; use beacon_chain::{ AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, attestation_verification::VerifiedAttestation, @@ -50,8 +54,7 @@ use eth2::StatusCode; use eth2::types::{ self as api_types, BroadcastValidation, ContextDeserialize, EndpointVersion, ForkChoice, ForkChoiceExtraData, ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, - StateId as CoreStateId, ValidatorBalancesRequestBody, ValidatorId, - ValidatorIdentitiesRequestBody, ValidatorStatus, ValidatorsRequestBody, + StateId as CoreStateId, ValidatorId, ValidatorStatus, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; @@ -90,14 +93,12 @@ use tokio_stream::{ }; use tracing::{debug, error, info, warn}; use types::{ - Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, - ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, - ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedValidatorRegistrationData, SignedVoluntaryExit, SingleAttestation, Slot, - SyncCommitteeMessage, SyncContributionData, + Attestation, AttestationData, AttesterSlashing, BeaconStateError, ChainSpec, Checkpoint, + 
ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, ProposerPreparationData, ProposerSlashing, + SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, + SingleAttestation, Slot, SyncCommitteeMessage, SyncContributionData, }; -use validator::pubkey_to_validator_index; use version::{ ResponseIncludesVersion, V1, V2, V3, add_consensus_version_header, add_ssz_content_type_header, execution_optimistic_finalized_beacon_response, inconsistent_fork_rejection, @@ -583,693 +584,65 @@ pub fn serve( )) })) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); + .and(chain_filter.clone()) + .boxed(); // GET beacon/states/{state_id}/root - let get_beacon_state_root = beacon_states_path - .clone() - .and(warp::path("root")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (root, execution_optimistic, finalized) = state_id.root(&chain)?; - Ok(api_types::GenericResponse::from(api_types::RootData::from( - root, - ))) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }, - ); + let get_beacon_state_root = states::get_beacon_state_root(beacon_states_path.clone()); // GET beacon/states/{state_id}/fork - let get_beacon_state_fork = beacon_states_path - .clone() - .and(warp::path("fork")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (fork, execution_optimistic, finalized) = - state_id.fork_and_execution_optimistic_and_finalized(&chain)?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data: fork, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) - }) - }, - ); + let get_beacon_state_fork = 
states::get_beacon_state_fork(beacon_states_path.clone()); // GET beacon/states/{state_id}/finality_checkpoints - let get_beacon_state_finality_checkpoints = beacon_states_path - .clone() - .and(warp::path("finality_checkpoints")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - Ok(( - api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }, - execution_optimistic, - finalized, - )) - }, - )?; - - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) - }) - }, - ); + let get_beacon_state_finality_checkpoints = + states::get_beacon_state_finality_checkpoints(beacon_states_path.clone()); // GET beacon/states/{state_id}/validator_balances?id - let get_beacon_state_validator_balances = beacon_states_path - .clone() - .and(warp::path("validator_balances")) - .and(warp::path::end()) - .and(multi_key_query::()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query_res: Result| { - task_spawner.blocking_json_task(Priority::P1, move || { - let query = query_res?; - crate::validators::get_beacon_state_validator_balances( - state_id, - chain, - query.id.as_deref(), - ) - }) - }, - ); + let get_beacon_state_validator_balances = + states::get_beacon_state_validator_balances(beacon_states_path.clone()); // POST beacon/states/{state_id}/validator_balances - let post_beacon_state_validator_balances = beacon_states_path - .clone() - .and(warp::path("validator_balances")) - .and(warp::path::end()) - .and(warp_utils::json::json_no_body()) - 
.then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorBalancesRequestBody| { - task_spawner.blocking_json_task(Priority::P1, move || { - crate::validators::get_beacon_state_validator_balances( - state_id, - chain, - Some(&query.ids), - ) - }) - }, - ); + let post_beacon_state_validator_balances = + states::post_beacon_state_validator_balances(beacon_states_path.clone()); // POST beacon/states/{state_id}/validator_identities - let post_beacon_state_validator_identities = beacon_states_path - .clone() - .and(warp::path("validator_identities")) - .and(warp::path::end()) - .and(warp_utils::json::json_no_body()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorIdentitiesRequestBody| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. - let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - crate::validators::get_beacon_state_validator_identities( - state_id, - chain, - Some(&query.ids), - ) - }) - }, - ); + let post_beacon_state_validator_identities = + states::post_beacon_state_validator_identities(beacon_states_path.clone()); // GET beacon/states/{state_id}/validators?id,status - let get_beacon_state_validators = beacon_states_path - .clone() - .and(warp::path("validators")) - .and(warp::path::end()) - .and(multi_key_query::()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query_res: Result| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. 
- let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let query = query_res?; - crate::validators::get_beacon_state_validators( - state_id, - chain, - &query.id, - &query.status, - ) - }) - }, - ); + let get_beacon_state_validators = + states::get_beacon_state_validators(beacon_states_path.clone()); // POST beacon/states/{state_id}/validators - let post_beacon_state_validators = beacon_states_path - .clone() - .and(warp::path("validators")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorsRequestBody| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. - let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - crate::validators::get_beacon_state_validators( - state_id, - chain, - &query.ids, - &query.statuses, - ) - }) - }, - ); + let post_beacon_state_validators = + states::post_beacon_state_validators(beacon_states_path.clone()); // GET beacon/states/{state_id}/validators/{validator_id} - let get_beacon_state_validators_id = beacon_states_path - .clone() - .and(warp::path("validators")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid validator ID".to_string(), - )) - })) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - validator_id: ValidatorId| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. 
- let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let index_opt = match &validator_id { - ValidatorId::PublicKey(pubkey) => pubkey_to_validator_index( - &chain, state, pubkey, - ) - .map_err(|e| { - warp_utils::reject::custom_not_found(format!( - "unable to access pubkey cache: {e:?}", - )) - })?, - ValidatorId::Index(index) => Some(*index as usize), - }; - - Ok(( - index_opt - .and_then(|index| { - let validator = state.validators().get(index)?; - let balance = *state.balances().get(index)?; - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; - - Some(api_types::ValidatorData { - index: index as u64, - balance, - status: api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ), - validator: validator.clone(), - }) - }) - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "unknown validator: {}", - validator_id - )) - })?, - execution_optimistic, - finalized, - )) - }, - )?; - - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) - }) - }, - ); + let get_beacon_state_validators_id = + states::get_beacon_state_validators_id(beacon_states_path.clone()); // GET beacon/states/{state_id}/committees?slot,index,epoch - let get_beacon_state_committees = beacon_states_path - .clone() - .and(warp::path("committees")) - .and(warp::query::()) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::CommitteesQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id - 
.map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - - // Attempt to obtain the committee_cache from the beacon chain - let decision_slot = (epoch.saturating_sub(2u64)) - .end_slot(T::EthSpec::slots_per_epoch()); - // Find the decision block and skip to another method on any kind - // of failure - let shuffling_id = if let Ok(Some(shuffling_decision_block)) = - chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) - { - Some(AttestationShufflingId { - shuffling_epoch: epoch, - shuffling_decision_block, - }) - } else { - None - }; - - // Attempt to read from the chain cache if there exists a - // shuffling_id - let maybe_cached_shuffling = if let Some(shuffling_id) = - shuffling_id.as_ref() - { - chain - .shuffling_cache - .try_write_for(std::time::Duration::from_secs(1)) - .and_then(|mut cache_write| cache_write.get(shuffling_id)) - .and_then(|cache_item| cache_item.wait().ok()) - } else { - None - }; - - let committee_cache = - if let Some(shuffling) = maybe_cached_shuffling { - shuffling - } else { - let possibly_built_cache = - match RelativeEpoch::from_epoch(current_epoch, epoch) { - Ok(relative_epoch) - if state.committee_cache_is_initialized( - relative_epoch, - ) => - { - state.committee_cache(relative_epoch).cloned() - } - _ => CommitteeCache::initialized( - state, - epoch, - &chain.spec, - ), - } - .map_err( - |e| match e { - BeaconStateError::EpochOutOfBounds => { - let max_sprp = - T::EthSpec::slots_per_historical_root() - as u64; - let first_subsequent_restore_point_slot = - ((epoch.start_slot( - T::EthSpec::slots_per_epoch(), - ) / max_sprp) - + 1) - * max_sprp; - if epoch < current_epoch { - warp_utils::reject::custom_bad_request( - format!( - "epoch out of bounds, \ - try state at slot {}", - first_subsequent_restore_point_slot, - ), - ) - } else { - warp_utils::reject::custom_bad_request( - 
"epoch out of bounds, \ - too far in future" - .into(), - ) - } - } - _ => warp_utils::reject::unhandled_error( - BeaconChainError::from(e), - ), - }, - )?; - - // Attempt to write to the beacon cache (only if the cache - // size is not the default value). - if chain.config.shuffling_cache_size - != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE - && let Some(shuffling_id) = shuffling_id - && let Some(mut cache_write) = chain - .shuffling_cache - .try_write_for(std::time::Duration::from_secs(1)) - { - cache_write.insert_committee_cache( - shuffling_id, - &possibly_built_cache, - ); - } - - possibly_built_cache - }; - - // Use either the supplied slot or all slots in the epoch. - let slots = - query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { - epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() - }); - - // Use either the supplied committee index or all available indices. - let indices = - query.index.map(|index| vec![index]).unwrap_or_else(|| { - (0..committee_cache.committees_per_slot()).collect() - }); - - let mut response = Vec::with_capacity(slots.len() * indices.len()); - - for slot in slots { - // It is not acceptable to query with a slot that is not within the - // specified epoch. 
- if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { - return Err(warp_utils::reject::custom_bad_request( - format!("{} is not in epoch {}", slot, epoch), - )); - } - - for &index in &indices { - let committee = committee_cache - .get_beacon_committee(slot, index) - .ok_or_else(|| { - warp_utils::reject::custom_bad_request(format!( - "committee index {} does not exist in epoch {}", - index, epoch - )) - })?; - - response.push(api_types::CommitteeData { - index, - slot, - validators: committee - .committee - .iter() - .map(|i| *i as u64) - .collect(), - }); - } - } - - Ok((response, execution_optimistic, finalized)) - }, - )?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) - }) - }, - ); + let get_beacon_state_committees = + states::get_beacon_state_committees(beacon_states_path.clone()); // GET beacon/states/{state_id}/sync_committees?epoch - let get_beacon_state_sync_committees = beacon_states_path - .clone() - .and(warp::path("sync_committees")) - .and(warp::query::()) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::SyncCommitteesQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (sync_committee, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - Ok(( - state - .get_built_sync_committee(epoch, &chain.spec) - .cloned() - .map_err(|e| match e { - BeaconStateError::SyncCommitteeNotKnown { .. 
} => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} has no \ - sync committee for epoch {}", - current_epoch, epoch - )) - } - BeaconStateError::IncorrectStateVariant => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} is not activated for Altair", - current_epoch, - )) - } - e => warp_utils::reject::beacon_state_error(e), - })?, - execution_optimistic, - finalized, - )) - }, - )?; - - let validators = chain - .validator_indices(sync_committee.pubkeys.iter()) - .map_err(warp_utils::reject::unhandled_error)?; - - let validator_aggregates = validators - .chunks_exact(T::EthSpec::sync_subcommittee_size()) - .map(|indices| api_types::SyncSubcommittee { - indices: indices.to_vec(), - }) - .collect(); - - let response = api_types::SyncCommitteeByValidatorIndices { - validators, - validator_aggregates, - }; - - Ok(api_types::GenericResponse::from(response) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) - }) - }, - ); + let get_beacon_state_sync_committees = + states::get_beacon_state_sync_committees(beacon_states_path.clone()); // GET beacon/states/{state_id}/randao?epoch - let get_beacon_state_randao = beacon_states_path - .clone() - .and(warp::path("randao")) - .and(warp::query::()) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::RandaoQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (randao, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); - let randao = *state.get_randao_mix(epoch).map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "epoch out of range: {e:?}" - )) - })?; - Ok((randao, execution_optimistic, finalized)) - }, - )?; - - Ok( - api_types::GenericResponse::from(api_types::RandaoMix { randao }) - 
.add_execution_optimistic_finalized(execution_optimistic, finalized), - ) - }) - }, - ); + let get_beacon_state_randao = states::get_beacon_state_randao(beacon_states_path.clone()); // GET beacon/states/{state_id}/pending_deposits - let get_beacon_state_pending_deposits = beacon_states_path - .clone() - .and(warp::path("pending_deposits")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(deposits) = state.pending_deposits() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending deposits not found".to_string(), - )); - }; - - Ok(( - deposits.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), - )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); + let get_beacon_state_pending_deposits = + states::get_beacon_state_pending_deposits(beacon_states_path.clone()); // GET beacon/states/{state_id}/pending_partial_withdrawals - let get_beacon_state_pending_partial_withdrawals = beacon_states_path - .clone() - .and(warp::path("pending_partial_withdrawals")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(withdrawals) = state.pending_partial_withdrawals() else { - return Err(warp_utils::reject::custom_bad_request( 
- "Pending withdrawals not found".to_string(), - )); - }; - - Ok(( - withdrawals.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), - )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); + let get_beacon_state_pending_partial_withdrawals = + states::get_beacon_state_pending_partial_withdrawals(beacon_states_path.clone()); // GET beacon/states/{state_id}/pending_consolidations - let get_beacon_state_pending_consolidations = beacon_states_path - .clone() - .and(warp::path("pending_consolidations")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(consolidations) = state.pending_consolidations() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending consolidations not found".to_string(), - )); - }; - - Ok(( - consolidations.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), - )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); + let get_beacon_state_pending_consolidations = + states::get_beacon_state_pending_consolidations(beacon_states_path.clone()); // GET beacon/headers // diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index ca9b86990c..86eef03218 100644 --- 
a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -3,13 +3,14 @@ use crate::version::{ beacon_response, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::beacon_response::BeaconResponse; use eth2::types::{ self as api_types, LightClientUpdate, LightClientUpdateResponseChunk, LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, }; use ssz::Encode; use std::sync::Arc; -use types::{BeaconResponse, EthSpec, ForkName, Hash256, LightClientBootstrap}; +use types::{EthSpec, ForkName, Hash256, LightClientBootstrap}; use warp::{ Rejection, hyper::{Body, Response}, diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 367e09969b..472ec0b65e 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -9,6 +9,7 @@ use crate::{ use beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, }; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{self as api_types, ProduceBlockV3Metadata, SkipRandaoVerification}; use lighthouse_tracing::{SPAN_PRODUCE_BLOCK_V2, SPAN_PRODUCE_BLOCK_V3}; use ssz::Encode; diff --git a/beacon_node/http_api/src/utils.rs b/beacon_node/http_api/src/utils.rs new file mode 100644 index 0000000000..cf61fa481c --- /dev/null +++ b/beacon_node/http_api/src/utils.rs @@ -0,0 +1,3 @@ +use warp::filters::BoxedFilter; + +pub type ResponseFilter = BoxedFilter<(warp::reply::Response,)>; diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 871a10e7d4..371064c886 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,16 +1,14 @@ use crate::api_types::EndpointVersion; +use eth2::beacon_response::{ + BeaconResponse, ExecutionOptimisticFinalizedBeaconResponse, + ExecutionOptimisticFinalizedMetadata, ForkVersionedResponse, UnversionedResponse, +}; use eth2::{ 
CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, SSZ_CONTENT_TYPE_HEADER, }; use serde::Serialize; -use types::{ - BeaconResponse, ForkName, ForkVersionedResponse, InconsistentFork, Uint256, - UnversionedResponse, - beacon_response::{ - ExecutionOptimisticFinalizedBeaconResponse, ExecutionOptimisticFinalizedMetadata, - }, -}; +use types::{ForkName, InconsistentFork, Uint256}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 77d2a34e16..5b3574d48a 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -8,7 +8,7 @@ use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; use snap::write::FrameEncoder; use ssz::{Decode, Encode}; -use ssz_types::VariableList; +use ssz_types::{RuntimeVariableList, VariableList}; use std::io::Cursor; use std::io::ErrorKind; use std::io::{Read, Write}; @@ -18,10 +18,10 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockFulu, SignedBeaconBlockGloas, + LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, + SignedBeaconBlockGloas, }; use unsigned_varint::codec::Uvi; diff --git 
a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9aab079952..a9b4aa2fba 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -5,7 +5,7 @@ use regex::bytes::Regex; use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::{VariableList, typenum::U256}; +use ssz_types::{RuntimeVariableList, VariableList, typenum::U256}; use std::fmt::Display; use std::marker::PhantomData; use std::ops::Deref; @@ -17,7 +17,7 @@ use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, + LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, Slot, blob_sidecar::BlobSidecar, }; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 60e3e3da97..8613edf5f5 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -7,7 +7,7 @@ use lighthouse_network::rpc::{RequestType, methods::*}; use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{NetworkEvent, ReportSource, Response}; use ssz::Encode; -use ssz_types::VariableList; +use ssz_types::{RuntimeVariableList, VariableList}; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::Runtime; @@ -17,7 +17,7 @@ use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, - RuntimeVariableList, Signature, 
SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + Signature, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; type E = MinimalEthSpec; diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index ea7af4e838..ed04fe7bb9 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -33,6 +33,7 @@ use lighthouse_network::{ }; use matches::assert_matches; use slot_clock::SlotClock; +use ssz_types::RuntimeVariableList; use std::collections::HashSet; use std::iter::Iterator; use std::sync::Arc; @@ -42,8 +43,8 @@ use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; use types::{ AttesterSlashing, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecarList, DataColumnSubnetId, Epoch, EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, - RuntimeVariableList, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, - SingleAttestation, Slot, SubnetId, + SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, SingleAttestation, Slot, + SubnetId, }; type E = MainnetEthSpec; diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 01929cbf90..ed9a11a03d 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -7,11 +7,12 @@ use lighthouse_network::{ BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, }, }; +use ssz_types::RuntimeVariableList; use std::{collections::HashMap, sync::Arc}; use tracing::{Span, debug}; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - Hash256, RuntimeVariableList, SignedBeaconBlock, + Hash256, SignedBeaconBlock, }; use crate::sync::network_context::MAX_COLUMN_RETRIES; diff --git a/beacon_node/network/src/sync/tests/lookups.rs 
b/beacon_node/network/src/sync/tests/lookups.rs index 63bcd176f5..ef52f89678 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1929,8 +1929,8 @@ mod deneb_only { block_verification_types::{AsBlock, RpcBlock}, data_availability_checker::AvailabilityCheckError, }; + use ssz_types::RuntimeVariableList; use std::collections::VecDeque; - use types::RuntimeVariableList; struct DenebTester { rig: TestRig, diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 7a75bdc80a..f7e6cde210 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -9,6 +9,7 @@ default = ["lighthouse"] lighthouse = [] [dependencies] +context_deserialize = { workspace = true } educe = { workspace = true } eip_3076 = { workspace = true } either = { workspace = true } diff --git a/consensus/types/src/beacon_response.rs b/common/eth2/src/beacon_response.rs similarity index 97% rename from consensus/types/src/beacon_response.rs rename to common/eth2/src/beacon_response.rs index fc59fc9432..d58734997c 100644 --- a/consensus/types/src/beacon_response.rs +++ b/common/eth2/src/beacon_response.rs @@ -1,12 +1,8 @@ -use crate::{ContextDeserialize, ForkName}; +use context_deserialize::ContextDeserialize; use serde::de::DeserializeOwned; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::value::Value; - -pub trait ForkVersionDecode: Sized { - /// SSZ decode with explicit fork variant. - fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; -} +use types::ForkName; /// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than /// version. If you *do* care about adding other fields you can mix in any type that implements diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index bcd979daca..4e832a11df 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -7,6 +7,7 @@ //! 
Eventually it would be ideal to publish this crate on crates.io, however we have some local //! dependencies preventing this presently. +pub mod beacon_response; pub mod error; #[cfg(feature = "lighthouse")] pub mod lighthouse; @@ -15,10 +16,14 @@ pub mod lighthouse_vc; pub mod mixin; pub mod types; +pub use beacon_response::{ + BeaconResponse, EmptyMetadata, ExecutionOptimisticFinalizedBeaconResponse, + ExecutionOptimisticFinalizedMetadata, ForkVersionedResponse, UnversionedResponse, +}; + pub use self::error::{Error, ok_or_error, success_or_error}; use self::mixin::{RequestAccept, ResponseOptional}; use self::types::*; -use ::types::beacon_response::ExecutionOptimisticFinalizedBeaconResponse; use educe::Educe; use futures::Stream; use futures_util::StreamExt; diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 4407e30e43..8e1d90f8f9 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -2,7 +2,6 @@ pub use crate::lighthouse::Health; pub use crate::lighthouse_vc::std_types::*; pub use crate::types::{GenericResponse, VersionData}; use eth2_keystore::Keystore; -use graffiti::GraffitiString; use serde::{Deserialize, Serialize}; use std::path::PathBuf; pub use types::*; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 6aad00301a..cbdaa004d0 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -19,10 +19,15 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use test_random_derive::TestRandom; -use types::beacon_block_body::KzgCommitments; use types::test_utils::TestRandom; pub use types::*; +// TODO(mac): Temporary module and re-export hack to expose old `consensus/types` via `eth2/types`. 
+pub use crate::beacon_response::*; +pub mod beacon_response { + pub use crate::beacon_response::*; +} + #[cfg(feature = "lighthouse")] use crate::lighthouse::BlockReward; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 1f527c0de8..559a181948 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -73,6 +73,9 @@ paste = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } +[lints.clippy] +module_inception = "allow" + [[bench]] name = "benches" harness = false diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/attestation/aggregate_and_proof.rs similarity index 93% rename from consensus/types/src/aggregate_and_proof.rs rename to consensus/types/src/attestation/aggregate_and_proof.rs index e76ba48bf4..4c6e775e56 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/attestation/aggregate_and_proof.rs @@ -1,17 +1,20 @@ -use super::{AttestationBase, AttestationElectra, AttestationRef}; -use super::{ - ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, SecretKey, SelectionProof, - Signature, SignedRoot, -}; -use crate::Attestation; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{PublicKey, SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + Attestation, AttestationBase, AttestationElectra, AttestationRef, SelectionProof, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + #[superstruct( variants(Base, Electra), variant_attributes( diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation/attestation.rs similarity index 97% rename from consensus/types/src/attestation.rs rename to 
consensus/types/src/attestation/attestation.rs index 1430582658..693b5889f5 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation/attestation.rs @@ -1,23 +1,28 @@ -use super::{ - AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, - Signature, SignedRoot, +use std::{ + collections::HashSet, + hash::{Hash, Hasher}, }; -use crate::slot_data::SlotData; -use crate::{ - Checkpoint, ContextDeserialize, ForkName, IndexedAttestationBase, IndexedAttestationElectra, -}; -use crate::{Hash256, Slot, test_utils::TestRandom}; -use crate::{IndexedAttestation, context_deserialize}; + +use bls::{AggregateSignature, SecretKey, Signature}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::BitVector; -use std::collections::HashSet; -use std::hash::{Hash, Hasher}; +use ssz_types::{BitList, BitVector}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + AttestationData, Checkpoint, IndexedAttestation, IndexedAttestationBase, + IndexedAttestationElectra, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation/attestation_data.rs similarity index 87% rename from consensus/types/src/attestation_data.rs rename to consensus/types/src/attestation/attestation_data.rs index a4643e5474..f3fceb9b70 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation/attestation_data.rs @@ -1,11 +1,16 @@ -use crate::slot_data::SlotData; -use crate::test_utils::TestRandom; -use crate::{Checkpoint, ForkName, Hash256, SignedRoot, Slot}; use 
context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; + +use crate::{ + attestation::Checkpoint, + core::{Hash256, SignedRoot, Slot, SlotData}, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data upon which an attestation is based. /// /// Spec v0.12.1 diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation/attestation_duty.rs similarity index 92% rename from consensus/types/src/attestation_duty.rs rename to consensus/types/src/attestation/attestation_duty.rs index 70c7c5c170..fe3da79a2b 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation/attestation_duty.rs @@ -1,6 +1,7 @@ -use crate::*; use serde::{Deserialize, Serialize}; +use crate::{attestation::CommitteeIndex, core::Slot}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/attestation/beacon_committee.rs similarity index 92% rename from consensus/types/src/beacon_committee.rs rename to consensus/types/src/attestation/beacon_committee.rs index 04fe763a11..2dba30bad3 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/attestation/beacon_committee.rs @@ -1,4 +1,4 @@ -use crate::*; +use crate::{attestation::CommitteeIndex, core::Slot}; #[derive(Default, Clone, Debug, PartialEq)] pub struct BeaconCommittee<'a> { diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/attestation/checkpoint.rs similarity index 88% rename from consensus/types/src/checkpoint.rs rename to consensus/types/src/attestation/checkpoint.rs index 545af59985..f5a95f0ad9 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/attestation/checkpoint.rs @@ -1,11 +1,15 @@ -use 
crate::test_utils::TestRandom; -use crate::{Epoch, ForkName, Hash256}; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Epoch, Hash256}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Casper FFG checkpoint, used in attestations. /// /// Spec v0.12.1 diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/attestation/indexed_attestation.rs similarity index 96% rename from consensus/types/src/indexed_attestation.rs rename to consensus/types/src/attestation/indexed_attestation.rs index dc32884217..272b015d90 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/attestation/indexed_attestation.rs @@ -1,17 +1,21 @@ -use crate::context_deserialize; -use crate::{ - AggregateSignature, AttestationData, EthSpec, ForkName, VariableList, test_utils::TestRandom, +use std::{ + hash::{Hash, Hasher}, + slice::Iter, }; -use core::slice::Iter; + +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use educe::Educe; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::hash::{Hash, Hasher}; +use ssz_types::VariableList; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{attestation::AttestationData, core::EthSpec, fork::ForkName, test_utils::TestRandom}; + /// Details an attestation that can be slashable. /// /// To be included in an `AttesterSlashing`. 
@@ -208,9 +212,10 @@ impl Hash for IndexedAttestation { #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; - use crate::slot_epoch::Epoch; - use crate::test_utils::{SeedableRng, XorShiftRng}; + use crate::{ + core::{Epoch, MainnetEthSpec}, + test_utils::{SeedableRng, XorShiftRng}, + }; #[test] pub fn test_is_double_vote_true() { diff --git a/consensus/types/src/indexed_payload_attestation.rs b/consensus/types/src/attestation/indexed_payload_attestation.rs similarity index 100% rename from consensus/types/src/indexed_payload_attestation.rs rename to consensus/types/src/attestation/indexed_payload_attestation.rs diff --git a/consensus/types/src/attestation/mod.rs b/consensus/types/src/attestation/mod.rs new file mode 100644 index 0000000000..f9cfdb5c65 --- /dev/null +++ b/consensus/types/src/attestation/mod.rs @@ -0,0 +1,45 @@ +mod aggregate_and_proof; +mod attestation; +mod attestation_data; +mod attestation_duty; +mod beacon_committee; +mod checkpoint; +mod indexed_attestation; +mod participation_flags; +mod payload_attestation; +mod payload_attestation_data; +mod payload_attestation_message; +mod pending_attestation; +mod selection_proof; +mod shuffling_id; +mod signed_aggregate_and_proof; +mod subnet_id; + +pub use aggregate_and_proof::{ + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, +}; +pub use attestation::{ + Attestation, AttestationBase, AttestationElectra, AttestationOnDisk, AttestationRef, + AttestationRefMut, AttestationRefOnDisk, Error as AttestationError, SingleAttestation, +}; +pub use attestation_data::AttestationData; +pub use attestation_duty::AttestationDuty; +pub use beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; +pub use checkpoint::Checkpoint; +pub use indexed_attestation::{ + IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, +}; +pub use participation_flags::ParticipationFlags; +pub use payload_attestation::PayloadAttestation; 
+pub use payload_attestation_data::PayloadAttestationData; +pub use payload_attestation_message::PayloadAttestationMessage; +pub use pending_attestation::PendingAttestation; +pub use selection_proof::SelectionProof; +pub use shuffling_id::AttestationShufflingId; +pub use signed_aggregate_and_proof::{ + SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, + SignedAggregateAndProofRefMut, +}; +pub use subnet_id::SubnetId; + +pub type CommitteeIndex = u64; diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/attestation/participation_flags.rs similarity index 96% rename from consensus/types/src/participation_flags.rs rename to consensus/types/src/attestation/participation_flags.rs index e59efc5170..66831abfac 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/attestation/participation_flags.rs @@ -1,10 +1,14 @@ -use crate::{Hash256, consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use test_random_derive::TestRandom; use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; +use crate::{ + core::{Hash256, consts::altair::NUM_FLAG_INDICES}, + test_utils::TestRandom, +}; + #[derive(Debug, Default, Clone, Copy, PartialEq, Deserialize, Serialize, TestRandom)] #[serde(transparent)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/payload_attestation.rs b/consensus/types/src/attestation/payload_attestation.rs similarity index 91% rename from consensus/types/src/payload_attestation.rs rename to consensus/types/src/attestation/payload_attestation.rs index d4e2dd2138..fa1bfc0293 100644 --- a/consensus/types/src/payload_attestation.rs +++ b/consensus/types/src/attestation/payload_attestation.rs @@ -1,3 +1,4 @@ +use crate::attestation::payload_attestation_data::PayloadAttestationData; use crate::test_utils::TestRandom; 
use crate::*; use educe::Educe; diff --git a/consensus/types/src/payload_attestation_data.rs b/consensus/types/src/attestation/payload_attestation_data.rs similarity index 100% rename from consensus/types/src/payload_attestation_data.rs rename to consensus/types/src/attestation/payload_attestation_data.rs diff --git a/consensus/types/src/payload_attestation_message.rs b/consensus/types/src/attestation/payload_attestation_message.rs similarity index 82% rename from consensus/types/src/payload_attestation_message.rs rename to consensus/types/src/attestation/payload_attestation_message.rs index d1b42cc03f..24744114a2 100644 --- a/consensus/types/src/payload_attestation_message.rs +++ b/consensus/types/src/attestation/payload_attestation_message.rs @@ -1,5 +1,7 @@ +use crate::attestation::payload_attestation_data::PayloadAttestationData; use crate::test_utils::TestRandom; -use crate::*; +use crate::{ForkName, context_deserialize}; +use bls::Signature; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/attestation/pending_attestation.rs similarity index 84% rename from consensus/types/src/pending_attestation.rs rename to consensus/types/src/attestation/pending_attestation.rs index 4a00a0495a..84353ac118 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/attestation/pending_attestation.rs @@ -1,11 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{AttestationData, BitList, EthSpec, ForkName}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{attestation::AttestationData, core::EthSpec, fork::ForkName, test_utils::TestRandom}; + /// An attestation that has been included in the state but not yet fully 
processed. /// /// Spec v0.12.1 diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/attestation/selection_proof.rs similarity index 95% rename from consensus/types/src/selection_proof.rs rename to consensus/types/src/attestation/selection_proof.rs index aa8c0c5658..b4c48d0078 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/attestation/selection_proof.rs @@ -1,11 +1,15 @@ -use crate::{ - ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, -}; +use std::cmp; + +use bls::{PublicKey, SecretKey, Signature}; use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; -use std::cmp; + +use crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot}, + fork::Fork, +}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/attestation/shuffling_id.rs similarity index 93% rename from consensus/types/src/shuffling_id.rs rename to consensus/types/src/attestation/shuffling_id.rs index df16f605ed..25217288f6 100644 --- a/consensus/types/src/shuffling_id.rs +++ b/consensus/types/src/attestation/shuffling_id.rs @@ -1,7 +1,12 @@ -use crate::*; +use std::hash::Hash; + use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::hash::Hash; + +use crate::{ + core::{Epoch, EthSpec, Hash256, RelativeEpoch}, + state::{BeaconState, BeaconStateError}, +}; /// Can be used to key (ID) the shuffling in some chain, in some epoch. 
/// diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/attestation/signed_aggregate_and_proof.rs similarity index 90% rename from consensus/types/src/signed_aggregate_and_proof.rs rename to consensus/types/src/attestation/signed_aggregate_and_proof.rs index 758ac2734b..48c3f4c567 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/attestation/signed_aggregate_and_proof.rs @@ -1,18 +1,21 @@ -use super::{ - AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, -}; -use super::{ - Attestation, AttestationRef, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, - SelectionProof, Signature, SignedRoot, -}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, + Attestation, AttestationRef, SelectionProof, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A Validators signed aggregate proof to publish on the `beacon_aggregate_and_proof` /// gossipsub topic. /// diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/attestation/subnet_id.rs similarity index 97% rename from consensus/types/src/subnet_id.rs rename to consensus/types/src/attestation/subnet_id.rs index 6ec8ca4a27..9585d077b5 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/attestation/subnet_id.rs @@ -1,11 +1,17 @@ //! Identifies each shard by an integer identifier. 
-use crate::SingleAttestation; -use crate::{AttestationRef, ChainSpec, CommitteeIndex, EthSpec, Slot}; +use std::{ + ops::{Deref, DerefMut}, + sync::LazyLock, +}; + use alloy_primitives::{U256, bytes::Buf}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; -use std::ops::{Deref, DerefMut}; -use std::sync::LazyLock; + +use crate::{ + attestation::{AttestationRef, CommitteeIndex, SingleAttestation}, + core::{ChainSpec, EthSpec, Slot}, +}; const MAX_SUBNET_ID: usize = 64; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/block/beacon_block.rs similarity index 96% rename from consensus/types/src/beacon_block.rs rename to consensus/types/src/block/beacon_block.rs index 4cf01d09f5..c42ed5fa07 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/block/beacon_block.rs @@ -1,18 +1,40 @@ -use crate::attestation::AttestationBase; -use crate::test_utils::TestRandom; -use crate::*; +use std::{fmt, marker::PhantomData}; + +use bls::{AggregateSignature, PublicKeyBytes, SecretKey, Signature, SignatureBytes}; +use context_deserialize::ContextDeserialize; use educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; -use std::fmt; -use std::marker::PhantomData; +use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum::Unsigned}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use self::indexed_attestation::IndexedAttestationBase; +use crate::{ + SignedExecutionPayloadBid, + attestation::{AttestationBase, AttestationData, IndexedAttestationBase}, + block::{ + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, + BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, BeaconBlockHeader, + 
SignedBeaconBlock, SignedBeaconBlockHeader, + }, + core::{ChainSpec, Domain, Epoch, EthSpec, Graffiti, Hash256, SignedRoot, Slot}, + deposit::{Deposit, DepositData}, + execution::{ + AbstractExecPayload, BlindedPayload, Eth1Data, ExecutionPayload, ExecutionRequests, + FullPayload, + }, + exit::{SignedVoluntaryExit, VoluntaryExit}, + fork::{Fork, ForkName, InconsistentFork, map_fork_name}, + slashing::{AttesterSlashingBase, ProposerSlashing}, + state::BeaconStateError, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; /// A block of the `BeaconChain`. #[superstruct( @@ -283,7 +305,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, E, Payl /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { self.body().execution_payload() } } @@ -891,7 +913,10 @@ impl fmt::Display for BlockImportSource { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, XorShiftRng, test_ssz_tree_hash_pair_with}; + use crate::{ + core::MainnetEthSpec, + test_utils::{SeedableRng, XorShiftRng, test_ssz_tree_hash_pair_with}, + }; use ssz::Encode; type BeaconBlock = super::BeaconBlock; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/block/beacon_block_body.rs similarity index 93% rename from consensus/types/src/beacon_block_body.rs rename to consensus/types/src/block/beacon_block_body.rs index 64d929e314..1a0b385900 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/block/beacon_block_body.rs @@ -1,18 +1,43 @@ -use crate::test_utils::TestRandom; -use crate::*; +use std::marker::PhantomData; + +use bls::Signature; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use merkle_proof::{MerkleTree, MerkleTreeError}; use metastruct::metastruct; use serde::{Deserialize, 
Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use std::marker::PhantomData; +use ssz_types::{FixedVector, VariableList}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::{BYTES_PER_CHUNK, TreeHash}; use tree_hash_derive::TreeHash; -pub type KzgCommitments = - VariableList::MaxBlobCommitmentsPerBlock>; +use crate::payload_attestation::PayloadAttestation; +use crate::{ + SignedExecutionPayloadBid, + attestation::{AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut}, + core::{EthSpec, Graffiti, Hash256}, + deposit::Deposit, + execution::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, Eth1Data, ExecutionPayload, + ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionRequests, + FullPayload, FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, + FullPayloadElectra, FullPayloadFulu, SignedBlsToExecutionChange, + }, + exit::SignedVoluntaryExit, + fork::{ForkName, map_fork_name}, + kzg_ext::KzgCommitments, + light_client::consts::{EXECUTION_PAYLOAD_INDEX, EXECUTION_PAYLOAD_PROOF_LEN}, + slashing::{ + AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingRef, ProposerSlashing, + }, + state::BeaconStateError, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; /// The number of leaves (including padding) on the `BeaconBlockBody` Merkle tree. 
/// @@ -63,8 +88,14 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; Fulu(metastruct(mappings(beacon_block_body_fulu_fields(groups(fields))))), Gloas(metastruct(mappings(beacon_block_body_gloas_fields(groups(fields))))), ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", @@ -148,7 +179,7 @@ pub struct BeaconBlockBody = FullPay } impl> BeaconBlockBody { - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { self.to_ref().execution_payload() } @@ -159,15 +190,15 @@ impl> BeaconBlockBody { } impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Payload> { - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { match self { - Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant), + Self::Base(_) | Self::Altair(_) => Err(BeaconStateError::IncorrectStateVariant), Self::Bellatrix(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Electra(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Fulu(body) => Ok(Payload::Ref::from(&body.execution_payload)), - Self::Gloas(_) => Err(Error::IncorrectStateVariant), + Self::Gloas(_) => Err(BeaconStateError::IncorrectStateVariant), } } @@ -217,7 +248,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, pub fn kzg_commitment_merkle_proof( &self, index: usize, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let kzg_commitments_proof = 
self.kzg_commitments_merkle_proof()?; let proof = self.complete_kzg_commitment_merkle_proof(index, &kzg_commitments_proof)?; Ok(proof) @@ -230,13 +261,13 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, &self, index: usize, kzg_commitments_proof: &[Hash256], - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { Self::Base(_) | Self::Altair(_) | Self::Bellatrix(_) | Self::Capella(_) - | Self::Gloas(_) => Err(Error::IncorrectStateVariant), + | Self::Gloas(_) => Err(BeaconStateError::IncorrectStateVariant), Self::Deneb(_) | Self::Electra(_) | Self::Fulu(_) => { // We compute the branches by generating 2 merkle trees: // 1. Merkle tree for the `blob_kzg_commitments` List object @@ -257,7 +288,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, let tree = MerkleTree::create(&blob_leaves, depth as usize); let (_, mut proof) = tree .generate_proof(index, depth as usize) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; // Add the branch corresponding to the length mix-in. let length = blob_leaves.len(); @@ -265,7 +296,9 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, let mut length_bytes = [0; BYTES_PER_CHUNK]; length_bytes .get_mut(0..usize_len) - .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? + .ok_or(BeaconStateError::MerkleTreeError( + MerkleTreeError::PleaseNotifyTheDevs, + ))? .copy_from_slice(&length.to_le_bytes()); let length_root = Hash256::from_slice(length_bytes.as_slice()); proof.push(length_root); @@ -283,32 +316,41 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, /// Produces the proof of inclusion for `self.blob_kzg_commitments`. 
pub fn kzg_commitments_merkle_proof( &self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let body_leaves = self.body_merkle_leaves(); let beacon_block_body_depth = body_leaves.len().next_power_of_two().ilog2() as usize; let tree = MerkleTree::create(&body_leaves, beacon_block_body_depth); let (_, proof) = tree .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; Ok(FixedVector::new(proof)?) } - pub fn block_body_merkle_proof(&self, generalized_index: usize) -> Result, Error> { + pub fn block_body_merkle_proof( + &self, + generalized_index: usize, + ) -> Result, BeaconStateError> { let field_index = match generalized_index { - light_client_update::EXECUTION_PAYLOAD_INDEX => { + EXECUTION_PAYLOAD_INDEX => { // Execution payload is a top-level field, subtract off the generalized indices // for the internal nodes. Result should be 9, the field offset of the execution // payload in the `BeaconBlockBody`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#beaconblockbody generalized_index .checked_sub(NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES) - .ok_or(Error::GeneralizedIndexNotSupported(generalized_index))? + .ok_or(BeaconStateError::GeneralizedIndexNotSupported( + generalized_index, + ))? + } + _ => { + return Err(BeaconStateError::GeneralizedIndexNotSupported( + generalized_index, + )); } - _ => return Err(Error::GeneralizedIndexNotSupported(generalized_index)), }; let leaves = self.body_merkle_leaves(); - let depth = light_client_update::EXECUTION_PAYLOAD_PROOF_LEN; + let depth = EXECUTION_PAYLOAD_PROOF_LEN; let tree = merkle_proof::MerkleTree::create(&leaves, depth); let (_, proof) = tree.generate_proof(field_index, depth)?; @@ -1111,22 +1153,16 @@ impl<'de, E: EthSpec, Payload: AbstractExecPayload> ContextDeserialize<'de, F } } -/// Util method helpful for logging. 
-pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { - let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); - let commitments_joined = commitment_strings.join(", "); - let surrounded_commitments = format!("[{}]", commitments_joined); - surrounded_commitments -} - #[cfg(test)] mod tests { mod base { use super::super::*; + use crate::core::MainnetEthSpec; ssz_and_tree_hash_tests!(BeaconBlockBodyBase); } mod altair { use super::super::*; + use crate::core::MainnetEthSpec; ssz_and_tree_hash_tests!(BeaconBlockBodyAltair); } } diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/block/beacon_block_header.rs similarity index 90% rename from consensus/types/src/beacon_block_header.rs rename to consensus/types/src/block/beacon_block_header.rs index e14a9fc8af..06e1023d91 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/block/beacon_block_header.rs @@ -1,6 +1,4 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use bls::SecretKey; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -8,6 +6,13 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBeaconBlockHeader, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A header of a `BeaconBlock`. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/block/mod.rs b/consensus/types/src/block/mod.rs new file mode 100644 index 0000000000..81c8ffbd63 --- /dev/null +++ b/consensus/types/src/block/mod.rs @@ -0,0 +1,26 @@ +mod beacon_block; +mod beacon_block_body; +mod beacon_block_header; +mod signed_beacon_block; +mod signed_beacon_block_header; + +pub use beacon_block::{ + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, + BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockRef, + BeaconBlockRefMut, BlindedBeaconBlock, BlockImportSource, EmptyBlock, +}; +pub use beacon_block_body::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, + BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, + BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, + NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES, +}; +pub use beacon_block_header::BeaconBlockHeader; + +pub use signed_beacon_block::{ + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockFulu, SignedBeaconBlockGloas, SignedBeaconBlockHash, SignedBlindedBeaconBlock, + ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, +}; +pub use signed_beacon_block_header::SignedBeaconBlockHeader; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs similarity index 96% rename from consensus/types/src/signed_beacon_block.rs rename to consensus/types/src/block/signed_beacon_block.rs index 12ebcaaa57..aeb3c18d95 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -1,17 +1,40 @@ -use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, format_kzg_commitments}; -use 
crate::test_utils::TestRandom; -use crate::*; +use std::fmt; + +use bls::{PublicKey, Signature}; +use context_deserialize::ContextDeserialize; use educe::Educe; use merkle_proof::MerkleTree; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use std::fmt; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, + BeaconBlockBellatrix, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, + BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, BeaconBlockCapella, + BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockHeader, + BeaconBlockRef, BeaconBlockRefMut, SignedBeaconBlockHeader, + }, + core::{ChainSpec, Domain, Epoch, EthSpec, Hash256, SignedRoot, SigningData, Slot}, + execution::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, ExecutionPayload, + ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, ExecutionPayloadFulu, FullPayload, FullPayloadBellatrix, + FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, + }, + fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, + kzg_ext::format_kzg_commitments, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Eq, Hash, Clone, Copy)] pub struct SignedBeaconBlockHash(Hash256); @@ -272,7 +295,7 @@ impl> SignedBeaconBlock SignedBeaconBlockHeader, FixedVector, ), - Error, + BeaconStateError, > { // Create the block body merkle tree let body_leaves = self.message().body().body_merkle_leaves(); @@ -282,7 +305,7 @@ impl> SignedBeaconBlock // 
Compute the KZG commitments inclusion proof let (_, proof) = body_merkle_tree .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; let kzg_commitments_inclusion_proof = FixedVector::new(proof)?; let block_header = BeaconBlockHeader { @@ -873,6 +896,7 @@ pub mod ssz_tagged_signed_beacon_block_arc { #[cfg(test)] mod test { use super::*; + use crate::{block::EmptyBlock, core::MainnetEthSpec}; #[test] fn add_remove_payload_roundtrip() { diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/block/signed_beacon_block_header.rs similarity index 84% rename from consensus/types/src/signed_beacon_block_header.rs rename to consensus/types/src/block/signed_beacon_block_header.rs index 4a5ff2ec1a..2fcd8a705f 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/block/signed_beacon_block_header.rs @@ -1,13 +1,17 @@ -use crate::context_deserialize; -use crate::{ - BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, Signature, - SignedRoot, test_utils::TestRandom, -}; +use bls::{PublicKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::BeaconBlockHeader, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A signed header of a `BeaconBlock`. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder/builder_bid.rs similarity index 93% rename from consensus/types/src/builder_bid.rs rename to consensus/types/src/builder/builder_bid.rs index 854dcc47e3..1018fadb64 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder/builder_bid.rs @@ -1,12 +1,6 @@ -use crate::beacon_block_body::KzgCommitments; -use crate::{ - ChainSpec, ContextDeserialize, EthSpec, ExecutionPayloadHeaderBellatrix, - ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, - ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, - ExecutionRequests, ForkName, ForkVersionDecode, SignedRoot, Uint256, test_utils::TestRandom, -}; use bls::PublicKeyBytes; use bls::Signature; +use context_deserialize::ContextDeserialize; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; @@ -14,6 +8,18 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, EthSpec, SignedRoot, Uint256}, + execution::{ + ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ExecutionRequests, + }, + fork::{ForkName, ForkVersionDecode}, + kzg_ext::KzgCommitments, + test_utils::TestRandom, +}; + #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( diff --git a/consensus/types/src/builder_pending_payment.rs b/consensus/types/src/builder/builder_pending_payment.rs similarity index 100% rename from consensus/types/src/builder_pending_payment.rs rename to consensus/types/src/builder/builder_pending_payment.rs diff --git a/consensus/types/src/builder_pending_withdrawal.rs b/consensus/types/src/builder/builder_pending_withdrawal.rs 
similarity index 100% rename from consensus/types/src/builder_pending_withdrawal.rs rename to consensus/types/src/builder/builder_pending_withdrawal.rs diff --git a/consensus/types/src/builder/mod.rs b/consensus/types/src/builder/mod.rs new file mode 100644 index 0000000000..54d0ae4eb7 --- /dev/null +++ b/consensus/types/src/builder/mod.rs @@ -0,0 +1,10 @@ +mod builder_bid; +mod builder_pending_payment; +mod builder_pending_withdrawal; + +pub use builder_bid::{ + BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, + BuilderBidFulu, SignedBuilderBid, +}; +pub use builder_pending_payment::BuilderPendingPayment; +pub use builder_pending_withdrawal::BuilderPendingWithdrawal; diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation/consolidation_request.rs similarity index 84% rename from consensus/types/src/consolidation_request.rs rename to consensus/types/src/consolidation/consolidation_request.rs index 2af3426b68..3f09517a90 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation/consolidation_request.rs @@ -1,11 +1,17 @@ -use crate::context_deserialize; -use crate::{Address, ForkName, PublicKeyBytes, SignedRoot, test_utils::TestRandom}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/consolidation/mod.rs b/consensus/types/src/consolidation/mod.rs new file mode 100644 index 0000000000..a6a2f4a331 --- /dev/null +++ b/consensus/types/src/consolidation/mod.rs @@ -0,0 +1,5 @@ +mod 
consolidation_request; +mod pending_consolidation; + +pub use consolidation_request::ConsolidationRequest; +pub use pending_consolidation::PendingConsolidation; diff --git a/consensus/types/src/pending_consolidation.rs b/consensus/types/src/consolidation/pending_consolidation.rs similarity index 86% rename from consensus/types/src/pending_consolidation.rs rename to consensus/types/src/consolidation/pending_consolidation.rs index 9fb8c3566d..fcd76e43b6 100644 --- a/consensus/types/src/pending_consolidation.rs +++ b/consensus/types/src/consolidation/pending_consolidation.rs @@ -1,11 +1,11 @@ -use crate::ForkName; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/application_domain.rs b/consensus/types/src/core/application_domain.rs similarity index 100% rename from consensus/types/src/application_domain.rs rename to consensus/types/src/core/application_domain.rs diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/core/chain_spec.rs similarity index 99% rename from consensus/types/src/chain_spec.rs rename to consensus/types/src/core/chain_spec.rs index de3f67d14a..da3f9b90cc 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1,19 +1,27 @@ -use crate::application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; -use crate::blob_sidecar::BlobIdentifier; -use crate::data_column_sidecar::DataColumnsByRootIdentifier; -use crate::*; +use std::{fs::File, path::Path, time::Duration}; + use educe::Educe; use ethereum_hashing::hash; +use 
fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_utils::quoted_u64::MaybeQuoted; use ssz::Encode; -use std::fs::File; -use std::path::Path; -use std::time::Duration; +use ssz_types::{RuntimeVariableList, VariableList}; use tree_hash::TreeHash; +use crate::{ + core::{ + APPLICATION_DOMAIN_BUILDER, Address, ApplicationDomain, EnrForkId, Epoch, EthSpec, + EthSpecId, Hash256, MainnetEthSpec, Slot, Uint256, + }, + data::{BlobIdentifier, DataColumnSubnetId, DataColumnsByRootIdentifier}, + execution::ExecutionBlockHash, + fork::{Fork, ForkData, ForkName}, + state::BeaconState, +}; + /// Each of the BLS signature domains. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Domain { @@ -2602,6 +2610,7 @@ mod tests { #[cfg(test)] mod yaml_tests { use super::*; + use crate::core::MinimalEthSpec; use paste::paste; use std::sync::Arc; use tempfile::NamedTempFile; diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/core/config_and_preset.rs similarity index 95% rename from consensus/types/src/config_and_preset.rs rename to consensus/types/src/core/config_and_preset.rs index 16b09c9c08..08141c7731 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/core/config_and_preset.rs @@ -1,13 +1,14 @@ -use crate::{ - AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, DenebPreset, - ElectraPreset, EthSpec, FuluPreset, GloasPreset, consts::altair, consts::deneb, -}; use maplit::hashmap; use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use superstruct::superstruct; +use crate::core::{ + AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, DenebPreset, + ElectraPreset, EthSpec, FuluPreset, GloasPreset, consts, +}; + /// Fusion of a runtime-config with the compile-time preset values. /// /// Mostly useful for the API. 
@@ -131,11 +132,11 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "domain_sync_committee_selection_proof".to_uppercase() => u32_hex(spec.domain_sync_committee_selection_proof), "sync_committee_subnet_count".to_uppercase() => - altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), "target_aggregators_per_sync_subcommittee".to_uppercase() => - altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), + consts::altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), // Deneb - "versioned_hash_version_kzg".to_uppercase() => deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), + "versioned_hash_version_kzg".to_uppercase() => consts::deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), // Electra "compounding_withdrawal_prefix".to_uppercase() => u8_hex(spec.compounding_withdrawal_prefix_byte), "unset_deposit_requests_start_index".to_uppercase() => spec.unset_deposit_requests_start_index.to_string().into(), diff --git a/consensus/types/src/consts.rs b/consensus/types/src/core/consts.rs similarity index 94% rename from consensus/types/src/consts.rs rename to consensus/types/src/core/consts.rs index c20d5fe8f3..b6d63c47a8 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/core/consts.rs @@ -23,5 +23,5 @@ pub mod bellatrix { pub const INTERVALS_PER_SLOT: u64 = 3; } pub mod deneb { - pub use crate::VERSIONED_HASH_VERSION_KZG; + pub use kzg::VERSIONED_HASH_VERSION_KZG; } diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/core/enr_fork_id.rs similarity index 95% rename from consensus/types/src/enr_fork_id.rs rename to consensus/types/src/core/enr_fork_id.rs index e22672aeb6..c3b400cd13 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/core/enr_fork_id.rs @@ -1,11 +1,10 @@ -use crate::Epoch; -use crate::test_utils::TestRandom; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use 
test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, test_utils::TestRandom}; + /// Specifies a fork which allows nodes to identify each other on the network. This fork is used in /// a nodes local ENR. /// diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/core/eth_spec.rs similarity index 98% rename from consensus/types/src/eth_spec.rs rename to consensus/types/src/core/eth_spec.rs index da3104adb9..dbae48c0b6 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/core/eth_spec.rs @@ -1,16 +1,22 @@ -use crate::*; +use std::{ + fmt::{self, Debug}, + str::FromStr, +}; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ U0, U1, U2, U4, U8, U16, U17, U32, U64, U128, U256, U512, U625, U1024, U2048, U4096, U8192, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, U1073741824, - U1099511627776, UInt, bit::B0, + U1099511627776, UInt, Unsigned, bit::B0, }; -use std::fmt::{self, Debug}; -use std::str::FromStr; -pub type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 +use crate::{ + core::{ChainSpec, Epoch}, + state::BeaconStateError, +}; + +type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 const MAINNET: &str = "mainnet"; const MINIMAL: &str = "minimal"; @@ -190,7 +196,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn get_committee_count_per_slot( active_validator_count: usize, spec: &ChainSpec, - ) -> Result { + ) -> Result { Self::get_committee_count_per_slot_with( active_validator_count, spec.max_committees_per_slot, @@ -202,7 +208,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + active_validator_count: usize, max_committees_per_slot: usize, target_committee_size: usize, - ) -> Result { + ) -> Result { let slots_per_epoch = Self::SlotsPerEpoch::to_usize(); Ok(std::cmp::max( diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/core/graffiti.rs similarity index 98% 
rename from consensus/types/src/graffiti.rs rename to consensus/types/src/core/graffiti.rs index 31cc4187a6..d0e0e1b1a8 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/core/graffiti.rs @@ -1,14 +1,13 @@ -use crate::{ - Hash256, - test_utils::{RngCore, TestRandom}, -}; +use std::{fmt, str::FromStr}; + +use rand::RngCore; use regex::bytes::Regex; use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; -use std::str::FromStr; use tree_hash::{PackedEncoding, TreeHash}; +use crate::{core::Hash256, test_utils::TestRandom}; + pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. diff --git a/consensus/types/src/core/mod.rs b/consensus/types/src/core/mod.rs new file mode 100644 index 0000000000..bb50bb1856 --- /dev/null +++ b/consensus/types/src/core/mod.rs @@ -0,0 +1,44 @@ +pub mod consts; + +mod application_domain; +mod chain_spec; +mod config_and_preset; +mod enr_fork_id; +mod eth_spec; +mod graffiti; +mod non_zero_usize; +mod preset; +mod relative_epoch; +mod signing_data; +mod slot_data; +#[macro_use] +mod slot_epoch_macros; +mod slot_epoch; +#[cfg(feature = "sqlite")] +mod sqlite; + +pub use application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; +pub use chain_spec::{BlobParameters, BlobSchedule, ChainSpec, Config, Domain}; +pub use config_and_preset::{ + ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, + ConfigAndPresetGloas, get_extra_fields, +}; +pub use enr_fork_id::EnrForkId; +pub use eth_spec::{EthSpec, EthSpecId, GNOSIS, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; +pub use graffiti::{GRAFFITI_BYTES_LEN, Graffiti, GraffitiString}; +pub use non_zero_usize::new_non_zero_usize; +pub use preset::{ + AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, + FuluPreset, GloasPreset, +}; +pub use relative_epoch::{Error as RelativeEpochError, 
RelativeEpoch}; +pub use signing_data::{SignedRoot, SigningData}; +pub use slot_data::SlotData; +pub use slot_epoch::{Epoch, Slot}; + +pub type Hash256 = alloy_primitives::B256; +pub type Uint256 = alloy_primitives::U256; +pub type Hash64 = alloy_primitives::B64; +pub type Address = alloy_primitives::Address; +pub type VersionedHash = Hash256; +pub type MerkleProof = Vec; diff --git a/consensus/types/src/non_zero_usize.rs b/consensus/types/src/core/non_zero_usize.rs similarity index 100% rename from consensus/types/src/non_zero_usize.rs rename to consensus/types/src/core/non_zero_usize.rs diff --git a/consensus/types/src/preset.rs b/consensus/types/src/core/preset.rs similarity index 99% rename from consensus/types/src/preset.rs rename to consensus/types/src/core/preset.rs index ab54c0345f..b436fafd3a 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/core/preset.rs @@ -1,5 +1,7 @@ -use crate::{ChainSpec, Epoch, EthSpec, Unsigned}; use serde::{Deserialize, Serialize}; +use ssz_types::typenum::Unsigned; + +use crate::core::{ChainSpec, Epoch, EthSpec}; /// Value-level representation of an Ethereum consensus "preset". 
/// diff --git a/consensus/types/src/relative_epoch.rs b/consensus/types/src/core/relative_epoch.rs similarity index 99% rename from consensus/types/src/relative_epoch.rs rename to consensus/types/src/core/relative_epoch.rs index 2fa0ae41bd..d1ee7ecc7c 100644 --- a/consensus/types/src/relative_epoch.rs +++ b/consensus/types/src/core/relative_epoch.rs @@ -1,6 +1,7 @@ -use crate::*; use safe_arith::{ArithError, SafeArith}; +use crate::core::{Epoch, Slot}; + #[derive(Debug, PartialEq, Clone, Copy)] pub enum Error { EpochTooLow { base: Epoch, other: Epoch }, diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/core/signing_data.rs similarity index 85% rename from consensus/types/src/signing_data.rs rename to consensus/types/src/core/signing_data.rs index 69b7dabfe5..907f03fac7 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/core/signing_data.rs @@ -1,13 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256}; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[context_deserialize(ForkName)] diff --git a/consensus/types/src/slot_data.rs b/consensus/types/src/core/slot_data.rs similarity index 92% rename from consensus/types/src/slot_data.rs rename to consensus/types/src/core/slot_data.rs index 19775913b9..f0bd01814f 100644 --- a/consensus/types/src/slot_data.rs +++ b/consensus/types/src/core/slot_data.rs @@ -1,4 +1,4 @@ -use crate::Slot; +use crate::core::Slot; /// A trait providing a `Slot` getter for messages that are related to a single slot. 
Useful in /// making parts of attestation and sync committee processing generic. diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/core/slot_epoch.rs similarity index 98% rename from consensus/types/src/slot_epoch.rs rename to consensus/types/src/core/slot_epoch.rs index 05af9c5232..97457701b1 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/core/slot_epoch.rs @@ -10,15 +10,17 @@ //! implement `Into`, however this would allow operations between `Slots` and `Epochs` which //! may lead to programming errors which are not detected by the compiler. -use crate::test_utils::TestRandom; -use crate::{ChainSpec, SignedRoot}; +use std::{fmt, hash::Hash}; use rand::RngCore; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; -use std::hash::Hash; + +use crate::{ + core::{ChainSpec, SignedRoot}, + test_utils::TestRandom, +}; #[cfg(feature = "legacy-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/core/slot_epoch_macros.rs similarity index 100% rename from consensus/types/src/slot_epoch_macros.rs rename to consensus/types/src/core/slot_epoch_macros.rs diff --git a/consensus/types/src/sqlite.rs b/consensus/types/src/core/sqlite.rs similarity index 96% rename from consensus/types/src/sqlite.rs rename to consensus/types/src/core/sqlite.rs index b6318dc4ce..de892b4e98 100644 --- a/consensus/types/src/sqlite.rs +++ b/consensus/types/src/core/sqlite.rs @@ -1,10 +1,11 @@ //! Implementations of SQLite compatibility traits. -use crate::{Epoch, Slot}; use rusqlite::{ Error, types::{FromSql, FromSqlError, ToSql, ToSqlOutput, ValueRef}, }; +use crate::core::{Epoch, Slot}; + macro_rules! 
impl_to_from_sql { ($type:ty) => { impl ToSql for $type { diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/data/blob_sidecar.rs similarity index 94% rename from consensus/types/src/blob_sidecar.rs rename to consensus/types/src/data/blob_sidecar.rs index d2c7331a57..709e556933 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/data/blob_sidecar.rs @@ -1,12 +1,7 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - AbstractExecPayload, BeaconBlockHeader, BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, - FixedVector, ForkName, Hash256, KzgProofs, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, VariableList, - beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, -}; +use std::{fmt::Debug, hash::Hash, sync::Arc}; + use bls::Signature; +use context_deserialize::context_deserialize; use educe::Educe; use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Blob as KzgBlob, Kzg, KzgCommitment, KzgProof}; use merkle_proof::{MerkleTreeError, merkle_root_from_branch, verify_merkle_proof}; @@ -15,13 +10,24 @@ use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::fmt::Debug; -use std::hash::Hash; -use std::sync::Arc; +use ssz_types::{FixedVector, RuntimeFixedVector, RuntimeVariableList, VariableList}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlock, SignedBeaconBlockHeader, + }, + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + data::Blob, + execution::AbstractExecPayload, + fork::ForkName, + kzg_ext::KzgProofs, + state::BeaconStateError, + test_utils::TestRandom, +}; + /// Container of the data that identifies an individual blob. 
#[derive( Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash, diff --git a/consensus/types/src/data_column_custody_group.rs b/consensus/types/src/data/data_column_custody_group.rs similarity index 98% rename from consensus/types/src/data_column_custody_group.rs rename to consensus/types/src/data/data_column_custody_group.rs index 7ecabab0ab..d96d13cfff 100644 --- a/consensus/types/src/data_column_custody_group.rs +++ b/consensus/types/src/data/data_column_custody_group.rs @@ -1,8 +1,14 @@ -use crate::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec}; +use std::collections::HashSet; + use alloy_primitives::U256; use itertools::Itertools; use safe_arith::{ArithError, SafeArith}; -use std::collections::HashSet; + +use crate::{ + EthSpec, + core::ChainSpec, + data::{ColumnIndex, DataColumnSubnetId}, +}; pub type CustodyIndex = u64; diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data/data_column_sidecar.rs similarity index 94% rename from consensus/types/src/data_column_sidecar.rs rename to consensus/types/src/data/data_column_sidecar.rs index 62ce4467df..71d821f83e 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data/data_column_sidecar.rs @@ -1,13 +1,8 @@ -use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, KzgCommitments}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - BeaconBlockHeader, BeaconStateError, Epoch, EthSpec, ForkName, Hash256, - SignedBeaconBlockHeader, Slot, -}; +use std::sync::Arc; + use bls::Signature; +use context_deserialize::context_deserialize; use educe::Educe; -use kzg::Error as KzgError; use kzg::{KzgCommitment, KzgProof}; use merkle_proof::verify_merkle_proof; use safe_arith::ArithError; @@ -16,11 +11,19 @@ use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::Error as SszError; use ssz_types::{FixedVector, VariableList}; -use std::sync::Arc; use test_random_derive::TestRandom; use 
tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlockHeader}, + core::{Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, + kzg_ext::{KzgCommitments, KzgError}, + state::BeaconStateError, + test_utils::TestRandom, +}; + pub type ColumnIndex = u64; pub type Cell = FixedVector::BytesPerCell>; pub type DataColumn = VariableList, ::MaxBlobCommitmentsPerBlock>; diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data/data_column_subnet_id.rs similarity index 80% rename from consensus/types/src/data_column_subnet_id.rs rename to consensus/types/src/data/data_column_subnet_id.rs index c6b8846c78..c30ebbba20 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ b/consensus/types/src/data/data_column_subnet_id.rs @@ -1,10 +1,13 @@ //! Identifies each data column subnet by an integer identifier. -use crate::ChainSpec; -use crate::data_column_sidecar::ColumnIndex; -use safe_arith::{ArithError, SafeArith}; +use std::{ + fmt::{self, Display}, + ops::{Deref, DerefMut}, +}; + +use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; -use std::fmt::{self, Display}; -use std::ops::{Deref, DerefMut}; + +use crate::{core::ChainSpec, data::ColumnIndex}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] @@ -69,15 +72,3 @@ impl From<&DataColumnSubnetId> for u64 { val.0 } } - -#[derive(Debug)] -pub enum Error { - ArithError(ArithError), - InvalidCustodySubnetCount(u64), -} - -impl From for Error { - fn from(e: ArithError) -> Self { - Error::ArithError(e) - } -} diff --git a/consensus/types/src/data/mod.rs b/consensus/types/src/data/mod.rs new file mode 100644 index 0000000000..10d062bada --- /dev/null +++ b/consensus/types/src/data/mod.rs @@ -0,0 +1,23 @@ +mod blob_sidecar; +mod data_column_custody_group; +mod data_column_sidecar; +mod data_column_subnet_id; + +pub use 
blob_sidecar::{ + BlobIdentifier, BlobSidecar, BlobSidecarError, BlobSidecarList, BlobsList, FixedBlobSidecarList, +}; +pub use data_column_custody_group::{ + CustodyIndex, DataColumnCustodyGroupError, compute_columns_for_custody_group, + compute_ordered_custody_column_indices, compute_subnets_for_node, + compute_subnets_from_custody_group, get_custody_groups, +}; +pub use data_column_sidecar::{ + Cell, ColumnIndex, DataColumn, DataColumnSidecar, DataColumnSidecarError, + DataColumnSidecarList, DataColumnsByRootIdentifier, +}; +pub use data_column_subnet_id::DataColumnSubnetId; + +use crate::core::EthSpec; +use ssz_types::FixedVector; + +pub type Blob = FixedVector::BytesPerBlob>; diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit/deposit.rs similarity index 78% rename from consensus/types/src/deposit.rs rename to consensus/types/src/deposit/deposit.rs index 724f3de2f0..67f8572def 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit/deposit.rs @@ -1,12 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::*; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::U33; +use ssz_types::{FixedVector, typenum::U33}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, deposit::DepositData, fork::ForkName, test_utils::TestRandom}; + pub const DEPOSIT_TREE_DEPTH: usize = 32; /// A deposit to potentially become a beacon chain validator. 
diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit/deposit_data.rs similarity index 86% rename from consensus/types/src/deposit_data.rs rename to consensus/types/src/deposit/deposit_data.rs index 3d9ae12808..51697f5d1a 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit/deposit_data.rs @@ -1,10 +1,17 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::{PublicKeyBytes, SecretKey, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Hash256, SignedRoot}, + deposit::DepositMessage, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data supplied by the user to the deposit contract. /// /// Spec v0.12.1 diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit/deposit_message.rs similarity index 81% rename from consensus/types/src/deposit_message.rs rename to consensus/types/src/deposit/deposit_message.rs index 9fe3b87885..4495a5c023 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit/deposit_message.rs @@ -1,11 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data supplied by the user to the deposit contract. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit/deposit_request.rs similarity index 86% rename from consensus/types/src/deposit_request.rs rename to consensus/types/src/deposit/deposit_request.rs index 16acfb3b44..8d3c6e88ba 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit/deposit_request.rs @@ -1,13 +1,13 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256, PublicKeyBytes}; -use bls::SignatureBytes; +use bls::{PublicKeyBytes, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit/deposit_tree_snapshot.rs similarity index 95% rename from consensus/types/src/deposit_tree_snapshot.rs rename to consensus/types/src/deposit/deposit_tree_snapshot.rs index 400fca217d..24f41397a0 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit/deposit_tree_snapshot.rs @@ -1,10 +1,11 @@ -use crate::*; use ethereum_hashing::{ZERO_HASHES, hash32_concat}; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes32; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; -use test_utils::TestRandom; + +use crate::{core::Hash256, deposit::DEPOSIT_TREE_DEPTH, test_utils::TestRandom}; #[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)] pub struct FinalizedExecutionBlock { diff --git a/consensus/types/src/deposit/mod.rs 
b/consensus/types/src/deposit/mod.rs new file mode 100644 index 0000000000..ff80f65cdb --- /dev/null +++ b/consensus/types/src/deposit/mod.rs @@ -0,0 +1,13 @@ +mod deposit; +mod deposit_data; +mod deposit_message; +mod deposit_request; +mod deposit_tree_snapshot; +mod pending_deposit; + +pub use deposit::{DEPOSIT_TREE_DEPTH, Deposit}; +pub use deposit_data::DepositData; +pub use deposit_message::DepositMessage; +pub use deposit_request::DepositRequest; +pub use deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; +pub use pending_deposit::PendingDeposit; diff --git a/consensus/types/src/pending_deposit.rs b/consensus/types/src/deposit/pending_deposit.rs similarity index 78% rename from consensus/types/src/pending_deposit.rs rename to consensus/types/src/deposit/pending_deposit.rs index 4a921edd54..4c039af39c 100644 --- a/consensus/types/src/pending_deposit.rs +++ b/consensus/types/src/deposit/pending_deposit.rs @@ -1,10 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::{PublicKeyBytes, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, Slot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/execution/bls_to_execution_change.rs similarity index 83% rename from consensus/types/src/bls_to_execution_change.rs rename to consensus/types/src/execution/bls_to_execution_change.rs index 72d737ac71..de14f1b4c5 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/execution/bls_to_execution_change.rs @@ -1,10 +1,17 @@ -use crate::test_utils::TestRandom; -use crate::*; +use 
bls::{PublicKeyBytes, SecretKey}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, ChainSpec, Domain, Hash256, SignedRoot}, + execution::SignedBlsToExecutionChange, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/dumb_macros.rs b/consensus/types/src/execution/dumb_macros.rs similarity index 100% rename from consensus/types/src/dumb_macros.rs rename to consensus/types/src/execution/dumb_macros.rs diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/execution/eth1_data.rs similarity index 86% rename from consensus/types/src/eth1_data.rs rename to consensus/types/src/execution/eth1_data.rs index 800f3e25f9..89a4e634a6 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/execution/eth1_data.rs @@ -1,12 +1,11 @@ -use super::Hash256; -use crate::ForkName; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + /// Contains data obtained from the Eth1 chain. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution/execution_block_hash.rs similarity index 96% rename from consensus/types/src/execution_block_hash.rs rename to consensus/types/src/execution/execution_block_hash.rs index 31905d64df..91c019ce04 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution/execution_block_hash.rs @@ -1,10 +1,11 @@ -use crate::FixedBytesExtended; -use crate::Hash256; -use crate::test_utils::TestRandom; +use std::fmt; + +use fixed_bytes::FixedBytesExtended; use rand::RngCore; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; + +use crate::{core::Hash256, test_utils::TestRandom}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution/execution_block_header.rs similarity index 98% rename from consensus/types/src/execution_block_header.rs rename to consensus/types/src/execution/execution_block_header.rs index 02152adbf7..e596ba1831 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution/execution_block_header.rs @@ -17,10 +17,15 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -use crate::{Address, EthSpec, ExecutionPayloadRef, Hash64, Hash256, Uint256}; use alloy_rlp::RlpEncodable; +use fixed_bytes::Uint256; use metastruct::metastruct; +use crate::{ + core::{Address, EthSpec, Hash64, Hash256}, + execution::ExecutionPayloadRef, +}; + /// Execution block header as used for RLP encoding and Keccak hashing. /// /// Credit to Reth for the type definition. 
diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution/execution_payload.rs similarity index 92% rename from consensus/types/src/execution_payload.rs rename to consensus/types/src/execution/execution_payload.rs index b82947e237..b2278c9166 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution/execution_payload.rs @@ -1,19 +1,29 @@ -use crate::{test_utils::TestRandom, *}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; +use fixed_bytes::Uint256; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256}, + execution::ExecutionBlockHash, + fork::{ForkName, ForkVersionDecode}, + state::BeaconStateError, + test_utils::TestRandom, + withdrawal::Withdrawals, +}; + pub type Transaction = VariableList; pub type Transactions = VariableList< Transaction<::MaxBytesPerTransaction>, ::MaxTransactionsPerPayload, >; -pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; - #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -38,8 +48,14 @@ pub type Withdrawals = VariableList::MaxWithdrawal arbitrary(bound = "E: EthSpec"), ), ), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/execution_payload_bid.rs b/consensus/types/src/execution/execution_payload_bid.rs similarity index 100% rename from 
consensus/types/src/execution_payload_bid.rs rename to consensus/types/src/execution/execution_payload_bid.rs diff --git a/consensus/types/src/execution_payload_envelope.rs b/consensus/types/src/execution/execution_payload_envelope.rs similarity index 100% rename from consensus/types/src/execution_payload_envelope.rs rename to consensus/types/src/execution/execution_payload_envelope.rs diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution/execution_payload_header.rs similarity index 95% rename from consensus/types/src/execution_payload_header.rs rename to consensus/types/src/execution/execution_payload_header.rs index fceee79c2f..cf78f7871b 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution/execution_payload_header.rs @@ -1,12 +1,28 @@ -use crate::{test_utils::TestRandom, *}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256, Uint256}, + execution::{ + ExecutionBlockHash, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadRef, + Transactions, + }, + fork::ForkName, + map_execution_payload_ref_into_execution_payload_header, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( @@ -35,8 +51,14 @@ use tree_hash_derive::TreeHash; derive(PartialEq, TreeHash, Debug), tree_hash(enum_behaviour = "transparent") ), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty 
= "Error", expr = "BeaconStateError::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_ref_into(ExecutionPayloadHeader) )] #[cfg_attr( diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution/execution_requests.rs similarity index 93% rename from consensus/types/src/execution_requests.rs rename to consensus/types/src/execution/execution_requests.rs index 67396af71d..92d717778e 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution/execution_requests.rs @@ -1,7 +1,5 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ConsolidationRequest, DepositRequest, EthSpec, ForkName, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; +use context_deserialize::context_deserialize; use educe::Educe; use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; @@ -11,6 +9,15 @@ use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + consolidation::ConsolidationRequest, + core::{EthSpec, Hash256}, + deposit::DepositRequest, + fork::ForkName, + test_utils::TestRandom, + withdrawal::WithdrawalRequest, +}; + pub type DepositRequests = VariableList::MaxDepositRequestsPerPayload>; pub type WithdrawalRequests = diff --git a/consensus/types/src/execution/mod.rs b/consensus/types/src/execution/mod.rs new file mode 100644 index 0000000000..da6c860600 --- /dev/null +++ b/consensus/types/src/execution/mod.rs @@ -0,0 +1,45 @@ +mod eth1_data; +mod execution_block_hash; +mod execution_block_header; +#[macro_use] +mod execution_payload; +mod bls_to_execution_change; +mod dumb_macros; +mod execution_payload_bid; +mod execution_payload_envelope; +mod execution_payload_header; +mod execution_requests; +mod payload; +mod 
signed_bls_to_execution_change; +mod signed_execution_payload_bid; +mod signed_execution_payload_envelope; + +pub use bls_to_execution_change::BlsToExecutionChange; +pub use eth1_data::Eth1Data; +pub use execution_block_hash::ExecutionBlockHash; +pub use execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; +pub use execution_payload::{ + ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadRef, + Transaction, Transactions, +}; +pub use execution_payload_bid::ExecutionPayloadBid; +pub use execution_payload_envelope::ExecutionPayloadEnvelope; +pub use execution_payload_header::{ + ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, +}; +pub use execution_requests::{ + ConsolidationRequests, DepositRequests, ExecutionRequests, RequestType, WithdrawalRequests, +}; +pub use payload::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadRef, + BlockProductionVersion, BlockType, ExecPayload, FullPayload, FullPayloadBellatrix, + FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, FullPayloadRef, + OwnedExecPayload, +}; +pub use signed_bls_to_execution_change::SignedBlsToExecutionChange; +pub use signed_execution_payload_bid::SignedExecutionPayloadBid; +pub use signed_execution_payload_envelope::SignedExecutionPayloadEnvelope; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/execution/payload.rs similarity index 90% rename from consensus/types/src/payload.rs rename to consensus/types/src/execution/payload.rs index 30a1bfbb28..703b082c18 100644 --- a/consensus/types/src/payload.rs +++ 
b/consensus/types/src/execution/payload.rs @@ -1,16 +1,30 @@ -use crate::{test_utils::TestRandom, *}; use educe::Educe; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::borrow::Cow; -use std::fmt::Debug; -use std::hash::Hash; +use ssz_types::VariableList; +use std::{borrow::Cow, fmt::Debug, hash::Hash}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256}, + execution::{ + ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadRef, Transactions, + }, + fork::ForkName, + map_execution_payload_into_blinded_payload, map_execution_payload_into_full_payload, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum BlockType { Blinded, @@ -38,8 +52,8 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn gas_limit(&self) -> u64; fn transactions(&self) -> Option<&Transactions>; /// fork-specific fields - fn withdrawals_root(&self) -> Result; - fn blob_gas_used(&self) -> Result; + fn withdrawals_root(&self) -> Result; + fn blob_gas_used(&self) -> Result; /// Is this a default payload with 0x0 roots for transactions and withdrawals? 
fn is_default_with_zero_roots(&self) -> bool; @@ -172,8 +186,14 @@ pub trait AbstractExecPayload: ), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", @@ -302,9 +322,9 @@ impl ExecPayload for FullPayload { }) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - FullPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), + FullPayload::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), FullPayload::Capella(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), FullPayload::Deneb(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), FullPayload::Electra(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), @@ -312,10 +332,10 @@ impl ExecPayload for FullPayload { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { FullPayload::Bellatrix(_) | FullPayload::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } FullPayload::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayload::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), @@ -343,15 +363,15 @@ impl FullPayload { }) } - pub fn default_at_fork(fork_name: ForkName) -> Result { + pub fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Base | ForkName::Altair => Err(BeaconStateError::IncorrectStateVariant), ForkName::Bellatrix => Ok(FullPayloadBellatrix::default().into()), ForkName::Capella => 
Ok(FullPayloadCapella::default().into()), ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), ForkName::Electra => Ok(FullPayloadElectra::default().into()), ForkName::Fulu => Ok(FullPayloadFulu::default().into()), - ForkName::Gloas => Err(Error::IncorrectStateVariant), + ForkName::Gloas => Err(BeaconStateError::IncorrectStateVariant), } } } @@ -439,9 +459,9 @@ impl ExecPayload for FullPayloadRef<'_, E> { }) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - FullPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), FullPayloadRef::Capella(inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } @@ -455,10 +475,10 @@ impl ExecPayload for FullPayloadRef<'_, E> { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { FullPayloadRef::Bellatrix(_) | FullPayloadRef::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } FullPayloadRef::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayloadRef::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), @@ -532,8 +552,14 @@ impl TryFrom> for FullPayload { tree_hash(enum_behaviour = "transparent"), ), map_into(ExecutionPayloadHeader), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", @@ -640,9 +666,9 @@ impl ExecPayload for BlindedPayload { None } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - BlindedPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), + 
BlindedPayload::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), BlindedPayload::Capella(inner) => Ok(inner.execution_payload_header.withdrawals_root), BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.withdrawals_root), BlindedPayload::Electra(inner) => Ok(inner.execution_payload_header.withdrawals_root), @@ -650,10 +676,10 @@ impl ExecPayload for BlindedPayload { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { BlindedPayload::Bellatrix(_) | BlindedPayload::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayload::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), @@ -746,9 +772,9 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { None } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - BlindedPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), + BlindedPayloadRef::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), BlindedPayloadRef::Capella(inner) => { Ok(inner.execution_payload_header.withdrawals_root) } @@ -760,10 +786,10 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { BlindedPayloadRef::Bellatrix(_) | BlindedPayloadRef::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayloadRef::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), @@ -855,12 +881,12 @@ macro_rules! 
impl_exec_payload_common { f(self) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { let g = $g; g(self) } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { let h = $h; h(self) } @@ -895,15 +921,16 @@ macro_rules! impl_exec_payload_for_fork { }, { |_| { None } }, { - let c: for<'a> fn(&'a $wrapper_type_header) -> Result = - |payload: &$wrapper_type_header| { - let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); - wrapper_ref_type.withdrawals_root() - }; + let c: for<'a> fn( + &'a $wrapper_type_header, + ) -> Result = |payload: &$wrapper_type_header| { + let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; c }, { - let c: for<'a> fn(&'a $wrapper_type_header) -> Result = + let c: for<'a> fn(&'a $wrapper_type_header) -> Result = |payload: &$wrapper_type_header| { let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); wrapper_ref_type.blob_gas_used() @@ -913,12 +940,12 @@ macro_rules! impl_exec_payload_for_fork { ); impl TryInto<$wrapper_type_header> for BlindedPayload { - type Error = Error; + type Error = BeaconStateError; fn try_into(self) -> Result<$wrapper_type_header, Self::Error> { match self { BlindedPayload::$fork_variant(payload) => Ok(payload), - _ => Err(Error::IncorrectStateVariant), + _ => Err(BeaconStateError::IncorrectStateVariant), } } } @@ -941,13 +968,13 @@ macro_rules! impl_exec_payload_for_fork { } impl TryFrom> for $wrapper_type_header { - type Error = Error; + type Error = BeaconStateError; fn try_from(header: ExecutionPayloadHeader) -> Result { match header { ExecutionPayloadHeader::$fork_variant(execution_payload_header) => { Ok(execution_payload_header.into()) } - _ => Err(Error::PayloadConversionLogicFlaw), + _ => Err(BeaconStateError::PayloadConversionLogicFlaw), } } } @@ -982,7 +1009,7 @@ macro_rules! 
impl_exec_payload_for_fork { c }, { - let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = |payload: &$wrapper_type_full| { let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); wrapper_ref_type.withdrawals_root() @@ -990,7 +1017,7 @@ macro_rules! impl_exec_payload_for_fork { c }, { - let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = |payload: &$wrapper_type_full| { let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); wrapper_ref_type.blob_gas_used() @@ -1017,26 +1044,26 @@ macro_rules! impl_exec_payload_for_fork { } impl TryFrom> for $wrapper_type_full { - type Error = Error; + type Error = BeaconStateError; fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(Error::PayloadConversionLogicFlaw) + Err(BeaconStateError::PayloadConversionLogicFlaw) } } impl TryFrom<$wrapped_type_header> for $wrapper_type_full { - type Error = Error; + type Error = BeaconStateError; fn try_from(_: $wrapped_type_header) -> Result { - Err(Error::PayloadConversionLogicFlaw) + Err(BeaconStateError::PayloadConversionLogicFlaw) } } impl TryInto<$wrapper_type_full> for FullPayload { - type Error = Error; + type Error = BeaconStateError; fn try_into(self) -> Result<$wrapper_type_full, Self::Error> { match self { FullPayload::$fork_variant(payload) => Ok(payload), - _ => Err(Error::PayloadConversionLogicFlaw), + _ => Err(BeaconStateError::PayloadConversionLogicFlaw), } } } diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/execution/signed_bls_to_execution_change.rs similarity index 78% rename from consensus/types/src/signed_bls_to_execution_change.rs rename to consensus/types/src/execution/signed_bls_to_execution_change.rs index 910c4c7d7e..535960fb3f 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/execution/signed_bls_to_execution_change.rs @@ -1,10 +1,12 @@ -use 
crate::test_utils::TestRandom; -use crate::*; +use bls::Signature; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{execution::BlsToExecutionChange, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/signed_execution_payload_bid.rs b/consensus/types/src/execution/signed_execution_payload_bid.rs similarity index 93% rename from consensus/types/src/signed_execution_payload_bid.rs rename to consensus/types/src/execution/signed_execution_payload_bid.rs index 0d5daacf50..84ac024d91 100644 --- a/consensus/types/src/signed_execution_payload_bid.rs +++ b/consensus/types/src/execution/signed_execution_payload_bid.rs @@ -1,3 +1,4 @@ +use crate::execution::execution_payload_bid::ExecutionPayloadBid; use crate::test_utils::TestRandom; use crate::*; use educe::Educe; diff --git a/consensus/types/src/signed_execution_payload_envelope.rs b/consensus/types/src/execution/signed_execution_payload_envelope.rs similarity index 89% rename from consensus/types/src/signed_execution_payload_envelope.rs rename to consensus/types/src/execution/signed_execution_payload_envelope.rs index 7141b4252c..bed695712a 100644 --- a/consensus/types/src/signed_execution_payload_envelope.rs +++ b/consensus/types/src/execution/signed_execution_payload_envelope.rs @@ -1,3 +1,4 @@ +use crate::execution::execution_payload_envelope::ExecutionPayloadEnvelope; use crate::test_utils::TestRandom; use crate::*; use educe::Educe; diff --git a/consensus/types/src/exit/mod.rs b/consensus/types/src/exit/mod.rs new file mode 100644 index 0000000000..cb066d1d7a --- /dev/null +++ b/consensus/types/src/exit/mod.rs @@ -0,0 +1,5 @@ +mod signed_voluntary_exit; +mod voluntary_exit; + 
+pub use signed_voluntary_exit::SignedVoluntaryExit; +pub use voluntary_exit::VoluntaryExit; diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/exit/signed_voluntary_exit.rs similarity index 84% rename from consensus/types/src/signed_voluntary_exit.rs rename to consensus/types/src/exit/signed_voluntary_exit.rs index 0beffa1e04..b49401a721 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/exit/signed_voluntary_exit.rs @@ -1,12 +1,12 @@ -use crate::context_deserialize; -use crate::{ForkName, VoluntaryExit, test_utils::TestRandom}; use bls::Signature; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{exit::VoluntaryExit, fork::ForkName, test_utils::TestRandom}; + /// An exit voluntarily submitted a validator who wishes to withdraw. /// /// Spec v0.12.1 diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/exit/voluntary_exit.rs similarity index 90% rename from consensus/types/src/voluntary_exit.rs rename to consensus/types/src/exit/voluntary_exit.rs index 42d792a814..30c6a97c4d 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/exit/voluntary_exit.rs @@ -1,14 +1,17 @@ -use crate::context_deserialize; -use crate::{ - ChainSpec, Domain, Epoch, ForkName, Hash256, SecretKey, SignedRoot, SignedVoluntaryExit, - test_utils::TestRandom, -}; - +use bls::SecretKey; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Domain, Epoch, Hash256, SignedRoot}, + exit::SignedVoluntaryExit, + fork::ForkName, + test_utils::TestRandom, +}; + /// An exit voluntarily submitted a validator who wishes to withdraw. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork/fork.rs similarity index 96% rename from consensus/types/src/fork.rs rename to consensus/types/src/fork/fork.rs index 5c5bd7ffd1..371b11e05c 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork/fork.rs @@ -1,12 +1,11 @@ -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName}; use context_deserialize::context_deserialize; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, fork::ForkName, test_utils::TestRandom}; + /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// /// Spec v0.12.1 diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork/fork_context.rs similarity index 98% rename from consensus/types/src/fork_context.rs rename to consensus/types/src/fork/fork_context.rs index 6c1fb74a09..615eb002c0 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork/fork_context.rs @@ -1,7 +1,11 @@ +use std::collections::BTreeMap; + use parking_lot::RwLock; -use crate::{ChainSpec, Epoch, EthSpec, ForkName, Hash256, Slot}; -use std::collections::BTreeMap; +use crate::{ + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, +}; /// Represents a hard fork in the consensus protocol. 
/// @@ -152,8 +156,7 @@ impl ForkContext { #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; - use crate::chain_spec::{BlobParameters, BlobSchedule}; + use crate::core::{BlobParameters, BlobSchedule, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork/fork_data.rs similarity index 88% rename from consensus/types/src/fork_data.rs rename to consensus/types/src/fork/fork_data.rs index 2d5e905efb..1b9c8bad9f 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork/fork_data.rs @@ -1,12 +1,15 @@ -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256, SignedRoot}; use context_deserialize::context_deserialize; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// /// Spec v0.12.1 diff --git a/consensus/types/src/fork/fork_macros.rs b/consensus/types/src/fork/fork_macros.rs new file mode 100644 index 0000000000..0c7f382ffc --- /dev/null +++ b/consensus/types/src/fork/fork_macros.rs @@ -0,0 +1,60 @@ +/// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. +/// +/// The `$body` expression is where the magic happens. The macro allows us to achieve polymorphism +/// in the return type, which is not usually possible in Rust without trait objects. +/// +/// E.g. you could call `map_fork_name!(fork, BeaconBlock, serde_json::from_str(s))` to decode +/// different `BeaconBlock` variants depending on the value of `fork`. Note how the type of the body +/// will change between `BeaconBlockBase` and `BeaconBlockAltair` depending on which branch is +/// taken, the important thing is that they are re-unified by injecting them back into the +/// `BeaconBlock` parent enum. 
+/// +/// If you would also like to extract additional data alongside the superstruct type, use +/// the more flexible `map_fork_name_with` macro. +#[macro_export] +macro_rules! map_fork_name { + ($fork_name:expr, $t:tt, $body:expr) => { + $crate::map_fork_name_with!($fork_name, $t, { ($body, ()) }).0 + }; +} + +/// Map a fork name into a tuple of `(t, extra)` where `t` is a superstruct type. +#[macro_export] +macro_rules! map_fork_name_with { + ($fork_name:expr, $t:tt, $body:block) => { + match $fork_name { + $crate::fork::ForkName::Base => { + let (value, extra_data) = $body; + ($t::Base(value), extra_data) + } + $crate::fork::ForkName::Altair => { + let (value, extra_data) = $body; + ($t::Altair(value), extra_data) + } + $crate::fork::ForkName::Bellatrix => { + let (value, extra_data) = $body; + ($t::Bellatrix(value), extra_data) + } + $crate::fork::ForkName::Capella => { + let (value, extra_data) = $body; + ($t::Capella(value), extra_data) + } + $crate::fork::ForkName::Deneb => { + let (value, extra_data) = $body; + ($t::Deneb(value), extra_data) + } + $crate::fork::ForkName::Electra => { + let (value, extra_data) = $body; + ($t::Electra(value), extra_data) + } + $crate::fork::ForkName::Fulu => { + let (value, extra_data) = $body; + ($t::Fulu(value), extra_data) + } + $crate::fork::ForkName::Gloas => { + let (value, extra_data) = $body; + ($t::Gloas(value), extra_data) + } + } + }; +} diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork/fork_name.rs similarity index 84% rename from consensus/types/src/fork_name.rs rename to consensus/types/src/fork/fork_name.rs index 1d7bf3795b..e9ec5fbe41 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork/fork_name.rs @@ -1,8 +1,12 @@ -use crate::{ChainSpec, Epoch}; +use std::{ + fmt::{self, Display, Formatter}, + str::FromStr, +}; + use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::fmt::{self, Display, Formatter}; -use std::str::FromStr; + +use 
crate::core::{ChainSpec, Epoch}; #[derive( Debug, Clone, Copy, Decode, Encode, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, @@ -243,67 +247,6 @@ impl ForkName { } } -/// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. -/// -/// The `$body` expression is where the magic happens. The macro allows us to achieve polymorphism -/// in the return type, which is not usually possible in Rust without trait objects. -/// -/// E.g. you could call `map_fork_name!(fork, BeaconBlock, serde_json::from_str(s))` to decode -/// different `BeaconBlock` variants depending on the value of `fork`. Note how the type of the body -/// will change between `BeaconBlockBase` and `BeaconBlockAltair` depending on which branch is -/// taken, the important thing is that they are re-unified by injecting them back into the -/// `BeaconBlock` parent enum. -/// -/// If you would also like to extract additional data alongside the superstruct type, use -/// the more flexible `map_fork_name_with` macro. -#[macro_export] -macro_rules! map_fork_name { - ($fork_name:expr, $t:tt, $body:expr) => { - map_fork_name_with!($fork_name, $t, { ($body, ()) }).0 - }; -} - -/// Map a fork name into a tuple of `(t, extra)` where `t` is a superstruct type. -#[macro_export] -macro_rules! 
map_fork_name_with { - ($fork_name:expr, $t:tt, $body:block) => { - match $fork_name { - ForkName::Base => { - let (value, extra_data) = $body; - ($t::Base(value), extra_data) - } - ForkName::Altair => { - let (value, extra_data) = $body; - ($t::Altair(value), extra_data) - } - ForkName::Bellatrix => { - let (value, extra_data) = $body; - ($t::Bellatrix(value), extra_data) - } - ForkName::Capella => { - let (value, extra_data) = $body; - ($t::Capella(value), extra_data) - } - ForkName::Deneb => { - let (value, extra_data) = $body; - ($t::Deneb(value), extra_data) - } - ForkName::Electra => { - let (value, extra_data) = $body; - ($t::Electra(value), extra_data) - } - ForkName::Fulu => { - let (value, extra_data) = $body; - ($t::Fulu(value), extra_data) - } - ForkName::Gloas => { - let (value, extra_data) = $body; - ($t::Gloas(value), extra_data) - } - } - }; -} - impl FromStr for ForkName { type Err = String; diff --git a/consensus/types/src/fork/fork_version_decode.rs b/consensus/types/src/fork/fork_version_decode.rs new file mode 100644 index 0000000000..4349efb21f --- /dev/null +++ b/consensus/types/src/fork/fork_version_decode.rs @@ -0,0 +1,6 @@ +use crate::fork::ForkName; + +pub trait ForkVersionDecode: Sized { + /// SSZ decode with explicit fork variant. 
+ fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; +} diff --git a/consensus/types/src/fork/mod.rs b/consensus/types/src/fork/mod.rs new file mode 100644 index 0000000000..1ad1c7cb62 --- /dev/null +++ b/consensus/types/src/fork/mod.rs @@ -0,0 +1,15 @@ +mod fork; +mod fork_context; +mod fork_data; +mod fork_macros; +mod fork_name; +mod fork_version_decode; + +pub use crate::{map_fork_name, map_fork_name_with}; +pub use fork::Fork; +pub use fork_context::{ForkContext, HardFork}; +pub use fork_data::ForkData; +pub use fork_name::{ForkName, InconsistentFork}; +pub use fork_version_decode::ForkVersionDecode; + +pub type ForkVersion = [u8; 4]; diff --git a/consensus/types/src/kzg_ext/consts.rs b/consensus/types/src/kzg_ext/consts.rs new file mode 100644 index 0000000000..06c9f9c749 --- /dev/null +++ b/consensus/types/src/kzg_ext/consts.rs @@ -0,0 +1,3 @@ +pub use kzg::{ + BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, VERSIONED_HASH_VERSION_KZG, +}; diff --git a/consensus/types/src/kzg_ext/mod.rs b/consensus/types/src/kzg_ext/mod.rs new file mode 100644 index 0000000000..63533ec71f --- /dev/null +++ b/consensus/types/src/kzg_ext/mod.rs @@ -0,0 +1,27 @@ +pub mod consts; + +pub use kzg::{Blob as KzgBlob, Error as KzgError, Kzg, KzgCommitment, KzgProof}; + +use ssz_types::VariableList; + +use crate::core::EthSpec; + +// Note on List limit: +// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` +// - Fulu: `MaxCellsPerBlock` +// We choose to use a single type (with the larger value from Fulu as `N`) instead of having to +// introduce a new type for Fulu. This is to avoid messy conversions and having to add extra types +// with no gains - as `N` does not impact serialisation at all, and only affects merkleization, +// which we don't current do on `KzgProofs` anyway. +pub type KzgProofs = VariableList::MaxCellsPerBlock>; + +pub type KzgCommitments = + VariableList::MaxBlobCommitmentsPerBlock>; + +/// Util method helpful for logging. 
+pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { + let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); + let commitments_joined = commitment_strings.join(", "); + let surrounded_commitments = format!("[{}]", commitments_joined); + surrounded_commitments +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index c720ee6679..f1a9751cbe 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -1,4 +1,4 @@ -//! Ethereum 2.0 types +//! Ethereum Consensus types // Clippy lint set up #![cfg_attr( not(test), @@ -12,305 +12,177 @@ #[macro_use] pub mod test_utils; -pub mod aggregate_and_proof; -pub mod application_domain; pub mod attestation; -pub mod attestation_data; -pub mod attestation_duty; -pub mod attester_slashing; -pub mod beacon_block; -pub mod beacon_block_body; -pub mod beacon_block_header; -pub mod beacon_committee; -pub mod beacon_response; -pub mod beacon_state; -pub mod bls_to_execution_change; -pub mod builder_bid; -pub mod builder_pending_payment; -pub mod builder_pending_withdrawal; -pub mod chain_spec; -pub mod checkpoint; -pub mod consolidation_request; -pub mod consts; -pub mod contribution_and_proof; +pub mod block; +pub mod builder; +pub mod consolidation; +pub mod core; +pub mod data; pub mod deposit; -pub mod deposit_data; -pub mod deposit_message; -pub mod deposit_request; -pub mod deposit_tree_snapshot; -pub mod dumb_macros; -pub mod enr_fork_id; -pub mod eth1_data; -pub mod eth_spec; -pub mod execution_block_hash; -pub mod execution_payload; -pub mod execution_payload_bid; -pub mod execution_payload_envelope; -pub mod execution_payload_header; +pub mod execution; +pub mod exit; pub mod fork; -pub mod fork_data; -pub mod fork_name; -pub mod graffiti; -pub mod historical_batch; -pub mod historical_summary; -pub mod indexed_attestation; -pub mod indexed_payload_attestation; -pub mod light_client_bootstrap; -pub mod light_client_finality_update; -pub mod 
light_client_optimistic_update; -pub mod light_client_update; -pub mod payload; -pub mod payload_attestation; -pub mod payload_attestation_data; -pub mod payload_attestation_message; -pub mod pending_attestation; -pub mod pending_consolidation; -pub mod pending_deposit; -pub mod pending_partial_withdrawal; -pub mod proposer_preparation_data; -pub mod proposer_slashing; -pub mod relative_epoch; -pub mod selection_proof; -pub mod shuffling_id; -pub mod signed_aggregate_and_proof; -pub mod signed_beacon_block; -pub mod signed_beacon_block_header; -pub mod signed_bls_to_execution_change; -pub mod signed_contribution_and_proof; -pub mod signed_execution_payload_bid; -pub mod signed_execution_payload_envelope; -pub mod signed_voluntary_exit; -pub mod signing_data; -pub mod sync_committee_subscription; -pub mod sync_duty; -pub mod validator; -pub mod validator_subscription; -pub mod voluntary_exit; -pub mod withdrawal_credentials; -pub mod withdrawal_request; -#[macro_use] -pub mod slot_epoch_macros; -pub mod activation_queue; -pub mod config_and_preset; -pub mod execution_block_header; -pub mod execution_requests; -pub mod fork_context; -pub mod participation_flags; -pub mod preset; -pub mod slot_epoch; -pub mod subnet_id; -pub mod sync_aggregate; -pub mod sync_aggregator_selection_data; +pub mod kzg_ext; +pub mod light_client; +pub mod slashing; +pub mod state; pub mod sync_committee; -pub mod sync_committee_contribution; -pub mod sync_committee_message; -pub mod sync_selection_proof; -pub mod sync_subnet_id; -pub mod validator_registration_data; +pub mod validator; pub mod withdrawal; -pub mod epoch_cache; -pub mod slot_data; -#[cfg(feature = "sqlite")] -pub mod sqlite; +// Temporary root level exports to maintain backwards compatibility for Lighthouse. 
+pub use attestation::*; +pub use block::*; +pub use builder::*; +pub use consolidation::*; +pub use core::{consts, *}; +pub use data::*; +pub use deposit::*; +pub use execution::*; +pub use exit::*; +pub use fork::*; +pub use kzg_ext::*; +pub use light_client::*; +pub use slashing::*; +pub use state::*; +pub use sync_committee::*; +pub use validator::*; +pub use withdrawal::*; -pub mod blob_sidecar; -pub mod data_column_custody_group; -pub mod data_column_sidecar; -pub mod data_column_subnet_id; -pub mod light_client_header; -pub mod non_zero_usize; -pub mod runtime_fixed_vector; -pub mod runtime_var_list; +// Temporary facade modules to maintain backwards compatibility for Lighthouse. +pub mod eth_spec { + pub use crate::core::EthSpec; +} -pub use crate::activation_queue::ActivationQueue; -pub use crate::aggregate_and_proof::{ - AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, -}; -pub use crate::attestation::{ - Attestation, AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut, - Error as AttestationError, SingleAttestation, -}; -pub use crate::attestation_data::AttestationData; -pub use crate::attestation_duty::AttestationDuty; -pub use crate::attester_slashing::{ - AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingOnDisk, - AttesterSlashingRef, AttesterSlashingRefOnDisk, -}; -pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, - BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockRef, - BeaconBlockRefMut, BlindedBeaconBlock, BlockImportSource, EmptyBlock, -}; -pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, - BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, - BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, -}; -pub use 
crate::beacon_block_header::BeaconBlockHeader; -pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; -pub use crate::beacon_response::{ - BeaconResponse, ForkVersionDecode, ForkVersionedResponse, UnversionedResponse, -}; -pub use crate::beacon_state::{Error as BeaconStateError, *}; -pub use crate::blob_sidecar::{BlobIdentifier, BlobSidecar, BlobSidecarList, BlobsList}; -pub use crate::bls_to_execution_change::BlsToExecutionChange; -pub use crate::builder_pending_payment::BuilderPendingPayment; -pub use crate::builder_pending_withdrawal::BuilderPendingWithdrawal; -pub use crate::chain_spec::{ChainSpec, Config, Domain}; -pub use crate::checkpoint::Checkpoint; -pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, - ConfigAndPresetGloas, -}; -pub use crate::consolidation_request::ConsolidationRequest; -pub use crate::contribution_and_proof::ContributionAndProof; -pub use crate::data_column_sidecar::{ - ColumnIndex, DataColumnSidecar, DataColumnSidecarList, DataColumnsByRootIdentifier, -}; -pub use crate::data_column_subnet_id::DataColumnSubnetId; -pub use crate::deposit::{DEPOSIT_TREE_DEPTH, Deposit}; -pub use crate::deposit_data::DepositData; -pub use crate::deposit_message::DepositMessage; -pub use crate::deposit_request::DepositRequest; -pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; -pub use crate::enr_fork_id::EnrForkId; -pub use crate::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; -pub use crate::eth_spec::EthSpecId; -pub use crate::eth1_data::Eth1Data; -pub use crate::execution_block_hash::ExecutionBlockHash; -pub use crate::execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; -pub use crate::execution_payload::{ - ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadRef, 
- Transaction, Transactions, Withdrawals, -}; -pub use crate::execution_payload_bid::ExecutionPayloadBid; -pub use crate::execution_payload_envelope::ExecutionPayloadEnvelope; -pub use crate::execution_payload_header::{ - ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, -}; -pub use crate::execution_requests::{ExecutionRequests, RequestType}; -pub use crate::fork::Fork; -pub use crate::fork_context::ForkContext; -pub use crate::fork_data::ForkData; -pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::graffiti::{GRAFFITI_BYTES_LEN, Graffiti}; -pub use crate::historical_batch::HistoricalBatch; -pub use crate::indexed_attestation::{ - IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, -}; -pub use crate::indexed_payload_attestation::IndexedPayloadAttestation; -pub use crate::light_client_bootstrap::{ - LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, - LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, -}; -pub use crate::light_client_finality_update::{ - LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, - LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, - LightClientFinalityUpdateFulu, -}; -pub use crate::light_client_header::{ - LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, -}; -pub use crate::light_client_optimistic_update::{ - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, - LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, - LightClientOptimisticUpdateElectra, LightClientOptimisticUpdateFulu, -}; -pub use crate::light_client_update::{ - Error as 
LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, - LightClientUpdateFulu, MerkleProof, -}; -pub use crate::participation_flags::ParticipationFlags; -pub use crate::payload::{ - AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, - BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadRef, BlockType, - ExecPayload, FullPayload, FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, - FullPayloadElectra, FullPayloadFulu, FullPayloadRef, OwnedExecPayload, -}; -pub use crate::payload_attestation::PayloadAttestation; -pub use crate::payload_attestation_data::PayloadAttestationData; -pub use crate::payload_attestation_message::PayloadAttestationMessage; -pub use crate::pending_attestation::PendingAttestation; -pub use crate::pending_consolidation::PendingConsolidation; -pub use crate::pending_deposit::PendingDeposit; -pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; -pub use crate::preset::{ - AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, - FuluPreset, GloasPreset, -}; -pub use crate::proposer_preparation_data::ProposerPreparationData; -pub use crate::proposer_slashing::ProposerSlashing; -pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; -pub use crate::runtime_fixed_vector::RuntimeFixedVector; -pub use crate::runtime_var_list::RuntimeVariableList; -pub use crate::selection_proof::SelectionProof; -pub use crate::shuffling_id::AttestationShufflingId; -pub use crate::signed_aggregate_and_proof::{ - SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, -}; -pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockFulu, 
SignedBeaconBlockGloas, SignedBeaconBlockHash, SignedBlindedBeaconBlock, - ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, -}; -pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; -pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; -pub use crate::signed_contribution_and_proof::SignedContributionAndProof; -pub use crate::signed_execution_payload_bid::SignedExecutionPayloadBid; -pub use crate::signed_execution_payload_envelope::SignedExecutionPayloadEnvelope; -pub use crate::signed_voluntary_exit::SignedVoluntaryExit; -pub use crate::signing_data::{SignedRoot, SigningData}; -pub use crate::slot_epoch::{Epoch, Slot}; -pub use crate::subnet_id::SubnetId; -pub use crate::sync_aggregate::SyncAggregate; -pub use crate::sync_aggregator_selection_data::SyncAggregatorSelectionData; -pub use crate::sync_committee::SyncCommittee; -pub use crate::sync_committee_contribution::{SyncCommitteeContribution, SyncContributionData}; -pub use crate::sync_committee_message::SyncCommitteeMessage; -pub use crate::sync_committee_subscription::SyncCommitteeSubscription; -pub use crate::sync_duty::SyncDuty; -pub use crate::sync_selection_proof::SyncSelectionProof; -pub use crate::sync_subnet_id::SyncSubnetId; -pub use crate::validator::Validator; -pub use crate::validator_registration_data::*; -pub use crate::validator_subscription::ValidatorSubscription; -pub use crate::voluntary_exit::VoluntaryExit; -pub use crate::withdrawal::Withdrawal; -pub use crate::withdrawal_credentials::WithdrawalCredentials; -pub use crate::withdrawal_request::WithdrawalRequest; -pub use fixed_bytes::FixedBytesExtended; +pub mod chain_spec { + pub use crate::core::ChainSpec; +} -pub type CommitteeIndex = u64; -pub type Hash256 = fixed_bytes::Hash256; -pub type Uint256 = fixed_bytes::Uint256; -pub type Address = fixed_bytes::Address; -pub type ForkVersion = [u8; 4]; -pub type BLSFieldElement = Uint256; -pub type Blob = FixedVector::BytesPerBlob>; -// Note 
on List limit: -// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` -// - Fulu: `MaxCellsPerBlock` -// We choose to use a single type (with the larger value from Fulu as `N`) instead of having to -// introduce a new type for Fulu. This is to avoid messy conversions and having to add extra types -// with no gains - as `N` does not impact serialisation at all, and only affects merkleization, -// which we don't current do on `KzgProofs` anyway. -pub type KzgProofs = VariableList::MaxCellsPerBlock>; -pub type VersionedHash = Hash256; -pub type Hash64 = alloy_primitives::B64; +pub mod beacon_block { + pub use crate::block::{BlindedBeaconBlock, BlockImportSource}; +} + +pub mod beacon_block_body { + pub use crate::kzg_ext::{KzgCommitments, format_kzg_commitments}; +} + +pub mod beacon_state { + pub use crate::state::{ + BeaconState, BeaconStateBase, CommitteeCache, compute_committee_index_in_epoch, + compute_committee_range_in_epoch, epoch_committee_count, + }; +} + +pub mod graffiti { + pub use crate::core::GraffitiString; +} + +pub mod indexed_attestation { + pub use crate::attestation::{IndexedAttestationBase, IndexedAttestationElectra}; +} + +pub mod historical_summary { + pub use crate::state::HistoricalSummary; +} + +pub mod participation_flags { + pub use crate::attestation::ParticipationFlags; +} + +pub mod epoch_cache { + pub use crate::state::{EpochCache, EpochCacheError, EpochCacheKey}; +} + +pub mod non_zero_usize { + pub use crate::core::new_non_zero_usize; +} + +pub mod data_column_sidecar { + pub use crate::data::{ + Cell, ColumnIndex, DataColumn, DataColumnSidecar, DataColumnSidecarError, + DataColumnSidecarList, + }; +} + +pub mod builder_bid { + pub use crate::builder::*; +} + +pub mod blob_sidecar { + pub use crate::data::{ + BlobIdentifier, BlobSidecar, BlobSidecarError, BlobsList, FixedBlobSidecarList, + }; +} + +pub mod payload { + pub use crate::execution::BlockProductionVersion; +} + +pub mod execution_requests { + pub use crate::execution::{ + 
ConsolidationRequests, DepositRequests, ExecutionRequests, RequestType, WithdrawalRequests, + }; +} + +pub mod execution_payload_envelope { + pub use crate::execution::{ExecutionPayloadEnvelope, SignedExecutionPayloadEnvelope}; +} + +pub mod data_column_custody_group { + pub use crate::data::{ + CustodyIndex, compute_columns_for_custody_group, compute_ordered_custody_column_indices, + compute_subnets_for_node, compute_subnets_from_custody_group, get_custody_groups, + }; +} + +pub mod sync_aggregate { + pub use crate::sync_committee::SyncAggregateError as Error; +} + +pub mod light_client_update { + pub use crate::light_client::consts::{ + CURRENT_SYNC_COMMITTEE_INDEX, CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA, FINALIZED_ROOT_INDEX, + FINALIZED_ROOT_INDEX_ELECTRA, MAX_REQUEST_LIGHT_CLIENT_UPDATES, NEXT_SYNC_COMMITTEE_INDEX, + NEXT_SYNC_COMMITTEE_INDEX_ELECTRA, + }; +} + +pub mod sync_committee_contribution { + pub use crate::sync_committee::{ + SyncCommitteeContributionError as Error, SyncContributionData, + }; +} + +pub mod slot_data { + pub use crate::core::SlotData; +} + +pub mod signed_aggregate_and_proof { + pub use crate::attestation::SignedAggregateAndProofRefMut; +} + +pub mod payload_attestation { + pub use crate::attestation::{ + PayloadAttestation, PayloadAttestationData, PayloadAttestationMessage, + }; +} + +pub mod application_domain { + pub use crate::core::ApplicationDomain; +} + +// Temporary re-exports to maintain backwards compatibility for Lighthouse. 
+pub use crate::kzg_ext::consts::VERSIONED_HASH_VERSION_KZG; +pub use crate::light_client::LightClientError as LightClientUpdateError; +pub use crate::state::BeaconStateError as Error; pub use bls::{ - AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, - Signature, SignatureBytes, + AggregatePublicKey, AggregateSignature, Error as BlsError, Keypair, PUBLIC_KEY_BYTES_LEN, + PublicKey, PublicKeyBytes, SIGNATURE_BYTES_LEN, SecretKey, Signature, SignatureBytes, + get_withdrawal_credentials, }; pub use context_deserialize::{ContextDeserialize, context_deserialize}; -pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; +pub use fixed_bytes::FixedBytesExtended; pub use milhouse::{self, List, Vector}; pub use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum, typenum::Unsigned}; pub use superstruct::superstruct; diff --git a/consensus/types/src/light_client/consts.rs b/consensus/types/src/light_client/consts.rs new file mode 100644 index 0000000000..0092e75e87 --- /dev/null +++ b/consensus/types/src/light_client/consts.rs @@ -0,0 +1,21 @@ +pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; + +pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; + +pub const FINALIZED_ROOT_INDEX: usize = 105; +pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; +pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; +pub const EXECUTION_PAYLOAD_INDEX: usize = 25; + +pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; +pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; +pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; + +// Max light client updates by range request limits +// spec: 
https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration +pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; diff --git a/consensus/types/src/light_client/error.rs b/consensus/types/src/light_client/error.rs new file mode 100644 index 0000000000..4c7a30db5e --- /dev/null +++ b/consensus/types/src/light_client/error.rs @@ -0,0 +1,42 @@ +use safe_arith::ArithError; + +use crate::state::BeaconStateError; + +#[derive(Debug, PartialEq, Clone)] +pub enum LightClientError { + SszTypesError(ssz_types::Error), + MilhouseError(milhouse::Error), + BeaconStateError(BeaconStateError), + ArithError(ArithError), + AltairForkNotActive, + NotEnoughSyncCommitteeParticipants, + MismatchingPeriods, + InvalidFinalizedBlock, + BeaconBlockBodyError, + InconsistentFork, + GloasNotImplemented, +} + +impl From for LightClientError { + fn from(e: ssz_types::Error) -> LightClientError { + LightClientError::SszTypesError(e) + } +} + +impl From for LightClientError { + fn from(e: BeaconStateError) -> LightClientError { + LightClientError::BeaconStateError(e) + } +} + +impl From for LightClientError { + fn from(e: ArithError) -> LightClientError { + LightClientError::ArithError(e) + } +} + +impl From for LightClientError { + fn from(e: milhouse::Error) -> LightClientError { + LightClientError::MilhouseError(e) + } +} diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client/light_client_bootstrap.rs similarity index 87% rename from consensus/types/src/light_client_bootstrap.rs rename to consensus/types/src/light_client/light_client_bootstrap.rs index d2b1f38df0..fbcc0ef2b0 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client/light_client_bootstrap.rs @@ -1,19 +1,29 @@ -use crate::context_deserialize; -use crate::{ - BeaconState, ChainSpec, ContextDeserialize, EthSpec, FixedVector, ForkName, Hash256, - LightClientHeader, LightClientHeaderAltair, 
LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, SignedBlindedBeaconBlock, Slot, SyncCommittee, - light_client_update::*, test_utils::TestRandom, -}; +use std::sync::Arc; + +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::sync::Arc; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + CurrentSyncCommitteeProofLen, CurrentSyncCommitteeProofLenElectra, LightClientError, + LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + }, + state::BeaconState, + sync_committee::SyncCommittee, + test_utils::TestRandom, +}; + /// A LightClientBootstrap is the initializer we send over to light_client nodes /// that are trying to generate their basic storage when booting up. #[superstruct( @@ -140,49 +150,49 @@ impl LightClientBootstrap { current_sync_committee: Arc>, current_sync_committee_branch: Vec, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let light_client_bootstrap = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? 
{ - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), // TODO(gloas): implement Gloas light client - ForkName::Gloas => return Err(Error::GloasNotImplemented), + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), }; Ok(light_client_bootstrap) @@ -192,52 +202,52 @@ impl LightClientBootstrap { beacon_state: 
&mut BeaconState, block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = beacon_state.current_sync_committee()?.clone(); let light_client_bootstrap = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, 
current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), // TODO(gloas): implement Gloas light client - ForkName::Gloas => return Err(Error::GloasNotImplemented), + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), }; Ok(light_client_bootstrap) diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client/light_client_finality_update.rs similarity index 89% rename from consensus/types/src/light_client_finality_update.rs rename to consensus/types/src/light_client/light_client_finality_update.rs index c410015d36..b503785b85 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client/light_client_finality_update.rs @@ -1,20 +1,27 @@ -use super::{EthSpec, FixedVector, Hash256, LightClientHeader, Slot, SyncAggregate}; -use crate::ChainSpec; -use crate::context_deserialize; -use crate::{ - ContextDeserialize, ForkName, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, - SignedBlindedBeaconBlock, light_client_update::*, test_utils::TestRandom, -}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + FinalizedRootProofLen, FinalizedRootProofLenElectra, LightClientError, LightClientHeader, + LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, LightClientHeaderFulu, + }, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; + 
#[superstruct( variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( @@ -98,10 +105,10 @@ impl LightClientFinalityUpdate { sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let finality_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientFinalityUpdateAltair { @@ -111,7 +118,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }) @@ -123,7 +132,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -134,7 +145,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -145,7 +158,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -156,12 +171,14 @@ impl LightClientFinalityUpdate { finalized_header: 
LightClientHeaderFulu::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), - ForkName::Gloas => return Err(Error::GloasNotImplemented), - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), }; Ok(finality_update) diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client/light_client_header.rs similarity index 90% rename from consensus/types/src/light_client_header.rs rename to consensus/types/src/light_client/light_client_header.rs index 9acf11c9b5..fdf9f234ef 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client/light_client_header.rs @@ -1,22 +1,27 @@ -use crate::ChainSpec; -use crate::context_deserialize; -use crate::{BeaconBlockBody, light_client_update::*}; -use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; -use crate::{ContextDeserialize, ForkName}; -use crate::{ - EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, FixedVector, Hash256, - SignedBlindedBeaconBlock, test_utils::TestRandom, -}; +use std::marker::PhantomData; + +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; -use std::marker::PhantomData; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::{BeaconBlockBody, BeaconBlockHeader, SignedBlindedBeaconBlock}, + core::{ChainSpec, EthSpec, Hash256}, + execution::{ + ExecutionPayloadHeader, 
ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + }, + fork::ForkName, + light_client::{ExecutionPayloadProofLen, LightClientError, consts::EXECUTION_PAYLOAD_INDEX}, + test_utils::TestRandom, +}; + #[superstruct( variants(Altair, Capella, Deneb, Electra, Fulu,), variant_attributes( @@ -83,12 +88,12 @@ impl LightClientHeader { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let header = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => LightClientHeader::Altair( LightClientHeaderAltair::block_to_light_client_header(block)?, ), @@ -105,7 +110,7 @@ impl LightClientHeader { LightClientHeader::Fulu(LightClientHeaderFulu::block_to_light_client_header(block)?) } // TODO(gloas): implement Gloas light client - ForkName::Gloas => return Err(Error::GloasNotImplemented), + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), }; Ok(header) } @@ -161,7 +166,7 @@ impl LightClientHeader { impl LightClientHeaderAltair { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { Ok(LightClientHeaderAltair { beacon: block.message().block_header(), _phantom_data: PhantomData, @@ -181,7 +186,7 @@ impl Default for LightClientHeaderAltair { impl LightClientHeaderCapella { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -192,7 +197,7 @@ impl LightClientHeaderCapella { block .message() .body_capella() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? 
.to_owned(), ); @@ -223,7 +228,7 @@ impl Default for LightClientHeaderCapella { impl LightClientHeaderDeneb { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let header = block .message() .execution_payload()? @@ -234,7 +239,7 @@ impl LightClientHeaderDeneb { block .message() .body_deneb() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -265,7 +270,7 @@ impl Default for LightClientHeaderDeneb { impl LightClientHeaderElectra { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -276,7 +281,7 @@ impl LightClientHeaderElectra { block .message() .body_electra() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -307,7 +312,7 @@ impl Default for LightClientHeaderElectra { impl LightClientHeaderFulu { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -318,7 +323,7 @@ impl LightClientHeaderFulu { block .message() .body_fulu() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? 
.to_owned(), ); diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client/light_client_optimistic_update.rs similarity index 93% rename from consensus/types/src/light_client_optimistic_update.rs rename to consensus/types/src/light_client/light_client_optimistic_update.rs index 078e4f84a5..139c4b6a08 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client/light_client_optimistic_update.rs @@ -1,21 +1,25 @@ -use super::{ContextDeserialize, EthSpec, ForkName, LightClientHeader, Slot, SyncAggregate}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, SignedBlindedBeaconBlock, - light_client_update::*, -}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; -use ssz_derive::Decode; -use ssz_derive::Encode; +use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::Hash256; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Slot}, + fork::ForkName, + light_client::{ + LightClientError, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + }, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; + /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the current unfinalized epoch is verified only against BLS signature. 
#[superstruct( @@ -77,10 +81,10 @@ impl LightClientOptimisticUpdate { sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let optimistic_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientOptimisticUpdateAltair { @@ -119,8 +123,8 @@ impl LightClientOptimisticUpdate { sync_aggregate, signature_slot, }), - ForkName::Gloas => return Err(Error::GloasNotImplemented), - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), }; Ok(optimistic_update) diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client/light_client_update.rs similarity index 86% rename from consensus/types/src/light_client_update.rs rename to consensus/types/src/light_client/light_client_update.rs index 9457c90a0a..ed19cbcd41 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client/light_client_update.rs @@ -1,12 +1,6 @@ -use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; -use crate::LightClientHeader; -use crate::context_deserialize; -use crate::light_client_header::LightClientHeaderElectra; -use crate::{ - ChainSpec, ContextDeserialize, Epoch, ForkName, LightClientHeaderAltair, - LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderFulu, - SignedBlindedBeaconBlock, beacon_state, test_utils::TestRandom, -}; +use std::sync::Arc; + +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use safe_arith::ArithError; use safe_arith::SafeArith; @@ -14,20 +8,23 @@ use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; +use 
ssz_types::FixedVector; use ssz_types::typenum::{U4, U5, U6, U7}; -use std::sync::Arc; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -pub const FINALIZED_ROOT_INDEX: usize = 105; -pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; -pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; -pub const EXECUTION_PAYLOAD_INDEX: usize = 25; - -pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; -pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; -pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + LightClientError, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + }, + sync_committee::{SyncAggregate, SyncCommittee}, + test_utils::TestRandom, +}; pub type FinalizedRootProofLen = U6; pub type CurrentSyncCommitteeProofLen = U5; @@ -38,66 +35,12 @@ pub type FinalizedRootProofLenElectra = U7; pub type CurrentSyncCommitteeProofLenElectra = U6; pub type NextSyncCommitteeProofLenElectra = U6; -pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; -pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; -pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; -pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; - -pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; -pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; -pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; - -pub type MerkleProof = Vec; -// Max light client updates by range request limits -// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration -pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; - type FinalityBranch = FixedVector; type FinalityBranchElectra = FixedVector; type NextSyncCommitteeBranch = FixedVector; type NextSyncCommitteeBranchElectra = 
FixedVector; -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - SszTypesError(ssz_types::Error), - MilhouseError(milhouse::Error), - BeaconStateError(beacon_state::Error), - ArithError(ArithError), - AltairForkNotActive, - NotEnoughSyncCommitteeParticipants, - MismatchingPeriods, - InvalidFinalizedBlock, - BeaconBlockBodyError, - InconsistentFork, - // TODO(gloas): implement Gloas light client - GloasNotImplemented, -} - -impl From for Error { - fn from(e: ssz_types::Error) -> Error { - Error::SszTypesError(e) - } -} - -impl From for Error { - fn from(e: beacon_state::Error) -> Error { - Error::BeaconStateError(e) - } -} - -impl From for Error { - fn from(e: ArithError) -> Error { - Error::ArithError(e) - } -} - -impl From for Error { - fn from(e: milhouse::Error) -> Error { - Error::MilhouseError(e) - } -} - /// A LightClientUpdate is the update we request solely to either complete the bootstrapping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. @@ -234,12 +177,12 @@ impl LightClientUpdate { attested_block: &SignedBlindedBeaconBlock, finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let light_client_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? 
{ - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), fork_name @ ForkName::Altair | fork_name @ ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; @@ -259,9 +202,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -285,9 +230,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -311,9 +258,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -337,9 +286,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: 
finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -363,9 +314,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -374,7 +327,7 @@ impl LightClientUpdate { // have a distinct execution header will need a new LightClientUpdate variant only // if you need to test or support lightclient usages // TODO(gloas): implement Gloas light client - ForkName::Gloas => return Err(Error::GloasNotImplemented), + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), }; Ok(light_client_update) @@ -423,23 +376,32 @@ impl LightClientUpdate { fn attested_header_sync_committee_period( &self, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { compute_sync_committee_period_at_slot::(self.attested_header_slot(), chain_spec) - .map_err(Error::ArithError) + .map_err(LightClientError::ArithError) } - fn signature_slot_sync_committee_period(&self, chain_spec: &ChainSpec) -> Result { + fn signature_slot_sync_committee_period( + &self, + chain_spec: &ChainSpec, + ) -> Result { compute_sync_committee_period_at_slot::(*self.signature_slot(), chain_spec) - .map_err(Error::ArithError) + .map_err(LightClientError::ArithError) } - pub fn is_sync_committee_update(&self, chain_spec: &ChainSpec) -> Result { + pub fn is_sync_committee_update( + &self, + chain_spec: &ChainSpec, + ) -> Result { Ok(!self.is_next_sync_committee_branch_empty() && (self.attested_header_sync_committee_period(chain_spec)? 
== self.signature_slot_sync_committee_period(chain_spec)?)) } - pub fn has_sync_committee_finality(&self, chain_spec: &ChainSpec) -> Result { + pub fn has_sync_committee_finality( + &self, + chain_spec: &ChainSpec, + ) -> Result { Ok( compute_sync_committee_period_at_slot::(self.finalized_header_slot(), chain_spec)? == self.attested_header_sync_committee_period(chain_spec)?, @@ -453,7 +415,7 @@ impl LightClientUpdate { &self, new: &Self, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { // Compare super majority (> 2/3) sync committee participation let max_active_participants = new.sync_aggregate().sync_committee_bits.len(); @@ -577,6 +539,7 @@ fn compute_sync_committee_period_at_slot( #[cfg(test)] mod tests { use super::*; + use crate::light_client::consts::*; use ssz_types::typenum::Unsigned; // `ssz_tests!` can only be defined once per namespace diff --git a/consensus/types/src/light_client/mod.rs b/consensus/types/src/light_client/mod.rs new file mode 100644 index 0000000000..24f3fdbb55 --- /dev/null +++ b/consensus/types/src/light_client/mod.rs @@ -0,0 +1,35 @@ +mod error; +mod light_client_bootstrap; +mod light_client_finality_update; +mod light_client_header; +mod light_client_optimistic_update; +mod light_client_update; + +pub mod consts; + +pub use error::LightClientError; +pub use light_client_bootstrap::{ + LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, + LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, +}; +pub use light_client_finality_update::{ + LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, + LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, + LightClientFinalityUpdateFulu, +}; +pub use light_client_header::{ + LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, LightClientHeaderFulu, +}; +pub use light_client_optimistic_update::{ + 
LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, + LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, + LightClientOptimisticUpdateElectra, LightClientOptimisticUpdateFulu, +}; +pub use light_client_update::{ + CurrentSyncCommitteeProofLen, CurrentSyncCommitteeProofLenElectra, ExecutionPayloadProofLen, + FinalizedRootProofLen, FinalizedRootProofLenElectra, LightClientUpdate, + LightClientUpdateAltair, LightClientUpdateCapella, LightClientUpdateDeneb, + LightClientUpdateElectra, LightClientUpdateFulu, NextSyncCommitteeProofLen, + NextSyncCommitteeProofLenElectra, +}; diff --git a/consensus/types/src/runtime_fixed_vector.rs b/consensus/types/src/runtime_fixed_vector.rs deleted file mode 100644 index f562322a3d..0000000000 --- a/consensus/types/src/runtime_fixed_vector.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! Emulates a fixed size array but with the length set at runtime. -//! -//! The length of the list cannot be changed once it is set. - -use std::fmt; -use std::fmt::Debug; - -#[derive(Clone)] -pub struct RuntimeFixedVector { - vec: Vec, - len: usize, -} - -impl Debug for RuntimeFixedVector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?} (len={})", self.vec, self.len) - } -} - -impl RuntimeFixedVector { - pub fn new(vec: Vec) -> Self { - let len = vec.len(); - Self { vec, len } - } - - pub fn to_vec(&self) -> Vec { - self.vec.clone() - } - - pub fn as_slice(&self) -> &[T] { - self.vec.as_slice() - } - - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - self.len - } - - pub fn into_vec(self) -> Vec { - self.vec - } - - pub fn default(max_len: usize) -> Self { - Self { - vec: vec![T::default(); max_len], - len: max_len, - } - } - - pub fn take(&mut self) -> Self { - let new = std::mem::take(&mut self.vec); - *self = Self::new(vec![T::default(); self.len]); - Self { - vec: new, - len: self.len, - } - } -} - -impl std::ops::Deref for RuntimeFixedVector { - type Target = [T]; - - fn 
deref(&self) -> &[T] { - &self.vec[..] - } -} - -impl std::ops::DerefMut for RuntimeFixedVector { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] - } -} - -impl IntoIterator for RuntimeFixedVector { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl<'a, T> IntoIterator for &'a RuntimeFixedVector { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.vec.iter() - } -} diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs deleted file mode 100644 index e7b846029e..0000000000 --- a/consensus/types/src/runtime_var_list.rs +++ /dev/null @@ -1,387 +0,0 @@ -use crate::ContextDeserialize; -use educe::Educe; -use serde::de::Error as DeError; -use serde::{Deserialize, Deserializer, Serialize}; -use ssz::Decode; -use ssz_types::Error; -use std::fmt; -use std::fmt::Debug; -use std::ops::{Deref, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, TreeHashType}; - -/// Emulates a SSZ `List`. -/// -/// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than -/// `max_len` values. -/// -/// To ensure there are no inconsistent states, we do not allow any mutating operation if `max_len` is not set. -/// -/// ## Example -/// -/// ``` -/// use types::{RuntimeVariableList}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `RuntimeVariableList` from a `Vec` that has the expected length. -/// let exact: RuntimeVariableList<_> = RuntimeVariableList::new(base.clone(), 4).unwrap(); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `RuntimeVariableList` from a `Vec` that is too long you'll get an error. 
-/// let err = RuntimeVariableList::new(base.clone(), 3).unwrap_err(); -/// assert_eq!(err, ssz_types::Error::OutOfBounds { i: 4, len: 3 }); -/// -/// // Create a `RuntimeVariableList` from a `Vec` that is shorter than the maximum. -/// let mut long: RuntimeVariableList<_> = RuntimeVariableList::new(base, 5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4]); -/// -/// // Push a value to if it does not exceed the maximum -/// long.push(5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 5]); -/// -/// // Push a value to if it _does_ exceed the maximum. -/// assert!(long.push(6).is_err()); -/// -/// ``` -#[derive(Clone, Serialize, Deserialize, Educe)] -#[educe(PartialEq, Eq, Hash(bound(T: std::hash::Hash)))] -#[serde(transparent)] -pub struct RuntimeVariableList { - vec: Vec, - #[serde(skip)] - max_len: usize, -} - -impl Debug for RuntimeVariableList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?} (max_len={})", self.vec, self.max_len) - } -} - -impl RuntimeVariableList { - /// Returns `Ok` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `Err(OutOfBounds { .. })`. - pub fn new(vec: Vec, max_len: usize) -> Result { - if vec.len() <= max_len { - Ok(Self { vec, max_len }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: max_len, - }) - } - } - - /// Create an empty list with the given `max_len`. - pub fn empty(max_len: usize) -> Self { - Self { - vec: vec![], - max_len, - } - } - - pub fn as_slice(&self) -> &[T] { - self.vec.as_slice() - } - - pub fn as_mut_slice(&mut self) -> &mut [T] { - self.vec.as_mut_slice() - } - - /// Returns the number of values presently in `self`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if `self` does not contain any values. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level maximum length. - /// - /// Returns `None` if self is uninitialized with a max_len. 
- pub fn max_len(&self) -> usize { - self.max_len - } - - /// Appends `value` to the back of `self`. - /// - /// Returns `Err(())` when appending `value` would exceed the maximum length. - pub fn push(&mut self, value: T) -> Result<(), Error> { - if self.vec.len() < self.max_len { - self.vec.push(value); - Ok(()) - } else { - Err(Error::OutOfBounds { - i: self.vec.len().saturating_add(1), - len: self.max_len, - }) - } - } -} - -impl RuntimeVariableList { - pub fn from_ssz_bytes(bytes: &[u8], max_len: usize) -> Result { - let vec = if bytes.is_empty() { - vec![] - } else if ::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items > max_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "RuntimeVariableList of {} items exceeds maximum of {}", - num_items, max_len - ))); - } - - bytes.chunks(::ssz_fixed_len()).try_fold( - Vec::with_capacity(num_items), - |mut vec, chunk| { - vec.push(::from_ssz_bytes(chunk)?); - Ok(vec) - }, - )? - } else { - ssz::decode_list_of_variable_length_items(bytes, Some(max_len))? - }; - Ok(Self { vec, max_len }) - } -} - -impl From> for Vec { - fn from(list: RuntimeVariableList) -> Vec { - list.vec - } -} - -impl> Index for RuntimeVariableList { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for RuntimeVariableList { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for RuntimeVariableList { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] 
- } -} - -impl<'a, T> IntoIterator for &'a RuntimeVariableList { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for RuntimeVariableList { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl ssz::Encode for RuntimeVariableList -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.vec.ssz_append(buf) - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } -} - -impl<'de, C, T> ContextDeserialize<'de, (C, usize)> for RuntimeVariableList -where - T: ContextDeserialize<'de, C>, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: (C, usize)) -> Result - where - D: Deserializer<'de>, - { - // first parse out a Vec using the Vec impl you already have - let vec: Vec = Vec::context_deserialize(deserializer, context.0)?; - let vec_len = vec.len(); - RuntimeVariableList::new(vec, context.1).map_err(|e| { - DeError::custom(format!( - "RuntimeVariableList length {} exceeds max_len {}: {e:?}", - vec_len, context.1, - )) - }) - } -} - -impl TreeHash for RuntimeVariableList { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let root = runtime_vec_tree_hash_root::(&self.vec, self.max_len); - - tree_hash::mix_in_length(&root, self.len()) - } -} - -// We can delete this once the upstream `vec_tree_hash_root` is modified to use a runtime max len. 
-pub fn runtime_vec_tree_hash_root(vec: &[T], max_len: usize) -> Hash256 -where - T: TreeHash, -{ - match T::tree_hash_type() { - TreeHashType::Basic => { - let mut hasher = - MerkleHasher::with_leaves(max_len.div_ceil(T::tree_hash_packing_factor())); - - for item in vec { - hasher - .write(&item.tree_hash_packed_encoding()) - .expect("ssz_types variable vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types variable vec should not have a remaining buffer") - } - TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { - let mut hasher = MerkleHasher::with_leaves(max_len); - - for item in vec { - hasher - .write(item.tree_hash_root().as_slice()) - .expect("ssz_types vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types vec should not have a remaining buffer") - } - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use std::fmt::Debug; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_ok()); - - let vec = vec![42; 4]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: RuntimeVariableList = - RuntimeVariableList::new(vec.clone(), 8192).unwrap(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!(fixed[..].len(), 2); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - // Too long. 
- let vec = vec![42; 5]; - let err = RuntimeVariableList::::new(vec.clone(), 4).unwrap_err(); - assert_eq!(err, Error::OutOfBounds { i: 5, len: 4 }); - - let vec = vec![42; 3]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec.clone(), 4).unwrap(); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42][..]); - - let vec = vec![]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); - assert_eq!(&fixed[..], &[] as &[u64]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn encode() { - let vec: RuntimeVariableList = RuntimeVariableList::new(vec![0; 2], 2).unwrap(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn round_trip(item: RuntimeVariableList) { - let max_len = item.max_len(); - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!( - RuntimeVariableList::from_ssz_bytes(encoded, max_len), - Ok(item) - ); - } - - #[test] - fn u16_len_8() { - round_trip::(RuntimeVariableList::new(vec![42; 8], 8).unwrap()); - round_trip::(RuntimeVariableList::new(vec![0; 8], 8).unwrap()); - } -} diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/slashing/attester_slashing.rs similarity index 96% rename from consensus/types/src/attester_slashing.rs rename to consensus/types/src/slashing/attester_slashing.rs index 2bfb65653c..5c214b35f7 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/slashing/attester_slashing.rs @@ -1,9 +1,4 @@ -use crate::context_deserialize; -use crate::indexed_attestation::{ - IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, -}; -use crate::{ContextDeserialize, ForkName}; -use crate::{EthSpec, 
test_utils::TestRandom}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use rand::{Rng, RngCore}; use serde::{Deserialize, Deserializer, Serialize}; @@ -12,6 +7,13 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef}, + core::EthSpec, + fork::ForkName, + test_utils::TestRandom, +}; + #[superstruct( variants(Base, Electra), variant_attributes( diff --git a/consensus/types/src/slashing/mod.rs b/consensus/types/src/slashing/mod.rs new file mode 100644 index 0000000000..551b8e3137 --- /dev/null +++ b/consensus/types/src/slashing/mod.rs @@ -0,0 +1,8 @@ +mod attester_slashing; +mod proposer_slashing; + +pub use attester_slashing::{ + AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingOnDisk, + AttesterSlashingRef, AttesterSlashingRefOnDisk, +}; +pub use proposer_slashing::ProposerSlashing; diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/slashing/proposer_slashing.rs similarity index 86% rename from consensus/types/src/proposer_slashing.rs rename to consensus/types/src/slashing/proposer_slashing.rs index f4d914c1e5..697bd1a9aa 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/slashing/proposer_slashing.rs @@ -1,12 +1,11 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, SignedBeaconBlockHeader}; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{block::SignedBeaconBlockHeader, fork::ForkName, test_utils::TestRandom}; + /// Two conflicting proposals from the same proposer (validator). 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/activation_queue.rs b/consensus/types/src/state/activation_queue.rs similarity index 95% rename from consensus/types/src/activation_queue.rs rename to consensus/types/src/state/activation_queue.rs index dd3ce5f88c..0d920a20cf 100644 --- a/consensus/types/src/activation_queue.rs +++ b/consensus/types/src/state/activation_queue.rs @@ -1,6 +1,10 @@ -use crate::{ChainSpec, Epoch, Validator}; use std::collections::BTreeSet; +use crate::{ + core::{ChainSpec, Epoch}, + validator::Validator, +}; + /// Activation queue computed during epoch processing for use in the *next* epoch. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Eq, Default, Clone)] diff --git a/consensus/types/src/beacon_state/balance.rs b/consensus/types/src/state/balance.rs similarity index 100% rename from consensus/types/src/beacon_state/balance.rs rename to consensus/types/src/state/balance.rs diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/state/beacon_state.rs similarity index 88% rename from consensus/types/src/beacon_state.rs rename to consensus/types/src/state/beacon_state.rs index 9b49d89678..6f99a07129 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -1,49 +1,56 @@ -use self::committee_cache::get_active_validator_indices; -use crate::ContextDeserialize; -use crate::FixedBytesExtended; -use crate::historical_summary::HistoricalSummary; -use crate::test_utils::TestRandom; -use crate::*; +use std::{fmt, hash::Hash, mem, sync::Arc}; + +use bls::{AggregatePublicKey, PublicKeyBytes, Signature}; use compare_fields::CompareFields; +use context_deserialize::ContextDeserialize; use educe::Educe; use ethereum_hashing::hash; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use metastruct::{NumFields, metastruct}; -pub use pubkey_cache::PubkeyCache; +use milhouse::{List, Vector}; use 
safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError, Encode, ssz_encode}; use ssz_derive::{Decode, Encode}; -use std::hash::Hash; -use std::{fmt, mem, sync::Arc}; +use ssz_types::{BitVector, FixedVector, typenum::Unsigned}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; +use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -pub use self::committee_cache::{ - CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, - epoch_committee_count, +use crate::{ + BuilderPendingPayment, BuilderPendingWithdrawal, ExecutionBlockHash, ExecutionPayloadBid, + attestation::{ + AttestationDuty, BeaconCommittee, Checkpoint, CommitteeIndex, ParticipationFlags, + PendingAttestation, + }, + block::{BeaconBlock, BeaconBlockHeader, SignedBeaconBlockHash}, + consolidation::PendingConsolidation, + core::{ChainSpec, Domain, Epoch, EthSpec, Hash256, RelativeEpoch, RelativeEpochError, Slot}, + deposit::PendingDeposit, + execution::{ + Eth1Data, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + }, + fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, + light_client::consts::{ + CURRENT_SYNC_COMMITTEE_INDEX, CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA, FINALIZED_ROOT_INDEX, + FINALIZED_ROOT_INDEX_ELECTRA, NEXT_SYNC_COMMITTEE_INDEX, NEXT_SYNC_COMMITTEE_INDEX_ELECTRA, + }, + state::{ + BlockRootsIter, CommitteeCache, EpochCache, EpochCacheError, ExitCache, HistoricalBatch, + HistoricalSummary, ProgressiveBalancesCache, PubkeyCache, SlashingsCache, + get_active_validator_indices, + }, + sync_committee::{SyncCommittee, SyncDuty}, + test_utils::TestRandom, + validator::Validator, + withdrawal::PendingPartialWithdrawal, }; -pub use 
crate::beacon_state::balance::Balance; -pub use crate::beacon_state::exit_cache::ExitCache; -pub use crate::beacon_state::progressive_balances_cache::*; -pub use crate::beacon_state::slashings_cache::SlashingsCache; -pub use eth_spec::*; -pub use iter::BlockRootsIter; -pub use milhouse::{List, Vector, interface::Interface}; -use tracing::instrument; - -#[macro_use] -mod committee_cache; -mod balance; -mod exit_cache; -mod iter; -mod progressive_balances_cache; -mod pubkey_cache; -mod slashings_cache; -mod tests; pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; @@ -53,7 +60,7 @@ pub type Validators = List::ValidatorRegistryLimit> pub type Balances = List::ValidatorRegistryLimit>; #[derive(Debug, PartialEq, Clone)] -pub enum Error { +pub enum BeaconStateError { /// A state for a different hard-fork was required -- a severe logic error. IncorrectStateVariant, EpochOutOfBounds, @@ -197,7 +204,7 @@ enum AllowNextEpoch { } impl AllowNextEpoch { - fn upper_bound_of(self, current_epoch: Epoch) -> Result { + fn upper_bound_of(self, current_epoch: Epoch) -> Result { match self { AllowNextEpoch::True => Ok(current_epoch.safe_add(1)?), AllowNextEpoch::False => Ok(current_epoch), @@ -378,8 +385,14 @@ impl From for Hash256 { num_fields(all()), )) ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_ref_mut_into(BeaconStateRef) )] #[cfg_attr( @@ -760,11 +773,11 @@ impl BeaconState { } /// Returns the `tree_hash_root` of the state. 
- pub fn canonical_root(&mut self) -> Result { + pub fn canonical_root(&mut self) -> Result { self.update_tree_hash_cache() } - pub fn historical_batch(&mut self) -> Result, Error> { + pub fn historical_batch(&mut self) -> Result, BeaconStateError> { // Updating before cloning makes the clone cheap and saves repeated hashing. self.block_roots_mut().apply_updates()?; self.state_roots_mut().apply_updates()?; @@ -778,7 +791,10 @@ impl BeaconState { /// This method ensures the state's pubkey cache is fully up-to-date before checking if the validator /// exists in the registry. If a validator pubkey exists in the validator registry, returns `Some(i)`, /// otherwise returns `None`. - pub fn get_validator_index(&mut self, pubkey: &PublicKeyBytes) -> Result, Error> { + pub fn get_validator_index( + &mut self, + pubkey: &PublicKeyBytes, + ) -> Result, BeaconStateError> { self.update_pubkey_cache()?; Ok(self.pubkey_cache().get(pubkey)) } @@ -803,7 +819,7 @@ impl BeaconState { /// The epoch following `self.current_epoch()`. /// /// Spec v0.12.1 - pub fn next_epoch(&self) -> Result { + pub fn next_epoch(&self) -> Result { Ok(self.current_epoch().safe_add(1)?) } @@ -812,7 +828,7 @@ impl BeaconState { /// Makes use of the committee cache and will fail if no cache exists for the slot's epoch. /// /// Spec v0.12.1 - pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { + pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { let cache = self.committee_cache_at_slot(slot)?; Ok(cache.committees_per_slot()) } @@ -820,7 +836,10 @@ impl BeaconState { /// Compute the number of committees in an entire epoch. 
/// /// Spec v0.12.1 - pub fn get_epoch_committee_count(&self, relative_epoch: RelativeEpoch) -> Result { + pub fn get_epoch_committee_count( + &self, + relative_epoch: RelativeEpoch, + ) -> Result { let cache = self.committee_cache(relative_epoch)?; Ok(cache.epoch_committee_count() as u64) } @@ -833,7 +852,7 @@ impl BeaconState { pub fn get_cached_active_validator_indices( &self, relative_epoch: RelativeEpoch, - ) -> Result<&[usize], Error> { + ) -> Result<&[usize], BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.active_validator_indices()) @@ -846,7 +865,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { if epoch >= self.compute_activation_exit_epoch(self.current_epoch(), spec)? { Err(BeaconStateError::EpochOutOfBounds) } else { @@ -859,7 +878,10 @@ impl BeaconState { /// Note: the indices are shuffled (i.e., not in ascending order). /// /// Returns an error if that epoch is not cached, or the cache is not initialized. - pub fn get_shuffling(&self, relative_epoch: RelativeEpoch) -> Result<&[usize], Error> { + pub fn get_shuffling( + &self, + relative_epoch: RelativeEpoch, + ) -> Result<&[usize], BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.shuffling()) @@ -874,14 +896,14 @@ impl BeaconState { &self, slot: Slot, index: CommitteeIndex, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; let cache = self.committee_cache(relative_epoch)?; cache .get_beacon_committee(slot, index) - .ok_or(Error::NoCommittee { slot, index }) + .ok_or(BeaconStateError::NoCommittee { slot, index }) } /// Get all of the Beacon committees at a given slot. 
@@ -892,7 +914,7 @@ impl BeaconState { pub fn get_beacon_committees_at_slot( &self, slot: Slot, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let cache = self.committee_cache_at_slot(slot)?; cache.get_beacon_committees_at_slot(slot) } @@ -905,7 +927,7 @@ impl BeaconState { pub fn get_beacon_committees_at_epoch( &self, relative_epoch: RelativeEpoch, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; cache.get_all_beacon_committees() } @@ -921,7 +943,7 @@ impl BeaconState { epoch: Epoch, block_root: Hash256, spec: &ChainSpec, - ) -> Result { + ) -> Result { let decision_slot = spec.proposer_shuffling_decision_slot::(epoch); if self.slot() <= decision_slot { Ok(block_root) @@ -937,7 +959,7 @@ impl BeaconState { &self, epoch: Epoch, head_block_root: Hash256, - ) -> Result { + ) -> Result { let decision_slot = epoch.saturating_sub(1u64).end_slot(E::slots_per_epoch()); if self.slot() <= decision_slot { Ok(head_block_root) @@ -957,11 +979,14 @@ impl BeaconState { &self, block_root: Hash256, spec: &ChainSpec, - ) -> Result { + ) -> Result { self.proposer_shuffling_decision_root_at_epoch(self.current_epoch(), block_root, spec) } - pub fn epoch_cache_decision_root(&self, block_root: Hash256) -> Result { + pub fn epoch_cache_decision_root( + &self, + block_root: Hash256, + ) -> Result { // Epoch cache decision root for the current epoch (N) is the block root at the end of epoch // N - 1. This is the same as the root that determines the next epoch attester shuffling. 
self.attester_shuffling_decision_root(block_root, RelativeEpoch::Next) @@ -978,7 +1003,7 @@ impl BeaconState { &self, block_root: Hash256, relative_epoch: RelativeEpoch, - ) -> Result { + ) -> Result { let decision_slot = self.attester_shuffling_decision_slot(relative_epoch); if self.slot() == decision_slot { Ok(block_root) @@ -1005,9 +1030,9 @@ impl BeaconState { indices: &[usize], seed: &[u8], spec: &ChainSpec, - ) -> Result { + ) -> Result { if indices.is_empty() { - return Err(Error::InsufficientValidators); + return Err(BeaconStateError::InsufficientValidators); } let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); @@ -1025,10 +1050,10 @@ impl BeaconState { seed, spec.shuffle_round_count, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let candidate_index = *indices .get(shuffled_index) - .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(shuffled_index))?; let random_value = self.shuffling_random_value(i, seed)?; let effective_balance = self.get_effective_balance(candidate_index)?; if effective_balance.safe_mul(max_random_value)? @@ -1047,11 +1072,11 @@ impl BeaconState { seed: &[u8], indices: &[usize], spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // Regardless of fork, we never support computing proposer indices for past epochs. let current_epoch = self.current_epoch(); if epoch < current_epoch { - return Err(Error::ComputeProposerIndicesPastEpoch { + return Err(BeaconStateError::ComputeProposerIndicesPastEpoch { current_epoch, request_epoch: epoch, }); @@ -1070,17 +1095,19 @@ impl BeaconState { if self.fork_name_unchecked().fulu_enabled() && epoch < current_epoch.safe_add(spec.min_seed_lookahead)? 
{ - return Err(Error::ComputeProposerIndicesInsufficientLookahead { - current_epoch, - request_epoch: epoch, - }); + return Err( + BeaconStateError::ComputeProposerIndicesInsufficientLookahead { + current_epoch, + request_epoch: epoch, + }, + ); } } else { // Pre-Fulu the situation is reversed, we *should not* compute proposer indices using // too much lookahead. To do so would make us vulnerable to changes in the proposer // indices caused by effective balance changes. if epoch >= current_epoch.safe_add(spec.min_seed_lookahead)? { - return Err(Error::ComputeProposerIndicesExcessiveLookahead { + return Err(BeaconStateError::ComputeProposerIndicesExcessiveLookahead { current_epoch, request_epoch: epoch, }); @@ -1103,7 +1130,7 @@ impl BeaconState { /// In Electra and later, the random value is a 16-bit integer stored in a `u64`. /// /// Prior to Electra, the random value is an 8-bit integer stored in a `u64`. - fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { + fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { if self.fork_name_unchecked().electra_enabled() { Self::shuffling_random_u16_electra(i, seed).map(u64::from) } else { @@ -1114,37 +1141,39 @@ impl BeaconState { /// Get a random byte from the given `seed`. /// /// Used by the proposer & sync committee selection functions. - fn shuffling_random_byte(i: usize, seed: &[u8]) -> Result { + fn shuffling_random_byte(i: usize, seed: &[u8]) -> Result { let mut preimage = seed.to_vec(); preimage.append(&mut int_to_bytes8(i.safe_div(32)? as u64)); let index = i.safe_rem(32)?; hash(&preimage) .get(index) .copied() - .ok_or(Error::ShuffleIndexOutOfBounds(index)) + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(index)) } /// Get two random bytes from the given `seed`. /// /// This is used in place of `shuffling_random_byte` from Electra onwards. 
- fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { + fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { let mut preimage = seed.to_vec(); preimage.append(&mut int_to_bytes8(i.safe_div(16)? as u64)); let offset = i.safe_rem(16)?.safe_mul(2)?; hash(&preimage) .get(offset..offset.safe_add(2)?) - .ok_or(Error::ShuffleIndexOutOfBounds(offset))? + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(offset))? .try_into() .map(u16::from_le_bytes) - .map_err(|_| Error::ShuffleIndexOutOfBounds(offset)) + .map_err(|_| BeaconStateError::ShuffleIndexOutOfBounds(offset)) } /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. pub fn latest_execution_payload_header( &self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { - BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) | BeaconState::Altair(_) => { + Err(BeaconStateError::IncorrectStateVariant) + } BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRef::Bellatrix( &state.latest_execution_payload_header, )), @@ -1161,15 +1190,17 @@ impl BeaconState { &state.latest_execution_payload_header, )), // TODO(EIP-7732): investigate calling functions - BeaconState::Gloas(_) => Err(Error::IncorrectStateVariant), + BeaconState::Gloas(_) => Err(BeaconStateError::IncorrectStateVariant), } } pub fn latest_execution_payload_header_mut( &mut self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { - BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) | BeaconState::Altair(_) => { + Err(BeaconStateError::IncorrectStateVariant) + } BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRefMut::Bellatrix( &mut state.latest_execution_payload_header, )), @@ -1186,7 +1217,7 @@ impl BeaconState { &mut state.latest_execution_payload_header, )), // TODO(EIP-7732): investigate calling functions - BeaconState::Gloas(_) 
=> Err(Error::IncorrectStateVariant), + BeaconState::Gloas(_) => Err(BeaconStateError::IncorrectStateVariant), } } @@ -1199,7 +1230,7 @@ impl BeaconState { index: CommitteeIndex, slot_signature: &Signature, spec: &ChainSpec, - ) -> Result { + ) -> Result { let committee = self.get_beacon_committee(slot, index)?; let modulo = std::cmp::max( 1, @@ -1210,7 +1241,7 @@ impl BeaconState { signature_hash .get(0..8) .and_then(|bytes| bytes.try_into().ok()) - .ok_or(Error::IsAggregatorOutOfBounds)?, + .ok_or(BeaconStateError::IsAggregatorOutOfBounds)?, ); Ok(signature_hash_int.safe_rem(modulo)? == 0) @@ -1219,13 +1250,17 @@ impl BeaconState { /// Returns the beacon proposer index for the `slot` in `self.current_epoch()`. /// /// Spec v1.6.0-alpha.1 - pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + pub fn get_beacon_proposer_index( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result { // Proposer indices are only known for the current epoch, due to the dependence on the // effective balances of validators, which change at every epoch transition. let epoch = slot.epoch(E::slots_per_epoch()); // TODO(EIP-7917): Explore allowing this function to be called with a slot one epoch in the future. if epoch != self.current_epoch() { - return Err(Error::SlotOutOfBounds); + return Err(BeaconStateError::SlotOutOfBounds); } if let Ok(proposer_lookahead) = self.proposer_lookahead() { @@ -1233,7 +1268,7 @@ impl BeaconState { let index = slot.as_usize().safe_rem(E::slots_per_epoch() as usize)?; proposer_lookahead .get(index) - .ok_or(Error::ProposerLookaheadOutOfBounds { i: index }) + .ok_or(BeaconStateError::ProposerLookaheadOutOfBounds { i: index }) .map(|index| *index as usize) } else { // Pre-Fulu @@ -1251,7 +1286,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // This isn't in the spec, but we remove the footgun that is requesting the current epoch // for a Fulu state. 
if let Ok(proposer_lookahead) = self.proposer_lookahead() @@ -1281,7 +1316,11 @@ impl BeaconState { /// Compute the seed to use for the beacon proposer selection at the given `slot`. /// /// Spec v0.12.1 - pub fn get_beacon_proposer_seed(&self, slot: Slot, spec: &ChainSpec) -> Result, Error> { + pub fn get_beacon_proposer_seed( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let mut preimage = self .get_seed(epoch, Domain::BeaconProposer, spec)? @@ -1296,7 +1335,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result<&Arc>, Error> { + ) -> Result<&Arc>, BeaconStateError> { let sync_committee_period = epoch.sync_committee_period(spec)?; let current_sync_committee_period = self.current_epoch().sync_committee_period(spec)?; let next_sync_committee_period = current_sync_committee_period.safe_add(1)?; @@ -1306,7 +1345,7 @@ impl BeaconState { } else if sync_committee_period == next_sync_committee_period { self.next_sync_committee() } else { - Err(Error::SyncCommitteeNotKnown { + Err(BeaconStateError::SyncCommitteeNotKnown { current_epoch: self.current_epoch(), epoch, }) @@ -1317,7 +1356,7 @@ impl BeaconState { pub fn get_sync_committee_indices( &mut self, sync_committee: &SyncCommittee, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { self.update_pubkey_cache()?; sync_committee .pubkeys @@ -1325,13 +1364,16 @@ impl BeaconState { .map(|pubkey| { self.pubkey_cache() .get(pubkey) - .ok_or(Error::PubkeyCacheInconsistent) + .ok_or(BeaconStateError::PubkeyCacheInconsistent) }) .collect() } /// Compute the sync committee indices for the next sync committee. 
- fn get_next_sync_committee_indices(&self, spec: &ChainSpec) -> Result, Error> { + fn get_next_sync_committee_indices( + &self, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let epoch = self.current_epoch().safe_add(1)?; let active_validator_indices = self.get_active_validator_indices(epoch, spec)?; @@ -1354,10 +1396,10 @@ impl BeaconState { seed.as_slice(), spec.shuffle_round_count, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let candidate_index = *active_validator_indices .get(shuffled_index) - .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(shuffled_index))?; let random_value = self.shuffling_random_value(i, seed.as_slice())?; let effective_balance = self.get_validator(candidate_index)?.effective_balance; if effective_balance.safe_mul(max_random_value)? @@ -1371,7 +1413,10 @@ impl BeaconState { } /// Compute the next sync committee. - pub fn get_next_sync_committee(&self, spec: &ChainSpec) -> Result, Error> { + pub fn get_next_sync_committee( + &self, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let sync_committee_indices = self.get_next_sync_committee_indices(spec)?; let pubkeys = sync_committee_indices @@ -1380,7 +1425,7 @@ impl BeaconState { self.validators() .get(index) .map(|v| v.pubkey) - .ok_or(Error::UnknownValidator(index)) + .ok_or(BeaconStateError::UnknownValidator(index)) }) .collect::, _>>()?; let decompressed_pubkeys = pubkeys @@ -1404,7 +1449,7 @@ impl BeaconState { epoch: Epoch, validator_indices: &[u64], spec: &ChainSpec, - ) -> Result, Error>>, Error> { + ) -> Result, BeaconStateError>>, BeaconStateError> { let sync_committee = self.get_built_sync_committee(epoch, spec)?; Ok(validator_indices @@ -1439,7 +1484,7 @@ impl BeaconState { /// Safely obtains the index for latest block roots, given some `slot`. 
/// /// Spec v0.12.1 - fn get_latest_block_roots_index(&self, slot: Slot) -> Result { + fn get_latest_block_roots_index(&self, slot: Slot) -> Result { if slot < self.slot() && self.slot() <= slot.safe_add(self.block_roots().len() as u64)? { Ok(slot.as_usize().safe_rem(self.block_roots().len())?) } else { @@ -1459,7 +1504,7 @@ impl BeaconState { let i = self.get_latest_block_roots_index(slot)?; self.block_roots() .get(i) - .ok_or(Error::BlockRootsOutOfBounds(i)) + .ok_or(BeaconStateError::BlockRootsOutOfBounds(i)) } /// Return the block root at a recent `epoch`. @@ -1479,12 +1524,12 @@ impl BeaconState { *self .block_roots_mut() .get_mut(i) - .ok_or(Error::BlockRootsOutOfBounds(i))? = block_root; + .ok_or(BeaconStateError::BlockRootsOutOfBounds(i))? = block_root; Ok(()) } /// Fill `randao_mixes` with - pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), Error> { + pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), BeaconStateError> { *self.randao_mixes_mut() = Vector::from_elem(index_root)?; Ok(()) } @@ -1496,7 +1541,7 @@ impl BeaconState { &self, epoch: Epoch, allow_next_epoch: AllowNextEpoch, - ) -> Result { + ) -> Result { let current_epoch = self.current_epoch(); let len = E::EpochsPerHistoricalVector::to_u64(); @@ -1505,7 +1550,7 @@ impl BeaconState { { Ok(epoch.as_usize().safe_rem(len as usize)?) } else { - Err(Error::EpochOutOfBounds) + Err(BeaconStateError::EpochOutOfBounds) } } @@ -1521,7 +1566,11 @@ impl BeaconState { /// # Errors: /// /// See `Self::get_randao_mix`. - pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> { + pub fn update_randao_mix( + &mut self, + epoch: Epoch, + signature: &Signature, + ) -> Result<(), BeaconStateError> { let i = epoch .as_usize() .safe_rem(E::EpochsPerHistoricalVector::to_usize())?; @@ -1531,36 +1580,36 @@ impl BeaconState { *self .randao_mixes_mut() .get_mut(i) - .ok_or(Error::RandaoMixesOutOfBounds(i))? 
= + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i))? = *self.get_randao_mix(epoch)? ^ signature_hash; Ok(()) } /// Return the randao mix at a recent ``epoch``. - pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> { + pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::False)?; self.randao_mixes() .get(i) - .ok_or(Error::RandaoMixesOutOfBounds(i)) + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i)) } /// Set the randao mix at a recent ``epoch``. /// /// Spec v0.12.1 - pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> { + pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), BeaconStateError> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::True)?; *self .randao_mixes_mut() .get_mut(i) - .ok_or(Error::RandaoMixesOutOfBounds(i))? = mix; + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i))? = mix; Ok(()) } /// Safely obtains the index for latest state roots, given some `slot`. /// /// Spec v0.12.1 - fn get_latest_state_roots_index(&self, slot: Slot) -> Result { + fn get_latest_state_roots_index(&self, slot: Slot) -> Result { if slot < self.slot() && self.slot() <= slot.safe_add(self.state_roots().len() as u64)? { Ok(slot.as_usize().safe_rem(self.state_roots().len())?) } else { @@ -1569,38 +1618,42 @@ impl BeaconState { } /// Gets the state root for some slot. - pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, Error> { + pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, BeaconStateError> { let i = self.get_latest_state_roots_index(slot)?; self.state_roots() .get(i) - .ok_or(Error::StateRootsOutOfBounds(i)) + .ok_or(BeaconStateError::StateRootsOutOfBounds(i)) } /// Gets the state root for the start slot of some epoch. 
- pub fn get_state_root_at_epoch_start(&self, epoch: Epoch) -> Result { + pub fn get_state_root_at_epoch_start(&self, epoch: Epoch) -> Result { self.get_state_root(epoch.start_slot(E::slots_per_epoch())) .copied() } /// Gets the oldest (earliest slot) state root. - pub fn get_oldest_state_root(&self) -> Result<&Hash256, Error> { + pub fn get_oldest_state_root(&self) -> Result<&Hash256, BeaconStateError> { let oldest_slot = self.slot().saturating_sub(self.state_roots().len()); self.get_state_root(oldest_slot) } /// Gets the oldest (earliest slot) block root. - pub fn get_oldest_block_root(&self) -> Result<&Hash256, Error> { + pub fn get_oldest_block_root(&self) -> Result<&Hash256, BeaconStateError> { let oldest_slot = self.slot().saturating_sub(self.block_roots().len()); self.get_block_root(oldest_slot) } /// Sets the latest state root for slot. - pub fn set_state_root(&mut self, slot: Slot, state_root: Hash256) -> Result<(), Error> { + pub fn set_state_root( + &mut self, + slot: Slot, + state_root: Hash256, + ) -> Result<(), BeaconStateError> { let i = self.get_latest_state_roots_index(slot)?; *self .state_roots_mut() .get_mut(i) - .ok_or(Error::StateRootsOutOfBounds(i))? = state_root; + .ok_or(BeaconStateError::StateRootsOutOfBounds(i))? = state_root; Ok(()) } @@ -1609,7 +1662,7 @@ impl BeaconState { &self, epoch: Epoch, allow_next_epoch: AllowNextEpoch, - ) -> Result { + ) -> Result { // We allow the slashings vector to be accessed at any cached epoch at or before // the current epoch, or the next epoch if `AllowNextEpoch::True` is passed. let current_epoch = self.current_epoch(); @@ -1620,7 +1673,7 @@ impl BeaconState { .as_usize() .safe_rem(E::EpochsPerSlashingsVector::to_usize())?) } else { - Err(Error::EpochOutOfBounds) + Err(BeaconStateError::EpochOutOfBounds) } } @@ -1630,21 +1683,21 @@ impl BeaconState { } /// Get the total slashed balances for some epoch. 
- pub fn get_slashings(&self, epoch: Epoch) -> Result { + pub fn get_slashings(&self, epoch: Epoch) -> Result { let i = self.get_slashings_index(epoch, AllowNextEpoch::False)?; self.slashings() .get(i) .copied() - .ok_or(Error::SlashingsOutOfBounds(i)) + .ok_or(BeaconStateError::SlashingsOutOfBounds(i)) } /// Set the total slashed balances for some epoch. - pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> { + pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), BeaconStateError> { let i = self.get_slashings_index(epoch, AllowNextEpoch::True)?; *self .slashings_mut() .get_mut(i) - .ok_or(Error::SlashingsOutOfBounds(i))? = value; + .ok_or(BeaconStateError::SlashingsOutOfBounds(i))? = value; Ok(()) } @@ -1684,10 +1737,10 @@ impl BeaconState { &mut ExitCache, &mut EpochCache, ), - Error, + BeaconStateError, > { match self { - BeaconState::Base(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(( &mut state.validators, &mut state.balances, @@ -1762,18 +1815,21 @@ impl BeaconState { } /// Get the balance of a single validator. - pub fn get_balance(&self, validator_index: usize) -> Result { + pub fn get_balance(&self, validator_index: usize) -> Result { self.balances() .get(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) + .ok_or(BeaconStateError::BalancesOutOfBounds(validator_index)) .copied() } /// Get a mutable reference to the balance of a single validator. - pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + pub fn get_balance_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut u64, BeaconStateError> { self.balances_mut() .get_mut(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) + .ok_or(BeaconStateError::BalancesOutOfBounds(validator_index)) } /// Generate a seed for the given `epoch`. 
@@ -1782,7 +1838,7 @@ impl BeaconState { epoch: Epoch, domain_type: Domain, spec: &ChainSpec, - ) -> Result { + ) -> Result { // Bypass the safe getter for RANDAO so we can gracefully handle the scenario where `epoch // == 0`. let mix = { @@ -1793,7 +1849,7 @@ impl BeaconState { let i_mod = i.as_usize().safe_rem(self.randao_mixes().len())?; self.randao_mixes() .get(i_mod) - .ok_or(Error::RandaoMixesOutOfBounds(i_mod))? + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i_mod))? }; let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type)); let epoch_bytes = int_to_bytes8(epoch.as_u64()); @@ -1812,17 +1868,20 @@ impl BeaconState { } /// Safe indexer for the `validators` list. - pub fn get_validator(&self, validator_index: usize) -> Result<&Validator, Error> { + pub fn get_validator(&self, validator_index: usize) -> Result<&Validator, BeaconStateError> { self.validators() .get(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Safe mutator for the `validators` list. - pub fn get_validator_mut(&mut self, validator_index: usize) -> Result<&mut Validator, Error> { + pub fn get_validator_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut Validator, BeaconStateError> { self.validators_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Add a validator to the registry and return the validator index that was allocated for it. 
@@ -1832,7 +1891,7 @@ impl BeaconState { withdrawal_credentials: Hash256, amount: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let index = self.validators().len(); let fork_name = self.fork_name_unchecked(); self.validators_mut().push(Validator::from_deposit( @@ -1864,7 +1923,7 @@ impl BeaconState { if pubkey_cache.len() == index { let success = pubkey_cache.insert(pubkey, index); if !success { - return Err(Error::PubkeyCacheInconsistent); + return Err(BeaconStateError::PubkeyCacheInconsistent); } } @@ -1875,14 +1934,14 @@ impl BeaconState { pub fn get_validator_cow( &mut self, validator_index: usize, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { self.validators_mut() .get_cow(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Return the effective balance for a validator with the given `validator_index`. - pub fn get_effective_balance(&self, validator_index: usize) -> Result { + pub fn get_effective_balance(&self, validator_index: usize) -> Result { self.get_validator(validator_index) .map(|v| v.effective_balance) } @@ -1890,20 +1949,27 @@ impl BeaconState { /// Get the inactivity score for a single validator. /// /// Will error if the state lacks an `inactivity_scores` field. - pub fn get_inactivity_score(&self, validator_index: usize) -> Result { + pub fn get_inactivity_score(&self, validator_index: usize) -> Result { self.inactivity_scores()? .get(validator_index) .copied() - .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + .ok_or(BeaconStateError::InactivityScoresOutOfBounds( + validator_index, + )) } /// Get a mutable reference to the inactivity score for a single validator. /// /// Will error if the state lacks an `inactivity_scores` field. 
- pub fn get_inactivity_score_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + pub fn get_inactivity_score_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut u64, BeaconStateError> { self.inactivity_scores_mut()? .get_mut(validator_index) - .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + .ok_or(BeaconStateError::InactivityScoresOutOfBounds( + validator_index, + )) } /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. @@ -1913,14 +1979,14 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result { + ) -> Result { Ok(spec.compute_activation_exit_epoch(epoch)?) } /// Return the churn limit for the current epoch (number of validators who can leave per epoch). /// /// Uses the current epoch committee cache, and will error if it isn't initialized. - pub fn get_validator_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_validator_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(std::cmp::max( spec.min_per_epoch_churn_limit, (self @@ -1933,7 +1999,7 @@ impl BeaconState { /// Return the activation churn limit for the current epoch (number of validators who can enter per epoch). /// /// Uses the current epoch committee cache, and will error if it isn't initialized. - pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(match self { BeaconState::Base(_) | BeaconState::Altair(_) @@ -1959,7 +2025,7 @@ impl BeaconState { &self, validator_index: usize, relative_epoch: RelativeEpoch, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.get_attestation_duties(validator_index)) @@ -1969,7 +2035,10 @@ impl BeaconState { /// /// This method should rarely be invoked because single-pass epoch processing keeps the total /// active balance cache up to date. 
- pub fn compute_total_active_balance_slow(&self, spec: &ChainSpec) -> Result { + pub fn compute_total_active_balance_slow( + &self, + spec: &ChainSpec, + ) -> Result { let current_epoch = self.current_epoch(); let mut total_active_balance = 0; @@ -1991,20 +2060,20 @@ impl BeaconState { /// the current committee cache is. /// /// Returns minimum `EFFECTIVE_BALANCE_INCREMENT`, to avoid div by 0. - pub fn get_total_active_balance(&self) -> Result { + pub fn get_total_active_balance(&self) -> Result { self.get_total_active_balance_at_epoch(self.current_epoch()) } /// Get the cached total active balance while checking that it is for the correct `epoch`. - pub fn get_total_active_balance_at_epoch(&self, epoch: Epoch) -> Result { + pub fn get_total_active_balance_at_epoch(&self, epoch: Epoch) -> Result { let (initialized_epoch, balance) = self .total_active_balance() - .ok_or(Error::TotalActiveBalanceCacheUninitialized)?; + .ok_or(BeaconStateError::TotalActiveBalanceCacheUninitialized)?; if initialized_epoch == epoch { Ok(balance) } else { - Err(Error::TotalActiveBalanceCacheInconsistent { + Err(BeaconStateError::TotalActiveBalanceCacheInconsistent { initialized_epoch, current_epoch: epoch, }) @@ -2024,7 +2093,10 @@ impl BeaconState { /// Build the total active balance cache for the current epoch if it is not already built. 
#[instrument(skip_all, level = "debug")] - pub fn build_total_active_balance_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_total_active_balance_cache( + &mut self, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { if self .get_total_active_balance_at_epoch(self.current_epoch()) .is_err() @@ -2038,7 +2110,7 @@ impl BeaconState { pub fn force_build_total_active_balance_cache( &mut self, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let total_active_balance = self.compute_total_active_balance_slow(spec)?; *self.total_active_balance_mut() = Some((self.current_epoch(), total_active_balance)); Ok(()) @@ -2055,7 +2127,7 @@ impl BeaconState { epoch: Epoch, previous_epoch: Epoch, current_epoch: Epoch, - ) -> Result<&mut List, Error> { + ) -> Result<&mut List, BeaconStateError> { if epoch == current_epoch { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), @@ -2085,7 +2157,7 @@ impl BeaconState { /// Build all caches (except the tree hash cache), if they need to be built. #[instrument(skip_all, level = "debug")] - pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { self.build_all_committee_caches(spec)?; self.update_pubkey_cache()?; self.build_exit_cache(spec)?; @@ -2096,7 +2168,7 @@ impl BeaconState { /// Build all committee caches, if they need to be built. #[instrument(skip_all, level = "debug")] - pub fn build_all_committee_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_all_committee_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { self.build_committee_cache(RelativeEpoch::Previous, spec)?; self.build_committee_cache(RelativeEpoch::Current, spec)?; self.build_committee_cache(RelativeEpoch::Next, spec)?; @@ -2105,7 +2177,7 @@ impl BeaconState { /// Build the exit cache, if it needs to be built. 
#[instrument(skip_all, level = "debug")] - pub fn build_exit_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_exit_cache(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { if self.exit_cache().check_initialized().is_err() { *self.exit_cache_mut() = ExitCache::new(self.validators(), spec)?; } @@ -2114,7 +2186,7 @@ impl BeaconState { /// Build the slashings cache if it needs to be built. #[instrument(skip_all, level = "debug")] - pub fn build_slashings_cache(&mut self) -> Result<(), Error> { + pub fn build_slashings_cache(&mut self) -> Result<(), BeaconStateError> { let latest_block_slot = self.latest_block_header().slot; if !self.slashings_cache().is_initialized(latest_block_slot) { *self.slashings_cache_mut() = SlashingsCache::new(latest_block_slot, self.validators()); @@ -2128,7 +2200,7 @@ impl BeaconState { } /// Drop all caches on the state. - pub fn drop_all_caches(&mut self) -> Result<(), Error> { + pub fn drop_all_caches(&mut self) -> Result<(), BeaconStateError> { self.drop_total_active_balance_cache(); self.drop_committee_cache(RelativeEpoch::Previous)?; self.drop_committee_cache(RelativeEpoch::Current)?; @@ -2156,7 +2228,7 @@ impl BeaconState { &mut self, relative_epoch: RelativeEpoch, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let i = Self::committee_cache_index(relative_epoch); let is_initialized = self .committee_cache_at_index(i)? 
@@ -2177,7 +2249,7 @@ impl BeaconState { &mut self, relative_epoch: RelativeEpoch, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let epoch = relative_epoch.into_epoch(self.current_epoch()); let i = Self::committee_cache_index(relative_epoch); @@ -2193,7 +2265,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { CommitteeCache::initialized(self, epoch, spec) } @@ -2203,7 +2275,7 @@ impl BeaconState { /// /// Note: this function will not build any new committee caches, nor will it update the total /// active balance cache. The total active balance cache must be updated separately. - pub fn advance_caches(&mut self) -> Result<(), Error> { + pub fn advance_caches(&mut self) -> Result<(), BeaconStateError> { self.committee_caches_mut().rotate_left(1); let next = Self::committee_cache_index(RelativeEpoch::Next); @@ -2237,27 +2309,33 @@ impl BeaconState { /// Get the committee cache for some `slot`. /// /// Return an error if the cache for the slot's epoch is not initialized. - fn committee_cache_at_slot(&self, slot: Slot) -> Result<&Arc, Error> { + fn committee_cache_at_slot( + &self, + slot: Slot, + ) -> Result<&Arc, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; self.committee_cache(relative_epoch) } /// Get the committee cache at a given index. - fn committee_cache_at_index(&self, index: usize) -> Result<&Arc, Error> { + fn committee_cache_at_index( + &self, + index: usize, + ) -> Result<&Arc, BeaconStateError> { self.committee_caches() .get(index) - .ok_or(Error::CommitteeCachesOutOfBounds(index)) + .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } /// Get a mutable reference to the committee cache at a given index. 
fn committee_cache_at_index_mut( &mut self, index: usize, - ) -> Result<&mut Arc, Error> { + ) -> Result<&mut Arc, BeaconStateError> { self.committee_caches_mut() .get_mut(index) - .ok_or(Error::CommitteeCachesOutOfBounds(index)) + .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been @@ -2265,19 +2343,24 @@ impl BeaconState { pub fn committee_cache( &self, relative_epoch: RelativeEpoch, - ) -> Result<&Arc, Error> { + ) -> Result<&Arc, BeaconStateError> { let i = Self::committee_cache_index(relative_epoch); let cache = self.committee_cache_at_index(i)?; if cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) { Ok(cache) } else { - Err(Error::CommitteeCacheUninitialized(Some(relative_epoch))) + Err(BeaconStateError::CommitteeCacheUninitialized(Some( + relative_epoch, + ))) } } /// Drops the cache, leaving it in an uninitialized state. - pub fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) -> Result<(), Error> { + pub fn drop_committee_cache( + &mut self, + relative_epoch: RelativeEpoch, + ) -> Result<(), BeaconStateError> { *self.committee_cache_at_index_mut(Self::committee_cache_index(relative_epoch))? = Arc::new(CommitteeCache::default()); Ok(()) @@ -2288,7 +2371,7 @@ impl BeaconState { /// Adds all `pubkeys` from the `validators` which are not already in the cache. Will /// never re-add a pubkey. 
#[instrument(skip_all, level = "debug")] - pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { + pub fn update_pubkey_cache(&mut self) -> Result<(), BeaconStateError> { let mut pubkey_cache = mem::take(self.pubkey_cache_mut()); let start_index = pubkey_cache.len(); @@ -2296,7 +2379,7 @@ impl BeaconState { let index = start_index.safe_add(i)?; let success = pubkey_cache.insert(validator.pubkey, index); if !success { - return Err(Error::PubkeyCacheInconsistent); + return Err(BeaconStateError::PubkeyCacheInconsistent); } } *self.pubkey_cache_mut() = pubkey_cache; @@ -2374,7 +2457,7 @@ impl BeaconState { /// /// Initialize the tree hash cache if it isn't already initialized. #[instrument(skip_all, level = "debug")] - pub fn update_tree_hash_cache<'a>(&'a mut self) -> Result { + pub fn update_tree_hash_cache<'a>(&'a mut self) -> Result { self.apply_pending_mutations()?; map_beacon_state_ref!(&'a _, self.to_ref(), |inner, cons| { let root = inner.tree_hash_root(); @@ -2386,7 +2469,7 @@ impl BeaconState { /// Compute the tree hash root of the validators using the tree hash cache. /// /// Initialize the tree hash cache if it isn't already initialized. - pub fn update_validators_tree_hash_cache(&mut self) -> Result { + pub fn update_validators_tree_hash_cache(&mut self) -> Result { self.validators_mut().apply_updates()?; Ok(self.validators().tree_hash_root()) } @@ -2397,7 +2480,7 @@ impl BeaconState { &self, previous_epoch: Epoch, val: &Validator, - ) -> Result { + ) -> Result { Ok(val.is_active_at(previous_epoch) || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch)) } @@ -2421,7 +2504,7 @@ impl BeaconState { pub fn get_sync_committee_for_next_slot( &self, spec: &ChainSpec, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let next_slot_epoch = self .slot() .saturating_add(Slot::new(1)) @@ -2447,7 +2530,7 @@ impl BeaconState { // ******* Electra accessors ******* /// Return the churn limit for the current epoch. 
- pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { let total_active_balance = self.get_total_active_balance()?; let churn = std::cmp::max( spec.min_per_epoch_churn_limit_electra, @@ -2458,20 +2541,26 @@ impl BeaconState { } /// Return the churn limit for the current epoch dedicated to activations and exits. - pub fn get_activation_exit_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_activation_exit_churn_limit( + &self, + spec: &ChainSpec, + ) -> Result { Ok(std::cmp::min( spec.max_per_epoch_activation_exit_churn_limit, self.get_balance_churn_limit(spec)?, )) } - pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { self.get_balance_churn_limit(spec)? .safe_sub(self.get_activation_exit_churn_limit(spec)?) .map_err(Into::into) } - pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result { + pub fn get_pending_balance_to_withdraw( + &self, + validator_index: usize, + ) -> Result { let mut pending_balance = 0; for withdrawal in self .pending_partial_withdrawals()? 
@@ -2489,11 +2578,11 @@ impl BeaconState { &mut self, validator_index: usize, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let balance = self .balances_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; if *balance > spec.min_activation_balance { let excess_balance = balance.safe_sub(spec.min_activation_balance)?; *balance = spec.min_activation_balance; @@ -2514,11 +2603,11 @@ impl BeaconState { &mut self, validator_index: usize, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let validator = self .validators_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = spec.compounding_withdrawal_prefix_byte; @@ -2530,7 +2619,7 @@ impl BeaconState { &mut self, exit_balance: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let mut earliest_exit_epoch = std::cmp::max( self.earliest_exit_epoch()?, self.compute_activation_exit_epoch(self.current_epoch(), spec)?, @@ -2560,7 +2649,7 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) - | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + | BeaconState::Deneb(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Electra(_) | BeaconState::Fulu(_) | BeaconState::Gloas(_) => { // Consume the balance and update state variables *self.exit_balance_to_consume_mut()? 
= @@ -2575,7 +2664,7 @@ impl BeaconState { &mut self, consolidation_balance: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let mut earliest_consolidation_epoch = std::cmp::max( self.earliest_consolidation_epoch()?, self.compute_activation_exit_epoch(self.current_epoch(), spec)?, @@ -2607,7 +2696,7 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) - | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + | BeaconState::Deneb(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Electra(_) | BeaconState::Fulu(_) | BeaconState::Gloas(_) => { // Consume the balance and update state variables. *self.consolidation_balance_to_consume_mut()? = @@ -2619,7 +2708,7 @@ impl BeaconState { } #[allow(clippy::arithmetic_side_effects)] - pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), BeaconStateError> { // Required for macros (which use type-hints internally). match (&mut *self, base) { @@ -2710,7 +2799,11 @@ impl BeaconState { Ok(()) } - pub fn rebase_caches_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + pub fn rebase_caches_on( + &mut self, + base: &Self, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { // Use pubkey cache from `base` if it contains superior information (likely if our cache is // uninitialized). Be careful not to use a cache which has *more* validators than expected, // as other code expects `self.pubkey_cache().len() <= self.validators.len()`. 
@@ -2799,7 +2892,7 @@ impl BeaconState { } #[allow(clippy::arithmetic_side_effects)] - pub fn apply_pending_mutations(&mut self) -> Result<(), Error> { + pub fn apply_pending_mutations(&mut self) -> Result<(), BeaconStateError> { match self { Self::Base(inner) => { map_beacon_state_base_tree_list_fields!(inner, |_, x| { x.apply_updates() }) @@ -2829,43 +2922,43 @@ impl BeaconState { Ok(()) } - pub fn compute_current_sync_committee_proof(&self) -> Result, Error> { + pub fn compute_current_sync_committee_proof(&self) -> Result, BeaconStateError> { // Sync committees are top-level fields, subtract off the generalized indices // for the internal nodes. Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate let field_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA } else { - light_client_update::CURRENT_SYNC_COMMITTEE_INDEX + CURRENT_SYNC_COMMITTEE_INDEX }; let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } - pub fn compute_next_sync_committee_proof(&self) -> Result, Error> { + pub fn compute_next_sync_committee_proof(&self) -> Result, BeaconStateError> { // Sync committees are top-level fields, subtract off the generalized indices // for the internal nodes. 
Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate let field_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + NEXT_SYNC_COMMITTEE_INDEX_ELECTRA } else { - light_client_update::NEXT_SYNC_COMMITTEE_INDEX + NEXT_SYNC_COMMITTEE_INDEX }; let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } - pub fn compute_finalized_root_proof(&self) -> Result, Error> { + pub fn compute_finalized_root_proof(&self) -> Result, BeaconStateError> { // Finalized root is the right child of `finalized_checkpoint`, divide by two to get // the generalized index of `state.finalized_checkpoint`. let checkpoint_root_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + FINALIZED_ROOT_INDEX_ELECTRA } else { - light_client_update::FINALIZED_ROOT_INDEX + FINALIZED_ROOT_INDEX }; let checkpoint_gindex = checkpoint_root_gindex / 2; @@ -2888,9 +2981,9 @@ impl BeaconState { &self, field_index: usize, leaves: &[Hash256], - ) -> Result, Error> { + ) -> Result, BeaconStateError> { if field_index >= leaves.len() { - return Err(Error::IndexNotSupported(field_index)); + return Err(BeaconStateError::IndexNotSupported(field_index)); } let depth = self.num_fields_pow2().ilog2() as usize; @@ -2949,45 +3042,45 @@ impl BeaconState { } } -impl From for Error { - fn from(e: RelativeEpochError) -> Error { - Error::RelativeEpochError(e) +impl From for BeaconStateError { + fn from(e: RelativeEpochError) -> BeaconStateError { + BeaconStateError::RelativeEpochError(e) } } -impl From for Error { - fn from(e: ssz_types::Error) -> Error { - Error::SszTypesError(e) +impl From for BeaconStateError { + fn from(e: ssz_types::Error) -> BeaconStateError { + 
BeaconStateError::SszTypesError(e) } } -impl From for Error { - fn from(e: bls::Error) -> Error { - Error::BlsError(e) +impl From for BeaconStateError { + fn from(e: bls::Error) -> BeaconStateError { + BeaconStateError::BlsError(e) } } -impl From for Error { - fn from(e: tree_hash::Error) -> Error { - Error::TreeHashError(e) +impl From for BeaconStateError { + fn from(e: tree_hash::Error) -> BeaconStateError { + BeaconStateError::TreeHashError(e) } } -impl From for Error { - fn from(e: merkle_proof::MerkleTreeError) -> Error { - Error::MerkleTreeError(e) +impl From for BeaconStateError { + fn from(e: merkle_proof::MerkleTreeError) -> BeaconStateError { + BeaconStateError::MerkleTreeError(e) } } -impl From for Error { - fn from(e: ArithError) -> Error { - Error::ArithError(e) +impl From for BeaconStateError { + fn from(e: ArithError) -> BeaconStateError { + BeaconStateError::ArithError(e) } } -impl From for Error { - fn from(e: milhouse::Error) -> Self { - Self::MilhouseError(e) +impl From for BeaconStateError { + fn from(e: milhouse::Error) -> BeaconStateError { + BeaconStateError::MilhouseError(e) } } diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/state/committee_cache.rs similarity index 93% rename from consensus/types/src/beacon_state/committee_cache.rs rename to consensus/types/src/state/committee_cache.rs index 408c269da5..15f6a4cd37 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/state/committee_cache.rs @@ -1,17 +1,20 @@ #![allow(clippy::arithmetic_side_effects)] -use crate::*; -use core::num::NonZeroUsize; +use std::{num::NonZeroUsize, ops::Range, sync::Arc}; + use educe::Educe; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; -use std::ops::Range; -use std::sync::Arc; use swap_or_not_shuffle::shuffle_list; -mod tests; +use crate::{ + attestation::{AttestationDuty, 
BeaconCommittee, CommitteeIndex}, + core::{ChainSpec, Domain, Epoch, EthSpec, Slot}, + state::{BeaconState, BeaconStateError}, + validator::Validator, +}; // Define "legacy" implementations of `Option`, `Option` which use four bytes // for encoding the union selector. @@ -66,7 +69,7 @@ impl CommitteeCache { state: &BeaconState, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // Check that the cache is being built for an in-range epoch. // // We allow caches to be constructed for historic epochs, per: @@ -77,23 +80,23 @@ impl CommitteeCache { .saturating_sub(1u64); if reqd_randao_epoch < state.min_randao_epoch() || epoch > state.current_epoch() + 1 { - return Err(Error::EpochOutOfBounds); + return Err(BeaconStateError::EpochOutOfBounds); } // May cause divide-by-zero errors. if E::slots_per_epoch() == 0 { - return Err(Error::ZeroSlotsPerEpoch); + return Err(BeaconStateError::ZeroSlotsPerEpoch); } // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. if state.validators().len() == usize::MAX { - return Err(Error::TooManyValidators); + return Err(BeaconStateError::TooManyValidators); } let active_validator_indices = get_active_validator_indices(state.validators(), epoch); if active_validator_indices.is_empty() { - return Err(Error::InsufficientValidators); + return Err(BeaconStateError::InsufficientValidators); } let committees_per_slot = @@ -107,13 +110,14 @@ impl CommitteeCache { &seed[..], false, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let mut shuffling_positions = vec![<_>::default(); state.validators().len()]; for (i, &v) in shuffling.iter().enumerate() { *shuffling_positions .get_mut(v) - .ok_or(Error::ShuffleIndexOutOfBounds(v))? = NonZeroUsize::new(i + 1).into(); + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(v))? 
= + NonZeroUsize::new(i + 1).into(); } Ok(Arc::new(CommitteeCache { @@ -188,24 +192,24 @@ impl CommitteeCache { pub fn get_beacon_committees_at_slot( &self, slot: Slot, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { if self.initialized_epoch.is_none() { - return Err(Error::CommitteeCacheUninitialized(None)); + return Err(BeaconStateError::CommitteeCacheUninitialized(None)); } (0..self.committees_per_slot()) .map(|index| { self.get_beacon_committee(slot, index) - .ok_or(Error::NoCommittee { slot, index }) + .ok_or(BeaconStateError::NoCommittee { slot, index }) }) .collect() } /// Returns all committees for `self.initialized_epoch`. - pub fn get_all_beacon_committees(&self) -> Result>, Error> { + pub fn get_all_beacon_committees(&self) -> Result>, BeaconStateError> { let initialized_epoch = self .initialized_epoch - .ok_or(Error::CommitteeCacheUninitialized(None))?; + .ok_or(BeaconStateError::CommitteeCacheUninitialized(None))?; initialized_epoch.slot_iter(self.slots_per_epoch).try_fold( Vec::with_capacity(self.epoch_committee_count()), diff --git a/consensus/types/src/epoch_cache.rs b/consensus/types/src/state/epoch_cache.rs similarity index 97% rename from consensus/types/src/epoch_cache.rs rename to consensus/types/src/state/epoch_cache.rs index 9956cb400a..cdea0d143d 100644 --- a/consensus/types/src/epoch_cache.rs +++ b/consensus/types/src/state/epoch_cache.rs @@ -1,7 +1,12 @@ -use crate::{ActivationQueue, BeaconStateError, ChainSpec, Epoch, Hash256, Slot}; -use safe_arith::{ArithError, SafeArith}; use std::sync::Arc; +use safe_arith::{ArithError, SafeArith}; + +use crate::{ + core::{ChainSpec, Epoch, Hash256, Slot}, + state::{ActivationQueue, BeaconStateError}, +}; + /// Cache of values which are uniquely determined at the start of an epoch. 
/// /// The values are fixed with respect to the last block of the _prior_ epoch, which we refer diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/state/exit_cache.rs similarity index 97% rename from consensus/types/src/beacon_state/exit_cache.rs rename to consensus/types/src/state/exit_cache.rs index 2828a6138c..43809d1af0 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/state/exit_cache.rs @@ -1,7 +1,13 @@ -use super::{BeaconStateError, ChainSpec, Epoch, Validator}; -use safe_arith::SafeArith; use std::cmp::Ordering; +use safe_arith::SafeArith; + +use crate::{ + core::{ChainSpec, Epoch}, + state::BeaconStateError, + validator::Validator, +}; + /// Map from exit epoch to the number of validators with that exit epoch. #[derive(Debug, Default, Clone, PartialEq)] pub struct ExitCache { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/state/historical_batch.rs similarity index 81% rename from consensus/types/src/historical_batch.rs rename to consensus/types/src/state/historical_batch.rs index 55377f2489..0167d64f62 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/state/historical_batch.rs @@ -1,11 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use context_deserialize::context_deserialize; +use milhouse::Vector; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Historical block and state roots. 
/// /// Spec v0.12.1 @@ -26,6 +31,7 @@ pub struct HistoricalBatch { #[cfg(test)] mod tests { use super::*; + use crate::core::MainnetEthSpec; pub type FoundationHistoricalBatch = HistoricalBatch; diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/state/historical_summary.rs similarity index 87% rename from consensus/types/src/historical_summary.rs rename to consensus/types/src/state/historical_summary.rs index dc147ad042..f520e46483 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/state/historical_summary.rs @@ -1,13 +1,18 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{BeaconState, EthSpec, ForkName, Hash256}; use compare_fields::CompareFields; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256}, + fork::ForkName, + state::BeaconState, + test_utils::TestRandom, +}; + /// `HistoricalSummary` matches the components of the phase0 `HistoricalBatch` /// making the two hash_tree_root-compatible. This struct is introduced into the beacon state /// in the Capella hard fork. diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/state/iter.rs similarity index 96% rename from consensus/types/src/beacon_state/iter.rs rename to consensus/types/src/state/iter.rs index d99c769e40..d761a6bd85 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/state/iter.rs @@ -1,4 +1,7 @@ -use crate::*; +use crate::{ + core::{EthSpec, Hash256, Slot}, + state::{BeaconState, BeaconStateError}, +}; /// Returns an iterator across the past block roots of `state` in descending slot-order. 
/// @@ -28,7 +31,7 @@ impl<'a, E: EthSpec> BlockRootsIter<'a, E> { } impl Iterator for BlockRootsIter<'_, E> { - type Item = Result<(Slot, Hash256), Error>; + type Item = Result<(Slot, Hash256), BeaconStateError>; fn next(&mut self) -> Option { if self.prev > self.genesis_slot diff --git a/consensus/types/src/state/mod.rs b/consensus/types/src/state/mod.rs new file mode 100644 index 0000000000..309796d359 --- /dev/null +++ b/consensus/types/src/state/mod.rs @@ -0,0 +1,35 @@ +mod activation_queue; +mod balance; +mod beacon_state; +#[macro_use] +mod committee_cache; +mod epoch_cache; +mod exit_cache; +mod historical_batch; +mod historical_summary; +mod iter; +mod progressive_balances_cache; +mod pubkey_cache; +mod slashings_cache; + +pub use activation_queue::ActivationQueue; +pub use balance::Balance; +pub use beacon_state::{ + BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateBellatrix, BeaconStateCapella, + BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateFulu, BeaconStateGloas, + BeaconStateHash, BeaconStateRef, CACHED_EPOCHS, +}; +pub use committee_cache::{ + CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, + epoch_committee_count, get_active_validator_indices, +}; +pub use epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; +pub use exit_cache::ExitCache; +pub use historical_batch::HistoricalBatch; +pub use historical_summary::HistoricalSummary; +pub use iter::BlockRootsIter; +pub use progressive_balances_cache::{ + EpochTotalBalances, ProgressiveBalancesCache, is_progressive_balances_enabled, +}; +pub use pubkey_cache::PubkeyCache; +pub use slashings_cache::SlashingsCache; diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/state/progressive_balances_cache.rs similarity index 98% rename from consensus/types/src/beacon_state/progressive_balances_cache.rs rename to consensus/types/src/state/progressive_balances_cache.rs index 67d1155dbf..1e4c311f9a 
100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/state/progressive_balances_cache.rs @@ -1,15 +1,17 @@ -use crate::beacon_state::balance::Balance; -use crate::{ - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, - consts::altair::{ - NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, - TIMELY_TARGET_FLAG_INDEX, - }, -}; #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use safe_arith::SafeArith; +use crate::{ + attestation::ParticipationFlags, + core::consts::altair::{ + NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, + TIMELY_TARGET_FLAG_INDEX, + }, + core::{ChainSpec, Epoch, EthSpec}, + state::{Balance, BeaconState, BeaconStateError}, +}; + /// This cache keeps track of the accumulated target attestation balance for the current & previous /// epochs. The cached values can be utilised by fork choice to calculate unrealized justification /// and finalization instead of converting epoch participation arrays to balances for each block we diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/state/pubkey_cache.rs similarity index 98% rename from consensus/types/src/beacon_state/pubkey_cache.rs rename to consensus/types/src/state/pubkey_cache.rs index 85ed00340d..e62fafb53a 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/state/pubkey_cache.rs @@ -1,4 +1,4 @@ -use crate::*; +use bls::PublicKeyBytes; use rpds::HashTrieMapSync as HashTrieMap; type ValidatorIndex = usize; diff --git a/consensus/types/src/beacon_state/slashings_cache.rs b/consensus/types/src/state/slashings_cache.rs similarity index 96% rename from consensus/types/src/beacon_state/slashings_cache.rs rename to consensus/types/src/state/slashings_cache.rs index 6530f795e9..b6ed583df8 100644 --- a/consensus/types/src/beacon_state/slashings_cache.rs +++ b/consensus/types/src/state/slashings_cache.rs @@ -1,8 +1,9 @@ -use 
crate::{BeaconStateError, Slot, Validator}; #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use rpds::HashTrieSetSync as HashTrieSet; +use crate::{core::Slot, state::BeaconStateError, validator::Validator}; + /// Persistent (cheap to clone) cache of all slashed validator indices. #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Default, Clone, PartialEq)] diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/sync_committee/contribution_and_proof.rs similarity index 88% rename from consensus/types/src/contribution_and_proof.rs rename to consensus/types/src/sync_committee/contribution_and_proof.rs index 4d70cd1f8a..2a344b89de 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/sync_committee/contribution_and_proof.rs @@ -1,14 +1,17 @@ -use super::{ - ChainSpec, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, - SyncCommitteeContribution, SyncSelectionProof, -}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + sync_committee::{SyncCommitteeContribution, SyncSelectionProof}, + test_utils::TestRandom, +}; + /// A Validators aggregate sync committee contribution and selection proof. 
#[cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/sync_committee/mod.rs b/consensus/types/src/sync_committee/mod.rs new file mode 100644 index 0000000000..5a75975fe0 --- /dev/null +++ b/consensus/types/src/sync_committee/mod.rs @@ -0,0 +1,25 @@ +mod contribution_and_proof; +mod signed_contribution_and_proof; +mod sync_aggregate; +mod sync_aggregator_selection_data; +mod sync_committee; +mod sync_committee_contribution; +mod sync_committee_message; +mod sync_committee_subscription; +mod sync_duty; +mod sync_selection_proof; +mod sync_subnet_id; + +pub use contribution_and_proof::ContributionAndProof; +pub use signed_contribution_and_proof::SignedContributionAndProof; +pub use sync_aggregate::{Error as SyncAggregateError, SyncAggregate}; +pub use sync_aggregator_selection_data::SyncAggregatorSelectionData; +pub use sync_committee::{Error as SyncCommitteeError, SyncCommittee}; +pub use sync_committee_contribution::{ + Error as SyncCommitteeContributionError, SyncCommitteeContribution, SyncContributionData, +}; +pub use sync_committee_message::SyncCommitteeMessage; +pub use sync_committee_subscription::SyncCommitteeSubscription; +pub use sync_duty::SyncDuty; +pub use sync_selection_proof::SyncSelectionProof; +pub use sync_subnet_id::{SyncSubnetId, sync_subnet_id_to_string}; diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/sync_committee/signed_contribution_and_proof.rs similarity index 87% rename from consensus/types/src/signed_contribution_and_proof.rs rename to consensus/types/src/sync_committee/signed_contribution_and_proof.rs index 51c453d32f..0027003b9f 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/sync_committee/signed_contribution_and_proof.rs @@ -1,14 +1,17 @@ -use super::{ - ChainSpec, ContributionAndProof, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, - Signature, SignedRoot, SyncCommitteeContribution, SyncSelectionProof, -}; -use 
crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + sync_committee::{ContributionAndProof, SyncCommitteeContribution, SyncSelectionProof}, + test_utils::TestRandom, +}; + /// A Validators signed contribution proof to publish on the `sync_committee_contribution_and_proof` /// gossipsub topic. #[cfg_attr( diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_committee/sync_aggregate.rs similarity index 91% rename from consensus/types/src/sync_aggregate.rs rename to consensus/types/src/sync_committee/sync_aggregate.rs index ba6d840a52..e5848aa22c 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_committee/sync_aggregate.rs @@ -1,14 +1,20 @@ -use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{AggregateSignature, BitVector, EthSpec, ForkName, SyncCommitteeContribution}; +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use educe::Educe; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT}, + fork::ForkName, + sync_committee::SyncCommitteeContribution, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_committee/sync_aggregator_selection_data.rs similarity index 82% rename from 
consensus/types/src/sync_aggregator_selection_data.rs rename to consensus/types/src/sync_committee/sync_aggregator_selection_data.rs index a280369fea..e905ca036b 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_committee/sync_aggregator_selection_data.rs @@ -1,11 +1,15 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, SignedRoot, Slot}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{SignedRoot, Slot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Clone, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee/sync_committee.rs similarity index 95% rename from consensus/types/src/sync_committee.rs rename to consensus/types/src/sync_committee/sync_committee.rs index a9fde42554..5448411800 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee/sync_committee.rs @@ -1,14 +1,16 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{EthSpec, FixedVector, ForkName, SyncSubnetId}; +use std::collections::HashMap; + use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; +use ssz_types::FixedVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::EthSpec, fork::ForkName, sync_committee::SyncSubnetId, test_utils::TestRandom}; + #[derive(Debug, PartialEq)] pub enum Error { ArithError(ArithError), diff --git a/consensus/types/src/sync_committee_contribution.rs 
b/consensus/types/src/sync_committee/sync_committee_contribution.rs similarity index 93% rename from consensus/types/src/sync_committee_contribution.rs rename to consensus/types/src/sync_committee/sync_committee_contribution.rs index db22a3bdbc..09376fbe5c 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee/sync_committee_contribution.rs @@ -1,12 +1,18 @@ -use super::{AggregateSignature, EthSpec, ForkName, SignedRoot}; -use crate::context_deserialize; -use crate::slot_data::SlotData; -use crate::{BitVector, Hash256, Slot, SyncCommitteeMessage, test_utils::TestRandom}; +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::ForkName, + sync_committee::SyncCommitteeMessage, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee/sync_committee_message.rs similarity index 88% rename from consensus/types/src/sync_committee_message.rs rename to consensus/types/src/sync_committee/sync_committee_message.rs index d5bb7250bb..ed42555c43 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee/sync_committee_message.rs @@ -1,14 +1,16 @@ -use crate::context_deserialize; -use crate::slot_data::SlotData; -use crate::test_utils::TestRandom; -use crate::{ - ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, Slot, -}; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use 
crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// The data upon which a `SyncCommitteeContribution` is based. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee/sync_committee_subscription.rs similarity index 96% rename from consensus/types/src/sync_committee_subscription.rs rename to consensus/types/src/sync_committee/sync_committee_subscription.rs index 8e040279d7..6365b015dd 100644 --- a/consensus/types/src/sync_committee_subscription.rs +++ b/consensus/types/src/sync_committee/sync_committee_subscription.rs @@ -1,7 +1,8 @@ -use crate::Epoch; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use crate::core::Epoch; + /// A sync committee subscription created when a validator subscribes to sync committee subnets to perform /// sync committee duties. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_committee/sync_duty.rs similarity index 96% rename from consensus/types/src/sync_duty.rs rename to consensus/types/src/sync_committee/sync_duty.rs index 59fbc960db..773cc008f9 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_committee/sync_duty.rs @@ -1,8 +1,13 @@ -use crate::{EthSpec, SyncCommittee, SyncSubnetId}; +use std::collections::HashSet; + use bls::PublicKeyBytes; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; -use std::collections::HashSet; + +use crate::{ + core::EthSpec, + sync_committee::{SyncCommittee, SyncSubnetId}, +}; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncDuty { diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_committee/sync_selection_proof.rs similarity index 92% rename from consensus/types/src/sync_selection_proof.rs rename to consensus/types/src/sync_committee/sync_selection_proof.rs index b1e9e8186f..7efc6c4c76 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_committee/sync_selection_proof.rs @@ -1,16 +1,20 @@ -use crate::consts::altair::{ - SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE, -}; -use crate::{ - ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, - SyncAggregatorSelectionData, -}; +use std::cmp; + +use bls::{PublicKey, SecretKey, Signature}; use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_types::typenum::Unsigned; -use std::cmp; + +use crate::{ + core::{ + ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, + consts::altair::{SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE}, + }, + fork::Fork, + sync_committee::SyncAggregatorSelectionData, +}; #[cfg_attr(feature = 
"arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_committee/sync_subnet_id.rs similarity index 92% rename from consensus/types/src/sync_subnet_id.rs rename to consensus/types/src/sync_committee/sync_subnet_id.rs index 3d0d853fca..fb58146178 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_committee/sync_subnet_id.rs @@ -1,13 +1,16 @@ //! Identifies each sync committee subnet by an integer identifier. -use crate::EthSpec; -use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; +use std::{ + collections::HashSet, + fmt::{self, Display}, + ops::{Deref, DerefMut}, + sync::LazyLock, +}; + use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_types::typenum::Unsigned; -use std::collections::HashSet; -use std::fmt::{self, Display}; -use std::ops::{Deref, DerefMut}; -use std::sync::LazyLock; + +use crate::core::{EthSpec, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT}; static SYNC_SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { let mut v = Vec::with_capacity(SYNC_COMMITTEE_SUBNET_COUNT as usize); diff --git a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs index f30afda257..5ccd748c25 100644 --- a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,7 +1,8 @@ -use crate::*; +use std::path::PathBuf; + +use bls::Keypair; use eth2_interop_keypairs::{keypair, keypairs_from_yaml_file}; use rayon::prelude::*; -use std::path::PathBuf; use tracing::debug; /// Generates `validator_count` keypairs where the secret key is derived solely from the index of diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs index 
8f4908291e..cf7b5df891 100644 --- a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -1,11 +1,16 @@ +use bls::Signature; +use kzg::{KzgCommitment, KzgProof}; use rand::Rng; -use kzg::{KzgCommitment, KzgProof}; - -use crate::beacon_block_body::KzgCommitments; -use crate::*; - -use super::*; +use crate::{ + block::{BeaconBlock, SignedBeaconBlock}, + core::{EthSpec, MainnetEthSpec}, + data::{Blob, BlobSidecar, BlobsList}, + execution::FullPayload, + fork::{ForkName, map_fork_name}, + kzg_ext::{KzgCommitments, KzgProofs}, + test_utils::TestRandom, +}; type BlobsBundle = (KzgCommitments, KzgProofs, BlobsList); @@ -73,6 +78,7 @@ pub fn generate_blobs(n_blobs: usize) -> Result, Stri mod test { use super::*; use rand::rng; + use ssz_types::FixedVector; #[test] fn test_verify_blob_inclusion_proof() { diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index 37d58d4342..c4409b4392 100644 --- a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -1,17 +1,5 @@ #![allow(clippy::arithmetic_side_effects)] -use std::fmt::Debug; - -pub use rand::{RngCore, SeedableRng}; -pub use rand_xorshift::XorShiftRng; - -pub use generate_deterministic_keypairs::generate_deterministic_keypair; -pub use generate_deterministic_keypairs::generate_deterministic_keypairs; -pub use generate_deterministic_keypairs::load_keypairs_from_yaml; -use ssz::{Decode, Encode, ssz_encode}; -pub use test_random::{TestRandom, test_random_instance}; -use tree_hash::TreeHash; - #[macro_use] mod macros; mod generate_deterministic_keypairs; @@ -19,6 +7,18 @@ mod generate_deterministic_keypairs; mod generate_random_block_and_blobs; mod test_random; +pub use generate_deterministic_keypairs::generate_deterministic_keypair; +pub use generate_deterministic_keypairs::generate_deterministic_keypairs; +pub use 
generate_deterministic_keypairs::load_keypairs_from_yaml; +pub use test_random::{TestRandom, test_random_instance}; + +pub use rand::{RngCore, SeedableRng}; +pub use rand_xorshift::XorShiftRng; + +use ssz::{Decode, Encode, ssz_encode}; +use std::fmt::Debug; +use tree_hash::TreeHash; + pub fn test_ssz_tree_hash_pair(v1: &T, v2: &U) where T: TreeHash + Encode + Decode + Debug + PartialEq, diff --git a/consensus/types/src/test_utils/test_random/address.rs b/consensus/types/src/test_utils/test_random/address.rs index 421801ce53..2f601cb91e 100644 --- a/consensus/types/src/test_utils/test_random/address.rs +++ b/consensus/types/src/test_utils/test_random/address.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Address, test_utils::TestRandom}; impl TestRandom for Address { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = vec![0; 20]; rng.fill_bytes(&mut key_bytes); Address::from_slice(&key_bytes[..]) diff --git a/consensus/types/src/test_utils/test_random/aggregate_signature.rs b/consensus/types/src/test_utils/test_random/aggregate_signature.rs index 772f284431..f9f3dd9567 100644 --- a/consensus/types/src/test_utils/test_random/aggregate_signature.rs +++ b/consensus/types/src/test_utils/test_random/aggregate_signature.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::{AggregateSignature, Signature}; + +use crate::test_utils::TestRandom; impl TestRandom for AggregateSignature { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let signature = Signature::random_for_test(rng); let mut aggregate_signature = AggregateSignature::infinity(); aggregate_signature.add_assign(&signature); diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index e335ac7fe8..3bc0d37c62 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ 
b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -1,8 +1,10 @@ -use super::*; use smallvec::smallvec; +use ssz_types::{BitList, BitVector, typenum::Unsigned}; + +use crate::test_utils::TestRandom; impl TestRandom for BitList { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let initial_len = std::cmp::max(1, N::to_usize().div_ceil(8)); let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); @@ -23,7 +25,7 @@ impl TestRandom for BitList { } impl TestRandom for BitVector { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut raw_bytes = smallvec![0; std::cmp::max(1, N::to_usize().div_ceil(8))]; rng.fill_bytes(&mut raw_bytes); // If N isn't divisible by 8 diff --git a/consensus/types/src/test_utils/test_random/hash256.rs b/consensus/types/src/test_utils/test_random/hash256.rs index 21d443c0e2..4d7570fb55 100644 --- a/consensus/types/src/test_utils/test_random/hash256.rs +++ b/consensus/types/src/test_utils/test_random/hash256.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Hash256, test_utils::TestRandom}; impl TestRandom for Hash256 { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = vec![0; 32]; rng.fill_bytes(&mut key_bytes); Hash256::from_slice(&key_bytes[..]) diff --git a/consensus/types/src/test_utils/test_random/kzg_commitment.rs b/consensus/types/src/test_utils/test_random/kzg_commitment.rs index a4030f2b6a..31e316a198 100644 --- a/consensus/types/src/test_utils/test_random/kzg_commitment.rs +++ b/consensus/types/src/test_utils/test_random/kzg_commitment.rs @@ -1,4 +1,6 @@ -use super::*; +use kzg::KzgCommitment; + +use crate::test_utils::TestRandom; impl TestRandom for KzgCommitment { fn random_for_test(rng: &mut impl rand::RngCore) -> Self { diff --git 
a/consensus/types/src/test_utils/test_random/kzg_proof.rs b/consensus/types/src/test_utils/test_random/kzg_proof.rs index 7e771ca566..4465d5ab39 100644 --- a/consensus/types/src/test_utils/test_random/kzg_proof.rs +++ b/consensus/types/src/test_utils/test_random/kzg_proof.rs @@ -1,8 +1,9 @@ -use super::*; -use kzg::BYTES_PER_COMMITMENT; +use kzg::{BYTES_PER_COMMITMENT, KzgProof}; + +use crate::test_utils::TestRandom; impl TestRandom for KzgProof { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut bytes = [0; BYTES_PER_COMMITMENT]; rng.fill_bytes(&mut bytes); Self(bytes) diff --git a/consensus/types/src/test_utils/test_random/mod.rs b/consensus/types/src/test_utils/test_random/mod.rs new file mode 100644 index 0000000000..41812593fa --- /dev/null +++ b/consensus/types/src/test_utils/test_random/mod.rs @@ -0,0 +1,15 @@ +mod address; +mod aggregate_signature; +mod bitfield; +mod hash256; +mod kzg_commitment; +mod kzg_proof; +mod public_key; +mod public_key_bytes; +mod secret_key; +mod signature; +mod signature_bytes; +mod test_random; +mod uint256; + +pub use test_random::{TestRandom, test_random_instance}; diff --git a/consensus/types/src/test_utils/test_random/public_key.rs b/consensus/types/src/test_utils/test_random/public_key.rs index d33e9ac704..9d287c23d7 100644 --- a/consensus/types/src/test_utils/test_random/public_key.rs +++ b/consensus/types/src/test_utils/test_random/public_key.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::{PublicKey, SecretKey}; + +use crate::test_utils::TestRandom; impl TestRandom for PublicKey { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { SecretKey::random_for_test(rng).public_key() } } diff --git a/consensus/types/src/test_utils/test_random/public_key_bytes.rs b/consensus/types/src/test_utils/test_random/public_key_bytes.rs index 6e5cafc4f0..587c3baf8f 100644 --- 
a/consensus/types/src/test_utils/test_random/public_key_bytes.rs +++ b/consensus/types/src/test_utils/test_random/public_key_bytes.rs @@ -1,9 +1,9 @@ -use bls::PUBLIC_KEY_BYTES_LEN; +use bls::{PUBLIC_KEY_BYTES_LEN, PublicKey, PublicKeyBytes}; -use super::*; +use crate::test_utils::TestRandom; impl TestRandom for PublicKeyBytes { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { //50-50 chance for signature to be "valid" or invalid if bool::random_for_test(rng) { //valid signature diff --git a/consensus/types/src/test_utils/test_random/secret_key.rs b/consensus/types/src/test_utils/test_random/secret_key.rs index da1614aa24..a8295d968a 100644 --- a/consensus/types/src/test_utils/test_random/secret_key.rs +++ b/consensus/types/src/test_utils/test_random/secret_key.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::SecretKey; + +use crate::test_utils::TestRandom; impl TestRandom for SecretKey { - fn random_for_test(_rng: &mut impl RngCore) -> Self { + fn random_for_test(_rng: &mut impl rand::RngCore) -> Self { // TODO: Not deterministic generation. Using `SecretKey::deserialize` results in // `BlstError(BLST_BAD_ENCODING)`, need to debug with blst source on what encoding expects. SecretKey::random() diff --git a/consensus/types/src/test_utils/test_random/signature.rs b/consensus/types/src/test_utils/test_random/signature.rs index 8bc0d71110..006aba9650 100644 --- a/consensus/types/src/test_utils/test_random/signature.rs +++ b/consensus/types/src/test_utils/test_random/signature.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::Signature; + +use crate::test_utils::TestRandom; impl TestRandom for Signature { - fn random_for_test(_rng: &mut impl RngCore) -> Self { + fn random_for_test(_rng: &mut impl rand::RngCore) -> Self { // TODO: `SecretKey::random_for_test` does not return a deterministic signature. 
Since this // signature will not pass verification we could just return the generator point or the // generator point multiplied by a random scalar if we want disctint signatures. diff --git a/consensus/types/src/test_utils/test_random/signature_bytes.rs b/consensus/types/src/test_utils/test_random/signature_bytes.rs index 2117a48232..6992e57467 100644 --- a/consensus/types/src/test_utils/test_random/signature_bytes.rs +++ b/consensus/types/src/test_utils/test_random/signature_bytes.rs @@ -1,9 +1,9 @@ -use bls::SIGNATURE_BYTES_LEN; +use bls::{SIGNATURE_BYTES_LEN, Signature, SignatureBytes}; -use super::*; +use crate::test_utils::TestRandom; impl TestRandom for SignatureBytes { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { //50-50 chance for signature to be "valid" or invalid if bool::random_for_test(rng) { //valid signature diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random/test_random.rs similarity index 90% rename from consensus/types/src/test_utils/test_random.rs rename to consensus/types/src/test_utils/test_random/test_random.rs index 7c8f86e14d..f31be97c03 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random/test_random.rs @@ -1,23 +1,9 @@ -use crate::*; -use rand::RngCore; -use rand::SeedableRng; +use std::{marker::PhantomData, sync::Arc}; + +use rand::{RngCore, SeedableRng}; use rand_xorshift::XorShiftRng; use smallvec::{SmallVec, smallvec}; -use std::marker::PhantomData; -use std::sync::Arc; - -mod address; -mod aggregate_signature; -mod bitfield; -mod hash256; -mod kzg_commitment; -mod kzg_proof; -mod public_key; -mod public_key_bytes; -mod secret_key; -mod signature; -mod signature_bytes; -mod uint256; +use ssz_types::{VariableList, typenum::Unsigned}; pub fn test_random_instance() -> T { let mut rng = XorShiftRng::from_seed([0x42; 16]); diff --git 
a/consensus/types/src/test_utils/test_random/uint256.rs b/consensus/types/src/test_utils/test_random/uint256.rs index 30077f0e0f..eccf476595 100644 --- a/consensus/types/src/test_utils/test_random/uint256.rs +++ b/consensus/types/src/test_utils/test_random/uint256.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Uint256, test_utils::TestRandom}; impl TestRandom for Uint256 { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = [0; 32]; rng.fill_bytes(&mut key_bytes); Self::from_le_slice(&key_bytes[..]) diff --git a/consensus/types/src/validator/mod.rs b/consensus/types/src/validator/mod.rs new file mode 100644 index 0000000000..8a67407821 --- /dev/null +++ b/consensus/types/src/validator/mod.rs @@ -0,0 +1,9 @@ +mod proposer_preparation_data; +mod validator; +mod validator_registration_data; +mod validator_subscription; + +pub use proposer_preparation_data::ProposerPreparationData; +pub use validator::{Validator, is_compounding_withdrawal_credential}; +pub use validator_registration_data::{SignedValidatorRegistrationData, ValidatorRegistrationData}; +pub use validator_subscription::ValidatorSubscription; diff --git a/consensus/types/src/proposer_preparation_data.rs b/consensus/types/src/validator/proposer_preparation_data.rs similarity index 95% rename from consensus/types/src/proposer_preparation_data.rs rename to consensus/types/src/validator/proposer_preparation_data.rs index 477fb3b9d1..8ef675de4f 100644 --- a/consensus/types/src/proposer_preparation_data.rs +++ b/consensus/types/src/validator/proposer_preparation_data.rs @@ -1,6 +1,7 @@ -use crate::*; use serde::{Deserialize, Serialize}; +use crate::core::Address; + /// A proposer preparation, created when a validator prepares the beacon node for potential proposers /// by supplying information required when proposing blocks for the given validators. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator/validator.rs similarity index 97% rename from consensus/types/src/validator.rs rename to consensus/types/src/validator/validator.rs index dec8bba627..7898ab9073 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator/validator.rs @@ -1,13 +1,19 @@ -use crate::context_deserialize; -use crate::{ - Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, ForkName, - Hash256, PublicKeyBytes, test_utils::TestRandom, -}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::Checkpoint, + core::{Address, ChainSpec, Epoch, EthSpec, Hash256}, + fork::ForkName, + state::BeaconState, + test_utils::TestRandom, +}; + /// Information about a `BeaconChain` validator. /// /// Spec v0.12.1 diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator/validator_registration_data.rs similarity index 93% rename from consensus/types/src/validator_registration_data.rs rename to consensus/types/src/validator/validator_registration_data.rs index 345771074c..a0a1df7dc5 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator/validator_registration_data.rs @@ -1,8 +1,10 @@ -use crate::*; +use bls::{PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use tree_hash_derive::TreeHash; +use crate::core::{Address, ChainSpec, SignedRoot}; + /// Validator registration, for use in interacting with servers implementing the builder API. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] pub struct SignedValidatorRegistrationData { diff --git a/consensus/types/src/validator_subscription.rs b/consensus/types/src/validator/validator_subscription.rs similarity index 93% rename from consensus/types/src/validator_subscription.rs rename to consensus/types/src/validator/validator_subscription.rs index 62932638ec..92fb200e10 100644 --- a/consensus/types/src/validator_subscription.rs +++ b/consensus/types/src/validator/validator_subscription.rs @@ -1,7 +1,8 @@ -use crate::*; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use crate::{attestation::CommitteeIndex, core::Slot}; + /// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation /// duties. #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, Eq, PartialOrd, Ord)] diff --git a/consensus/types/src/withdrawal/mod.rs b/consensus/types/src/withdrawal/mod.rs new file mode 100644 index 0000000000..bac80d00be --- /dev/null +++ b/consensus/types/src/withdrawal/mod.rs @@ -0,0 +1,9 @@ +mod pending_partial_withdrawal; +mod withdrawal; +mod withdrawal_credentials; +mod withdrawal_request; + +pub use pending_partial_withdrawal::PendingPartialWithdrawal; +pub use withdrawal::{Withdrawal, Withdrawals}; +pub use withdrawal_credentials::WithdrawalCredentials; +pub use withdrawal_request::WithdrawalRequest; diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/withdrawal/pending_partial_withdrawal.rs similarity index 85% rename from consensus/types/src/pending_partial_withdrawal.rs rename to consensus/types/src/withdrawal/pending_partial_withdrawal.rs index e9b10f79b5..cd866369a4 100644 --- a/consensus/types/src/pending_partial_withdrawal.rs +++ b/consensus/types/src/withdrawal/pending_partial_withdrawal.rs @@ -1,11 +1,11 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName}; 
+use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal/withdrawal.rs similarity index 73% rename from consensus/types/src/withdrawal.rs rename to consensus/types/src/withdrawal/withdrawal.rs index ef4a1f285d..d75bd4f501 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal/withdrawal.rs @@ -1,10 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, @@ -21,6 +27,8 @@ pub struct Withdrawal { pub amount: u64, } +pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/withdrawal_credentials.rs b/consensus/types/src/withdrawal/withdrawal_credentials.rs similarity index 91% rename from consensus/types/src/withdrawal_credentials.rs rename to consensus/types/src/withdrawal/withdrawal_credentials.rs index 52d51ed559..b732222ca1 100644 --- a/consensus/types/src/withdrawal_credentials.rs +++ b/consensus/types/src/withdrawal/withdrawal_credentials.rs @@ -1,5 +1,6 @@ -use crate::*; -use bls::get_withdrawal_credentials; +use bls::{PublicKey, 
get_withdrawal_credentials}; + +use crate::core::{Address, ChainSpec, Hash256}; pub struct WithdrawalCredentials(Hash256); @@ -27,7 +28,7 @@ impl From for Hash256 { #[cfg(test)] mod test { use super::*; - use crate::test_utils::generate_deterministic_keypair; + use crate::{EthSpec, MainnetEthSpec, test_utils::generate_deterministic_keypair}; use std::str::FromStr; #[test] diff --git a/consensus/types/src/withdrawal_request.rs b/consensus/types/src/withdrawal/withdrawal_request.rs similarity index 87% rename from consensus/types/src/withdrawal_request.rs rename to consensus/types/src/withdrawal/withdrawal_request.rs index c08921a68c..98a40016f9 100644 --- a/consensus/types/src/withdrawal_request.rs +++ b/consensus/types/src/withdrawal/withdrawal_request.rs @@ -1,12 +1,13 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{Address, ForkName, PublicKeyBytes}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Address, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/tests/committee_cache.rs similarity index 97% rename from consensus/types/src/beacon_state/committee_cache/tests.rs rename to consensus/types/tests/committee_cache.rs index 1d2ca4ccdb..751ef05d29 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/tests/committee_cache.rs @@ -1,9 +1,14 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use beacon_chain::types::*; use std::sync::LazyLock; + +use 
beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; +use milhouse::Vector; use swap_or_not_shuffle::shuffle_list; +use types::*; + +use crate::test_utils::generate_deterministic_keypairs; pub const VALIDATOR_COUNT: usize = 16; diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/tests/state.rs similarity index 97% rename from consensus/types/src/beacon_state/tests.rs rename to consensus/types/tests/state.rs index e5b05a4a5b..63ab3b8084 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/tests/state.rs @@ -1,15 +1,17 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use beacon_chain::types::{ - BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, ChainSpec, Domain, Epoch, - EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, - Slot, Vector, test_utils::TestRandom, -}; -use ssz::Encode; use std::ops::Mul; use std::sync::LazyLock; + +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; +use milhouse::Vector; +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use ssz::Encode; use swap_or_not_shuffle::compute_shuffled_index; +use types::test_utils::{TestRandom, generate_deterministic_keypairs}; +use types::*; pub const MAX_VALIDATOR_COUNT: usize = 129; pub const SLOT_OFFSET: Slot = Slot::new(1); From 22060ca17bc9aa176833fb675f924f1836d6d0ce Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 5 Dec 2025 14:15:08 +1100 Subject: [PATCH 11/26] Replace wild card (*) imports with explicit imports, and export `indexed_payload_attestation`. 
--- .../types/src/attestation/indexed_payload_attestation.rs | 4 +++- consensus/types/src/attestation/mod.rs | 2 ++ consensus/types/src/attestation/payload_attestation.rs | 4 +++- consensus/types/src/attestation/payload_attestation_data.rs | 2 +- consensus/types/src/builder/builder_pending_payment.rs | 2 +- consensus/types/src/builder/builder_pending_withdrawal.rs | 2 +- consensus/types/src/execution/execution_payload_bid.rs | 5 ++++- consensus/types/src/execution/execution_payload_envelope.rs | 6 ++++-- 8 files changed, 19 insertions(+), 8 deletions(-) diff --git a/consensus/types/src/attestation/indexed_payload_attestation.rs b/consensus/types/src/attestation/indexed_payload_attestation.rs index ddc2d00185..82a47651d0 100644 --- a/consensus/types/src/attestation/indexed_payload_attestation.rs +++ b/consensus/types/src/attestation/indexed_payload_attestation.rs @@ -1,8 +1,10 @@ use crate::test_utils::TestRandom; -use crate::*; +use crate::{EthSpec, ForkName, PayloadAttestationData, context_deserialize}; +use bls::AggregateSignature; use core::slice::Iter; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation/mod.rs b/consensus/types/src/attestation/mod.rs index f9cfdb5c65..586d99bd90 100644 --- a/consensus/types/src/attestation/mod.rs +++ b/consensus/types/src/attestation/mod.rs @@ -5,6 +5,7 @@ mod attestation_duty; mod beacon_committee; mod checkpoint; mod indexed_attestation; +mod indexed_payload_attestation; mod participation_flags; mod payload_attestation; mod payload_attestation_data; @@ -29,6 +30,7 @@ pub use checkpoint::Checkpoint; pub use indexed_attestation::{ IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, }; +pub use indexed_payload_attestation::IndexedPayloadAttestation; pub use participation_flags::ParticipationFlags; pub use 
payload_attestation::PayloadAttestation; pub use payload_attestation_data::PayloadAttestationData; diff --git a/consensus/types/src/attestation/payload_attestation.rs b/consensus/types/src/attestation/payload_attestation.rs index fa1bfc0293..b0fdd86891 100644 --- a/consensus/types/src/attestation/payload_attestation.rs +++ b/consensus/types/src/attestation/payload_attestation.rs @@ -1,8 +1,10 @@ use crate::attestation::payload_attestation_data::PayloadAttestationData; use crate::test_utils::TestRandom; -use crate::*; +use crate::{EthSpec, ForkName, context_deserialize}; +use bls::AggregateSignature; use educe::Educe; use serde::{Deserialize, Serialize}; +use ssz::BitList; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation/payload_attestation_data.rs b/consensus/types/src/attestation/payload_attestation_data.rs index 01e9a1e8f8..46ae5bf221 100644 --- a/consensus/types/src/attestation/payload_attestation_data.rs +++ b/consensus/types/src/attestation/payload_attestation_data.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::*; +use crate::{ForkName, Hash256, SignedRoot, Slot, context_deserialize}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/builder/builder_pending_payment.rs b/consensus/types/src/builder/builder_pending_payment.rs index 0beb53ac29..7241419ec9 100644 --- a/consensus/types/src/builder/builder_pending_payment.rs +++ b/consensus/types/src/builder/builder_pending_payment.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::*; +use crate::{BuilderPendingWithdrawal, ForkName, context_deserialize}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/builder/builder_pending_withdrawal.rs b/consensus/types/src/builder/builder_pending_withdrawal.rs index 
951b9cad58..9c82d5a64a 100644 --- a/consensus/types/src/builder/builder_pending_withdrawal.rs +++ b/consensus/types/src/builder/builder_pending_withdrawal.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::*; +use crate::{Address, Epoch, ForkName, context_deserialize}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; diff --git a/consensus/types/src/execution/execution_payload_bid.rs b/consensus/types/src/execution/execution_payload_bid.rs index bb8c26b585..ed4712a02d 100644 --- a/consensus/types/src/execution/execution_payload_bid.rs +++ b/consensus/types/src/execution/execution_payload_bid.rs @@ -1,4 +1,7 @@ -use crate::{test_utils::TestRandom, *}; +use crate::test_utils::TestRandom; +use crate::{ + Address, ExecutionBlockHash, ForkName, Hash256, SignedRoot, Slot, context_deserialize, +}; use educe::Educe; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/execution/execution_payload_envelope.rs b/consensus/types/src/execution/execution_payload_envelope.rs index c0fa3babb5..9999713515 100644 --- a/consensus/types/src/execution/execution_payload_envelope.rs +++ b/consensus/types/src/execution/execution_payload_envelope.rs @@ -1,6 +1,8 @@ use crate::test_utils::TestRandom; -use crate::*; -use beacon_block_body::KzgCommitments; +use crate::{ + EthSpec, ExecutionPayloadGloas, ExecutionRequests, ForkName, Hash256, KzgCommitments, + SignedRoot, Slot, context_deserialize, +}; use educe::Educe; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; From f4b184b378175c00ec887e47870d5561b1fe6559 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 5 Dec 2025 14:35:53 +1100 Subject: [PATCH 12/26] Fix build --- consensus/types/src/attestation/indexed_payload_attestation.rs | 1 + consensus/types/src/attestation/payload_attestation.rs | 1 + 2 files changed, 2 insertions(+) diff --git 
a/consensus/types/src/attestation/indexed_payload_attestation.rs b/consensus/types/src/attestation/indexed_payload_attestation.rs index 82a47651d0..d75801c111 100644 --- a/consensus/types/src/attestation/indexed_payload_attestation.rs +++ b/consensus/types/src/attestation/indexed_payload_attestation.rs @@ -29,6 +29,7 @@ impl IndexedPayloadAttestation { #[cfg(test)] mod tests { use super::*; + use crate::MainnetEthSpec; ssz_and_tree_hash_tests!(IndexedPayloadAttestation); } diff --git a/consensus/types/src/attestation/payload_attestation.rs b/consensus/types/src/attestation/payload_attestation.rs index b0fdd86891..746aa46c52 100644 --- a/consensus/types/src/attestation/payload_attestation.rs +++ b/consensus/types/src/attestation/payload_attestation.rs @@ -24,6 +24,7 @@ pub struct PayloadAttestation { #[cfg(test)] mod payload_attestation_tests { use super::*; + use crate::MinimalEthSpec; ssz_and_tree_hash_tests!(PayloadAttestation); } From e27f31648fd4d4f3504c20f1fcfba55fb84b3ac5 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 5 Dec 2025 21:08:38 +1100 Subject: [PATCH 13/26] Move validator http endpoints to a separate module (#8536) Continuation of: * #8529 Moving `/validator` endpoints out of `http_api` to a separate module. This should improve code maintainability, incremental compilation time and rust analyzer performance. This is a tedious but straightforward change, so we're going with a pair & insta-merge approach to avoid painful & slow async review. @michaelsproul and I paired on the first commit - I believe we are almost done, will pair with @pawanjay176 tomorrow to wrap it up and merge tomorrow. 
(cc @macladson ) Co-Authored-By: Jimmy Chen --- beacon_node/http_api/src/lib.rs | 1104 ++++--------------- beacon_node/http_api/src/publish_blocks.rs | 11 +- beacon_node/http_api/src/sync_committees.rs | 2 +- beacon_node/http_api/src/utils.rs | 86 ++ beacon_node/http_api/src/validator.rs | 22 - beacon_node/http_api/src/validator/mod.rs | 971 ++++++++++++++++ 6 files changed, 1265 insertions(+), 931 deletions(-) delete mode 100644 beacon_node/http_api/src/validator.rs create mode 100644 beacon_node/http_api/src/validator/mod.rs diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ccd0698161..3801933855 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -37,13 +37,14 @@ mod validators; mod version; use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; -use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; +use crate::utils::{AnyVersionFilter, EthV1Filter}; +use crate::validator::post_validator_liveness_epoch; +use crate::validator::*; use crate::version::beacon_response; use beacon::states; use beacon_chain::{ - AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - WhenSlotSkipped, attestation_verification::VerifiedAttestation, - observed_operations::ObservationOutcome, validator_monitor::timestamp_now, + BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, + observed_operations::ObservationOutcome, }; use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; @@ -51,18 +52,20 @@ use builder_states::get_next_withdrawals; use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::StatusCode; +use eth2::lighthouse::sync_state::SyncState; use eth2::types::{ self as api_types, BroadcastValidation, ContextDeserialize, EndpointVersion, ForkChoice, - ForkChoiceExtraData, ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, - StateId as CoreStateId, ValidatorId, ValidatorStatus, + 
ForkChoiceExtraData, ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, ValidatorId, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; -use lighthouse_network::rpc::methods::MetaData; -use lighthouse_network::{Enr, NetworkGlobals, PeerId, PubsubMessage, types::SyncState}; +use lighthouse_network::Enr; +use lighthouse_network::NetworkGlobals; +use lighthouse_network::PeerId; +use lighthouse_network::PubsubMessage; use lighthouse_version::version_with_platform; use logging::{SSELoggingComponents, crit}; -use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use network::{NetworkMessage, NetworkSenders}; use network_utils::enr_ext::EnrExt; use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; @@ -83,24 +86,19 @@ use std::sync::Arc; use sysinfo::{System, SystemExt}; use system_health::{observe_nat, observe_system_health_bn}; use task_spawner::{Priority, TaskSpawner}; -use tokio::sync::{ - mpsc::{Sender, UnboundedSender}, - oneshot, -}; +use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{ StreamExt, wrappers::{BroadcastStream, errors::BroadcastStreamRecvError}, }; -use tracing::{debug, error, info, warn}; +use tracing::{debug, info, warn}; use types::{ - Attestation, AttestationData, AttesterSlashing, BeaconStateError, ChainSpec, Checkpoint, - ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, ProposerPreparationData, ProposerSlashing, - SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, - SingleAttestation, Slot, SyncCommitteeMessage, SyncContributionData, + Attestation, AttestationData, AttesterSlashing, BeaconStateError, Checkpoint, ConfigAndPreset, + Epoch, EthSpec, ForkName, Hash256, ProposerSlashing, SignedBlindedBeaconBlock, + SignedBlsToExecutionChange, SignedVoluntaryExit, SingleAttestation, Slot, SyncCommitteeMessage, }; 
use version::{ - ResponseIncludesVersion, V1, V2, V3, add_consensus_version_header, add_ssz_content_type_header, + ResponseIncludesVersion, V1, V2, add_consensus_version_header, add_ssz_content_type_header, execution_optimistic_finalized_beacon_response, inconsistent_fork_rejection, unsupported_version_rejection, }; @@ -360,16 +358,18 @@ pub fn serve( } // Create a filter that extracts the endpoint version. - let any_version = warp::path(API_PREFIX).and(warp::path::param::().or_else( - |_| async move { - Err(warp_utils::reject::custom_bad_request( - "Invalid version identifier".to_string(), - )) - }, - )); + let any_version = warp::path(API_PREFIX) + .and( + warp::path::param::().or_else(|_| async move { + Err(warp_utils::reject::custom_bad_request( + "Invalid version identifier".to_string(), + )) + }), + ) + .boxed(); // Filter that enforces a single endpoint version and then discards the `EndpointVersion`. - let single_version = |reqd: EndpointVersion| { + fn single_version(any_version: AnyVersionFilter, reqd: EndpointVersion) -> EthV1Filter { any_version .and_then(move |version| async move { if version == reqd { @@ -379,10 +379,11 @@ pub fn serve( } }) .untuple_one() - }; + .boxed() + } - let eth_v1 = single_version(V1); - let eth_v2 = single_version(V2); + let eth_v1 = single_version(any_version.clone(), V1); + let eth_v2 = single_version(any_version.clone(), V2); // Create a `warp` filter that provides access to the network globals. let inner_network_globals = ctx.network_globals.clone(); @@ -403,34 +404,34 @@ pub fn serve( // Create a `warp` filter that provides access to the beacon chain. 
let inner_ctx = ctx.clone(); - let chain_filter = - warp::any() - .map(move || inner_ctx.chain.clone()) - .and_then(|chain| async move { - match chain { - Some(chain) => Ok(chain), - None => Err(warp_utils::reject::custom_not_found( - "Beacon chain genesis has not yet been observed.".to_string(), - )), - } - }); + let chain_filter = warp::any() + .map(move || inner_ctx.chain.clone()) + .and_then(|chain| async move { + match chain { + Some(chain) => Ok(chain), + None => Err(warp_utils::reject::custom_not_found( + "Beacon chain genesis has not yet been observed.".to_string(), + )), + } + }) + .boxed(); // Create a `warp` filter that provides access to the network sender channel. let network_tx = ctx .network_senders .as_ref() .map(|senders| senders.network_send()); - let network_tx_filter = - warp::any() - .map(move || network_tx.clone()) - .and_then(|network_tx| async move { - match network_tx { - Some(network_tx) => Ok(network_tx), - None => Err(warp_utils::reject::custom_not_found( - "The networking stack has not yet started (network_tx).".to_string(), - )), - } - }); + let network_tx_filter = warp::any() + .map(move || network_tx.clone()) + .and_then(|network_tx| async move { + match network_tx { + Some(network_tx) => Ok(network_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started (network_tx).".to_string(), + )), + } + }) + .boxed(); // Create a `warp` filter that provides access to the network attestation subscription channel. let validator_subscriptions_tx = ctx @@ -447,7 +448,8 @@ pub fn serve( .to_string(), )), } - }); + }) + .boxed(); // Create a `warp` filter that rejects requests whilst the node is syncing. let not_while_syncing_filter = @@ -487,7 +489,8 @@ pub fn serve( SyncState::Stalled => Ok(()), } }, - ); + ) + .boxed(); // Create a `warp` filter that returns 404s if the light client server is disabled. 
let light_client_server_filter = @@ -540,8 +543,9 @@ pub fn serve( .beacon_processor_send .clone() .filter(|_| config.enable_beacon_processor); - let task_spawner_filter = - warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone())); + let task_spawner_filter = warp::any() + .map(move || TaskSpawner::new(beacon_processor_send.clone())) + .boxed(); let duplicate_block_status_code = ctx.config.duplicate_block_status_code; @@ -553,6 +557,7 @@ pub fn serve( // GET beacon/genesis let get_beacon_genesis = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("genesis")) .and(warp::path::end()) @@ -576,6 +581,7 @@ pub fn serve( */ let beacon_states_path = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("states")) .and(warp::path::param::().or_else(|_| async { @@ -652,6 +658,7 @@ pub fn serve( // mechanism for arbitrary forwards block iteration, we only support iterating forwards along // the canonical chain. let get_beacon_headers = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("headers")) .and(warp::query::()) @@ -748,6 +755,7 @@ pub fn serve( // GET beacon/headers/{block_id} let get_beacon_headers_block_id = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("headers")) .and(warp::path::param::().or_else(|_| async { @@ -803,6 +811,7 @@ pub fn serve( // POST beacon/blocks let post_beacon_blocks = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("blocks")) .and(warp::path::end()) @@ -839,6 +848,7 @@ pub fn serve( ); let post_beacon_blocks_ssz = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("blocks")) .and(warp::path::end()) @@ -875,6 +885,7 @@ pub fn serve( ); let post_beacon_blocks_v2 = eth_v2 + .clone() .and(warp::path("beacon")) .and(warp::path("blocks")) .and(warp::query::()) @@ -914,6 +925,7 @@ pub fn serve( ); let post_beacon_blocks_v2_ssz = eth_v2 + .clone() .and(warp::path("beacon")) .and(warp::path("blocks")) .and(warp::query::()) @@ -957,6 +969,7 @@ pub fn serve( // POST 
beacon/blinded_blocks let post_beacon_blinded_blocks = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(warp::path::end()) @@ -984,6 +997,7 @@ pub fn serve( // POST beacon/blocks let post_beacon_blinded_blocks_ssz = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(warp::path::end()) @@ -1018,6 +1032,7 @@ pub fn serve( ); let post_beacon_blinded_blocks_v2 = eth_v2 + .clone() .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(warp::query::()) @@ -1057,6 +1072,7 @@ pub fn serve( ); let post_beacon_blinded_blocks_v2_ssz = eth_v2 + .clone() .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(warp::query::()) @@ -1099,6 +1115,7 @@ pub fn serve( }); let beacon_blocks_path_v1 = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("blocks")) .and(block_id_or_err) @@ -1106,6 +1123,7 @@ pub fn serve( .and(chain_filter.clone()); let beacon_blocks_path_any = any_version + .clone() .and(warp::path("beacon")) .and(warp::path("blocks")) .and(block_id_or_err) @@ -1231,6 +1249,7 @@ pub fn serve( // GET beacon/blinded_blocks/{block_id} let get_beacon_blinded_block = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(block_id_or_err) @@ -1283,6 +1302,7 @@ pub fn serve( // GET beacon/blob_sidecars/{block_id} let get_blob_sidecars = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("blob_sidecars")) .and(block_id_or_err) @@ -1334,6 +1354,7 @@ pub fn serve( // GET beacon/blobs/{block_id} let get_blobs = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("blobs")) .and(block_id_or_err) @@ -1383,18 +1404,21 @@ pub fn serve( */ let beacon_pool_path = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("pool")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()); let beacon_pool_path_v2 = eth_v2 + .clone() .and(warp::path("beacon")) .and(warp::path("pool")) .and(task_spawner_filter.clone()) 
.and(chain_filter.clone()); let beacon_pool_path_any = any_version + .clone() .and(warp::path("beacon")) .and(warp::path("pool")) .and(task_spawner_filter.clone()) @@ -1524,7 +1548,7 @@ pub fn serve( .register_api_attester_slashing(slashing.to_ref()); if let ObservationOutcome::New(slashing) = outcome { - publish_pubsub_message( + utils::publish_pubsub_message( &network_tx, PubsubMessage::AttesterSlashing(Box::new( slashing.clone().into_inner(), @@ -1615,7 +1639,7 @@ pub fn serve( .register_api_proposer_slashing(&slashing); if let ObservationOutcome::New(slashing) = outcome { - publish_pubsub_message( + utils::publish_pubsub_message( &network_tx, PubsubMessage::ProposerSlashing(Box::new( slashing.clone().into_inner(), @@ -1673,7 +1697,7 @@ pub fn serve( .register_api_voluntary_exit(&exit.message); if let ObservationOutcome::New(exit) = outcome { - publish_pubsub_message( + utils::publish_pubsub_message( &network_tx, PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), )?; @@ -1770,7 +1794,7 @@ pub fn serve( ReceivedPreCapella::Yes }; if matches!(received_pre_capella, ReceivedPreCapella::No) { - publish_pubsub_message( + utils::publish_pubsub_message( &network_tx, PubsubMessage::BlsToExecutionChange(Box::new( verified_address_change.as_inner().clone(), @@ -1824,6 +1848,7 @@ pub fn serve( ); let beacon_rewards_path = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("rewards")) .and(task_spawner_filter.clone()) @@ -1854,6 +1879,7 @@ pub fn serve( */ let builder_states_path = eth_v1 + .clone() .and(warp::path("builder")) .and(warp::path("states")) .and(chain_filter.clone()); @@ -1908,6 +1934,7 @@ pub fn serve( */ let beacon_light_client_path = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("light_client")) .and(light_client_server_filter) @@ -2060,6 +2087,7 @@ pub fn serve( */ let beacon_rewards_path = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("rewards")) .and(task_spawner_filter.clone()) @@ -2144,10 +2172,11 
@@ pub fn serve( * config */ - let config_path = eth_v1.and(warp::path("config")); + let config_path = eth_v1.clone().and(warp::path("config")); // GET config/fork_schedule let get_config_fork_schedule = config_path + .clone() .and(warp::path("fork_schedule")) .and(warp::path::end()) .and(task_spawner_filter.clone()) @@ -2166,6 +2195,7 @@ pub fn serve( // GET config/spec let get_config_spec = config_path + .clone() .and(warp::path("spec")) .and(warp::path::end()) .and(task_spawner_filter.clone()) @@ -2205,6 +2235,7 @@ pub fn serve( // GET debug/beacon/data_column_sidecars/{block_id} let get_debug_data_column_sidecars = eth_v1 + .clone() .and(warp::path("debug")) .and(warp::path("beacon")) .and(warp::path("data_column_sidecars")) @@ -2254,6 +2285,7 @@ pub fn serve( // GET debug/beacon/states/{state_id} let get_debug_beacon_states = any_version + .clone() .and(warp::path("debug")) .and(warp::path("beacon")) .and(warp::path("states")) @@ -2329,6 +2361,7 @@ pub fn serve( // GET debug/beacon/heads let get_debug_beacon_heads = any_version + .clone() .and(warp::path("debug")) .and(warp::path("beacon")) .and(warp::path("heads")) @@ -2369,6 +2402,7 @@ pub fn serve( // GET debug/fork_choice let get_debug_fork_choice = eth_v1 + .clone() .and(warp::path("debug")) .and(warp::path("fork_choice")) .and(warp::path::end()) @@ -2450,6 +2484,7 @@ pub fn serve( // GET node/identity let get_node_identity = eth_v1 + .clone() .and(warp::path("node")) .and(warp::path("identity")) .and(warp::path::end()) @@ -2469,7 +2504,7 @@ pub fn serve( enr, p2p_addresses, discovery_addresses, - metadata: from_meta_data::( + metadata: utils::from_meta_data::( &network_globals.local_metadata, &chain.spec, ), @@ -2480,6 +2515,7 @@ pub fn serve( // GET node/version let get_node_version = eth_v1 + .clone() .and(warp::path("node")) .and(warp::path("version")) .and(warp::path::end()) @@ -2493,6 +2529,7 @@ pub fn serve( // GET node/syncing let get_node_syncing = eth_v1 + .clone() .and(warp::path("node")) 
.and(warp::path("syncing")) .and(warp::path::end()) @@ -2554,6 +2591,7 @@ pub fn serve( // GET node/health let get_node_health = eth_v1 + .clone() .and(warp::path("node")) .and(warp::path("health")) .and(warp::path::end()) @@ -2602,6 +2640,7 @@ pub fn serve( // GET node/peers/{peer_id} let get_node_peers_by_id = eth_v1 + .clone() .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::param::()) @@ -2656,6 +2695,7 @@ pub fn serve( // GET node/peers let get_node_peers = eth_v1 + .clone() .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::end()) @@ -2720,6 +2760,7 @@ pub fn serve( // GET node/peer_count let get_node_peer_count = eth_v1 + .clone() .and(warp::path("node")) .and(warp::path("peer_count")) .and(warp::path::end()) @@ -2763,804 +2804,124 @@ pub fn serve( */ // GET validator/duties/proposer/{epoch} - let get_validator_duties_proposer = eth_v1 - .and(warp::path("validator")) - .and(warp::path("duties")) - .and(warp::path("proposer")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid epoch".to_string(), - )) - })) - .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |epoch: Epoch, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - proposer_duties::proposer_duties(epoch, &chain) - }) - }, - ); + let get_validator_duties_proposer = get_validator_duties_proposer( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // GET validator/blocks/{slot} - let get_validator_blocks = any_version - .and(warp::path("validator")) - .and(warp::path("blocks")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid slot".to_string(), - )) - })) - .and(warp::path::end()) 
- .and(warp::header::optional::("accept")) - .and(not_while_syncing_filter.clone()) - .and(warp::query::()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |endpoint_version: EndpointVersion, - slot: Slot, - accept_header: Option, - not_synced_filter: Result<(), Rejection>, - query: api_types::ValidatorBlocksQuery, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - debug!(?slot, "Block production request from HTTP API"); - - not_synced_filter?; - - if endpoint_version == V3 { - produce_block_v3(accept_header, chain, slot, query).await - } else { - produce_block_v2(accept_header, chain, slot, query).await - } - }) - }, - ); + let get_validator_blocks = get_validator_blocks( + any_version.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // GET validator/blinded_blocks/{slot} - let get_validator_blinded_blocks = eth_v1 - .and(warp::path("validator")) - .and(warp::path("blinded_blocks")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid slot".to_string(), - )) - })) - .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(warp::query::()) - .and(warp::header::optional::("accept")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |slot: Slot, - not_synced_filter: Result<(), Rejection>, - query: api_types::ValidatorBlocksQuery, - accept_header: Option, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - not_synced_filter?; - produce_blinded_block_v2(accept_header, chain, slot, query).await - }) - }, - ); + let get_validator_blinded_blocks = get_validator_blinded_blocks( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // GET validator/attestation_data?slot,committee_index - let 
get_validator_attestation_data = eth_v1 - .and(warp::path("validator")) - .and(warp::path("attestation_data")) - .and(warp::path::end()) - .and(warp::query::()) - .and(not_while_syncing_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |query: api_types::ValidatorAttestationDataQuery, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - - let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; - - // allow a tolerance of one slot to account for clock skew - if query.slot > current_slot + 1 { - return Err(warp_utils::reject::custom_bad_request(format!( - "request slot {} is more than one slot past the current slot {}", - query.slot, current_slot - ))); - } - - chain - .produce_unaggregated_attestation(query.slot, query.committee_index) - .map(|attestation| attestation.data().clone()) - .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::unhandled_error) - }) - }, - ); + let get_validator_attestation_data = get_validator_attestation_data( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // GET validator/aggregate_attestation?attestation_data_root,slot - let get_validator_aggregate_attestation = any_version - .and(warp::path("validator")) - .and(warp::path("aggregate_attestation")) - .and(warp::path::end()) - .and(warp::query::()) - .and(not_while_syncing_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |endpoint_version: EndpointVersion, - query: api_types::ValidatorAggregateAttestationQuery, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P0, move || { - not_synced_filter?; - crate::aggregate_attestation::get_aggregate_attestation( - query.slot, - 
&query.attestation_data_root, - query.committee_index, - endpoint_version, - chain, - ) - }) - }, - ); + let get_validator_aggregate_attestation = get_validator_aggregate_attestation( + any_version.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // POST validator/duties/attester/{epoch} - let post_validator_duties_attester = eth_v1 - .and(warp::path("validator")) - .and(warp::path("duties")) - .and(warp::path("attester")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid epoch".to_string(), - )) - })) - .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(warp_utils::json::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |epoch: Epoch, - not_synced_filter: Result<(), Rejection>, - indices: api_types::ValidatorIndexData, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - attester_duties::attester_duties(epoch, &indices.0, &chain) - }) - }, - ); + let post_validator_duties_attester = post_validator_duties_attester( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // POST validator/duties/sync/{epoch} - let post_validator_duties_sync = eth_v1 - .and(warp::path("validator")) - .and(warp::path("duties")) - .and(warp::path("sync")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid epoch".to_string(), - )) - })) - .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(warp_utils::json::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |epoch: Epoch, - not_synced_filter: Result<(), Rejection>, - indices: api_types::ValidatorIndexData, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - 
not_synced_filter?; - sync_committees::sync_committee_duties(epoch, &indices.0, &chain) - }) - }, - ); + let post_validator_duties_sync = post_validator_duties_sync( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // GET validator/sync_committee_contribution - let get_validator_sync_committee_contribution = eth_v1 - .and(warp::path("validator")) - .and(warp::path("sync_committee_contribution")) - .and(warp::path::end()) - .and(warp::query::()) - .and(not_while_syncing_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |sync_committee_data: SyncContributionData, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - chain - .get_aggregated_sync_committee_contribution(&sync_committee_data) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "unable to fetch sync contribution: {:?}", - e - )) - })? - .map(api_types::GenericResponse::from) - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "no matching sync contribution found".to_string(), - ) - }) - }) - }, - ); + let get_validator_sync_committee_contribution = get_validator_sync_committee_contribution( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // POST validator/aggregate_and_proofs - let post_validator_aggregate_and_proofs = any_version - .and(warp::path("validator")) - .and(warp::path("aggregate_and_proofs")) - .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - // V1 and V2 are identical except V2 has a consensus version header in the request. 
- // We only require this header for SSZ deserialization, which isn't supported for - // this endpoint presently. - |_endpoint_version: EndpointVersion, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>, - aggregates: Vec>, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - let seen_timestamp = timestamp_now(); - let mut verified_aggregates = Vec::with_capacity(aggregates.len()); - let mut messages = Vec::with_capacity(aggregates.len()); - let mut failures = Vec::new(); + let post_validator_aggregate_and_proofs = post_validator_aggregate_and_proofs( + any_version.clone().clone(), + chain_filter.clone(), + network_tx_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - // Verify that all messages in the post are valid before processing further - for (index, aggregate) in aggregates.iter().enumerate() { - match chain.verify_aggregated_attestation_for_gossip(aggregate) { - Ok(verified_aggregate) => { - messages.push(PubsubMessage::AggregateAndProofAttestation(Box::new( - verified_aggregate.aggregate().clone(), - ))); - - // Notify the validator monitor. - chain - .validator_monitor - .read() - .register_api_aggregated_attestation( - seen_timestamp, - verified_aggregate.aggregate(), - verified_aggregate.indexed_attestation(), - &chain.slot_clock, - ); - - verified_aggregates.push((index, verified_aggregate)); - } - // If we already know the attestation, don't broadcast it or attempt to - // further verify it. Return success. - // - // It's reasonably likely that two different validators produce - // identical aggregates, especially if they're using the same beacon - // node. - Err(AttnError::AttestationSupersetKnown(_)) => continue, - // If we've already seen this aggregator produce an aggregate, just - // skip this one. - // - // We're likely to see this with VCs that use fallback BNs. 
The first - // BN might time-out *after* publishing the aggregate and then the - // second BN will indicate it's already seen the aggregate. - // - // There's no actual error for the user or the network since the - // aggregate has been successfully published by some other node. - Err(AttnError::AggregatorAlreadyKnown(_)) => continue, - Err(e) => { - error!( - error = ?e, - request_index = index, - aggregator_index = aggregate.message().aggregator_index(), - attestation_index = aggregate.message().aggregate().committee_index(), - attestation_slot = %aggregate.message().aggregate().data().slot, - "Failure verifying aggregate and proofs" - ); - failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e))); - } - } - } - - // Publish aggregate attestations to the libp2p network - if !messages.is_empty() { - publish_network_message(&network_tx, NetworkMessage::Publish { messages })?; - } - - // Import aggregate attestations - for (index, verified_aggregate) in verified_aggregates { - if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) { - error!( - error = ?e, - request_index = index, - aggregator_index = verified_aggregate.aggregate().message().aggregator_index(), - attestation_index = verified_aggregate.attestation().committee_index(), - attestation_slot = %verified_aggregate.attestation().data().slot, - "Failure applying verified aggregate attestation to fork choice" - ); - failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e))); - } - if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) { - warn!( - error = ?e, - request_index = index, - "Could not add verified aggregate attestation to the inclusion pool" - ); - failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e))); - } - } - - if !failures.is_empty() { - Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(), - failures, - )) - } else { - Ok(()) - } - }) - }, - ); - - let 
post_validator_contribution_and_proofs = eth_v1 - .and(warp::path("validator")) - .and(warp::path("contribution_and_proofs")) - .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>, - contributions: Vec>, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - sync_committees::process_signed_contribution_and_proofs( - contributions, - network_tx, - &chain, - )?; - Ok(api_types::GenericResponse::from(())) - }) - }, - ); + let post_validator_contribution_and_proofs = post_validator_contribution_and_proofs( + eth_v1.clone().clone(), + chain_filter.clone(), + network_tx_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // POST validator/beacon_committee_subscriptions - let post_validator_beacon_committee_subscriptions = eth_v1 - .and(warp::path("validator")) - .and(warp::path("beacon_committee_subscriptions")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(validator_subscription_tx_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |committee_subscriptions: Vec, - validator_subscription_tx: Sender, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let subscriptions: std::collections::BTreeSet<_> = committee_subscriptions - .iter() - .map(|subscription| { - chain - .validator_monitor - .write() - .auto_register_local_validator(subscription.validator_index); - api_types::ValidatorSubscription { - attestation_committee_index: subscription.committee_index, - slot: subscription.slot, - committee_count_at_slot: subscription.committees_at_slot, - is_aggregator: subscription.is_aggregator, - } - }) - .collect(); - - let message = 
- ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; - if let Err(e) = validator_subscription_tx.try_send(message) { - warn!( - info = "the host may be overloaded or resource-constrained", - error = ?e, - "Unable to process committee subscriptions" - ); - return Err(warp_utils::reject::custom_server_error( - "unable to queue subscription, host may be overloaded or shutting down" - .to_string(), - )); - } - Ok(()) - }) - }, + let post_validator_beacon_committee_subscriptions = + post_validator_beacon_committee_subscriptions( + eth_v1.clone().clone(), + chain_filter.clone(), + validator_subscription_tx_filter.clone(), + task_spawner_filter.clone(), ); // POST validator/prepare_beacon_proposer - let post_validator_prepare_beacon_proposer = eth_v1 - .and(warp::path("validator")) - .and(warp::path("prepare_beacon_proposer")) - .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(network_tx_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) - .then( - |not_synced_filter: Result<(), Rejection>, - network_tx: UnboundedSender>, - task_spawner: TaskSpawner, - chain: Arc>, - preparation_data: Vec| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - not_synced_filter?; - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::unhandled_error)?; - - let current_slot = chain - .slot_clock - .now_or_genesis() - .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::unhandled_error)?; - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - - debug!( - count = preparation_data.len(), - "Received proposer preparation data" - ); - - execution_layer - .update_proposer_preparation( - current_epoch, - preparation_data.iter().map(|data| (data, &None)), - ) - .await; - - chain - .prepare_beacon_proposer(current_slot) - .await - .map_err(|e| { - 
warp_utils::reject::custom_bad_request(format!( - "error updating proposer preparations: {:?}", - e - )) - })?; - - if chain.spec.is_peer_das_scheduled() { - let (finalized_beacon_state, _, _) = - StateId(CoreStateId::Finalized).state(&chain)?; - let validators_and_balances = preparation_data - .iter() - .filter_map(|preparation| { - if let Ok(effective_balance) = finalized_beacon_state - .get_effective_balance(preparation.validator_index as usize) - { - Some((preparation.validator_index as usize, effective_balance)) - } else { - None - } - }) - .collect::>(); - - let current_slot = - chain.slot().map_err(warp_utils::reject::unhandled_error)?; - if let Some(cgc_change) = chain - .data_availability_checker - .custody_context() - .register_validators(validators_and_balances, current_slot, &chain.spec) - { - chain.update_data_column_custody_info(Some( - cgc_change - .effective_epoch - .start_slot(T::EthSpec::slots_per_epoch()), - )); - - network_tx.send(NetworkMessage::CustodyCountChanged { - new_custody_group_count: cgc_change.new_custody_group_count, - sampling_count: cgc_change.sampling_count, - }).unwrap_or_else(|e| { - debug!(error = %e, "Could not send message to the network service. 
\ - Likely shutdown") - }); - } - } - - Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) - }) - }, - ); + let post_validator_prepare_beacon_proposer = post_validator_prepare_beacon_proposer( + eth_v1.clone().clone(), + chain_filter.clone(), + network_tx_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); // POST validator/register_validator - let post_validator_register_validator = eth_v1 - .and(warp::path("validator")) - .and(warp::path("register_validator")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - register_val_data: Vec| async { - let (tx, rx) = oneshot::channel(); - - let initial_result = task_spawner - .spawn_async_with_rejection_no_conversion(Priority::P0, async move { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::unhandled_error)?; - let current_slot = chain - .slot_clock - .now_or_genesis() - .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::unhandled_error)?; - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - - debug!( - count = register_val_data.len(), - "Received register validator request" - ); - - let head_snapshot = chain.head_snapshot(); - let spec = &chain.spec; - - let (preparation_data, filtered_registration_data): ( - Vec<(ProposerPreparationData, Option)>, - Vec, - ) = register_val_data - .into_iter() - .filter_map(|register_data| { - chain - .validator_index(®ister_data.message.pubkey) - .ok() - .flatten() - .and_then(|validator_index| { - let validator = head_snapshot - .beacon_state - .get_validator(validator_index) - .ok()?; - let validator_status = ValidatorStatus::from_validator( - validator, - current_epoch, - spec.far_future_epoch, - ) - .superstatus(); - let is_active_or_pending = - 
matches!(validator_status, ValidatorStatus::Pending) - || matches!( - validator_status, - ValidatorStatus::Active - ); - - // Filter out validators who are not 'active' or 'pending'. - is_active_or_pending.then_some({ - ( - ( - ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data - .message - .fee_recipient, - }, - Some(register_data.message.gas_limit), - ), - register_data, - ) - }) - }) - }) - .unzip(); - - // Update the prepare beacon proposer cache based on this request. - execution_layer - .update_proposer_preparation( - current_epoch, - preparation_data.iter().map(|(data, limit)| (data, limit)), - ) - .await; - - // Call prepare beacon proposer blocking with the latest update in order to make - // sure we have a local payload to fall back to in the event of the blinded block - // flow failing. - chain - .prepare_beacon_proposer(current_slot) - .await - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "error updating proposer preparations: {:?}", - e - )) - })?; - - info!( - count = filtered_registration_data.len(), - "Forwarding register validator request to connected builder" - ); - - // It's a waste of a `BeaconProcessor` worker to just - // wait on a response from the builder (especially since - // they have frequent timeouts). Spawn a new task and - // send the response back to our original HTTP request - // task via a channel. - let builder_future = async move { - let arc_builder = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::unhandled_error)? 
- .builder(); - let builder = arc_builder - .as_ref() - .ok_or(BeaconChainError::BuilderMissing) - .map_err(warp_utils::reject::unhandled_error)?; - builder - .post_builder_validators(&filtered_registration_data) - .await - .map(|resp| warp::reply::json(&resp).into_response()) - .map_err(|e| { - warn!( - num_registrations = filtered_registration_data.len(), - error = ?e, - "Relay error when registering validator(s)" - ); - // Forward the HTTP status code if we are able to, otherwise fall back - // to a server error. - if let eth2::Error::ServerMessage(message) = e { - if message.code == StatusCode::BAD_REQUEST.as_u16() { - return warp_utils::reject::custom_bad_request( - message.message, - ); - } else { - // According to the spec this response should only be a 400 or 500, - // so we fall back to a 500 here. - return warp_utils::reject::custom_server_error( - message.message, - ); - } - } - warp_utils::reject::custom_server_error(format!("{e:?}")) - }) - }; - tokio::task::spawn(async move { tx.send(builder_future.await) }); - - // Just send a generic 200 OK from this closure. We'll - // ignore the `Ok` variant and form a proper response - // from what is sent back down the channel. - Ok(warp::reply::reply().into_response()) - }) - .await; - - if initial_result.is_err() { - return convert_rejection(initial_result).await; - } - - // Await a response from the builder without blocking a - // `BeaconProcessor` worker. 
- convert_rejection(rx.await.unwrap_or_else(|_| { - Ok(warp::reply::with_status( - warp::reply::json(&"No response from channel"), - warp::http::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response()) - })) - .await - }, - ); + let post_validator_register_validator = post_validator_register_validator( + eth_v1.clone().clone(), + chain_filter.clone(), + task_spawner_filter.clone(), + ); // POST validator/sync_committee_subscriptions - let post_validator_sync_committee_subscriptions = eth_v1 - .and(warp::path("validator")) - .and(warp::path("sync_committee_subscriptions")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(validator_subscription_tx_filter) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |subscriptions: Vec, - validator_subscription_tx: Sender, - task_spawner: TaskSpawner, - chain: Arc>, - | { - task_spawner.blocking_json_task(Priority::P0, move || { - for subscription in subscriptions { - chain - .validator_monitor - .write() - .auto_register_local_validator(subscription.validator_index); - - let message = ValidatorSubscriptionMessage::SyncCommitteeSubscribe { - subscriptions: vec![subscription], - }; - if let Err(e) = validator_subscription_tx.try_send(message) { - warn!( - info = "the host may be overloaded or resource-constrained", - error = ?e, - "Unable to process sync subscriptions" - ); - return Err(warp_utils::reject::custom_server_error( - "unable to queue subscription, host may be overloaded or shutting down".to_string(), - )); - } - } - - Ok(()) - }) - }, - ); + let post_validator_sync_committee_subscriptions = post_validator_sync_committee_subscriptions( + eth_v1.clone().clone(), + chain_filter.clone(), + validator_subscription_tx_filter.clone(), + task_spawner_filter.clone(), + ); // POST validator/liveness/{epoch} - let post_validator_liveness_epoch = eth_v1 - .and(warp::path("validator")) - .and(warp::path("liveness")) - .and(warp::path::param::()) - .and(warp::path::end()) - 
.and(warp_utils::json::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |epoch: Epoch, - indices: api_types::ValidatorIndexData, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - // Ensure the request is for either the current, previous or next epoch. - let current_epoch = - chain.epoch().map_err(warp_utils::reject::unhandled_error)?; - let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); - let next_epoch = current_epoch.saturating_add(Epoch::new(1)); - - if epoch < prev_epoch || epoch > next_epoch { - return Err(warp_utils::reject::custom_bad_request(format!( - "request epoch {} is more than one epoch from the current epoch {}", - epoch, current_epoch - ))); - } - - let liveness: Vec = indices - .0 - .iter() - .cloned() - .map(|index| { - let is_live = chain.validator_seen_at_epoch(index as usize, epoch); - api_types::StandardLivenessResponseData { index, is_live } - }) - .collect(); - - Ok(api_types::GenericResponse::from(liveness)) - }) - }, - ); + let post_validator_liveness_epoch = post_validator_liveness_epoch( + eth_v1.clone().clone(), + chain_filter.clone(), + task_spawner_filter.clone(), + ); // POST lighthouse/finalize let post_lighthouse_finalize = warp::path("lighthouse") @@ -3632,7 +2993,10 @@ pub fn serve( ); network_globals.add_trusted_peer(enr.clone()); - publish_network_message(&network_tx, NetworkMessage::ConnectTrustedPeer(enr))?; + utils::publish_network_message( + &network_tx, + NetworkMessage::ConnectTrustedPeer(enr), + )?; Ok(()) }) @@ -3663,7 +3027,7 @@ pub fn serve( ); network_globals.remove_trusted_peer(enr.clone()); - publish_network_message( + utils::publish_network_message( &network_tx, NetworkMessage::DisconnectTrustedPeer(enr), )?; @@ -4115,6 +3479,7 @@ pub fn serve( ); let get_events = eth_v1 + .clone() .and(warp::path("events")) .and(warp::path::end()) .and(multi_key_query::()) @@ -4436,70 +3801,3 @@ pub fn serve( Ok(http_server) } - 
-fn from_meta_data( - meta_data: &RwLock>, - spec: &ChainSpec, -) -> api_types::MetaData { - let meta_data = meta_data.read(); - let format_hex = |bytes: &[u8]| format!("0x{}", hex::encode(bytes)); - - let seq_number = *meta_data.seq_number(); - let attnets = format_hex(&meta_data.attnets().clone().into_bytes()); - let syncnets = format_hex( - &meta_data - .syncnets() - .cloned() - .unwrap_or_default() - .into_bytes(), - ); - - if spec.is_peer_das_scheduled() { - api_types::MetaData::V3(api_types::MetaDataV3 { - seq_number, - attnets, - syncnets, - custody_group_count: meta_data.custody_group_count().cloned().unwrap_or_default(), - }) - } else { - api_types::MetaData::V2(api_types::MetaDataV2 { - seq_number, - attnets, - syncnets, - }) - } -} - -/// Publish a message to the libp2p pubsub network. -fn publish_pubsub_message( - network_tx: &UnboundedSender>, - message: PubsubMessage, -) -> Result<(), warp::Rejection> { - publish_network_message( - network_tx, - NetworkMessage::Publish { - messages: vec![message], - }, - ) -} - -/// Publish a message to the libp2p pubsub network. -fn publish_pubsub_messages( - network_tx: &UnboundedSender>, - messages: Vec>, -) -> Result<(), warp::Rejection> { - publish_network_message(network_tx, NetworkMessage::Publish { messages }) -} - -/// Publish a message to the libp2p network. 
-fn publish_network_message( - network_tx: &UnboundedSender>, - message: NetworkMessage, -) -> Result<(), warp::Rejection> { - network_tx.send(message).map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "unable to publish to network channel: {}", - e - )) - }) -} diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 9671a72da2..b54c071eb8 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -138,9 +138,10 @@ pub async fn publish_block>( "Signed block published to network via HTTP API" ); - crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())).map_err( - |_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)), - )?; + crate::utils::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())) + .map_err(|_| { + BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)) + })?; Ok(()) }; @@ -492,7 +493,7 @@ fn publish_blob_sidecars( blob: &GossipVerifiedBlob, ) -> Result<(), BlockError> { let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob()))); - crate::publish_pubsub_message(sender_clone, pubsub_message) + crate::utils::publish_pubsub_message(sender_clone, pubsub_message) .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) } @@ -525,7 +526,7 @@ fn publish_column_sidecars( PubsubMessage::DataColumnSidecar(Box::new((subnet, data_col))) }) .collect::>(); - crate::publish_pubsub_messages(sender_clone, pubsub_messages) + crate::utils::publish_pubsub_messages(sender_clone, pubsub_messages) .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) } diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index edda0e60a6..b9fa24ad6a 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ 
b/beacon_node/http_api/src/sync_committees.rs @@ -1,6 +1,6 @@ //! Handlers for sync committee endpoints. -use crate::publish_pubsub_message; +use crate::utils::publish_pubsub_message; use beacon_chain::sync_committee_verification::{ Error as SyncVerificationError, VerifiedSyncCommitteeMessage, }; diff --git a/beacon_node/http_api/src/utils.rs b/beacon_node/http_api/src/utils.rs index cf61fa481c..a89780ba24 100644 --- a/beacon_node/http_api/src/utils.rs +++ b/beacon_node/http_api/src/utils.rs @@ -1,3 +1,89 @@ +use crate::task_spawner::TaskSpawner; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::EndpointVersion; +use lighthouse_network::PubsubMessage; +use lighthouse_network::rpc::methods::MetaData; +use network::{NetworkMessage, ValidatorSubscriptionMessage}; +use parking_lot::RwLock; +use std::sync::Arc; +use tokio::sync::mpsc::{Sender, UnboundedSender}; +use types::{ChainSpec, EthSpec}; +use warp::Rejection; use warp::filters::BoxedFilter; pub type ResponseFilter = BoxedFilter<(warp::reply::Response,)>; +pub type AnyVersionFilter = BoxedFilter<(EndpointVersion,)>; +pub type EthV1Filter = BoxedFilter<()>; +pub type ChainFilter = BoxedFilter<(Arc>,)>; +pub type NotWhileSyncingFilter = BoxedFilter<(Result<(), Rejection>,)>; +pub type TaskSpawnerFilter = BoxedFilter<(TaskSpawner<::EthSpec>,)>; +pub type ValidatorSubscriptionTxFilter = BoxedFilter<(Sender,)>; +pub type NetworkTxFilter = + BoxedFilter<(UnboundedSender::EthSpec>>,)>; + +pub fn from_meta_data( + meta_data: &RwLock>, + spec: &ChainSpec, +) -> eth2::types::MetaData { + let meta_data = meta_data.read(); + let format_hex = |bytes: &[u8]| format!("0x{}", hex::encode(bytes)); + + let seq_number = *meta_data.seq_number(); + let attnets = format_hex(&meta_data.attnets().clone().into_bytes()); + let syncnets = format_hex( + &meta_data + .syncnets() + .cloned() + .unwrap_or_default() + .into_bytes(), + ); + + if spec.is_peer_das_scheduled() { + eth2::types::MetaData::V3(eth2::types::MetaDataV3 
{ + seq_number, + attnets, + syncnets, + custody_group_count: meta_data.custody_group_count().cloned().unwrap_or_default(), + }) + } else { + eth2::types::MetaData::V2(eth2::types::MetaDataV2 { + seq_number, + attnets, + syncnets, + }) + } +} + +/// Publish a message to the libp2p pubsub network. +pub fn publish_pubsub_message( + network_tx: &UnboundedSender>, + message: PubsubMessage, +) -> Result<(), warp::Rejection> { + publish_network_message( + network_tx, + NetworkMessage::Publish { + messages: vec![message], + }, + ) +} + +/// Publish a message to the libp2p pubsub network. +pub fn publish_pubsub_messages( + network_tx: &UnboundedSender>, + messages: Vec>, +) -> Result<(), warp::Rejection> { + publish_network_message(network_tx, NetworkMessage::Publish { messages }) +} + +/// Publish a message to the libp2p network. +pub fn publish_network_message( + network_tx: &UnboundedSender>, + message: NetworkMessage, +) -> Result<(), warp::Rejection> { + network_tx.send(message).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "unable to publish to network channel: {}", + e + )) + }) +} diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs deleted file mode 100644 index 25b0feb99e..0000000000 --- a/beacon_node/http_api/src/validator.rs +++ /dev/null @@ -1,22 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use types::{BeaconState, PublicKeyBytes}; - -/// Uses the `chain.validator_pubkey_cache` to resolve a pubkey to a validator -/// index and then ensures that the validator exists in the given `state`. -pub fn pubkey_to_validator_index( - chain: &BeaconChain, - state: &BeaconState, - pubkey: &PublicKeyBytes, -) -> Result, Box> { - chain - .validator_index(pubkey) - .map_err(Box::new)? 
- .filter(|&index| { - state - .validators() - .get(index) - .is_some_and(|v| v.pubkey == *pubkey) - }) - .map(Result::Ok) - .transpose() -} diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs new file mode 100644 index 0000000000..9cf1f1a33d --- /dev/null +++ b/beacon_node/http_api/src/validator/mod.rs @@ -0,0 +1,971 @@ +use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; +use crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::{ + AnyVersionFilter, ChainFilter, EthV1Filter, NetworkTxFilter, NotWhileSyncingFilter, + ResponseFilter, TaskSpawnerFilter, ValidatorSubscriptionTxFilter, publish_network_message, +}; +use crate::version::V3; +use crate::{StateId, attester_duties, proposer_duties, sync_committees}; +use beacon_chain::attestation_verification::VerifiedAttestation; +use beacon_chain::validator_monitor::timestamp_now; +use beacon_chain::{AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::StatusCode; +use eth2::types::{ + Accept, BeaconCommitteeSubscription, EndpointVersion, Failure, GenericResponse, + StandardLivenessResponseData, StateId as CoreStateId, ValidatorAggregateAttestationQuery, + ValidatorAttestationDataQuery, ValidatorBlocksQuery, ValidatorIndexData, ValidatorStatus, +}; +use lighthouse_network::PubsubMessage; +use network::{NetworkMessage, ValidatorSubscriptionMessage}; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::sync::mpsc::{Sender, UnboundedSender}; +use tokio::sync::oneshot; +use tracing::{debug, error, info, warn}; +use types::{ + BeaconState, Epoch, EthSpec, ProposerPreparationData, PublicKeyBytes, SignedAggregateAndProof, + SignedContributionAndProof, SignedValidatorRegistrationData, Slot, SyncContributionData, + ValidatorSubscription, +}; +use warp::{Filter, Rejection, Reply}; +use warp_utils::reject::convert_rejection; + +/// Uses the `chain.validator_pubkey_cache` to resolve a pubkey to a 
validator +/// index and then ensures that the validator exists in the given `state`. +pub fn pubkey_to_validator_index( + chain: &BeaconChain, + state: &BeaconState, + pubkey: &PublicKeyBytes, +) -> Result, Box> { + chain + .validator_index(pubkey) + .map_err(Box::new)? + .filter(|&index| { + state + .validators() + .get(index) + .is_some_and(|v| v.pubkey == *pubkey) + }) + .map(Result::Ok) + .transpose() +} + +// GET validator/sync_committee_contribution +pub fn get_validator_sync_committee_contribution( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("sync_committee_contribution")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |sync_committee_data: SyncContributionData, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + chain + .get_aggregated_sync_committee_contribution(&sync_committee_data) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch sync contribution: {:?}", + e + )) + })? 
+ .map(GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "no matching sync contribution found".to_string(), + ) + }) + }) + }, + ) + .boxed() +} + +// POST validator/duties/sync/{epoch} +pub fn post_validator_duties_sync( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("sync")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |epoch: Epoch, + not_synced_filter: Result<(), Rejection>, + indices: ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + sync_committees::sync_committee_duties(epoch, &indices.0, &chain) + }) + }, + ) + .boxed() +} + +// POST validator/duties/attester/{epoch} +pub fn post_validator_duties_attester( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("attester")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |epoch: Epoch, + not_synced_filter: Result<(), Rejection>, + indices: ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, 
move || { + not_synced_filter?; + attester_duties::attester_duties(epoch, &indices.0, &chain) + }) + }, + ) + .boxed() +} + +// GET validator/aggregate_attestation?attestation_data_root,slot +pub fn get_validator_aggregate_attestation( + any_version: AnyVersionFilter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + any_version + .and(warp::path("validator")) + .and(warp::path("aggregate_attestation")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |endpoint_version: EndpointVersion, + query: ValidatorAggregateAttestationQuery, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P0, move || { + not_synced_filter?; + crate::aggregate_attestation::get_aggregate_attestation( + query.slot, + &query.attestation_data_root, + query.committee_index, + endpoint_version, + chain, + ) + }) + }, + ) + .boxed() +} + +// GET validator/attestation_data?slot,committee_index +pub fn get_validator_attestation_data( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("attestation_data")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |query: ValidatorAttestationDataQuery, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + + let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; + + // allow a tolerance of one slot to account for clock skew + if 
query.slot > current_slot + 1 { + return Err(warp_utils::reject::custom_bad_request(format!( + "request slot {} is more than one slot past the current slot {}", + query.slot, current_slot + ))); + } + + chain + .produce_unaggregated_attestation(query.slot, query.committee_index) + .map(|attestation| attestation.data().clone()) + .map(GenericResponse::from) + .map_err(warp_utils::reject::unhandled_error) + }) + }, + ) + .boxed() +} + +// GET validator/blinded_blocks/{slot} +pub fn get_validator_blinded_blocks( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("blinded_blocks")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid slot".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp::query::()) + .and(warp::header::optional::("accept")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |slot: Slot, + not_synced_filter: Result<(), Rejection>, + query: ValidatorBlocksQuery, + accept_header: Option, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + not_synced_filter?; + produce_blinded_block_v2(accept_header, chain, slot, query).await + }) + }, + ) + .boxed() +} + +// GET validator/blocks/{slot} +pub fn get_validator_blocks( + any_version: AnyVersionFilter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + any_version + .and(warp::path("validator")) + .and(warp::path("blocks")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid slot".to_string(), + )) + })) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + 
.and(not_while_syncing_filter) + .and(warp::query::()) + .and(task_spawner_filter) + .and(chain_filter) + .then( + |endpoint_version: EndpointVersion, + slot: Slot, + accept_header: Option, + not_synced_filter: Result<(), Rejection>, + query: ValidatorBlocksQuery, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + debug!(?slot, "Block production request from HTTP API"); + + not_synced_filter?; + + if endpoint_version == V3 { + produce_block_v3(accept_header, chain, slot, query).await + } else { + produce_block_v2(accept_header, chain, slot, query).await + } + }) + }, + ) + .boxed() +} + +// POST validator/liveness/{epoch} +pub fn post_validator_liveness_epoch( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("liveness")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |epoch: Epoch, + indices: ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + // Ensure the request is for either the current, previous or next epoch. 
+ let current_epoch = + chain.epoch().map_err(warp_utils::reject::unhandled_error)?; + let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); + let next_epoch = current_epoch.saturating_add(Epoch::new(1)); + + if epoch < prev_epoch || epoch > next_epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch from the current epoch {}", + epoch, current_epoch + ))); + } + + let liveness: Vec = indices + .0 + .iter() + .cloned() + .map(|index| { + let is_live = chain.validator_seen_at_epoch(index as usize, epoch); + StandardLivenessResponseData { index, is_live } + }) + .collect(); + + Ok(GenericResponse::from(liveness)) + }) + }, + ) + .boxed() +} + +// POST validator/sync_committee_subscriptions +pub fn post_validator_sync_committee_subscriptions( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + validator_subscription_tx_filter: ValidatorSubscriptionTxFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("sync_committee_subscriptions")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(validator_subscription_tx_filter) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |subscriptions: Vec, + validator_subscription_tx: Sender, + task_spawner: TaskSpawner, + chain: Arc>, + | { + task_spawner.blocking_json_task(Priority::P0, move || { + for subscription in subscriptions { + chain + .validator_monitor + .write() + .auto_register_local_validator(subscription.validator_index); + + let message = ValidatorSubscriptionMessage::SyncCommitteeSubscribe { + subscriptions: vec![subscription], + }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + info = "the host may be overloaded or resource-constrained", + error = ?e, + "Unable to process sync subscriptions" + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or 
shutting down".to_string(), + )); + } + } + + Ok(()) + }) + }, + ).boxed() +} + +// POST validator/register_validator +pub fn post_validator_register_validator( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("register_validator")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + register_val_data: Vec| async { + let (tx, rx) = oneshot::channel(); + + let initial_result = task_spawner + .spawn_async_with_rejection_no_conversion(Priority::P0, async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::unhandled_error)?; + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::unhandled_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + debug!( + count = register_val_data.len(), + "Received register validator request" + ); + + let head_snapshot = chain.head_snapshot(); + let spec = &chain.spec; + + let (preparation_data, filtered_registration_data): ( + Vec<(ProposerPreparationData, Option)>, + Vec, + ) = register_val_data + .into_iter() + .filter_map(|register_data| { + chain + .validator_index(®ister_data.message.pubkey) + .ok() + .flatten() + .and_then(|validator_index| { + let validator = head_snapshot + .beacon_state + .get_validator(validator_index) + .ok()?; + let validator_status = ValidatorStatus::from_validator( + validator, + current_epoch, + spec.far_future_epoch, + ) + .superstatus(); + let is_active_or_pending = + matches!(validator_status, ValidatorStatus::Pending) + || matches!( + validator_status, + ValidatorStatus::Active + ); + + // Filter out validators who are not 'active' or 'pending'. 
+ is_active_or_pending.then_some({ + ( + ( + ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data + .message + .fee_recipient, + }, + Some(register_data.message.gas_limit), + ), + register_data, + ) + }) + }) + }) + .unzip(); + + // Update the prepare beacon proposer cache based on this request. + execution_layer + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|(data, limit)| (data, limit)), + ) + .await; + + // Call prepare beacon proposer blocking with the latest update in order to make + // sure we have a local payload to fall back to in the event of the blinded block + // flow failing. + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + info!( + count = filtered_registration_data.len(), + "Forwarding register validator request to connected builder" + ); + + // It's a waste of a `BeaconProcessor` worker to just + // wait on a response from the builder (especially since + // they have frequent timeouts). Spawn a new task and + // send the response back to our original HTTP request + // task via a channel. + let builder_future = async move { + let arc_builder = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::unhandled_error)? + .builder(); + let builder = arc_builder + .as_ref() + .ok_or(BeaconChainError::BuilderMissing) + .map_err(warp_utils::reject::unhandled_error)?; + builder + .post_builder_validators(&filtered_registration_data) + .await + .map(|resp| warp::reply::json(&resp).into_response()) + .map_err(|e| { + warn!( + num_registrations = filtered_registration_data.len(), + error = ?e, + "Relay error when registering validator(s)" + ); + // Forward the HTTP status code if we are able to, otherwise fall back + // to a server error. 
+ if let eth2::Error::ServerMessage(message) = e { + if message.code == StatusCode::BAD_REQUEST.as_u16() { + return warp_utils::reject::custom_bad_request( + message.message, + ); + } else { + // According to the spec this response should only be a 400 or 500, + // so we fall back to a 500 here. + return warp_utils::reject::custom_server_error( + message.message, + ); + } + } + warp_utils::reject::custom_server_error(format!("{e:?}")) + }) + }; + tokio::task::spawn(async move { tx.send(builder_future.await) }); + + // Just send a generic 200 OK from this closure. We'll + // ignore the `Ok` variant and form a proper response + // from what is sent back down the channel. + Ok(warp::reply::reply().into_response()) + }) + .await; + + if initial_result.is_err() { + return convert_rejection(initial_result).await; + } + + // Await a response from the builder without blocking a + // `BeaconProcessor` worker. + convert_rejection(rx.await.unwrap_or_else(|_| { + Ok(warp::reply::with_status( + warp::reply::json(&"No response from channel"), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response()) + })) + .await + }, + ) + .boxed() +} + +// POST validator/prepare_beacon_proposer +pub fn post_validator_prepare_beacon_proposer( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("prepare_beacon_proposer")) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(network_tx_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .then( + |not_synced_filter: Result<(), Rejection>, + network_tx: UnboundedSender>, + task_spawner: TaskSpawner, + chain: Arc>, + preparation_data: Vec| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + not_synced_filter?; + let 
execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::unhandled_error)?; + + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::unhandled_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + debug!( + count = preparation_data.len(), + "Received proposer preparation data" + ); + + execution_layer + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|data| (data, &None)), + ) + .await; + + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + if chain.spec.is_peer_das_scheduled() { + let (finalized_beacon_state, _, _) = + StateId(CoreStateId::Finalized).state(&chain)?; + let validators_and_balances = preparation_data + .iter() + .filter_map(|preparation| { + if let Ok(effective_balance) = finalized_beacon_state + .get_effective_balance(preparation.validator_index as usize) + { + Some((preparation.validator_index as usize, effective_balance)) + } else { + None + } + }) + .collect::>(); + + let current_slot = + chain.slot().map_err(warp_utils::reject::unhandled_error)?; + if let Some(cgc_change) = chain + .data_availability_checker + .custody_context() + .register_validators(validators_and_balances, current_slot, &chain.spec) + { + chain.update_data_column_custody_info(Some( + cgc_change + .effective_epoch + .start_slot(T::EthSpec::slots_per_epoch()), + )); + + network_tx.send(NetworkMessage::CustodyCountChanged { + new_custody_group_count: cgc_change.new_custody_group_count, + sampling_count: cgc_change.sampling_count, + }).unwrap_or_else(|e| { + debug!(error = %e, "Could not send message to the network service. 
\ + Likely shutdown") + }); + } + } + + Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) + }) + }, + ) + .boxed() +} + +// POST validator/beacon_committee_subscriptions +pub fn post_validator_beacon_committee_subscriptions( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + validator_subscription_tx_filter: ValidatorSubscriptionTxFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("beacon_committee_subscriptions")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(validator_subscription_tx_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |committee_subscriptions: Vec, + validator_subscription_tx: Sender, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let subscriptions: std::collections::BTreeSet<_> = committee_subscriptions + .iter() + .map(|subscription| { + chain + .validator_monitor + .write() + .auto_register_local_validator(subscription.validator_index); + ValidatorSubscription { + attestation_committee_index: subscription.committee_index, + slot: subscription.slot, + committee_count_at_slot: subscription.committees_at_slot, + is_aggregator: subscription.is_aggregator, + } + }) + .collect(); + + let message = + ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + info = "the host may be overloaded or resource-constrained", + error = ?e, + "Unable to process committee subscriptions" + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or shutting down" + .to_string(), + )); + } + Ok(()) + }) + }, + ) + .boxed() +} + +pub fn post_validator_contribution_and_proofs( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, + not_while_syncing_filter: 
NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("contribution_and_proofs")) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>, + contributions: Vec>, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + sync_committees::process_signed_contribution_and_proofs( + contributions, + network_tx, + &chain, + )?; + Ok(GenericResponse::from(())) + }) + }, + ) + .boxed() +} + +// POST validator/aggregate_and_proofs +pub fn post_validator_aggregate_and_proofs( + any_version: AnyVersionFilter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + any_version + .and(warp::path("validator")) + .and(warp::path("aggregate_and_proofs")) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + // V1 and V2 are identical except V2 has a consensus version header in the request. + // We only require this header for SSZ deserialization, which isn't supported for + // this endpoint presently. 
+ |_endpoint_version: EndpointVersion, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>, + aggregates: Vec>, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + let seen_timestamp = timestamp_now(); + let mut verified_aggregates = Vec::with_capacity(aggregates.len()); + let mut messages = Vec::with_capacity(aggregates.len()); + let mut failures = Vec::new(); + + // Verify that all messages in the post are valid before processing further + for (index, aggregate) in aggregates.iter().enumerate() { + match chain.verify_aggregated_attestation_for_gossip(aggregate) { + Ok(verified_aggregate) => { + messages.push(PubsubMessage::AggregateAndProofAttestation(Box::new( + verified_aggregate.aggregate().clone(), + ))); + + // Notify the validator monitor. + chain + .validator_monitor + .read() + .register_api_aggregated_attestation( + seen_timestamp, + verified_aggregate.aggregate(), + verified_aggregate.indexed_attestation(), + &chain.slot_clock, + ); + + verified_aggregates.push((index, verified_aggregate)); + } + // If we already know the attestation, don't broadcast it or attempt to + // further verify it. Return success. + // + // It's reasonably likely that two different validators produce + // identical aggregates, especially if they're using the same beacon + // node. + Err(AttestationError::AttestationSupersetKnown(_)) => continue, + // If we've already seen this aggregator produce an aggregate, just + // skip this one. + // + // We're likely to see this with VCs that use fallback BNs. The first + // BN might time-out *after* publishing the aggregate and then the + // second BN will indicate it's already seen the aggregate. + // + // There's no actual error for the user or the network since the + // aggregate has been successfully published by some other node. 
+ Err(AttestationError::AggregatorAlreadyKnown(_)) => continue, + Err(e) => { + error!( + error = ?e, + request_index = index, + aggregator_index = aggregate.message().aggregator_index(), + attestation_index = aggregate.message().aggregate().committee_index(), + attestation_slot = %aggregate.message().aggregate().data().slot, + "Failure verifying aggregate and proofs" + ); + failures.push(Failure::new(index, format!("Verification: {:?}", e))); + } + } + } + + // Publish aggregate attestations to the libp2p network + if !messages.is_empty() { + publish_network_message(&network_tx, NetworkMessage::Publish { messages })?; + } + + // Import aggregate attestations + for (index, verified_aggregate) in verified_aggregates { + if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) { + error!( + error = ?e, + request_index = index, + aggregator_index = verified_aggregate.aggregate().message().aggregator_index(), + attestation_index = verified_aggregate.attestation().committee_index(), + attestation_slot = %verified_aggregate.attestation().data().slot, + "Failure applying verified aggregate attestation to fork choice" + ); + failures.push(Failure::new(index, format!("Fork choice: {:?}", e))); + } + if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) { + warn!( + error = ?e, + request_index = index, + "Could not add verified aggregate attestation to the inclusion pool" + ); + failures.push(Failure::new(index, format!("Op pool: {:?}", e))); + } + } + + if !failures.is_empty() { + Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(), + failures, + )) + } else { + Ok(()) + } + }) + }, + ).boxed() +} + +// GET validator/duties/proposer/{epoch} +pub fn get_validator_duties_proposer( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + 
.and(warp::path("duties")) + .and(warp::path("proposer")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter) + .and(task_spawner_filter) + .and(chain_filter) + .then( + |epoch: Epoch, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + proposer_duties::proposer_duties(epoch, &chain) + }) + }, + ) + .boxed() +} From 2afa87879bd1b904e037a7ba67c001a5f1a162cc Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 5 Dec 2025 21:59:42 +1100 Subject: [PATCH 14/26] Move beacon pool http api to its own separate module (#8543) Continuation of: * #8536 Moving `/beacon/pool` endpoints out of `http_api` to a separation module. This should improve code maintainability, incremental compilation time and rust analyzer performance. This is a tedious but straight forward change, so we're going with a pair & insta-merge approach to avoid painful & slow async review Co-Authored-By: Jimmy Chen --- beacon_node/http_api/src/beacon/mod.rs | 1 + beacon_node/http_api/src/beacon/pool.rs | 522 ++++++++++++++++++++++++ beacon_node/http_api/src/lib.rs | 459 ++------------------- beacon_node/http_api/src/utils.rs | 3 +- 4 files changed, 563 insertions(+), 422 deletions(-) create mode 100644 beacon_node/http_api/src/beacon/pool.rs diff --git a/beacon_node/http_api/src/beacon/mod.rs b/beacon_node/http_api/src/beacon/mod.rs index 20394784ae..df5e6eee5c 100644 --- a/beacon_node/http_api/src/beacon/mod.rs +++ b/beacon_node/http_api/src/beacon/mod.rs @@ -1 +1,2 @@ +pub mod pool; pub mod states; diff --git a/beacon_node/http_api/src/beacon/pool.rs b/beacon_node/http_api/src/beacon/pool.rs new file mode 100644 index 0000000000..059573c317 --- /dev/null +++ b/beacon_node/http_api/src/beacon/pool.rs @@ -0,0 +1,522 @@ +use 
crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::{NetworkTxFilter, OptionalConsensusVersionHeaderFilter, ResponseFilter}; +use crate::version::{ + ResponseIncludesVersion, V1, V2, add_consensus_version_header, beacon_response, + unsupported_version_rejection, +}; +use crate::{sync_committees, utils}; +use beacon_chain::observed_operations::ObservationOutcome; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{AttestationPoolQuery, EndpointVersion, Failure, GenericResponse}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use operation_pool::ReceivedPreCapella; +use slot_clock::SlotClock; +use std::collections::HashSet; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tracing::{debug, info, warn}; +use types::{ + Attestation, AttestationData, AttesterSlashing, ForkName, ProposerSlashing, + SignedBlsToExecutionChange, SignedVoluntaryExit, SingleAttestation, SyncCommitteeMessage, +}; +use warp::filters::BoxedFilter; +use warp::{Filter, Reply}; +use warp_utils::reject::convert_rejection; + +pub type BeaconPoolPathFilter = BoxedFilter<( + TaskSpawner<::EthSpec>, + Arc>, +)>; +pub type BeaconPoolPathV2Filter = BoxedFilter<( + TaskSpawner<::EthSpec>, + Arc>, +)>; +pub type BeaconPoolPathAnyFilter = BoxedFilter<( + EndpointVersion, + TaskSpawner<::EthSpec>, + Arc>, +)>; + +/// POST beacon/pool/bls_to_execution_changes +pub fn post_beacon_pool_bls_to_execution_changes( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + address_changes: Vec, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let mut failures = vec![]; + + for (index, address_change) in 
address_changes.into_iter().enumerate() { + let validator_index = address_change.message.validator_index; + + match chain.verify_bls_to_execution_change_for_http_api(address_change) { + Ok(ObservationOutcome::New(verified_address_change)) => { + let validator_index = + verified_address_change.as_inner().message.validator_index; + let address = verified_address_change + .as_inner() + .message + .to_execution_address; + + // New to P2P *and* op pool, gossip immediately if post-Capella. + let received_pre_capella = + if chain.current_slot_is_post_capella().unwrap_or(false) { + ReceivedPreCapella::No + } else { + ReceivedPreCapella::Yes + }; + if matches!(received_pre_capella, ReceivedPreCapella::No) { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; + } + + // Import to op pool (may return `false` if there's a race). + let imported = chain.import_bls_to_execution_change( + verified_address_change, + received_pre_capella, + ); + + info!( + %validator_index, + ?address, + published = + matches!(received_pre_capella, ReceivedPreCapella::No), + imported, + "Processed BLS to execution change" + ); + } + Ok(ObservationOutcome::AlreadyKnown) => { + debug!(%validator_index, "BLS to execution change already known"); + } + Err(e) => { + warn!( + validator_index, + reason = ?e, + source = "HTTP", + "Invalid BLS to execution change" + ); + failures.push(Failure::new(index, format!("invalid: {e:?}"))); + } + } + } + + if failures.is_empty() { + Ok(()) + } else { + Err(warp_utils::reject::indexed_bad_request( + "some BLS to execution changes failed to verify".into(), + failures, + )) + } + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/bls_to_execution_changes +pub fn get_beacon_pool_bls_to_execution_changes( + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + 
.then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); + Ok(GenericResponse::from(address_changes)) + }) + }, + ) + .boxed() +} + +/// POST beacon/pool/sync_committees +pub fn post_beacon_pool_sync_committees( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("sync_committees")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + signatures: Vec, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + sync_committees::process_sync_committee_signatures( + signatures, network_tx, &chain, + )?; + Ok(GenericResponse::from(())) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/voluntary_exits +pub fn get_beacon_pool_voluntary_exits( + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestations = chain.op_pool.get_all_voluntary_exits(); + Ok(GenericResponse::from(attestations)) + }) + }, + ) + .boxed() +} + +/// POST beacon/pool/voluntary_exits +pub fn post_beacon_pool_voluntary_exits( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + exit: SignedVoluntaryExit, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let outcome = chain + 
.verify_voluntary_exit_for_gossip(exit.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + // Notify the validator monitor. + chain + .validator_monitor + .read() + .register_api_voluntary_exit(&exit.message); + + if let ObservationOutcome::New(exit) = outcome { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), + )?; + + chain.import_voluntary_exit(exit); + } + + Ok(()) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/proposer_slashings +pub fn get_beacon_pool_proposer_slashings( + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestations = chain.op_pool.get_all_proposer_slashings(); + Ok(GenericResponse::from(attestations)) + }) + }, + ) + .boxed() +} + +/// POST beacon/pool/proposer_slashings +pub fn post_beacon_pool_proposer_slashings( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + slashing: ProposerSlashing, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let outcome = chain + .verify_proposer_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + // Notify the validator monitor. 
+ chain + .validator_monitor + .read() + .register_api_proposer_slashing(&slashing); + + if let ObservationOutcome::New(slashing) = outcome { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::ProposerSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain.import_proposer_slashing(slashing); + } + + Ok(()) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/attester_slashings +pub fn get_beacon_pool_attester_slashings( + beacon_pool_path_any: &BeaconPoolPathAnyFilter, +) -> ResponseFilter { + beacon_pool_path_any + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .then( + |endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let slashings = chain.op_pool.get_all_attester_slashings(); + + // Use the current slot to find the fork version, and convert all messages to the + // current fork's format. This is to ensure consistent message types matching + // `Eth-Consensus-Version`. 
+ let current_slot = + chain + .slot_clock + .now() + .ok_or(warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ))?; + let fork_name = chain.spec.fork_name_at_slot::(current_slot); + let slashings = slashings + .into_iter() + .filter(|slashing| { + (fork_name.electra_enabled() + && matches!(slashing, AttesterSlashing::Electra(_))) + || (!fork_name.electra_enabled() + && matches!(slashing, AttesterSlashing::Base(_))) + }) + .collect::>(); + + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + + let res = beacon_response(require_version, &slashings); + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }) + }, + ) + .boxed() +} + +// POST beacon/pool/attester_slashings +pub fn post_beacon_pool_attester_slashings( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path_any: &BeaconPoolPathAnyFilter, +) -> ResponseFilter { + beacon_pool_path_any + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + // V1 and V2 are identical except V2 has a consensus version header in the request. + // We only require this header for SSZ deserialization, which isn't supported for + // this endpoint presently. + |_endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>, + slashing: AttesterSlashing, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let outcome = chain + .verify_attester_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + // Notify the validator monitor. 
+ chain + .validator_monitor + .read() + .register_api_attester_slashing(slashing.to_ref()); + + if let ObservationOutcome::New(slashing) = outcome { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::AttesterSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain.import_attester_slashing(slashing); + } + + Ok(()) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/attestations?committee_index,slot +pub fn get_beacon_pool_attestations( + beacon_pool_path_any: &BeaconPoolPathAnyFilter, +) -> ResponseFilter { + beacon_pool_path_any + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp::query::()) + .then( + |endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>, + query: AttestationPoolQuery| { + task_spawner.blocking_response_task(Priority::P1, move || { + let query_filter = |data: &AttestationData, committee_indices: HashSet| { + query.slot.is_none_or(|slot| slot == data.slot) + && query + .committee_index + .is_none_or(|index| committee_indices.contains(&index)) + }; + + let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); + attestations.extend( + chain + .naive_aggregation_pool + .read() + .iter() + .filter(|&att| { + query_filter(att.data(), att.get_committee_indices_map()) + }) + .cloned(), + ); + // Use the current slot to find the fork version, and convert all messages to the + // current fork's format. This is to ensure consistent message types matching + // `Eth-Consensus-Version`. 
+ let current_slot = + chain + .slot_clock + .now() + .ok_or(warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ))?; + let fork_name = chain.spec.fork_name_at_slot::(current_slot); + let attestations = attestations + .into_iter() + .filter(|att| { + (fork_name.electra_enabled() && matches!(att, Attestation::Electra(_))) + || (!fork_name.electra_enabled() + && matches!(att, Attestation::Base(_))) + }) + .collect::>(); + + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + + let res = beacon_response(require_version, &attestations); + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }) + }, + ) + .boxed() +} + +pub fn post_beacon_pool_attestations_v2( + network_tx_filter: &NetworkTxFilter, + optional_consensus_version_header_filter: OptionalConsensusVersionHeaderFilter, + beacon_pool_path_v2: &BeaconPoolPathV2Filter, +) -> ResponseFilter { + beacon_pool_path_v2 + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp_utils::json::json::>()) + .and(optional_consensus_version_header_filter) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + attestations: Vec, + _fork_name: Option, + network_tx: UnboundedSender>| async move { + let result = crate::publish_attestations::publish_attestations( + task_spawner, + chain, + attestations, + network_tx, + true, + ) + .await + .map(|()| warp::reply::json(&())); + convert_rejection(result).await + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 3801933855..628b94a2a7 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -36,16 +36,14 @@ mod validator_inclusion; mod validators; mod version; +use crate::beacon::pool::*; use 
crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::utils::{AnyVersionFilter, EthV1Filter}; use crate::validator::post_validator_liveness_epoch; use crate::validator::*; use crate::version::beacon_response; use beacon::states; -use beacon_chain::{ - BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, - observed_operations::ObservationOutcome, -}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; use builder_states::get_next_withdrawals; @@ -62,12 +60,10 @@ use health_metrics::observe::Observe; use lighthouse_network::Enr; use lighthouse_network::NetworkGlobals; use lighthouse_network::PeerId; -use lighthouse_network::PubsubMessage; use lighthouse_version::version_with_platform; use logging::{SSELoggingComponents, crit}; use network::{NetworkMessage, NetworkSenders}; use network_utils::enr_ext::EnrExt; -use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; pub use publish_blocks::{ ProvenancedBlock, publish_blinded_block, publish_block, reconstruct_block, @@ -76,7 +72,6 @@ use serde::{Deserialize, Serialize}; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; -use std::collections::HashSet; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; @@ -93,9 +88,8 @@ use tokio_stream::{ }; use tracing::{debug, info, warn}; use types::{ - Attestation, AttestationData, AttesterSlashing, BeaconStateError, Checkpoint, ConfigAndPreset, - Epoch, EthSpec, ForkName, Hash256, ProposerSlashing, SignedBlindedBeaconBlock, - SignedBlsToExecutionChange, SignedVoluntaryExit, SingleAttestation, Slot, SyncCommitteeMessage, + BeaconStateError, Checkpoint, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, + SignedBlindedBeaconBlock, Slot, }; use version::{ ResponseIncludesVersion, V1, V2, add_consensus_version_header, add_ssz_content_type_header, @@ -106,7 +100,7 
@@ use warp::Reply; use warp::hyper::Body; use warp::sse::Event; use warp::{Filter, Rejection, http::Response}; -use warp_utils::{query::multi_key_query, reject::convert_rejection, uor::UnifyingOrFilter}; +use warp_utils::{query::multi_key_query, uor::UnifyingOrFilter}; const API_PREFIX: &str = "eth"; @@ -804,10 +798,10 @@ pub fn serve( * beacon/blocks */ let consensus_version_header_filter = - warp::header::header::(CONSENSUS_VERSION_HEADER); + warp::header::header::(CONSENSUS_VERSION_HEADER).boxed(); let optional_consensus_version_header_filter = - warp::header::optional::(CONSENSUS_VERSION_HEADER); + warp::header::optional::(CONSENSUS_VERSION_HEADER).boxed(); // POST beacon/blocks let post_beacon_blocks = eth_v1 @@ -816,7 +810,7 @@ pub fn serve( .and(warp::path("blocks")) .and(warp::path::end()) .and(warp::body::json()) - .and(consensus_version_header_filter) + .and(consensus_version_header_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) @@ -853,7 +847,7 @@ pub fn serve( .and(warp::path("blocks")) .and(warp::path::end()) .and(warp::body::bytes()) - .and(consensus_version_header_filter) + .and(consensus_version_header_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) @@ -891,7 +885,7 @@ pub fn serve( .and(warp::query::()) .and(warp::path::end()) .and(warp::body::json()) - .and(consensus_version_header_filter) + .and(consensus_version_header_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) @@ -931,7 +925,7 @@ pub fn serve( .and(warp::query::()) .and(warp::path::end()) .and(warp::body::bytes()) - .and(consensus_version_header_filter) + .and(consensus_version_header_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) @@ -1408,444 +1402,67 @@ pub fn serve( .and(warp::path("beacon")) .and(warp::path("pool")) 
.and(task_spawner_filter.clone()) - .and(chain_filter.clone()); + .and(chain_filter.clone()) + .boxed(); let beacon_pool_path_v2 = eth_v2 .clone() .and(warp::path("beacon")) .and(warp::path("pool")) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); + .and(chain_filter.clone()) + .boxed(); let beacon_pool_path_any = any_version .clone() .and(warp::path("beacon")) .and(warp::path("pool")) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); + .and(chain_filter.clone()) + .boxed(); - let post_beacon_pool_attestations_v2 = beacon_pool_path_v2 - .clone() - .and(warp::path("attestations")) - .and(warp::path::end()) - .and(warp_utils::json::json::>()) - .and(optional_consensus_version_header_filter) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - attestations: Vec, - _fork_name: Option, - network_tx: UnboundedSender>| async move { - let result = crate::publish_attestations::publish_attestations( - task_spawner, - chain, - attestations, - network_tx, - true, - ) - .await - .map(|()| warp::reply::json(&())); - convert_rejection(result).await - }, - ); + let post_beacon_pool_attestations_v2 = post_beacon_pool_attestations_v2( + &network_tx_filter, + optional_consensus_version_header_filter, + &beacon_pool_path_v2, + ); // GET beacon/pool/attestations?committee_index,slot - let get_beacon_pool_attestations = beacon_pool_path_any - .clone() - .and(warp::path("attestations")) - .and(warp::path::end()) - .and(warp::query::()) - .then( - |endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::AttestationPoolQuery| { - task_spawner.blocking_response_task(Priority::P1, move || { - let query_filter = |data: &AttestationData, committee_indices: HashSet| { - query.slot.is_none_or(|slot| slot == data.slot) - && query - .committee_index - .is_none_or(|index| committee_indices.contains(&index)) - }; - - let mut attestations = 
chain.op_pool.get_filtered_attestations(query_filter); - attestations.extend( - chain - .naive_aggregation_pool - .read() - .iter() - .filter(|&att| { - query_filter(att.data(), att.get_committee_indices_map()) - }) - .cloned(), - ); - // Use the current slot to find the fork version, and convert all messages to the - // current fork's format. This is to ensure consistent message types matching - // `Eth-Consensus-Version`. - let current_slot = - chain - .slot_clock - .now() - .ok_or(warp_utils::reject::custom_server_error( - "unable to read slot clock".to_string(), - ))?; - let fork_name = chain.spec.fork_name_at_slot::(current_slot); - let attestations = attestations - .into_iter() - .filter(|att| { - (fork_name.electra_enabled() && matches!(att, Attestation::Electra(_))) - || (!fork_name.electra_enabled() - && matches!(att, Attestation::Base(_))) - }) - .collect::>(); - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; - - let res = beacon_response(require_version, &attestations); - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }) - }, - ); + let get_beacon_pool_attestations = get_beacon_pool_attestations(&beacon_pool_path_any); // POST beacon/pool/attester_slashings - let post_beacon_pool_attester_slashings = beacon_pool_path_any - .clone() - .and(warp::path("attester_slashings")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - // V1 and V2 are identical except V2 has a consensus version header in the request. - // We only require this header for SSZ deserialization, which isn't supported for - // this endpoint presently. 
- |_endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>, - slashing: AttesterSlashing, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let outcome = chain - .verify_attester_slashing_for_gossip(slashing.clone()) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; - - // Notify the validator monitor. - chain - .validator_monitor - .read() - .register_api_attester_slashing(slashing.to_ref()); - - if let ObservationOutcome::New(slashing) = outcome { - utils::publish_pubsub_message( - &network_tx, - PubsubMessage::AttesterSlashing(Box::new( - slashing.clone().into_inner(), - )), - )?; - - chain.import_attester_slashing(slashing); - } - - Ok(()) - }) - }, - ); + let post_beacon_pool_attester_slashings = + post_beacon_pool_attester_slashings(&network_tx_filter, &beacon_pool_path_any); // GET beacon/pool/attester_slashings let get_beacon_pool_attester_slashings = - beacon_pool_path_any - .clone() - .and(warp::path("attester_slashings")) - .and(warp::path::end()) - .then( - |endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let slashings = chain.op_pool.get_all_attester_slashings(); - - // Use the current slot to find the fork version, and convert all messages to the - // current fork's format. This is to ensure consistent message types matching - // `Eth-Consensus-Version`. 
- let current_slot = chain.slot_clock.now().ok_or( - warp_utils::reject::custom_server_error( - "unable to read slot clock".to_string(), - ), - )?; - let fork_name = chain.spec.fork_name_at_slot::(current_slot); - let slashings = slashings - .into_iter() - .filter(|slashing| { - (fork_name.electra_enabled() - && matches!(slashing, AttesterSlashing::Electra(_))) - || (!fork_name.electra_enabled() - && matches!(slashing, AttesterSlashing::Base(_))) - }) - .collect::>(); - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; - - let res = beacon_response(require_version, &slashings); - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }) - }, - ); + get_beacon_pool_attester_slashings(&beacon_pool_path_any); // POST beacon/pool/proposer_slashings - let post_beacon_pool_proposer_slashings = beacon_pool_path - .clone() - .and(warp::path("proposer_slashings")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - slashing: ProposerSlashing, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let outcome = chain - .verify_proposer_slashing_for_gossip(slashing.clone()) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; - - // Notify the validator monitor. 
- chain - .validator_monitor - .read() - .register_api_proposer_slashing(&slashing); - - if let ObservationOutcome::New(slashing) = outcome { - utils::publish_pubsub_message( - &network_tx, - PubsubMessage::ProposerSlashing(Box::new( - slashing.clone().into_inner(), - )), - )?; - - chain.import_proposer_slashing(slashing); - } - - Ok(()) - }) - }, - ); + let post_beacon_pool_proposer_slashings = + post_beacon_pool_proposer_slashings(&network_tx_filter, &beacon_pool_path); // GET beacon/pool/proposer_slashings - let get_beacon_pool_proposer_slashings = beacon_pool_path - .clone() - .and(warp::path("proposer_slashings")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let attestations = chain.op_pool.get_all_proposer_slashings(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }, - ); + let get_beacon_pool_proposer_slashings = get_beacon_pool_proposer_slashings(&beacon_pool_path); // POST beacon/pool/voluntary_exits - let post_beacon_pool_voluntary_exits = beacon_pool_path - .clone() - .and(warp::path("voluntary_exits")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - exit: SignedVoluntaryExit, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let outcome = chain - .verify_voluntary_exit_for_gossip(exit.clone()) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; - - // Notify the validator monitor. 
- chain - .validator_monitor - .read() - .register_api_voluntary_exit(&exit.message); - - if let ObservationOutcome::New(exit) = outcome { - utils::publish_pubsub_message( - &network_tx, - PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), - )?; - - chain.import_voluntary_exit(exit); - } - - Ok(()) - }) - }, - ); + let post_beacon_pool_voluntary_exits = + post_beacon_pool_voluntary_exits(&network_tx_filter, &beacon_pool_path); // GET beacon/pool/voluntary_exits - let get_beacon_pool_voluntary_exits = beacon_pool_path - .clone() - .and(warp::path("voluntary_exits")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let attestations = chain.op_pool.get_all_voluntary_exits(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }, - ); + let get_beacon_pool_voluntary_exits = get_beacon_pool_voluntary_exits(&beacon_pool_path); // POST beacon/pool/sync_committees - let post_beacon_pool_sync_committees = beacon_pool_path - .clone() - .and(warp::path("sync_committees")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - signatures: Vec, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - sync_committees::process_sync_committee_signatures( - signatures, network_tx, &chain, - )?; - Ok(api_types::GenericResponse::from(())) - }) - }, - ); + let post_beacon_pool_sync_committees = + post_beacon_pool_sync_committees(&network_tx_filter, &beacon_pool_path); // GET beacon/pool/bls_to_execution_changes - let get_beacon_pool_bls_to_execution_changes = beacon_pool_path - .clone() - .and(warp::path("bls_to_execution_changes")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); - 
Ok(api_types::GenericResponse::from(address_changes)) - }) - }, - ); + let get_beacon_pool_bls_to_execution_changes = + get_beacon_pool_bls_to_execution_changes(&beacon_pool_path); // POST beacon/pool/bls_to_execution_changes - let post_beacon_pool_bls_to_execution_changes = beacon_pool_path - .clone() - .and(warp::path("bls_to_execution_changes")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - address_changes: Vec, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let mut failures = vec![]; - - for (index, address_change) in address_changes.into_iter().enumerate() { - let validator_index = address_change.message.validator_index; - - match chain.verify_bls_to_execution_change_for_http_api(address_change) { - Ok(ObservationOutcome::New(verified_address_change)) => { - let validator_index = - verified_address_change.as_inner().message.validator_index; - let address = verified_address_change - .as_inner() - .message - .to_execution_address; - - // New to P2P *and* op pool, gossip immediately if post-Capella. - let received_pre_capella = - if chain.current_slot_is_post_capella().unwrap_or(false) { - ReceivedPreCapella::No - } else { - ReceivedPreCapella::Yes - }; - if matches!(received_pre_capella, ReceivedPreCapella::No) { - utils::publish_pubsub_message( - &network_tx, - PubsubMessage::BlsToExecutionChange(Box::new( - verified_address_change.as_inner().clone(), - )), - )?; - } - - // Import to op pool (may return `false` if there's a race). 
- let imported = chain.import_bls_to_execution_change( - verified_address_change, - received_pre_capella, - ); - - info!( - %validator_index, - ?address, - published = - matches!(received_pre_capella, ReceivedPreCapella::No), - imported, - "Processed BLS to execution change" - ); - } - Ok(ObservationOutcome::AlreadyKnown) => { - debug!(%validator_index, "BLS to execution change already known"); - } - Err(e) => { - warn!( - validator_index, - reason = ?e, - source = "HTTP", - "Invalid BLS to execution change" - ); - failures.push(api_types::Failure::new( - index, - format!("invalid: {e:?}"), - )); - } - } - } - - if failures.is_empty() { - Ok(()) - } else { - Err(warp_utils::reject::indexed_bad_request( - "some BLS to execution changes failed to verify".into(), - failures, - )) - } - }) - }, - ); + let post_beacon_pool_bls_to_execution_changes = + post_beacon_pool_bls_to_execution_changes(&network_tx_filter, &beacon_pool_path); let beacon_rewards_path = eth_v1 .clone() diff --git a/beacon_node/http_api/src/utils.rs b/beacon_node/http_api/src/utils.rs index a89780ba24..f2b859ebe5 100644 --- a/beacon_node/http_api/src/utils.rs +++ b/beacon_node/http_api/src/utils.rs @@ -7,7 +7,7 @@ use network::{NetworkMessage, ValidatorSubscriptionMessage}; use parking_lot::RwLock; use std::sync::Arc; use tokio::sync::mpsc::{Sender, UnboundedSender}; -use types::{ChainSpec, EthSpec}; +use types::{ChainSpec, EthSpec, ForkName}; use warp::Rejection; use warp::filters::BoxedFilter; @@ -20,6 +20,7 @@ pub type TaskSpawnerFilter = BoxedFilter<(TaskSpawner< pub type ValidatorSubscriptionTxFilter = BoxedFilter<(Sender,)>; pub type NetworkTxFilter = BoxedFilter<(UnboundedSender::EthSpec>>,)>; +pub type OptionalConsensusVersionHeaderFilter = BoxedFilter<(Option,)>; pub fn from_meta_data( meta_data: &RwLock>, From 7bfcc0352090cd3abdf6fe8915d56d0610e39689 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 8 Dec 2025 09:37:23 +0400 Subject: [PATCH 15/26] Reduce `eth2` dependency space (#8524) Remove 
certain dependencies from `eth2`, and feature-gate others which are only used by certain endpoints. | Removed | Optional | Dev only | | -------- | -------- | -------- | | `either` `enr` `libp2p-identity` `multiaddr` | `protoarray` `eth2_keystore` `eip_3076` `zeroize` `reqwest-eventsource` `futures` `futures-util` | `rand` `test_random_derive` | This is done by adding an `events` feature which enables the events endpoint and its associated dependencies. The `lighthouse` feature also enables its associated dependencies making them optional. The networking-adjacent dependencies were removed by just having certain fields use a `String` instead of an explicit network type. This means the user should handle conversion at the call site instead. This is a bit spicy, but I believe `PeerId`, `Enr` and `Multiaddr` are easily converted to and from `String`s so I think it's fine and reduces our dependency space by a lot. The alternative is to feature gate these types behind a `network` feature instead. 
Co-Authored-By: Mac L --- Cargo.lock | 4 ---- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/execution_layer/Cargo.toml | 2 +- beacon_node/http_api/Cargo.toml | 2 +- beacon_node/http_api/src/lib.rs | 9 +++++--- beacon_node/http_api/tests/tests.rs | 18 +++++++++++---- beacon_node/lighthouse_network/Cargo.toml | 2 +- common/eth2/Cargo.toml | 27 ++++++++++------------- common/eth2/src/error.rs | 2 ++ common/eth2/src/lib.rs | 16 +++++++++----- common/eth2/src/types.rs | 17 ++++++++------ common/health_metrics/Cargo.toml | 2 +- common/monitoring_api/Cargo.toml | 2 +- validator_client/http_api/Cargo.toml | 2 +- 14 files changed, 61 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 481808b41f..f832485d5a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3128,17 +3128,13 @@ dependencies = [ "context_deserialize", "educe", "eip_3076", - "either", - "enr", "eth2_keystore", "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "futures", "futures-util", - "libp2p-identity", "mediatype", - "multiaddr", "pretty_reqwest_error", "proto_array", "rand 0.9.2", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index e889f53bb0..b42585c2a1 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -19,7 +19,7 @@ alloy-primitives = { workspace = true } bitvec = { workspace = true } bls = { workspace = true } educe = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } eth2_network_config = { workspace = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 43b2e1dd75..540f9dc0a0 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -12,7 +12,7 @@ alloy-rpc-types-eth = { workspace = true } arc-swap = "1.6.0" builder_client = { path = 
"../builder_client" } bytes = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["events", "lighthouse"] } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } fixed_bytes = { workspace = true } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 7dd0d0223f..3aa9c8351c 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -12,7 +12,7 @@ bs58 = "0.4.0" bytes = { workspace = true } directory = { workspace = true } either = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 628b94a2a7..4ed02f3cbf 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2118,9 +2118,12 @@ pub fn serve( let discovery_addresses = enr.multiaddr_p2p_udp(); Ok(api_types::GenericResponse::from(api_types::IdentityData { peer_id: network_globals.local_peer_id().to_base58(), - enr, - p2p_addresses, - discovery_addresses, + enr: enr.to_base64(), + p2p_addresses: p2p_addresses.iter().map(|a| a.to_string()).collect(), + discovery_addresses: discovery_addresses + .iter() + .map(|a| a.to_string()) + .collect(), metadata: utils::from_meta_data::( &network_globals.local_metadata, &chain.spec, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 8d99e696cf..a86cc4f4ef 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2853,9 +2853,19 @@ impl ApiTester { let expected = IdentityData { peer_id: self.local_enr.peer_id().to_string(), - enr: self.local_enr.clone(), - p2p_addresses: self.local_enr.multiaddr_p2p_tcp(), - discovery_addresses: self.local_enr.multiaddr_p2p_udp(), + enr: self.local_enr.to_base64(), + 
p2p_addresses: self + .local_enr + .multiaddr_p2p_tcp() + .iter() + .map(|a| a.to_string()) + .collect(), + discovery_addresses: self + .local_enr + .multiaddr_p2p_udp() + .iter() + .map(|a| a.to_string()) + .collect(), metadata: MetaData::V2(MetaDataV2 { seq_number: 0, attnets: "0x0000000000000000".to_string(), @@ -2884,7 +2894,7 @@ impl ApiTester { pub async fn test_get_node_peers_by_id(self) -> Self { let result = self .client - .get_node_peers_by_id(self.external_peer_id) + .get_node_peers_by_id(&self.external_peer_id.to_string()) .await .unwrap() .data; diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index a6dd276c19..d2431cca04 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -17,7 +17,7 @@ directory = { workspace = true } dirs = { workspace = true } discv5 = { workspace = true } either = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } fnv = { workspace = true } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index f7e6cde210..ba4bcd3649 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -5,36 +5,33 @@ authors = ["Paul Hauner "] edition = { workspace = true } [features] -default = ["lighthouse"] -lighthouse = [] +default = [] +lighthouse = ["proto_array", "eth2_keystore", "eip_3076", "zeroize"] +events = ["reqwest-eventsource", "futures", "futures-util"] [dependencies] context_deserialize = { workspace = true } educe = { workspace = true } -eip_3076 = { workspace = true } -either = { workspace = true } -enr = { version = "0.13.0", features = ["ed25519"] } -eth2_keystore = { workspace = true } +eip_3076 = { workspace = true, optional = true } +eth2_keystore = { workspace = true, optional = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } 
ethereum_ssz_derive = { workspace = true } -futures = { workspace = true } -futures-util = "0.3.8" -libp2p-identity = { version = "0.2", features = ["peerid"] } +futures = { workspace = true, optional = true } +futures-util = { version = "0.3.8", optional = true } mediatype = "0.19.13" -multiaddr = "0.18.2" pretty_reqwest_error = { workspace = true } -proto_array = { workspace = true } -rand = { workspace = true } +proto_array = { workspace = true, optional = true } reqwest = { workspace = true } -reqwest-eventsource = "0.6.0" +reqwest-eventsource = { version = "0.6.0", optional = true } sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } ssz_types = { workspace = true } -test_random_derive = { path = "../../common/test_random_derive" } types = { workspace = true } -zeroize = { workspace = true } +zeroize = { workspace = true, optional = true } [dev-dependencies] +rand = { workspace = true } +test_random_derive = { path = "../../common/test_random_derive" } tokio = { workspace = true } diff --git a/common/eth2/src/error.rs b/common/eth2/src/error.rs index c1bacb4510..1f21220b79 100644 --- a/common/eth2/src/error.rs +++ b/common/eth2/src/error.rs @@ -14,6 +14,7 @@ use std::{fmt, path::PathBuf}; pub enum Error { /// The `reqwest` client raised an error. HttpClient(PrettyReqwestError), + #[cfg(feature = "events")] /// The `reqwest_eventsource` client raised an error. SseClient(Box), /// The server returned an error message where the body was able to be parsed. 
@@ -91,6 +92,7 @@ impl Error { pub fn status(&self) -> Option { match self { Error::HttpClient(error) => error.inner().status(), + #[cfg(feature = "events")] Error::SseClient(error) => { if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error.as_ref() { Some(*status) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 4e832a11df..4e2109be04 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -22,20 +22,23 @@ pub use beacon_response::{ }; pub use self::error::{Error, ok_or_error, success_or_error}; +pub use reqwest; +pub use reqwest::{StatusCode, Url}; +pub use sensitive_url::SensitiveUrl; + use self::mixin::{RequestAccept, ResponseOptional}; use self::types::*; use educe::Educe; +#[cfg(feature = "events")] use futures::Stream; +#[cfg(feature = "events")] use futures_util::StreamExt; -use libp2p_identity::PeerId; -pub use reqwest; use reqwest::{ Body, IntoUrl, RequestBuilder, Response, header::{HeaderMap, HeaderValue}, }; -pub use reqwest::{StatusCode, Url}; +#[cfg(feature = "events")] use reqwest_eventsource::{Event, EventSource}; -pub use sensitive_url::SensitiveUrl; use serde::{Serialize, de::DeserializeOwned}; use ssz::Encode; use std::fmt; @@ -1978,7 +1981,7 @@ impl BeaconNodeHttpClient { /// `GET node/peers/{peer_id}` pub async fn get_node_peers_by_id( &self, - peer_id: PeerId, + peer_id: &str, ) -> Result, Error> { let mut path = self.eth_path(V1)?; @@ -1986,7 +1989,7 @@ impl BeaconNodeHttpClient { .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("node") .push("peers") - .push(&peer_id.to_string()); + .push(peer_id); self.get(path).await } @@ -2761,6 +2764,7 @@ impl BeaconNodeHttpClient { } /// `GET events?topics` + #[cfg(feature = "events")] pub async fn get_events( &self, topic: &[EventTopic], diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index cbdaa004d0..5aa3de5e17 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1,13 +1,13 @@ //! 
This module exposes a superset of the `types` crate. It adds additional types that are only //! required for the HTTP API. +pub use types::*; + use crate::{ CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, Error as ServerError, }; -use enr::{CombinedKey, Enr}; use mediatype::{MediaType, MediaTypeList, names}; -use multiaddr::Multiaddr; use reqwest::header::HeaderMap; use serde::{Deserialize, Deserializer, Serialize}; use serde_utils::quoted_u64::Quoted; @@ -18,9 +18,11 @@ use std::fmt::{self, Display}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; + +#[cfg(test)] use test_random_derive::TestRandom; +#[cfg(test)] use types::test_utils::TestRandom; -pub use types::*; // TODO(mac): Temporary module and re-export hack to expose old `consensus/types` via `eth2/types`. pub use crate::beacon_response::*; @@ -557,9 +559,9 @@ pub struct ChainHeadData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct IdentityData { pub peer_id: String, - pub enr: Enr, - pub p2p_addresses: Vec, - pub discovery_addresses: Vec, + pub enr: String, + pub p2p_addresses: Vec, + pub discovery_addresses: Vec, pub metadata: MetaData, } @@ -2208,7 +2210,8 @@ pub enum ContentType { Ssz, } -#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[cfg_attr(test, derive(TestRandom))] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] #[serde(bound = "E: EthSpec")] pub struct BlobsBundle { pub commitments: KzgCommitments, diff --git a/common/health_metrics/Cargo.toml b/common/health_metrics/Cargo.toml index 20a8c6e4e4..816d4ec68c 100644 --- a/common/health_metrics/Cargo.toml +++ b/common/health_metrics/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = { workspace = true } [dependencies] -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } metrics = { workspace = true } 
[target.'cfg(target_os = "linux")'.dependencies] diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 9e2c36e2c7..e00b1f027b 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } health_metrics = { workspace = true } lighthouse_version = { workspace = true } metrics = { workspace = true } diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index 588aa2ca93..bb624ea988 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -16,7 +16,7 @@ deposit_contract = { workspace = true } directory = { workspace = true } dirs = { workspace = true } doppelganger_service = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } From 77d58437da08f8f7fee6ebeeca1b7c0ffc5ea674 Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 9 Dec 2025 10:03:02 +0400 Subject: [PATCH 16/26] Clarify `alloy` dependencies (#8550) Previously, we had a pinned version of `alloy` to fix some crate compatibility issues we encountered during the migration away from `ethers`. Now that the migration is complete we should remove the pin. This also updates alloy crates to their latest versions. 
Co-Authored-By: Mac L --- Cargo.lock | 67 +++++++++---------- Cargo.toml | 13 ++-- common/deposit_contract/Cargo.toml | 4 +- .../execution_engine_integration/Cargo.toml | 6 +- 4 files changed, 47 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f832485d5a..2bb7b6d81b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,9 +142,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3abecb92ba478a285fbf5689100dbafe4003ded4a09bf4b5ef62cca87cd4f79e" +checksum = "2e318e25fb719e747a7e8db1654170fc185024f3ed5b10f86c08d448a912f6e2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -153,6 +153,7 @@ dependencies = [ "alloy-trie", "alloy-tx-macros", "auto_impl", + "borsh", "c-kzg", "derive_more 2.0.1", "either", @@ -168,9 +169,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e864d4f11d1fb8d3ac2fd8f3a15f1ee46d55ec6d116b342ed1b2cb737f25894" +checksum = "364380a845193a317bcb7a5398fc86cdb66c47ebe010771dde05f6869bf9e64a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -191,8 +192,6 @@ dependencies = [ "alloy-sol-type-parser", "alloy-sol-types", "itoa", - "serde", - "serde_json", "winnow", ] @@ -236,9 +235,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e867b5fd52ed0372a95016f3a37cbff95a9d5409230fbaef2d8ea00e8618098" +checksum = "a4c4d7c5839d9f3a467900c625416b24328450c65702eb3d8caff8813e4d1d33" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -271,9 +270,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dcab4c51fb1273e3b0f59078e0cdf8aa99f697925b09f0d2055c18be46b4d48c" +checksum = "f72cf87cda808e593381fb9f005ffa4d2475552b7a6c5ac33d087bf77d82abd0" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -286,9 +285,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d6ed73d440bae8f27771b7cd507fa8f10f19ddf0b8f67e7622a52e0dbf798e" +checksum = "12aeb37b6f2e61b93b1c3d34d01ee720207c76fe447e2a2c217e433ac75b17f5" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -312,9 +311,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219dccd2cf753a43bd9b0fbb7771a16927ffdb56e43e3a15755bef1a74d614aa" +checksum = "abd29ace62872083e30929cd9b282d82723196d196db589f3ceda67edcc05552" dependencies = [ "alloy-consensus", "alloy-eips", @@ -355,9 +354,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0ef8cbc2b68e2512acf04b2d296c05c98a661bc460462add6414528f4ff3d9b" +checksum = "9b710636d7126e08003b8217e24c09f0cca0b46d62f650a841736891b1ed1fc1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -416,9 +415,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c2630fde9ff6033a780635e1af6ef40e92d74a9cacb8af3defc1b15cfebca5" +checksum = "d0882e72d2c1c0c79dcf4ab60a67472d3f009a949f774d4c17d0bdb669cfde05" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -439,9 +438,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"425e14ee32eb8b7edd6a2247fe0ed640785e6eba75af27db27f1e6220c15ef0d" +checksum = "6a63fb40ed24e4c92505f488f9dd256e2afaed17faa1b7a221086ebba74f4122" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -450,9 +449,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0185f68a0f8391ab996d335a887087d7ccdbc97952efab3516f6307d456ba2cd" +checksum = "9eae0c7c40da20684548cbc8577b6b7447f7bf4ddbac363df95e3da220e41e72" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -471,9 +470,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e856112bfa0d9adc85bd7c13db03fad0e71d1d6fb4c2010e475b6718108236" +checksum = "c0df1987ed0ff2d0159d76b52e7ddfc4e4fbddacc54d2fbee765e0d14d7c01b5" dependencies = [ "alloy-primitives", "serde", @@ -482,9 +481,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a4f629da632d5279bbc5731634f0f5c9484ad9c4cad0cd974d9669dc1f46d6" +checksum = "6ff69deedee7232d7ce5330259025b868c5e6a52fa8dffda2c861fb3a5889b24" dependencies = [ "alloy-primitives", "async-trait", @@ -497,9 +496,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "590dcaeb290cdce23155e68af4791d093afc3754b1a331198a25d2d44c5456e8" +checksum = "72cfe0be3ec5a8c1a46b2e5a7047ed41121d360d97f4405bb7c1c784880c86cb" dependencies = [ "alloy-consensus", "alloy-network", @@ -583,9 +582,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fe215a2f9b51d5f1aa5c8cf22c8be8cdb354934de09c9a4e37aefb79b77552fd" +checksum = "be98b07210d24acf5b793c99b759e9a696e4a2e67593aec0487ae3b3e1a2478c" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -606,9 +605,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1b37b1a30d23deb3a8746e882c70b384c574d355bc2bbea9ea918b0c31366e" +checksum = "4198a1ee82e562cab85e7f3d5921aab725d9bd154b6ad5017f82df1695877c97" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -637,9 +636,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ccf423f6de62e8ce1d6c7a11fb7508ae3536d02e0d68aaeb05c8669337d0937" +checksum = "333544408503f42d7d3792bfc0f7218b643d968a03d2c0ed383ae558fb4a76d0" dependencies = [ "darling 0.21.3", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 21cf551c48..5296b5d9b7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -95,10 +95,15 @@ version = "8.0.1" [workspace.dependencies] account_utils = { path = "common/account_utils" } -alloy-consensus = { version = "=1.0.42", default-features = false } -alloy-primitives = { version = "=1.4.1", default-features = false, features = ["rlp", "getrandom"] } -alloy-rlp = { version = "=0.3.12", default-features = false } -alloy-rpc-types-eth = { version = "=1.0.42", default-features = false, features = ["serde"] } +alloy-consensus = { version = "1", default-features = false } +alloy-dyn-abi = { version = "1", default-features = false } +alloy-json-abi = { version = "1", default-features = false } +alloy-network = { version = "1", default-features = false } +alloy-primitives = { version = "1", default-features = false, features = ["rlp", "getrandom"] } +alloy-provider = { version = "1", default-features = false, features = ["reqwest"] } +alloy-rlp = { version = "0.3", 
default-features = false } +alloy-rpc-types-eth = { version = "1", default-features = false, features = ["serde"] } +alloy-signer-local = { version = "1", default-features = false } anyhow = "1" arbitrary = { version = "1", features = ["derive"] } async-channel = "1.9.0" diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index dfaad43719..53f1bc3e2b 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -7,8 +7,8 @@ edition = { workspace = true } build = "build.rs" [dependencies] -alloy-dyn-abi = "1.4" -alloy-json-abi = "1.4" +alloy-dyn-abi = { workspace = true } +alloy-json-abi = { workspace = true } alloy-primitives = { workspace = true } ethereum_ssz = { workspace = true } serde_json = { workspace = true } diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 74bf43e3ae..78ed266fb2 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -7,11 +7,11 @@ edition = { workspace = true } portable = ["types/portable"] [dependencies] -alloy-network = "1.0" +alloy-network = { workspace = true } alloy-primitives = { workspace = true } -alloy-provider = "1.0" +alloy-provider = { workspace = true } alloy-rpc-types-eth = { workspace = true } -alloy-signer-local = "1.0" +alloy-signer-local = { workspace = true } async-channel = { workspace = true } deposit_contract = { workspace = true } execution_layer = { workspace = true } From f3fd1f210b2f4ed7d208f81f9a09e1edced3bb3d Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 9 Dec 2025 11:13:41 +0400 Subject: [PATCH 17/26] Remove `consensus/types` re-exports (#8540) There are certain crates which we re-export within `types` which creates a fragmented DevEx, where there are various ways to import the same crates. 
```rust // consensus/types/src/lib.rs pub use bls::{ AggregatePublicKey, AggregateSignature, Error as BlsError, Keypair, PUBLIC_KEY_BYTES_LEN, PublicKey, PublicKeyBytes, SIGNATURE_BYTES_LEN, SecretKey, Signature, SignatureBytes, get_withdrawal_credentials, }; pub use context_deserialize::{ContextDeserialize, context_deserialize}; pub use fixed_bytes::FixedBytesExtended; pub use milhouse::{self, List, Vector}; pub use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum, typenum::Unsigned}; pub use superstruct::superstruct; ``` This PR removes these re-exports and makes it explicit that these types are imported from a non-`consensus/types` crate. Co-Authored-By: Mac L --- Cargo.lock | 64 +++++++++++++++++++ Cargo.toml | 1 + .../src/validator/slashing_protection.rs | 3 +- beacon_node/Cargo.toml | 1 + beacon_node/beacon_chain/Cargo.toml | 3 + .../beacon_chain/src/attester_cache.rs | 5 +- .../beacon_chain/src/beacon_block_streamer.rs | 6 +- beacon_node/beacon_chain/src/beacon_chain.rs | 2 + .../src/beacon_fork_choice_store.rs | 3 +- .../beacon_chain/src/beacon_proposer_cache.rs | 5 +- .../beacon_chain/src/block_times_cache.rs | 2 +- .../beacon_chain/src/block_verification.rs | 3 +- beacon_node/beacon_chain/src/builder.rs | 4 +- .../overflow_lru_cache.rs | 5 +- beacon_node/beacon_chain/src/errors.rs | 3 +- .../beacon_chain/src/graffiti_calculator.rs | 3 +- .../beacon_chain/src/historical_blocks.rs | 3 +- .../src/naive_aggregation_pool.rs | 7 +- .../beacon_chain/src/observed_aggregates.rs | 3 +- .../beacon_chain/src/observed_attesters.rs | 5 +- .../src/observed_block_producers.rs | 3 +- .../beacon_chain/src/observed_slashable.rs | 3 +- .../beacon_chain/src/shuffling_cache.rs | 1 + .../beacon_chain/src/single_attestation.rs | 4 +- .../src/sync_committee_verification.rs | 3 +- beacon_node/beacon_chain/src/test_utils.rs | 11 +++- .../beacon_chain/src/validator_monitor.rs | 6 +- .../src/validator_pubkey_cache.rs | 7 +- .../tests/attestation_production.rs | 5 +- 
.../tests/attestation_verification.rs | 8 ++- .../beacon_chain/tests/blob_verification.rs | 1 + .../beacon_chain/tests/block_verification.rs | 2 + .../beacon_chain/tests/column_verification.rs | 1 + .../beacon_chain/tests/op_verification.rs | 1 + beacon_node/beacon_chain/tests/rewards.rs | 3 +- .../beacon_chain/tests/schema_stability.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 3 + .../tests/sync_committee_verification.rs | 7 +- beacon_node/beacon_chain/tests/tests.rs | 5 +- .../beacon_chain/tests/validator_monitor.rs | 3 +- beacon_node/builder_client/Cargo.toml | 2 + beacon_node/builder_client/src/lib.rs | 9 ++- beacon_node/execution_layer/Cargo.toml | 2 + beacon_node/execution_layer/src/engine_api.rs | 4 +- .../execution_layer/src/engine_api/http.rs | 8 ++- .../src/engine_api/json_structures.rs | 4 +- beacon_node/execution_layer/src/lib.rs | 3 +- .../test_utils/execution_block_generator.rs | 7 +- .../src/test_utils/mock_builder.rs | 9 +-- .../src/test_utils/mock_execution_layer.rs | 3 +- .../execution_layer/src/versioned_hashes.rs | 3 +- beacon_node/genesis/Cargo.toml | 1 + beacon_node/genesis/src/interop.rs | 6 +- beacon_node/http_api/Cargo.toml | 3 + beacon_node/http_api/src/block_id.rs | 5 +- beacon_node/http_api/src/lib.rs | 5 +- beacon_node/http_api/src/validator/mod.rs | 3 +- .../tests/broadcast_validation_tests.rs | 5 +- beacon_node/http_api/tests/fork_tests.rs | 6 +- .../http_api/tests/interactive_tests.rs | 5 +- beacon_node/http_api/tests/tests.rs | 8 ++- beacon_node/lighthouse_network/Cargo.toml | 3 + .../lighthouse_network/src/discovery/mod.rs | 3 +- .../src/peer_manager/mod.rs | 3 +- .../lighthouse_network/src/rpc/codec.rs | 7 +- .../lighthouse_network/src/rpc/protocol.rs | 3 +- .../lighthouse_network/src/types/mod.rs | 3 +- .../lighthouse_network/src/types/topics.rs | 3 +- .../lighthouse_network/tests/common.rs | 6 +- .../lighthouse_network/tests/rpc_tests.rs | 6 +- beacon_node/network/Cargo.toml | 2 + beacon_node/network/src/service.rs 
| 3 +- beacon_node/network/src/status.rs | 3 +- .../src/sync/block_lookups/parent_chain.rs | 3 +- beacon_node/operation_pool/Cargo.toml | 4 ++ beacon_node/operation_pool/src/attestation.rs | 3 +- .../operation_pool/src/attestation_storage.rs | 8 ++- beacon_node/operation_pool/src/lib.rs | 4 +- beacon_node/operation_pool/src/persistence.rs | 1 + .../operation_pool/src/reward_cache.rs | 5 +- beacon_node/src/config.rs | 3 +- beacon_node/store/Cargo.toml | 3 + beacon_node/store/src/chunked_vector.rs | 3 + beacon_node/store/src/config.rs | 2 +- .../store/src/database/leveldb_impl.rs | 3 +- beacon_node/store/src/errors.rs | 2 +- beacon_node/store/src/hdiff.rs | 3 +- beacon_node/store/src/hot_cold_store.rs | 2 + beacon_node/store/src/iter.rs | 4 +- beacon_node/store/src/partial_beacon_state.rs | 5 +- common/account_utils/Cargo.toml | 1 + .../src/validator_definitions.rs | 3 +- common/deposit_contract/Cargo.toml | 1 + common/deposit_contract/src/lib.rs | 9 ++- common/eip_3076/Cargo.toml | 2 + common/eip_3076/src/lib.rs | 5 +- common/eth2/Cargo.toml | 2 + common/eth2/src/lib.rs | 2 + common/eth2/src/lighthouse_vc/http_client.rs | 1 + common/eth2/src/lighthouse_vc/std_types.rs | 3 +- common/eth2/src/lighthouse_vc/types.rs | 1 + common/eth2/src/types.rs | 3 + common/eth2_network_config/Cargo.toml | 1 + common/eth2_network_config/src/lib.rs | 3 +- common/validator_dir/src/builder.rs | 4 +- common/validator_dir/src/validator_dir.rs | 3 +- common/validator_dir/tests/tests.rs | 3 +- consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 3 +- consensus/fork_choice/tests/tests.rs | 7 +- consensus/proto_array/Cargo.toml | 1 + .../src/fork_choice_test_definition.rs | 5 +- .../fork_choice_test_definition/no_votes.rs | 2 +- consensus/proto_array/src/proto_array.rs | 5 +- .../src/proto_array_fork_choice.rs | 8 ++- consensus/state_processing/Cargo.toml | 3 + .../src/common/get_attesting_indices.rs | 5 +- .../src/common/slash_validator.rs | 1 + 
consensus/state_processing/src/epoch_cache.rs | 5 +- consensus/state_processing/src/genesis.rs | 1 + .../src/per_block_processing.rs | 1 + .../altair/sync_committee.rs | 6 +- .../process_operations.rs | 3 +- .../per_block_processing/signature_sets.rs | 15 +++-- .../src/per_block_processing/tests.rs | 3 + .../per_block_processing/verify_deposit.rs | 1 + .../altair/participation_flag_updates.rs | 2 +- .../epoch_processing_summary.rs | 5 +- .../src/per_epoch_processing/errors.rs | 3 +- .../historical_roots_update.rs | 2 +- .../justification_and_finalization_state.rs | 3 +- .../src/per_epoch_processing/resets.rs | 3 +- .../src/per_epoch_processing/single_pass.rs | 7 +- .../src/per_epoch_processing/slashings.rs | 3 +- .../src/per_slot_processing.rs | 1 + .../state_processing/src/state_advance.rs | 3 +- .../state_processing/src/upgrade/altair.rs | 3 +- .../state_processing/src/upgrade/capella.rs | 3 +- .../state_processing/src/upgrade/fulu.rs | 5 +- consensus/types/Cargo.toml | 1 + consensus/types/benches/benches.rs | 3 +- consensus/types/src/block/beacon_block.rs | 3 +- consensus/types/src/core/eth_spec.rs | 4 +- consensus/types/src/core/preset.rs | 2 +- consensus/types/src/deposit/deposit.rs | 3 +- consensus/types/src/lib.rs | 11 ---- .../src/light_client/light_client_update.rs | 4 +- consensus/types/src/state/beacon_state.rs | 3 +- consensus/types/src/state/iter.rs | 1 + .../sync_committee/sync_selection_proof.rs | 5 +- .../src/sync_committee/sync_subnet_id.rs | 2 +- .../src/test_utils/test_random/bitfield.rs | 3 +- .../src/test_utils/test_random/test_random.rs | 3 +- lcli/Cargo.toml | 1 + lcli/src/generate_bootnode_enr.rs | 3 +- lighthouse/tests/account_manager.rs | 2 +- lighthouse/tests/validator_manager.rs | 1 + slasher/Cargo.toml | 3 + slasher/src/attester_record.rs | 4 +- slasher/src/database.rs | 9 ++- slasher/src/test_utils.rs | 9 +-- testing/ef_tests/Cargo.toml | 3 + .../src/cases/merkle_proof_validity.rs | 5 +- testing/ef_tests/src/cases/ssz_generic.rs | 6 +- 
testing/ef_tests/tests/tests.rs | 1 + .../execution_engine_integration/Cargo.toml | 3 + .../src/test_rig.rs | 5 +- .../src/transactions.rs | 4 +- testing/simulator/Cargo.toml | 1 + testing/simulator/src/checks.rs | 3 +- testing/state_transition_vectors/Cargo.toml | 2 + testing/state_transition_vectors/src/main.rs | 8 +-- testing/web3signer_tests/Cargo.toml | 3 + testing/web3signer_tests/src/lib.rs | 3 + .../beacon_node_fallback/Cargo.toml | 1 + .../beacon_node_fallback/src/lib.rs | 3 +- .../doppelganger_service/Cargo.toml | 1 + .../doppelganger_service/src/lib.rs | 3 +- validator_client/http_api/Cargo.toml | 3 + validator_client/http_api/src/keystores.rs | 3 +- validator_client/http_api/src/lib.rs | 5 +- validator_client/http_api/src/remotekeys.rs | 3 +- validator_client/http_api/src/test_utils.rs | 1 + validator_client/http_api/src/tests.rs | 1 + .../http_api/src/tests/keystores.rs | 4 ++ .../initialized_validators/src/lib.rs | 3 +- .../lighthouse_validator_store/Cargo.toml | 1 + .../lighthouse_validator_store/src/lib.rs | 7 +- validator_client/signing_method/Cargo.toml | 1 + validator_client/signing_method/src/lib.rs | 1 + .../signing_method/src/web3signer.rs | 1 + .../slashing_protection/Cargo.toml | 2 + .../src/attestation_tests.rs | 3 +- .../src/bin/test_generator.rs | 3 +- .../slashing_protection/src/block_tests.rs | 3 +- .../src/extra_interchange_tests.rs | 2 +- .../src/interchange_test.rs | 4 +- .../slashing_protection/src/lib.rs | 5 +- .../src/slashing_database.rs | 3 +- .../slashing_protection/tests/migration.rs | 3 +- .../validator_services/src/block_service.rs | 3 +- .../validator_services/src/duties_service.rs | 3 +- .../validator_services/src/sync.rs | 3 +- .../src/sync_committee_service.rs | 5 +- validator_client/validator_store/Cargo.toml | 1 + validator_client/validator_store/src/lib.rs | 7 +- validator_manager/Cargo.toml | 1 + validator_manager/src/common.rs | 1 + validator_manager/src/create_validators.rs | 2 + 
validator_manager/src/delete_validators.rs | 2 +- validator_manager/src/exit_validators.rs | 3 +- validator_manager/src/list_validators.rs | 3 +- validator_manager/src/move_validators.rs | 3 +- 213 files changed, 556 insertions(+), 259 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2bb7b6d81b..413596beeb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -34,6 +34,7 @@ dependencies = [ name = "account_utils" version = "0.1.0" dependencies = [ + "bls", "eth2_keystore", "eth2_wallet", "filesystem", @@ -1237,6 +1238,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", + "fixed_bytes", "fork_choice", "futures", "genesis", @@ -1251,6 +1253,7 @@ dependencies = [ "maplit", "merkle_proof", "metrics", + "milhouse", "mockall", "mockall_double", "once_cell", @@ -1279,6 +1282,7 @@ dependencies = [ "tracing", "tree_hash", "tree_hash_derive", + "typenum", "types", "zstd 0.13.3", ] @@ -1289,6 +1293,7 @@ version = "8.0.1" dependencies = [ "account_utils", "beacon_chain", + "bls", "clap", "clap_utils", "client", @@ -1319,6 +1324,7 @@ dependencies = [ name = "beacon_node_fallback" version = "0.1.0" dependencies = [ + "bls", "clap", "eth2", "futures", @@ -1591,6 +1597,8 @@ dependencies = [ name = "builder_client" version = "0.1.0" dependencies = [ + "bls", + "context_deserialize", "eth2", "ethereum_ssz", "lighthouse_version", @@ -2545,6 +2553,7 @@ dependencies = [ "alloy-dyn-abi", "alloy-json-abi", "alloy-primitives", + "bls", "ethereum_ssz", "hex", "reqwest", @@ -2755,6 +2764,7 @@ name = "doppelganger_service" version = "0.1.0" dependencies = [ "beacon_node_fallback", + "bls", "environment", "eth2", "futures", @@ -2863,16 +2873,19 @@ dependencies = [ "hex", "kzg", "logging", + "milhouse", "rayon", "serde", "serde_json", "serde_repr", "serde_yaml", "snap", + "ssz_types", "state_processing", "swap_or_not_shuffle", "tree_hash", "tree_hash_derive", + "typenum", "types", ] @@ -2900,7 +2913,9 @@ name = "eip_3076" version = "0.1.0" dependencies = [ "arbitrary", 
+ "bls", "ethereum_serde_utils", + "fixed_bytes", "serde", "serde_json", "tempfile", @@ -3124,6 +3139,7 @@ dependencies = [ name = "eth2" version = "0.1.0" dependencies = [ + "bls", "context_deserialize", "educe", "eip_3076", @@ -3143,6 +3159,7 @@ dependencies = [ "serde", "serde_json", "ssz_types", + "superstruct", "test_random_derive", "tokio", "types", @@ -3212,6 +3229,7 @@ dependencies = [ "discv5", "eth2_config", "ethereum_ssz", + "fixed_bytes", "kzg", "pretty_reqwest_error", "reqwest", @@ -3352,8 +3370,10 @@ dependencies = [ "alloy-rpc-types-eth", "alloy-signer-local", "async-channel 1.9.0", + "bls", "deposit_contract", "execution_layer", + "fixed_bytes", "fork_choice", "futures", "hex", @@ -3365,6 +3385,7 @@ dependencies = [ "task_executor", "tempfile", "tokio", + "typenum", "types", ] @@ -3377,6 +3398,7 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types-eth", "arc-swap", + "bls", "builder_client", "bytes", "eth2", @@ -3415,6 +3437,7 @@ dependencies = [ "tree_hash", "tree_hash_derive", "triehash", + "typenum", "types", "warp", "zeroize", @@ -3588,6 +3611,7 @@ dependencies = [ "beacon_chain", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "logging", "metrics", "proto_array", @@ -3778,6 +3802,7 @@ dependencies = [ name = "genesis" version = "0.2.0" dependencies = [ + "bls", "ethereum_hashing", "ethereum_ssz", "int_to_bytes", @@ -4223,14 +4248,17 @@ version = "0.1.0" dependencies = [ "beacon_chain", "beacon_processor", + "bls", "bs58 0.4.0", "bytes", + "context_deserialize", "directory", "either", "eth2", "ethereum_serde_utils", "ethereum_ssz", "execution_layer", + "fixed_bytes", "futures", "genesis", "health_metrics", @@ -4937,6 +4965,7 @@ dependencies = [ "ethereum_hashing", "ethereum_ssz", "execution_layer", + "fixed_bytes", "hex", "lighthouse_network", "lighthouse_version", @@ -5486,6 +5515,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "async-channel 1.9.0", + "bls", "bytes", "delay_map", "directory", @@ -5495,6 +5525,7 @@ dependencies = [ 
"eth2", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "fnv", "futures", "hex", @@ -5527,6 +5558,7 @@ dependencies = [ "tokio-util", "tracing", "tracing-subscriber", + "typenum", "types", "unsigned-varint 0.8.0", ] @@ -5541,6 +5573,7 @@ version = "0.1.0" dependencies = [ "account_utils", "beacon_node_fallback", + "bls", "doppelganger_service", "either", "environment", @@ -6208,6 +6241,7 @@ dependencies = [ "eth2_network_config", "ethereum_ssz", "execution_layer", + "fixed_bytes", "fnv", "futures", "genesis", @@ -6240,6 +6274,7 @@ dependencies = [ "tokio-stream", "tracing", "tracing-subscriber", + "typenum", "types", ] @@ -6644,9 +6679,11 @@ version = "0.2.0" dependencies = [ "beacon_chain", "bitvec", + "bls", "educe", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "itertools 0.10.5", "maplit", "metrics", @@ -6656,7 +6693,9 @@ dependencies = [ "serde", "state_processing", "store", + "superstruct", "tokio", + "typenum", "types", ] @@ -7154,6 +7193,7 @@ version = "0.2.0" dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "safe_arith", "serde", "serde_yaml", @@ -8325,6 +8365,7 @@ dependencies = [ name = "signing_method" version = "0.1.0" dependencies = [ + "bls", "eth2_keystore", "ethereum_serde_utils", "lockfile", @@ -8380,6 +8421,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", + "typenum", "types", ] @@ -8394,11 +8436,13 @@ name = "slasher" version = "0.1.0" dependencies = [ "bincode", + "bls", "byteorder", "educe", "ethereum_ssz", "ethereum_ssz_derive", "filesystem", + "fixed_bytes", "flate2", "libmdbx", "lmdb-rkv", @@ -8418,6 +8462,7 @@ dependencies = [ "tracing", "tree_hash", "tree_hash_derive", + "typenum", "types", ] @@ -8443,9 +8488,11 @@ name = "slashing_protection" version = "0.1.0" dependencies = [ "arbitrary", + "bls", "eip_3076", "ethereum_serde_utils", "filesystem", + "fixed_bytes", "r2d2", "r2d2_sqlite", "rayon", @@ -8571,11 +8618,13 @@ dependencies = [ "ethereum_hashing", "ethereum_ssz", 
"ethereum_ssz_derive", + "fixed_bytes", "int_to_bytes", "integer-sqrt", "itertools 0.10.5", "merkle_proof", "metrics", + "milhouse", "rand 0.9.2", "rayon", "safe_arith", @@ -8585,6 +8634,7 @@ dependencies = [ "tokio", "tracing", "tree_hash", + "typenum", "types", ] @@ -8593,7 +8643,9 @@ name = "state_transition_vectors" version = "0.1.0" dependencies = [ "beacon_chain", + "bls", "ethereum_ssz", + "fixed_bytes", "state_processing", "tokio", "types", @@ -8616,11 +8668,13 @@ dependencies = [ "directory", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "itertools 0.10.5", "leveldb", "logging", "lru 0.12.5", "metrics", + "milhouse", "parking_lot", "rand 0.9.2", "redb", @@ -8634,6 +8688,7 @@ dependencies = [ "tempfile", "tracing", "tracing-subscriber", + "typenum", "types", "xdelta3", "zstd 0.13.3", @@ -9546,6 +9601,7 @@ dependencies = [ "tracing", "tree_hash", "tree_hash_derive", + "typenum", ] [[package]] @@ -9769,6 +9825,7 @@ dependencies = [ "eth2_keystore", "ethereum_serde_utils", "filesystem", + "fixed_bytes", "futures", "graffiti_file", "health_metrics", @@ -9785,6 +9842,7 @@ dependencies = [ "signing_method", "slashing_protection", "slot_clock", + "ssz_types", "sysinfo", "system_health", "task_executor", @@ -9792,6 +9850,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "typenum", "types", "url", "validator_dir", @@ -9829,6 +9888,7 @@ version = "0.1.0" dependencies = [ "account_utils", "beacon_chain", + "bls", "clap", "clap_utils", "educe", @@ -9885,6 +9945,7 @@ dependencies = [ name = "validator_store" version = "0.1.0" dependencies = [ + "bls", "eth2", "slashing_protection", "types", @@ -10126,10 +10187,12 @@ version = "0.1.0" dependencies = [ "account_utils", "async-channel 1.9.0", + "bls", "environment", "eth2", "eth2_keystore", "eth2_network_config", + "fixed_bytes", "futures", "initialized_validators", "lighthouse_validator_store", @@ -10141,6 +10204,7 @@ dependencies = [ "serde_yaml", "slashing_protection", "slot_clock", + "ssz_types", 
"task_executor", "tempfile", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 5296b5d9b7..aea8fd1b8d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -259,6 +259,7 @@ tracing-opentelemetry = "0.31.0" tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } tree_hash = "0.12.0" tree_hash_derive = "0.12.0" +typenum = "1" types = { path = "consensus/types" } url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index 18064b990f..96098ccbbd 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use environment::Environment; use slashing_protection::{ @@ -7,7 +8,7 @@ use slashing_protection::{ use std::fs::File; use std::path::PathBuf; use std::str::FromStr; -use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Slot}; pub const CMD: &str = "slashing-protection"; pub const IMPORT_CMD: &str = "import"; diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index fd01355978..5352814dd5 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -20,6 +20,7 @@ testing = [] # Enables testing-only CLI flags [dependencies] account_utils = { workspace = true } beacon_chain = { workspace = true } +bls = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } client = { path = "client" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index b42585c2a1..734cfdf32b 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -26,6 +26,7 @@ ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } 
fork_choice = { workspace = true } futures = { workspace = true } genesis = { workspace = true } @@ -39,6 +40,7 @@ logging = { workspace = true } lru = { workspace = true } merkle_proof = { workspace = true } metrics = { workspace = true } +milhouse = { workspace = true } once_cell = { workspace = true } oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } operation_pool = { workspace = true } @@ -65,6 +67,7 @@ tokio-stream = { workspace = true } tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } types = { workspace = true } zstd = { workspace = true } diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index beaa1e581c..26a3389812 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -10,13 +10,14 @@ //! and penalties can be computed and the `state.current_justified_checkpoint` can be updated. 
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use fixed_bytes::FixedBytesExtended; use parking_lot::RwLock; use state_processing::state_advance::{Error as StateAdvanceError, partial_state_advance}; use std::collections::HashMap; use std::ops::Range; use types::{ - BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, - Hash256, RelativeEpoch, Slot, + BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, RelativeEpoch, + Slot, attestation::AttestationError, beacon_state::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index c816a0b29f..7b3bb03e56 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -685,13 +685,13 @@ impl From for BeaconChainError { mod tests { use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}; + use bls::Keypair; use execution_layer::test_utils::Block; + use fixed_bytes::FixedBytesExtended; use std::sync::Arc; use std::sync::LazyLock; use tokio::sync::mpsc; - use types::{ - ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MinimalEthSpec, Slot, - }; + use types::{ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot}; const VALIDATOR_COUNT: usize = 48; diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index adc400b1c1..25b2aa30cb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -74,6 +74,7 @@ use crate::{ AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead, metrics, }; +use bls::{PublicKey, PublicKeyBytes, Signature}; use 
eth2::beacon_response::ForkVersionedResponse; use eth2::types::{ EventKind, SseBlobSidecar, SseBlock, SseDataColumnSidecar, SseExtendedPayloadAttributes, @@ -82,6 +83,7 @@ use execution_layer::{ BlockProposalContents, BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, }; +use fixed_bytes::FixedBytesExtended; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 0c203009bb..60487f9c46 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -6,6 +6,7 @@ use crate::{BeaconSnapshot, metrics}; use educe::Educe; +use fixed_bytes::FixedBytesExtended; use fork_choice::ForkChoiceStore; use proto_array::JustifiedBalances; use safe_arith::ArithError; @@ -17,7 +18,7 @@ use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, Hash256, Slot, + Hash256, Slot, }; #[derive(Debug)] diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index bd6460eba7..a923d657a8 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -18,10 +18,9 @@ use state_processing::state_advance::partial_state_advance; use std::num::NonZeroUsize; use std::sync::Arc; use tracing::instrument; +use typenum::Unsigned; use types::non_zero_usize::new_non_zero_usize; -use types::{ - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned, -}; +use types::{BeaconState, BeaconStateError, 
ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot}; /// The number of sets of proposer indices that should be cached. const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index bd1adb7e40..e8d4c75dce 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -294,7 +294,7 @@ impl BlockTimesCache { #[cfg(test)] mod test { use super::*; - use types::FixedBytesExtended; + use fixed_bytes::FixedBytesExtended; #[test] fn observed_time_uses_minimum() { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 374f1e2b36..bca8d2bc57 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -66,6 +66,7 @@ use crate::{ beacon_chain::{BeaconForkChoice, ForkChoiceError}, metrics, }; +use bls::{PublicKey, PublicKeyBytes}; use educe::Educe; use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; @@ -97,7 +98,7 @@ use tracing::{Instrument, Span, debug, debug_span, error, info_span, instrument} use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, Hash256, InconsistentFork, KzgProofs, - PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, data_column_sidecar::DataColumnSidecarError, }; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ef438b16e0..58dbf1c35e 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -21,7 +21,9 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, 
BeaconSnapshot, ServerSentEventHandler, }; +use bls::Signature; use execution_layer::ExecutionLayer; +use fixed_bytes::FixedBytesExtended; use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use kzg::Kzg; @@ -43,7 +45,7 @@ use tracing::{debug, error, info}; use types::data_column_custody_group::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, + Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index e7c536c0d8..776fb50f61 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -1261,15 +1261,14 @@ mod pending_components_tests { use crate::PayloadVerificationOutcome; use crate::block_verification_types::BlockImportData; use crate::test_utils::{NumBlobs, generate_rand_block_and_blobs, test_spec}; + use fixed_bytes::FixedBytesExtended; use fork_choice::PayloadVerificationStatus; use kzg::KzgCommitment; use rand::SeedableRng; use rand::rngs::StdRng; use state_processing::ConsensusContext; use types::test_utils::TestRandom; - use types::{ - BeaconState, FixedBytesExtended, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot, - }; + use types::{BeaconState, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot}; type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 9dc6e897fb..8f615baab4 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -9,9 +9,11 @@ use crate::observed_aggregates::Error as 
ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use crate::observed_data_sidecars::Error as ObservedDataSidecarsError; +use bls::PublicKeyBytes; use execution_layer::PayloadStatus; use fork_choice::ExecutionStatus; use futures::channel::mpsc::TrySendError; +use milhouse::Error as MilhouseError; use operation_pool::OpPoolError; use safe_arith::ArithError; use ssz_types::Error as SszTypesError; @@ -28,7 +30,6 @@ use state_processing::{ }; use task_executor::ShutdownReason; use tokio::task::JoinError; -use types::milhouse::Error as MilhouseError; use types::*; macro_rules! easy_from_to { diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index e8110d14cd..56808e0e67 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -225,13 +225,14 @@ async fn engine_version_cache_refresh_service( mod tests { use crate::ChainConfig; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}; + use bls::Keypair; use execution_layer::EngineCapabilities; use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; use std::sync::Arc; use std::sync::LazyLock; use std::time::Duration; use tracing::info; - use types::{ChainSpec, GRAFFITI_BYTES_LEN, Graffiti, Keypair, MinimalEthSpec}; + use types::{ChainSpec, GRAFFITI_BYTES_LEN, Graffiti, MinimalEthSpec}; const VALIDATOR_COUNT: usize = 48; /// A cached set of keys. 
diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index e4040eea6b..91b0f12cbb 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,5 +1,6 @@ use crate::data_availability_checker::{AvailableBlock, AvailableBlockData}; use crate::{BeaconChain, BeaconChainTypes, WhenSlotSkipped, metrics}; +use fixed_bytes::FixedBytesExtended; use itertools::Itertools; use state_processing::{ per_block_processing::ParallelSignatureSets, @@ -12,7 +13,7 @@ use store::metadata::DataColumnInfo; use store::{AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; use strum::IntoStaticStr; use tracing::{debug, instrument}; -use types::{FixedBytesExtended, Hash256, Slot}; +use types::{Hash256, Slot}; /// Use a longer timeout on the pubkey cache. /// diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 4c4478d17e..beefc2d678 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -577,12 +577,11 @@ where #[cfg(test)] mod tests { use super::*; - use ssz_types::BitList; - use store::BitVector; + use fixed_bytes::FixedBytesExtended; + use ssz_types::{BitList, BitVector}; use tree_hash::TreeHash; use types::{ - Attestation, AttestationBase, AttestationElectra, FixedBytesExtended, Fork, Hash256, - SyncCommitteeMessage, + Attestation, AttestationBase, AttestationElectra, Fork, Hash256, SyncCommitteeMessage, test_utils::{generate_deterministic_keypair, test_random_instance}, }; diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index f6f62e1b73..b2c5cb4b38 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -473,7 +473,8 @@ where 
#[cfg(not(debug_assertions))] mod tests { use super::*; - use types::{AttestationBase, FixedBytesExtended, Hash256, test_utils::test_random_instance}; + use fixed_bytes::FixedBytesExtended; + use types::{AttestationBase, Hash256, test_utils::test_random_instance}; type E = types::MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index 34d68fe3ac..d5433f49d1 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -19,8 +19,9 @@ use bitvec::vec::BitVec; use std::collections::{HashMap, HashSet}; use std::hash::Hash; use std::marker::PhantomData; +use typenum::Unsigned; use types::slot_data::SlotData; -use types::{Epoch, EthSpec, Hash256, Slot, Unsigned}; +use types::{Epoch, EthSpec, Hash256, Slot}; /// The maximum capacity of the `AutoPruningEpochContainer`. /// @@ -619,7 +620,7 @@ impl SlotSubcommitteeIndex { #[cfg(test)] mod tests { use super::*; - use types::FixedBytesExtended; + use fixed_bytes::FixedBytesExtended; type E = types::MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index 096c8bff77..b740735ac4 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -4,7 +4,8 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned}; +use typenum::Unsigned; +use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot}; #[derive(Debug, PartialEq)] pub enum Error { diff --git a/beacon_node/beacon_chain/src/observed_slashable.rs b/beacon_node/beacon_chain/src/observed_slashable.rs index 001a0d4a86..704d605436 100644 --- a/beacon_node/beacon_chain/src/observed_slashable.rs +++ b/beacon_node/beacon_chain/src/observed_slashable.rs 
@@ -5,7 +5,8 @@ use crate::observed_block_producers::Error; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{EthSpec, Hash256, Slot, Unsigned}; +use typenum::Unsigned; +use types::{EthSpec, Hash256, Slot}; #[derive(Eq, Hash, PartialEq, Debug, Default)] pub struct ProposalKey { diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 22921147a6..618d459754 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -290,6 +290,7 @@ impl BlockShufflingIds { #[cfg(not(debug_assertions))] #[cfg(test)] mod test { + use fixed_bytes::FixedBytesExtended; use types::*; use crate::test_utils::EphemeralHarnessType; diff --git a/beacon_node/beacon_chain/src/single_attestation.rs b/beacon_node/beacon_chain/src/single_attestation.rs index 33a093687e..955eb98e92 100644 --- a/beacon_node/beacon_chain/src/single_attestation.rs +++ b/beacon_node/beacon_chain/src/single_attestation.rs @@ -1,7 +1,7 @@ use crate::attestation_verification::Error; +use ssz_types::{BitList, BitVector}; use types::{ - Attestation, AttestationBase, AttestationElectra, BitList, BitVector, EthSpec, ForkName, - SingleAttestation, + Attestation, AttestationBase, AttestationElectra, EthSpec, ForkName, SingleAttestation, }; pub fn single_attestation_to_attestation( diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 88b040a6e5..e74e284e58 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -30,6 +30,7 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, metrics, observed_aggregates::ObserveOutcome, }; +use bls::AggregateSignature; use bls::{PublicKeyBytes, verify_signature_sets}; use 
educe::Educe; use safe_arith::ArithError; @@ -51,7 +52,7 @@ use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; use types::sync_committee::SyncCommitteeError; use types::{ - AggregateSignature, BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, + BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, sync_committee_contribution::Error as ContributionError, }; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 759b7e9bd7..3651512b85 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -20,6 +20,9 @@ pub use crate::{ validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}, }; use bls::get_withdrawal_credentials; +use bls::{ + AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, +}; use eth2::types::SignedBlockContentsTuple; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ @@ -30,6 +33,7 @@ use execution_layer::{ MockExecutionLayer, }, }; +use fixed_bytes::FixedBytesExtended; use futures::channel::mpsc::Receiver; pub use genesis::{DEFAULT_ETH1_BLOCK_HASH, InteropGenesisBuilder}; use int_to_bytes::int_to_bytes32; @@ -46,7 +50,7 @@ use rand::seq::SliceRandom; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; -use ssz_types::RuntimeVariableList; +use ssz_types::{RuntimeVariableList, VariableList}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; @@ -61,12 +65,13 @@ use store::{HotColdDB, ItemStore, MemoryStore, config::StoreConfig}; use task_executor::TaskExecutor; use task_executor::{ShutdownReason, test_utils::TestRuntime}; use tree_hash::TreeHash; +use typenum::U4294967296; 
use types::data_column_custody_group::CustodyIndex; use types::indexed_attestation::IndexedAttestationBase; use types::payload::BlockProductionVersion; use types::test_utils::TestRandom; pub use types::test_utils::generate_deterministic_keypairs; -use types::{typenum::U4294967296, *}; +use types::*; // 4th September 2019 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; @@ -3276,7 +3281,7 @@ pub fn generate_rand_block_and_blobs( ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); - let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng)); + let mut block = SignedBeaconBlock::from_block(inner, Signature::random_for_test(rng)); let mut blob_sidecars = vec![]; let bundle = match block { diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index ba06d5da4e..2a76d65d32 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -4,6 +4,7 @@ use crate::beacon_proposer_cache::{BeaconProposerCache, TYPICAL_SLOTS_PER_EPOCH}; use crate::metrics; +use bls::PublicKeyBytes; use itertools::Itertools; use logging::crit; use parking_lot::{Mutex, RwLock}; @@ -28,9 +29,10 @@ use types::consts::altair::{ use types::{ Attestation, AttestationData, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, - IndexedAttestationRef, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, - SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, + IndexedAttestationRef, ProposerSlashing, SignedAggregateAndProof, SignedContributionAndProof, + Slot, SyncCommitteeMessage, VoluntaryExit, }; + /// Used for Prometheus labels. 
/// /// We've used `total` for this value to align with Nimbus, as per: diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index a346a649f0..26ac02d91b 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -1,6 +1,8 @@ use crate::errors::BeaconChainError; use crate::{BeaconChainTypes, BeaconStore}; use bls::PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN; +use bls::{PublicKey, PublicKeyBytes}; +use fixed_bytes::FixedBytesExtended; use rayon::prelude::*; use smallvec::SmallVec; use ssz::{Decode, Encode}; @@ -9,7 +11,7 @@ use std::collections::HashMap; use std::marker::PhantomData; use store::{DBColumn, Error as StoreError, StoreItem, StoreOp}; use tracing::instrument; -use types::{BeaconState, FixedBytesExtended, Hash256, PublicKey, PublicKeyBytes}; +use types::{BeaconState, Hash256}; /// Provides a mapping of `validator_index -> validator_publickey`. /// @@ -244,10 +246,11 @@ impl DatabasePubkey { mod test { use super::*; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use bls::Keypair; use logging::create_test_tracing_subscriber; use std::sync::Arc; use store::HotColdDB; - use types::{EthSpec, Keypair, MainnetEthSpec}; + use types::{EthSpec, MainnetEthSpec}; type E = MainnetEthSpec; type T = EphemeralHarnessType; diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 0acb23d512..017c249d10 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -4,11 +4,10 @@ use beacon_chain::attestation_simulator::produce_unaggregated_attestation; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; use beacon_chain::{StateSkipConfig, WhenSlotSkipped, 
metrics}; +use bls::{AggregateSignature, Keypair}; use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; -use types::{ - AggregateSignature, Attestation, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot, -}; +use types::{Attestation, EthSpec, MainnetEthSpec, RelativeEpoch, Slot}; pub const VALIDATOR_COUNT: usize = 16; diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 706ffad3c1..7984ea4708 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -13,15 +13,17 @@ use beacon_chain::{ single_attestation_to_attestation, test_spec, }, }; +use bls::{AggregateSignature, Keypair, SecretKey}; +use fixed_bytes::FixedBytesExtended; use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; use int_to_bytes::int_to_bytes32; use state_processing::per_slot_processing; use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; +use typenum::Unsigned; use types::{ - Address, AggregateSignature, Attestation, AttestationRef, ChainSpec, Epoch, EthSpec, - FixedBytesExtended, ForkName, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, - SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, Unsigned, + Address, Attestation, AttestationRef, ChainSpec, Epoch, EthSpec, ForkName, Hash256, + MainnetEthSpec, SelectionProof, SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, signed_aggregate_and_proof::SignedAggregateAndProofRefMut, test_utils::generate_deterministic_keypair, }; diff --git a/beacon_node/beacon_chain/tests/blob_verification.rs b/beacon_node/beacon_chain/tests/blob_verification.rs index c42a2828c0..d1a0d87adf 100644 --- a/beacon_node/beacon_chain/tests/blob_verification.rs +++ b/beacon_node/beacon_chain/tests/blob_verification.rs @@ -7,6 +7,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, 
block_verification_types::AsBlock, }; +use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; use std::sync::{Arc, LazyLock}; use types::{blob_sidecar::FixedBlobSidecarList, *}; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 881885cef2..2644b74b28 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -13,6 +13,8 @@ use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, InvalidSignature, NotifyExecutionLayer, }; +use bls::{AggregateSignature, Keypair, Signature}; +use fixed_bytes::FixedBytesExtended; use logging::create_test_tracing_subscriber; use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 229ae1e199..be9b3b2fa1 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -9,6 +9,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, block_verification_types::AsBlock, }; +use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; use std::sync::{Arc, LazyLock}; use types::*; diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index c18af0bde7..2f97f10745 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -9,6 +9,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, test_spec, }, }; +use bls::Keypair; use state_processing::per_block_processing::errors::{ AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid, }; diff --git 
a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 0a5881e486..ee9cf511ea 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -7,8 +7,9 @@ use beacon_chain::test_utils::{ use beacon_chain::{ BlockError, ChainConfig, StateSkipConfig, WhenSlotSkipped, test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, - types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, + types::{Epoch, EthSpec, MinimalEthSpec}, }; +use bls::Keypair; use eth2::types::{StandardAttestationRewards, TotalAttestationRewards, ValidatorId}; use state_processing::{BlockReplayError, BlockReplayer}; use std::array::IntoIter; diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index 3b09921c15..db7f7dbdbb 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -4,6 +4,7 @@ use beacon_chain::{ persisted_custody::PersistedCustody, test_utils::{BeaconChainHarness, DiskHarnessType, test_spec}, }; +use bls::Keypair; use logging::create_test_tracing_subscriber; use operation_pool::PersistedOperationPool; use ssz::Encode; @@ -16,7 +17,7 @@ use store::{ }; use strum::IntoEnumIterator; use tempfile::{TempDir, tempdir}; -use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec, Slot}; +use types::{ChainSpec, Hash256, MainnetEthSpec, Slot}; type E = MainnetEthSpec; type Store = Arc, BeaconNodeBackend>>; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0733d901fc..c1c53c014c 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -25,11 +25,14 @@ use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, }; +use bls::{Keypair, Signature, SignatureBytes}; +use fixed_bytes::FixedBytesExtended; use 
logging::create_test_tracing_subscriber; use maplit::hashset; use rand::Rng; use rand::rngs::StdRng; use slot_clock::{SlotClock, TestingSlotClock}; +use ssz_types::VariableList; use state_processing::{BlockReplayer, state_advance::complete_state_advance}; use std::collections::HashMap; use std::collections::HashSet; diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 9dd12410fb..d2124c6641 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -2,6 +2,8 @@ use beacon_chain::sync_committee_verification::{Error as SyncCommitteeError, SyncCommitteeData}; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; +use bls::{AggregateSignature, Keypair, SecretKey}; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes32; use safe_arith::SafeArith; use state_processing::{ @@ -11,10 +13,11 @@ use state_processing::{ use std::sync::LazyLock; use store::{SignedContributionAndProof, SyncCommitteeMessage}; use tree_hash::TreeHash; +use typenum::Unsigned; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::{ - AggregateSignature, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, - SecretKey, Slot, SyncContributionData, SyncSelectionProof, SyncSubnetId, Unsigned, + Epoch, EthSpec, Hash256, MainnetEthSpec, Slot, SyncContributionData, SyncSelectionProof, + SyncSubnetId, }; pub type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index ec0e607d00..17d9c5f697 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -8,13 +8,14 @@ use beacon_chain::{ OP_POOL_DB_KEY, }, }; +use bls::Keypair; use operation_pool::PersistedOperationPool; use state_processing::EpochProcessingError; use 
state_processing::{per_slot_processing, per_slot_processing::Error as SlotProcessingError}; use std::sync::LazyLock; use types::{ - BeaconState, BeaconStateError, BlockImportSource, Checkpoint, EthSpec, Hash256, Keypair, - MinimalEthSpec, RelativeEpoch, Slot, + BeaconState, BeaconStateError, BlockImportSource, Checkpoint, EthSpec, Hash256, MinimalEthSpec, + RelativeEpoch, Slot, }; type E = MinimalEthSpec; diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 95732abeb5..521fc4ac97 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -2,8 +2,9 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::validator_monitor::{MISSED_BLOCK_LAG_SLOTS, ValidatorMonitorConfig}; +use bls::{Keypair, PublicKeyBytes}; use std::sync::LazyLock; -use types::{Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Hash256, MainnetEthSpec, Slot}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 48; diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 9b1f86360d..09bf3f48b4 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -5,6 +5,8 @@ edition = { workspace = true } authors = ["Sean Anderson "] [dependencies] +bls = { workspace = true } +context_deserialize = { workspace = true } eth2 = { workspace = true } ethereum_ssz = { workspace = true } lighthouse_version = { workspace = true } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index b486e77083..4fc6b3a379 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,9 +1,11 @@ +use bls::PublicKeyBytes; +use context_deserialize::ContextDeserialize; pub use eth2::Error; use eth2::types::beacon_response::EmptyMetadata; use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ - ContentType, ContextDeserialize, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, - ForkVersionedResponse, PublicKeyBytes, SignedValidatorRegistrationData, Slot, + ContentType, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, ForkVersionedResponse, + SignedValidatorRegistrationData, Slot, }; use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock}; use eth2::{ @@ -538,9 +540,10 @@ impl BuilderHttpClient { #[cfg(test)] mod tests { use super::*; + use bls::Signature; + use eth2::types::MainnetEthSpec; use eth2::types::builder_bid::{BuilderBid, BuilderBidFulu}; use eth2::types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use eth2::types::{MainnetEthSpec, Signature}; use mockito::{Matcher, Server, ServerGuard}; type E = MainnetEthSpec; diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 540f9dc0a0..c443e94574 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -10,6 +10,7 @@ alloy-primitives = 
{ workspace = true } alloy-rlp = { workspace = true } alloy-rpc-types-eth = { workspace = true } arc-swap = "1.6.0" +bls = { workspace = true } builder_client = { path = "../builder_client" } bytes = { workspace = true } eth2 = { workspace = true, features = ["events", "lighthouse"] } @@ -48,6 +49,7 @@ tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } triehash = "0.8.4" +typenum = { workspace = true } types = { workspace = true } warp = { workspace = true } zeroize = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 98da7dbf2c..f285640b21 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -20,8 +20,8 @@ use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ Address, BeaconBlockRef, ConsolidationRequest, EthSpec, ExecutionBlockHash, ExecutionPayload, - ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, - Uint256, VariableList, Withdrawal, Withdrawals, + ExecutionPayloadHeader, ExecutionPayloadRef, ForkName, Hash256, Transactions, Uint256, + Withdrawal, Withdrawals, }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 8f7564ace6..c421491f80 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -103,9 +103,10 @@ pub static LIGHTHOUSE_JSON_CLIENT_VERSION: LazyLock = /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
pub mod deposit_log { + use bls::{PublicKeyBytes, SignatureBytes}; use ssz::Decode; use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; - use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes}; + use types::{ChainSpec, DepositData, Hash256}; pub use eth2::lighthouse::DepositLog; @@ -1466,10 +1467,13 @@ mod test { use super::auth::JwtKey; use super::*; use crate::test_utils::{DEFAULT_JWT_SECRET, MockServer}; + use fixed_bytes::FixedBytesExtended; + use ssz_types::VariableList; use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{FixedBytesExtended, MainnetEthSpec, Unsigned}; + use typenum::Unsigned; + use types::MainnetEthSpec; struct Tester { server: MockServer, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index cc46070325..fc8eae015b 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1101,10 +1101,10 @@ impl TryFrom for ClientVersionV1 { #[cfg(test)] mod tests { + use bls::{PublicKeyBytes, SignatureBytes}; use ssz::Encode; use types::{ - ConsolidationRequest, DepositRequest, MainnetEthSpec, PublicKeyBytes, RequestType, - SignatureBytes, WithdrawalRequest, + ConsolidationRequest, DepositRequest, MainnetEthSpec, RequestType, WithdrawalRequest, }; use super::*; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index c2a31c2699..34b1832894 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -9,6 +9,7 @@ use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; use auth::{Auth, JwtKey, strip_prefix}; pub use block_hash::calculate_execution_block_hash; +use bls::{PublicKeyBytes, Signature}; use builder_client::BuilderHttpClient; pub use engine_api::EngineCapabilities; use engine_api::Error as 
ApiError; @@ -55,7 +56,7 @@ use types::{ use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, - FullPayload, ProposerPreparationData, PublicKeyBytes, Signature, Slot, + FullPayload, ProposerPreparationData, Slot, }; mod block_hash; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 7e0033d732..89d2994ce2 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -8,6 +8,7 @@ use crate::engines::ForkchoiceState; use alloy_consensus::TxEnvelope; use alloy_rpc_types_eth::Transaction as AlloyTransaction; use eth2::types::BlobsBundle; +use fixed_bytes::FixedBytesExtended; use kzg::{Kzg, KzgCommitment, KzgProof}; use parking_lot::Mutex; use rand::{Rng, SeedableRng, rngs::StdRng}; @@ -22,8 +23,8 @@ use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, - ExecutionPayloadGloas, ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, - KzgProofs, Transaction, Transactions, Uint256, + ExecutionPayloadGloas, ExecutionPayloadHeader, ForkName, Hash256, KzgProofs, Transaction, + Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; @@ -41,7 +42,7 @@ pub enum Block { PoS(ExecutionPayload), } -pub fn mock_el_extra_data() -> types::VariableList { +pub fn mock_el_extra_data() -> VariableList { "block gen was here".as_bytes().to_vec().try_into().unwrap() } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 589b29193c..1d4f36b62c 100644 --- 
a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,5 +1,6 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; +use bls::{PublicKeyBytes, SecretKey, Signature}; use bytes::Bytes; use eth2::beacon_response::ForkVersionedResponse; use eth2::types::PublishBlockRequest; @@ -15,6 +16,7 @@ use fork_choice::ForkchoiceUpdateParameters; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use ssz::Encode; +use ssz_types::VariableList; use std::collections::HashMap; use std::fmt::Debug; use std::future::Future; @@ -26,6 +28,7 @@ use tempfile::NamedTempFile; use tokio_stream::StreamExt; use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; +use types::ExecutionBlockHash; use types::builder_bid::{ BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, BuilderBidFulu, BuilderBidGloas, SignedBuilderBid, @@ -33,10 +36,8 @@ use types::builder_bid::{ use types::{ Address, BeaconState, ChainSpec, Epoch, EthSpec, ExecPayload, ExecutionPayload, ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, Hash256, - PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, - SignedValidatorRegistrationData, Slot, Uint256, + SignedBlindedBeaconBlock, SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; -use types::{ExecutionBlockHash, SecretKey}; use warp::reply::{self, Reply}; use warp::{Filter, Rejection}; @@ -72,7 +73,7 @@ impl Operation { } } -pub fn mock_builder_extra_data() -> types::VariableList { +pub fn mock_builder_extra_data() -> VariableList { "mock_builder".as_bytes().to_vec().try_into().unwrap() } diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 73c998956c..c69edb8f39 100644 --- 
a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -5,9 +5,10 @@ use crate::{ *, }; use alloy_primitives::B256 as H256; +use fixed_bytes::FixedBytesExtended; use kzg::Kzg; use tempfile::NamedTempFile; -use types::{FixedBytesExtended, MainnetEthSpec}; +use types::MainnetEthSpec; pub struct MockExecutionLayer { pub server: MockServer, diff --git a/beacon_node/execution_layer/src/versioned_hashes.rs b/beacon_node/execution_layer/src/versioned_hashes.rs index 97c3100de9..21cfd5a322 100644 --- a/beacon_node/execution_layer/src/versioned_hashes.rs +++ b/beacon_node/execution_layer/src/versioned_hashes.rs @@ -1,6 +1,7 @@ use alloy_consensus::TxEnvelope; use alloy_rlp::Decodable; -use types::{EthSpec, ExecutionPayloadRef, Hash256, Unsigned, VersionedHash}; +use typenum::Unsigned; +use types::{EthSpec, ExecutionPayloadRef, Hash256, VersionedHash}; #[derive(Debug)] pub enum Error { diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 8f6f3516fc..124231a57e 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] +bls = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } int_to_bytes = { workspace = true } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index dfa4daab9a..349b8f19c8 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -1,12 +1,10 @@ use crate::common::genesis_deposits; +use bls::{Keypair, PublicKey, Signature}; use ethereum_hashing::hash; use rayon::prelude::*; use ssz::Encode; use state_processing::initialize_beacon_state_from_eth1; -use types::{ - BeaconState, ChainSpec, DepositData, EthSpec, ExecutionPayloadHeader, Hash256, Keypair, - PublicKey, Signature, -}; +use types::{BeaconState, ChainSpec, 
DepositData, EthSpec, ExecutionPayloadHeader, Hash256}; pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 3aa9c8351c..571dab1027 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -8,14 +8,17 @@ autotests = false # using a single test binary com [dependencies] beacon_chain = { workspace = true } beacon_processor = { workspace = true } +bls = { workspace = true } bs58 = "0.4.0" bytes = { workspace = true } +context_deserialize = { workspace = true } directory = { workspace = true } either = { workspace = true } eth2 = { workspace = true, features = ["lighthouse"] } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } futures = { workspace = true } health_metrics = { workspace = true } hex = { workspace = true } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 64f5451560..ea8b47f91e 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -6,12 +6,13 @@ use eth2::beacon_response::{ExecutionOptimisticFinalizedMetadata, UnversionedRes use eth2::types::BlockId as CoreBlockId; use eth2::types::DataColumnIndicesQuery; use eth2::types::{BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery}; +use fixed_bytes::FixedBytesExtended; use std::fmt; use std::str::FromStr; use std::sync::Arc; use types::{ - BlobSidecarList, DataColumnSidecarList, EthSpec, FixedBytesExtended, ForkName, Hash256, - SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, + BlobSidecarList, DataColumnSidecarList, EthSpec, ForkName, Hash256, SignedBeaconBlock, + SignedBlindedBeaconBlock, Slot, }; use warp::Rejection; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 4ed02f3cbf..58cd2a3bdb 100644 --- a/beacon_node/http_api/src/lib.rs +++ 
b/beacon_node/http_api/src/lib.rs @@ -48,12 +48,13 @@ use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; use builder_states::get_next_withdrawals; use bytes::Bytes; +use context_deserialize::ContextDeserialize; use directory::DEFAULT_ROOT_DIR; use eth2::StatusCode; use eth2::lighthouse::sync_state::SyncState; use eth2::types::{ - self as api_types, BroadcastValidation, ContextDeserialize, EndpointVersion, ForkChoice, - ForkChoiceExtraData, ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, ValidatorId, + self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceExtraData, + ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, ValidatorId, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs index 9cf1f1a33d..8baf7c5245 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -9,6 +9,7 @@ use crate::{StateId, attester_duties, proposer_duties, sync_committees}; use beacon_chain::attestation_verification::VerifiedAttestation; use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes}; +use bls::PublicKeyBytes; use eth2::StatusCode; use eth2::types::{ Accept, BeaconCommitteeSubscription, EndpointVersion, Failure, GenericResponse, @@ -23,7 +24,7 @@ use tokio::sync::mpsc::{Sender, UnboundedSender}; use tokio::sync::oneshot; use tracing::{debug, error, info, warn}; use types::{ - BeaconState, Epoch, EthSpec, ProposerPreparationData, PublicKeyBytes, SignedAggregateAndProof, + BeaconState, Epoch, EthSpec, ProposerPreparationData, SignedAggregateAndProof, SignedContributionAndProof, SignedValidatorRegistrationData, Slot, SyncContributionData, ValidatorSubscription, }; diff --git 
a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 33f462fa5e..357b78cf41 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -6,13 +6,12 @@ use beacon_chain::{ }; use eth2::reqwest::{Response, StatusCode}; use eth2::types::{BroadcastValidation, PublishBlockRequest}; +use fixed_bytes::FixedBytesExtended; use http_api::test_utils::InteractiveTester; use http_api::{Config, ProvenancedBlock, publish_blinded_block, publish_block, reconstruct_block}; use std::collections::HashSet; use std::sync::Arc; -use types::{ - ColumnIndex, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot, -}; +use types::{ColumnIndex, Epoch, EthSpec, ForkName, Hash256, MainnetEthSpec, Slot}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 50cf866b6a..b96c8bd112 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -4,13 +4,15 @@ use beacon_chain::{ StateSkipConfig, test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee}, }; +use bls::PublicKey; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use execution_layer::test_utils::generate_genesis_header; +use fixed_bytes::FixedBytesExtended; use genesis::{InteropGenesisBuilder, bls_withdrawal_credentials}; use http_api::test_utils::*; use std::collections::HashSet; use types::{ - Address, ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, MinimalEthSpec, Slot, + Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, }; @@ -392,7 +394,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { fn withdrawal_credentials_fn<'a>( index: usize, - _: &'a 
types::PublicKey, + _: &'a PublicKey, spec: &'a ChainSpec, ) -> Hash256 { // It is a bit inefficient to regenerate the whole keypair here, but this is a workaround. diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 83cb70a7a3..0119a7645c 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -11,6 +11,7 @@ use beacon_processor::{Work, WorkEvent, work_reprocessing_queue::ReprocessQueueM use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; +use fixed_bytes::FixedBytesExtended; use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; use slot_clock::SlotClock; @@ -21,8 +22,8 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use types::{ - Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, FixedBytesExtended, ForkName, - Hash256, MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, Uint256, + Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, + MinimalEthSpec, ProposerPreparationData, Slot, Uint256, }; type E = MainnetEthSpec; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index a86cc4f4ef..f8eba0ee2b 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -6,6 +6,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec, }, }; +use bls::{AggregateSignature, Keypair, PublicKeyBytes, Signature, SignatureBytes}; use eth2::{ BeaconNodeHttpClient, Error, Error::ServerMessage, @@ -21,6 +22,7 @@ use execution_layer::test_utils::{ DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_GAS_LIMIT, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, MockBuilder, Operation, mock_builder_extra_data, mock_el_extra_data, }; +use fixed_bytes::FixedBytesExtended; 
use futures::FutureExt; use futures::stream::{Stream, StreamExt}; use http_api::{ @@ -34,6 +36,7 @@ use operation_pool::attestation_storage::CheckpointKey; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; +use ssz::BitList; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use state_processing::state_advance::partial_state_advance; @@ -43,9 +46,8 @@ use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ - AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, Hash256, Keypair, - MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, SingleAttestation, Slot, - attestation::AttestationBase, + Domain, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, RelativeEpoch, SelectionProof, + SignedRoot, SingleAttestation, Slot, attestation::AttestationBase, }; type E = MainnetEthSpec; diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index d2431cca04..efb6f27dc5 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -11,6 +11,7 @@ libp2p-websocket = [] [dependencies] alloy-primitives = { workspace = true } alloy-rlp = { workspace = true } +bls = { workspace = true } bytes = { workspace = true } delay_map = { workspace = true } directory = { workspace = true } @@ -20,6 +21,7 @@ either = { workspace = true } eth2 = { workspace = true, features = ["lighthouse"] } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } fnv = { workspace = true } futures = { workspace = true } gossipsub = { workspace = true } @@ -49,6 +51,7 @@ tokio = { workspace = true } tokio-util = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } unsigned-varint = { 
version = "0.8", features = ["codec"] } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 3589882ae9..a8c87523a5 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1231,7 +1231,8 @@ mod tests { use super::*; use crate::rpc::methods::{MetaData, MetaDataV3}; use libp2p::identity::secp256k1; - use types::{BitVector, MinimalEthSpec, SubnetId}; + use ssz_types::BitVector; + use types::{MinimalEthSpec, SubnetId}; type E = MinimalEthSpec; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index dfa8b374e9..3cfe2b3c3b 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -2978,7 +2978,8 @@ mod tests { use proptest::prelude::*; use std::collections::HashSet; use tokio::runtime::Runtime; - use types::{DataColumnSubnetId, Unsigned}; + use typenum::Unsigned; + use types::DataColumnSubnetId; use types::{EthSpec, MainnetEthSpec as E}; #[derive(Clone, Debug)] diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 5b3574d48a..48a29699c8 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -908,11 +908,12 @@ mod tests { use super::*; use crate::rpc::protocol::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; + use bls::Signature; + use fixed_bytes::FixedBytesExtended; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, - DataColumnsByRootIdentifier, EmptyBlock, Epoch, FixedBytesExtended, FullPayload, - KzgCommitment, KzgProof, Signature, SignedBeaconBlockHeader, Slot, - blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, + DataColumnsByRootIdentifier, EmptyBlock, Epoch, FullPayload, 
KzgCommitment, KzgProof, + SignedBeaconBlockHeader, Slot, blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, }; type Spec = types::MainnetEthSpec; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 08085f3c27..366515d42f 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -1,5 +1,6 @@ use super::methods::*; use crate::rpc::codec::SSZSnappyInboundCodec; +use bls::Signature; use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, StreamExt}; @@ -20,7 +21,7 @@ use types::{ EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, + MainnetEthSpec, MinimalEthSpec, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 3f57406fc7..eea8782b2d 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -3,7 +3,8 @@ mod pubsub; mod subnet; mod topics; -use types::{BitVector, EthSpec}; +use ssz_types::BitVector; +use types::EthSpec; pub type EnrAttestationBitfield = BitVector<::SubnetBitfieldLength>; pub type EnrSyncCommitteeBitfield = BitVector<::SyncCommitteeSubnetCount>; diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index cfdee907b9..0c988f35c3 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -2,7 +2,8 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; 
use serde::{Deserialize, Serialize}; use std::collections::HashSet; use strum::AsRefStr; -use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; +use typenum::Unsigned; +use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId}; use crate::Subnet; diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index d04f1211cf..412ee5aca5 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -1,4 +1,5 @@ #![cfg(test)] +use fixed_bytes::FixedBytesExtended; use lighthouse_network::Enr; use lighthouse_network::Multiaddr; use lighthouse_network::service::Network as LibP2PService; @@ -9,10 +10,7 @@ use std::sync::Weak; use tokio::runtime::Runtime; use tracing::{Instrument, debug, error, info_span}; use tracing_subscriber::EnvFilter; -use types::{ - ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, - MinimalEthSpec, -}; +use types::{ChainSpec, EnrForkId, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec}; type E = MinimalEthSpec; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 8613edf5f5..599fcd242b 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -3,6 +3,8 @@ use crate::common; use crate::common::spec_with_all_forks_enabled; use crate::common::{Protocol, build_tracing_subscriber}; +use bls::Signature; +use fixed_bytes::FixedBytesExtended; use lighthouse_network::rpc::{RequestType, methods::*}; use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{NetworkEvent, ReportSource, Response}; @@ -16,8 +18,8 @@ use tracing::{Instrument, debug, error, info_span, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BlobSidecar, 
ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, - EthSpec, FixedBytesExtended, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, - Signature, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + EthSpec, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, }; type E = MinimalEthSpec; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index b60c5e6dbf..bf26196576 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -22,6 +22,7 @@ delay_map = { workspace = true } educe = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } fnv = { workspace = true } futures = { workspace = true } hex = { workspace = true } @@ -45,6 +46,7 @@ tokio = { workspace = true } tokio-stream = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a416f5cb12..0869b442ae 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -37,8 +37,9 @@ use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use tracing::{debug, error, info, trace, warn}; +use typenum::Unsigned; use types::{ - EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, Unsigned, + EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, }; diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index ebf5c1829e..c571a40485 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,5 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::{EthSpec, FixedBytesExtended, Hash256}; +use 
fixed_bytes::FixedBytesExtended; +use types::{EthSpec, Hash256}; use lighthouse_network::rpc::{StatusMessage, methods::StatusMessageV2}; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. diff --git a/beacon_node/network/src/sync/block_lookups/parent_chain.rs b/beacon_node/network/src/sync/block_lookups/parent_chain.rs index 551a0261f2..5deea1dd94 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_chain.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_chain.rs @@ -118,7 +118,8 @@ pub(crate) fn find_oldest_fork_ancestor( #[cfg(test)] mod tests { use super::{Node, compute_parent_chains, find_oldest_fork_ancestor}; - use types::{FixedBytesExtended, Hash256}; + use fixed_bytes::FixedBytesExtended; + use types::Hash256; fn h(n: u64) -> Hash256 { Hash256::from_low_u64_be(n) diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index eeddb53c23..6fab7a752a 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -9,9 +9,11 @@ portable = ["beacon_chain/portable"] [dependencies] bitvec = { workspace = true } +bls = { workspace = true } educe = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } itertools = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } @@ -20,6 +22,8 @@ rayon = { workspace = true } serde = { workspace = true } state_processing = { workspace = true } store = { workspace = true } +superstruct = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index f28d8f278a..897a7e5ecc 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -1,12 +1,13 @@ use 
crate::attestation_storage::{CompactAttestationRef, CompactIndexedAttestation}; use crate::max_cover::MaxCover; use crate::reward_cache::RewardCache; +use ssz::BitList; use state_processing::common::{ attesting_indices_base::get_attesting_indices, base, get_attestation_participation_flag_indices, }; use std::collections::HashMap; use types::{ - Attestation, BeaconState, BitList, ChainSpec, EthSpec, + Attestation, BeaconState, ChainSpec, EthSpec, beacon_state::BeaconStateBase, consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, }; diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 4f1b8b81fe..9094c9cd4d 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -1,11 +1,13 @@ use crate::AttestationStats; +use bls::AggregateSignature; use itertools::Itertools; +use ssz::{BitList, BitVector}; use std::collections::{BTreeMap, HashMap, HashSet}; +use superstruct::superstruct; +use typenum::Unsigned; use types::{ - AggregateSignature, Attestation, AttestationData, BeaconState, BitList, BitVector, Checkpoint, - Epoch, EthSpec, Hash256, Slot, Unsigned, + Attestation, AttestationData, BeaconState, Checkpoint, Epoch, EthSpec, Hash256, Slot, attestation::{AttestationBase, AttestationElectra}, - superstruct, }; #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index e92d381bac..00361450a5 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -35,12 +35,12 @@ use state_processing::{SigVerifiedOp, VerifyOperation}; use std::collections::{HashMap, HashSet, hash_map::Entry}; use std::marker::PhantomData; use std::ptr; +use typenum::Unsigned; use types::{ AbstractExecPayload, Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, 
EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, sync_aggregate::Error as SyncAggregateError, - typenum::Unsigned, }; type SyncContributions = RwLock>>>; @@ -793,6 +793,8 @@ mod release_tests { use beacon_chain::test_utils::{ BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, test_spec, }; + use bls::Keypair; + use fixed_bytes::FixedBytesExtended; use maplit::hashset; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::{VerifyOperation, common::get_attesting_indices_from_state}; diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index ee45c8dd05..241b5fec53 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -11,6 +11,7 @@ use state_processing::SigVerifiedOp; use std::collections::HashSet; use std::mem; use store::{DBColumn, Error as StoreError, StoreItem}; +use superstruct::superstruct; use types::attestation::AttestationOnDisk; use types::*; diff --git a/beacon_node/operation_pool/src/reward_cache.rs b/beacon_node/operation_pool/src/reward_cache.rs index adedcb5e39..1e3fc4cf2d 100644 --- a/beacon_node/operation_pool/src/reward_cache.rs +++ b/beacon_node/operation_pool/src/reward_cache.rs @@ -1,8 +1,7 @@ use crate::OpPoolError; use bitvec::vec::BitVec; -use types::{ - BeaconState, BeaconStateError, Epoch, EthSpec, FixedBytesExtended, Hash256, ParticipationFlags, -}; +use fixed_bytes::FixedBytesExtended; +use types::{BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, ParticipationFlags}; #[derive(Debug, PartialEq, Eq, Clone)] struct Initialization { diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0f169ffaad..26dd3b6642 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -6,6 +6,7 @@ use beacon_chain::chain_config::{ }; use 
beacon_chain::custody_context::NodeCustodyType; use beacon_chain::graffiti_calculator::GraffitiOrigin; +use bls::PublicKeyBytes; use clap::{ArgMatches, Id, parser::ValueSource}; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use clap_utils::{parse_flag, parse_required}; @@ -29,7 +30,7 @@ use std::str::FromStr; use std::time::Duration; use tracing::{error, info, warn}; use types::graffiti::GraffitiString; -use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes}; +use types::{Checkpoint, Epoch, EthSpec, Hash256}; const PURGE_DB_CONFIRMATION: &str = "confirm"; diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 61a8474a73..50028fe73f 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -15,11 +15,13 @@ db-key = "0.0.5" directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } itertools = { workspace = true } leveldb = { version = "0.8.6", optional = true, default-features = false } logging = { workspace = true } lru = { workspace = true } metrics = { workspace = true } +milhouse = { workspace = true } parking_lot = { workspace = true } redb = { version = "2.1.3", optional = true } safe_arith = { workspace = true } @@ -31,6 +33,7 @@ strum = { workspace = true } superstruct = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } xdelta3 = { workspace = true } zstd = { workspace = true } diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index ee043c14f4..9c8114e0c1 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -16,7 +16,9 @@ //! of elements. To find the chunk index of a vector index: `cindex = vindex / chunk_size`. 
use self::UpdatePattern::*; use crate::*; +use milhouse::{List, Vector}; use ssz::{Decode, Encode}; +use typenum::Unsigned; use types::historical_summary::HistoricalSummary; /// Description of how a `BeaconState` field is updated during state processing. @@ -784,6 +786,7 @@ impl From for ChunkError { #[cfg(test)] mod test { use super::*; + use fixed_bytes::FixedBytesExtended; use types::MainnetEthSpec as TestSpec; use types::*; diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index c0f15f2417..05aa016ec1 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,5 +1,4 @@ use crate::hdiff::HierarchyConfig; -use crate::superstruct; use crate::{DBColumn, Error, StoreItem}; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; @@ -7,6 +6,7 @@ use ssz_derive::{Decode, Encode}; use std::io::{Read, Write}; use std::num::NonZeroUsize; use strum::{Display, EnumString, EnumVariantNames}; +use superstruct::superstruct; use types::EthSpec; use types::non_zero_usize::new_non_zero_usize; use zstd::{Decoder, Encoder}; diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 8fdd5812ea..6b8c615631 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -3,6 +3,7 @@ use crate::hot_cold_store::{BytesKey, HotColdDBError}; use crate::{ ColumnIter, ColumnKeyIter, DBColumn, Error, KeyValueStoreOp, get_key_for_col, metrics, }; +use fixed_bytes::FixedBytesExtended; use leveldb::{ compaction::Compaction, database::{ @@ -16,7 +17,7 @@ use leveldb::{ use std::collections::HashSet; use std::marker::PhantomData; use std::path::Path; -use types::{EthSpec, FixedBytesExtended, Hash256}; +use types::{EthSpec, Hash256}; use super::interface::WriteOptions; diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index f62647ae54..6da99b7bd6 100644 --- a/beacon_node/store/src/errors.rs +++ 
b/beacon_node/store/src/errors.rs @@ -6,7 +6,7 @@ use crate::{DBColumn, hdiff}; use leveldb::error::Error as LevelDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot, milhouse}; +use types::{BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; pub type Result = std::result::Result; diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index 3e20aab9bf..323c87a914 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -2,6 +2,7 @@ use crate::{DBColumn, StoreConfig, StoreItem, metrics}; use bls::PublicKeyBytes; use itertools::Itertools; +use milhouse::List; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -11,7 +12,7 @@ use std::str::FromStr; use std::sync::LazyLock; use superstruct::superstruct; use types::historical_summary::HistoricalSummary; -use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, List, Slot, Validator}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, Validator}; static EMPTY_PUBKEY: LazyLock = LazyLock::new(PublicKeyBytes::empty); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8f5eead8c2..c413719174 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -18,6 +18,7 @@ use crate::{ metrics::{self, COLD_METRIC, HOT_METRIC}, parse_data_column_key, }; +use fixed_bytes::FixedBytesExtended; use itertools::{Itertools, process_results}; use lru::LruCache; use parking_lot::{Mutex, RwLock}; @@ -38,6 +39,7 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; use tracing::{debug, error, info, instrument, warn}; +use typenum::Unsigned; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; use types::*; use zstd::{Decoder, Encoder}; diff --git 
a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 88d509731c..e2b666e597 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -2,9 +2,9 @@ use crate::errors::HandleUnavailable; use crate::{Error, HotColdDB, ItemStore}; use std::borrow::Cow; use std::marker::PhantomData; +use typenum::Unsigned; use types::{ BeaconState, BeaconStateError, BlindedPayload, EthSpec, Hash256, SignedBeaconBlock, Slot, - typenum::Unsigned, }; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. @@ -387,8 +387,8 @@ mod test { use crate::{MemoryStore, StoreConfig as Config}; use beacon_chain::test_utils::BeaconChainHarness; use beacon_chain::types::MainnetEthSpec; + use fixed_bytes::FixedBytesExtended; use std::sync::Arc; - use types::FixedBytesExtended; fn get_state() -> BeaconState { let harness = BeaconChainHarness::builder(E::default()) diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 13b0dfab9f..8ee37169ac 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -3,11 +3,12 @@ use crate::chunked_vector::{ load_variable_list_from_db, load_vector_from_db, }; use crate::{DBColumn, Error, KeyValueStore, KeyValueStoreOp}; -use ssz::{Decode, DecodeError, Encode}; +use milhouse::{List, Vector}; +use ssz::{BitVector, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; +use superstruct::superstruct; use types::historical_summary::HistoricalSummary; -use types::superstruct; use types::*; /// DEPRECATED Lightweight variant of the `BeaconState` that is stored in the database. 
diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index 00c74a1303..d0a3e487c4 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -6,6 +6,7 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +bls = { workspace = true } eth2_keystore = { workspace = true } eth2_wallet = { workspace = true } filesystem = { workspace = true } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 596d50de42..bffdfcc38b 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -4,6 +4,7 @@ //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. use crate::{default_keystore_password_path, read_password_string, write_file_via_temporary}; +use bls::PublicKey; use eth2_keystore::Keystore; use regex::Regex; use serde::{Deserialize, Serialize}; @@ -12,7 +13,7 @@ use std::fs::{self, File, create_dir_all}; use std::io; use std::path::{Path, PathBuf}; use tracing::error; -use types::{Address, PublicKey, graffiti::GraffitiString}; +use types::{Address, graffiti::GraffitiString}; use validator_dir::VOTING_KEYSTORE_FILE; use zeroize::Zeroizing; diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 53f1bc3e2b..76c18ef242 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -10,6 +10,7 @@ build = "build.rs" alloy-dyn-abi = { workspace = true } alloy-json-abi = { workspace = true } alloy-primitives = { workspace = true } +bls = { workspace = true } ethereum_ssz = { workspace = true } serde_json = { workspace = true } tree_hash = { workspace = true } diff --git a/common/deposit_contract/src/lib.rs b/common/deposit_contract/src/lib.rs index e5f11bb89c..6200a4ca15 100644 --- 
a/common/deposit_contract/src/lib.rs +++ b/common/deposit_contract/src/lib.rs @@ -1,9 +1,10 @@ use alloy_dyn_abi::{DynSolValue, JsonAbiExt}; use alloy_json_abi::JsonAbi; use alloy_primitives::FixedBytes; +use bls::{PublicKeyBytes, SignatureBytes}; use ssz::{Decode, DecodeError as SszDecodeError, Encode}; use tree_hash::TreeHash; -use types::{DepositData, Hash256, PublicKeyBytes, SignatureBytes}; +use types::{DepositData, Hash256}; #[derive(Debug)] pub enum Error { @@ -126,10 +127,8 @@ pub fn decode_eth1_tx_data(bytes: &[u8], amount: u64) -> Result<(DepositData, Ha #[cfg(test)] mod tests { use super::*; - use types::{ - ChainSpec, EthSpec, Keypair, MinimalEthSpec, Signature, - test_utils::generate_deterministic_keypair, - }; + use bls::{Keypair, Signature}; + use types::{ChainSpec, EthSpec, MinimalEthSpec, test_utils::generate_deterministic_keypair}; type E = MinimalEthSpec; diff --git a/common/eip_3076/Cargo.toml b/common/eip_3076/Cargo.toml index 851ef26238..058e1fd1a0 100644 --- a/common/eip_3076/Cargo.toml +++ b/common/eip_3076/Cargo.toml @@ -11,7 +11,9 @@ json = ["dep:serde_json"] [dependencies] arbitrary = { workspace = true, features = ["derive"], optional = true } +bls = { workspace = true } ethereum_serde_utils = { workspace = true } +fixed_bytes = { workspace = true } serde = { workspace = true } serde_json = { workspace = true, optional = true } types = { workspace = true } diff --git a/common/eip_3076/src/lib.rs b/common/eip_3076/src/lib.rs index 2d47a77de4..cdd05d7b1e 100644 --- a/common/eip_3076/src/lib.rs +++ b/common/eip_3076/src/lib.rs @@ -1,9 +1,10 @@ +use bls::PublicKeyBytes; use serde::{Deserialize, Serialize}; use std::cmp::max; use std::collections::{HashMap, HashSet}; #[cfg(feature = "json")] use std::io; -use types::{Epoch, Hash256, PublicKeyBytes, Slot}; +use types::{Epoch, Hash256, Slot}; #[derive(Debug)] pub enum Error { @@ -170,9 +171,9 @@ impl Interchange { #[cfg(test)] mod tests { use super::*; + use fixed_bytes::FixedBytesExtended; use 
std::fs::File; use tempfile::tempdir; - use types::FixedBytesExtended; fn get_interchange() -> Interchange { Interchange { diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index ba4bcd3649..da8aba5ded 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -10,6 +10,7 @@ lighthouse = ["proto_array", "eth2_keystore", "eip_3076", "zeroize"] events = ["reqwest-eventsource", "futures", "futures-util"] [dependencies] +bls = { workspace = true } context_deserialize = { workspace = true } educe = { workspace = true } eip_3076 = { workspace = true, optional = true } @@ -28,6 +29,7 @@ sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } ssz_types = { workspace = true } +superstruct = { workspace = true } types = { workspace = true } zeroize = { workspace = true, optional = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 4e2109be04..820d817d9d 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -28,6 +28,8 @@ pub use sensitive_url::SensitiveUrl; use self::mixin::{RequestAccept, ResponseOptional}; use self::types::*; +use bls::SignatureBytes; +use context_deserialize::ContextDeserialize; use educe::Educe; #[cfg(feature = "events")] use futures::Stream; diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 8c9d3397a8..3c850fcb05 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -1,5 +1,6 @@ use super::types::*; use crate::{Error, success_or_error}; +use bls::PublicKeyBytes; use reqwest::{ IntoUrl, header::{HeaderMap, HeaderValue}, diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 0290bdd0b7..c54252b9e3 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -1,6 +1,7 @@ +use bls::PublicKeyBytes; use eth2_keystore::Keystore; use 
serde::{Deserialize, Serialize}; -use types::{Address, Graffiti, PublicKeyBytes}; +use types::{Address, Graffiti}; use zeroize::Zeroizing; pub use eip_3076::Interchange; diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 8e1d90f8f9..07f8421dc5 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -1,6 +1,7 @@ pub use crate::lighthouse::Health; pub use crate::lighthouse_vc::std_types::*; pub use crate::types::{GenericResponse, VersionData}; +use bls::{PublicKey, PublicKeyBytes}; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; use std::path::PathBuf; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 5aa3de5e17..aace8f936c 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -7,6 +7,8 @@ use crate::{ CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, Error as ServerError, }; +use bls::{PublicKeyBytes, SecretKey, Signature, SignatureBytes}; +use context_deserialize::ContextDeserialize; use mediatype::{MediaType, MediaTypeList, names}; use reqwest::header::HeaderMap; use serde::{Deserialize, Deserializer, Serialize}; @@ -18,6 +20,7 @@ use std::fmt::{self, Display}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use superstruct::superstruct; #[cfg(test)] use test_random_derive::TestRandom; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index ec5b0cc1d7..416ffb1975 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -10,6 +10,7 @@ build = "build.rs" bytes = { workspace = true } discv5 = { workspace = true } eth2_config = { workspace = true } +fixed_bytes = { workspace = true } kzg = { workspace = true } pretty_reqwest_error = { workspace = true } reqwest = { workspace = true } diff --git a/common/eth2_network_config/src/lib.rs 
b/common/eth2_network_config/src/lib.rs index 12de21239a..16ee45e524 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -464,9 +464,10 @@ fn parse_state_download_url(url: &str) -> Result { #[cfg(test)] mod tests { use super::*; + use fixed_bytes::FixedBytesExtended; use ssz::Encode; use tempfile::Builder as TempBuilder; - use types::{Eth1Data, FixedBytesExtended, GnosisEthSpec, MainnetEthSpec}; + use types::{Eth1Data, GnosisEthSpec, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index bae36789bb..ab495242e4 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -1,5 +1,5 @@ use crate::{Error as DirError, ValidatorDir}; -use bls::get_withdrawal_credentials; +use bls::{Keypair, Signature, get_withdrawal_credentials}; use deposit_contract::{Error as DepositError, encode_eth1_tx_data}; use eth2_keystore::{Error as KeystoreError, Keystore, KeystoreBuilder, PlainText}; use filesystem::create_with_600_perms; @@ -7,7 +7,7 @@ use rand::{Rng, distr::Alphanumeric}; use std::fs::{File, create_dir_all}; use std::io::{self, Write}; use std::path::{Path, PathBuf}; -use types::{ChainSpec, DepositData, Hash256, Keypair, Signature}; +use types::{ChainSpec, DepositData, Hash256}; /// The `Alphanumeric` crate only generates a-z, A-Z, 0-9, therefore it has a range of 62 /// characters. 
diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index 8b50ea6687..0799897a70 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -2,6 +2,7 @@ use crate::builder::{ ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE, keystore_password_path, }; +use bls::Keypair; use deposit_contract::decode_eth1_tx_data; use educe::Educe; use eth2_keystore::{Error as KeystoreError, Keystore, PlainText}; @@ -10,7 +11,7 @@ use std::fs::{File, read, write}; use std::io; use std::path::{Path, PathBuf}; use tree_hash::TreeHash; -use types::{DepositData, Hash256, Keypair}; +use types::{DepositData, Hash256}; /// The file used to save the Eth1 transaction hash from a deposit. pub const ETH1_DEPOSIT_TX_HASH_FILE: &str = "eth1-deposit-tx-hash.txt"; diff --git a/common/validator_dir/tests/tests.rs b/common/validator_dir/tests/tests.rs index 7d9730ebd3..ede80c244e 100644 --- a/common/validator_dir/tests/tests.rs +++ b/common/validator_dir/tests/tests.rs @@ -1,10 +1,11 @@ #![cfg(not(debug_assertions))] +use bls::Keypair; use eth2_keystore::{Keystore, KeystoreBuilder, PlainText}; use std::fs::{self, File}; use std::path::Path; use tempfile::{TempDir, tempdir}; -use types::{EthSpec, Keypair, MainnetEthSpec, test_utils::generate_deterministic_keypair}; +use types::{EthSpec, MainnetEthSpec, test_utils::generate_deterministic_keypair}; use validator_dir::{ Builder, BuilderError, ETH1_DEPOSIT_DATA_FILE, ETH1_DEPOSIT_TX_HASH_FILE, VOTING_KEYSTORE_FILE, ValidatorDir, WITHDRAWAL_KEYSTORE_FILE, diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 0a244c2ba1..a07aa38aa5 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -8,6 +8,7 @@ edition = { workspace = true } [dependencies] ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = 
true } logging = { workspace = true } metrics = { workspace = true } proto_array = { workspace = true } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 6565e7cdaf..9a8cae0c36 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,5 +1,6 @@ use crate::metrics::{self, scrape_for_metrics}; use crate::{ForkChoiceStore, InvalidationOperation}; +use fixed_bytes::FixedBytesExtended; use logging::crit; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, @@ -19,7 +20,7 @@ use tracing::{debug, instrument, warn}; use types::{ AbstractExecPayload, AttestationShufflingId, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - FixedBytesExtended, Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, + Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, consts::bellatrix::INTERVALS_PER_SLOT, }; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 67b792ef0d..d3a84ee85b 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -7,6 +7,7 @@ use beacon_chain::{ BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError, StateSkipConfig, WhenSlotSkipped, }; +use fixed_bytes::FixedBytesExtended; use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, }; @@ -17,9 +18,9 @@ use std::time::Duration; use store::MemoryStore; use types::SingleAttestation; use types::{ - BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, - ForkName, Hash256, IndexedAttestation, MainnetEthSpec, RelativeEpoch, SignedBeaconBlock, Slot, - SubnetId, test_utils::generate_deterministic_keypair, + BeaconBlockRef, BeaconState, ChainSpec, 
Checkpoint, Epoch, EthSpec, ForkName, Hash256, + IndexedAttestation, MainnetEthSpec, RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, + test_utils::generate_deterministic_keypair, }; pub type E = MainnetEthSpec; diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index bd6757c0fa..782610e0d3 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -11,6 +11,7 @@ path = "src/bin.rs" [dependencies] ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 43a7e3b77f..e9deb6759f 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -5,11 +5,12 @@ mod votes; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ - AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, FixedBytesExtended, - Hash256, MainnetEthSpec, Slot, + AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + MainnetEthSpec, Slot, }; pub use execution_status::*; diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index de84fbdd12..d20eaacb99 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -1,4 +1,4 @@ -use types::FixedBytesExtended; +use fixed_bytes::FixedBytesExtended; use super::*; diff --git a/consensus/proto_array/src/proto_array.rs 
b/consensus/proto_array/src/proto_array.rs index 1d78ce9f44..5bfcdae463 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,5 +1,6 @@ use crate::error::InvalidBestNodeInfo; use crate::{Block, ExecutionStatus, JustifiedBalances, error::Error}; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz::four_byte_option_impl; @@ -7,8 +8,8 @@ use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; use superstruct::superstruct; use types::{ - AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, - FixedBytesExtended, Hash256, Slot, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, }; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 137471ce36..3edf1e0644 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -7,6 +7,7 @@ use crate::{ }, ssz_container::SszContainer, }; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -15,8 +16,8 @@ use std::{ fmt, }; use types::{ - AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, - FixedBytesExtended, Hash256, Slot, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, }; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -1095,7 +1096,8 @@ fn compute_deltas( #[cfg(test)] mod test_compute_deltas { use super::*; - use types::{FixedBytesExtended, MainnetEthSpec}; + use fixed_bytes::FixedBytesExtended; + use types::MainnetEthSpec; /// Gives a hash that is not the zero hash (unless i is `usize::MAX)`. 
fn hash_from_index(i: usize) -> Hash256 { diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 3821aa1689..a08035d583 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -24,11 +24,13 @@ educe = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } int_to_bytes = { workspace = true } integer-sqrt = "0.1.5" itertools = { workspace = true } merkle_proof = { workspace = true } metrics = { workspace = true } +milhouse = { workspace = true } rand = { workspace = true } rayon = { workspace = true } safe_arith = { workspace = true } @@ -37,6 +39,7 @@ ssz_types = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } tracing = { workspace = true } tree_hash = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index e4f5aa3c8b..dc7be7c251 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -2,6 +2,7 @@ use types::*; pub mod attesting_indices_base { use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError}; + use ssz_types::{BitList, VariableList}; use types::*; /// Convert `attestation` to (almost) indexed-verifiable form. 
@@ -44,10 +45,10 @@ pub mod attesting_indices_base { } pub mod attesting_indices_electra { - use std::collections::HashSet; - use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError}; use safe_arith::SafeArith; + use ssz_types::{BitList, BitVector, VariableList}; + use std::collections::HashSet; use types::*; /// Compute an Electra IndexedAttestation given a list of committees. diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 52f360849e..01c1855fb1 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -6,6 +6,7 @@ use crate::{ }; use safe_arith::SafeArith; use std::cmp; +use typenum::Unsigned; use types::{ consts::altair::{PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, *, diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index 86db037446..ee03596d09 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -2,12 +2,11 @@ use crate::common::altair::BaseRewardPerIncrement; use crate::common::base::SqrtTotalActiveBalance; use crate::common::{altair, base}; use crate::metrics; +use fixed_bytes::FixedBytesExtended; use safe_arith::SafeArith; use tracing::instrument; use types::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; -use types::{ - ActivationQueue, BeaconState, ChainSpec, EthSpec, FixedBytesExtended, ForkName, Hash256, -}; +use types::{ActivationQueue, BeaconState, ChainSpec, EthSpec, ForkName, Hash256}; /// Precursor to an `EpochCache`. 
pub struct PreEpochCache { diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 88ef79310d..d00e1fcfac 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -7,6 +7,7 @@ use crate::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, upgrade_to_fulu, upgrade_to_gloas, }; +use fixed_bytes::FixedBytesExtended; use safe_arith::{ArithError, SafeArith}; use std::sync::Arc; use tree_hash::TreeHash; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 9e7a20040e..f78c8c4eb3 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -5,6 +5,7 @@ use safe_arith::{ArithError, SafeArith, SafeArithIter}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; use std::borrow::Cow; use tree_hash::TreeHash; +use typenum::Unsigned; use types::*; pub use self::verify_attester_slashing::{ diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 1219c7df44..8cc9de42db 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -1,12 +1,12 @@ use crate::common::{altair::BaseRewardPerIncrement, decrease_balance, increase_balance}; use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid}; use crate::{VerifySignatures, signature_sets::sync_aggregate_signature_set}; +use bls::PublicKeyBytes; use safe_arith::SafeArith; use std::borrow::Cow; +use typenum::Unsigned; use types::consts::altair::{PROPOSER_WEIGHT, SYNC_REWARD_WEIGHT, WEIGHT_DENOMINATOR}; -use types::{ - BeaconState, BeaconStateError, 
ChainSpec, EthSpec, PublicKeyBytes, SyncAggregate, Unsigned, -}; +use types::{BeaconState, BeaconStateError, ChainSpec, EthSpec, SyncAggregate}; pub fn process_sync_aggregate( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9a1c6c2f6a..8afeeb685b 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -5,8 +5,9 @@ use crate::common::{ slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; +use ssz_types::FixedVector; +use typenum::U33; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; -use types::typenum::U33; pub fn process_operations>( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index dafd0d79ea..0e936007ee 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -2,17 +2,18 @@ //! validated individually, or alongside in others in a potentially cheaper bulk operation. //! //! This module exposes one function to extract each type of `SignatureSet` from a `BeaconBlock`. 
-use bls::SignatureSet; +use bls::{AggregateSignature, PublicKey, PublicKeyBytes, Signature, SignatureSet}; use ssz::DecodeError; use std::borrow::Cow; use tree_hash::TreeHash; +use typenum::Unsigned; use types::{ - AbstractExecPayload, AggregateSignature, AttesterSlashingRef, BeaconBlockRef, BeaconState, - BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, - InconsistentFork, IndexedAttestation, IndexedAttestationRef, ProposerSlashing, PublicKey, - PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, - SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, + AbstractExecPayload, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, + ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, + IndexedAttestation, IndexedAttestationRef, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, + SyncAggregatorSelectionData, }; pub type Result = std::result::Result; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index c32797f77f..739717b33f 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -11,7 +11,10 @@ use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::{AggregateSignature, Keypair, PublicKeyBytes, Signature, SignatureBytes}; +use fixed_bytes::FixedBytesExtended; use ssz_types::Bitfield; +use ssz_types::VariableList; use std::sync::{Arc, LazyLock}; use test_utils::generate_deterministic_keypairs; use 
types::*; diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs index c996e580a7..d403bfa82b 100644 --- a/consensus/state_processing/src/per_block_processing/verify_deposit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs @@ -1,5 +1,6 @@ use super::errors::{BlockOperationError, DepositInvalid}; use crate::per_block_processing::signature_sets::deposit_pubkey_signature_message; +use bls::PublicKeyBytes; use merkle_proof::verify_merkle_proof; use safe_arith::SafeArith; use tree_hash::TreeHash; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs index 5c08406eae..5e177c5d2b 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs @@ -1,5 +1,5 @@ use crate::EpochProcessingError; -use types::List; +use milhouse::List; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; use types::participation_flags::ParticipationFlags; diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index fd712cc8e5..a818e08775 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -1,9 +1,10 @@ use super::base::{TotalBalances, ValidatorStatus, validator_statuses::InclusionInfo}; use crate::metrics; +use milhouse::List; use std::sync::Arc; use types::{ - BeaconStateError, Epoch, EthSpec, List, ParticipationFlags, ProgressiveBalancesCache, - SyncCommittee, Validator, + BeaconStateError, Epoch, EthSpec, ParticipationFlags, 
ProgressiveBalancesCache, SyncCommittee, + Validator, consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, }; diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index a5a2a69ebf..4818dcbf67 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -1,4 +1,5 @@ -use types::{BeaconStateError, EpochCacheError, InconsistentFork, milhouse}; +use milhouse; +use types::{BeaconStateError, EpochCacheError, InconsistentFork}; #[derive(Debug, PartialEq)] pub enum EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs index 8fcdda062c..9172d954bc 100644 --- a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs @@ -1,7 +1,7 @@ use super::errors::EpochProcessingError; use safe_arith::SafeArith; use tree_hash::TreeHash; -use types::Unsigned; +use typenum::Unsigned; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; diff --git a/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs index 66d68804e1..8d712fd19b 100644 --- a/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs +++ b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs @@ -1,4 +1,5 @@ -use types::{BeaconState, BeaconStateError, BitVector, Checkpoint, Epoch, EthSpec, Hash256}; +use ssz_types::BitVector; +use types::{BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256}; /// This is a subset of the `BeaconState` which is 
used to compute justification and finality /// without modifying the `BeaconState`. diff --git a/consensus/state_processing/src/per_epoch_processing/resets.rs b/consensus/state_processing/src/per_epoch_processing/resets.rs index c9f69c3c95..e05fb30c33 100644 --- a/consensus/state_processing/src/per_epoch_processing/resets.rs +++ b/consensus/state_processing/src/per_epoch_processing/resets.rs @@ -1,8 +1,9 @@ use super::errors::EpochProcessingError; +use milhouse::List; use safe_arith::SafeArith; +use typenum::Unsigned; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; -use types::{List, Unsigned}; pub fn process_eth1_data_reset( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 1584e932bd..914e025f2f 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -8,19 +8,20 @@ use crate::{ per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; use itertools::izip; +use milhouse::{Cow, List, Vector}; use safe_arith::{SafeArith, SafeArithIter}; use std::cmp::{max, min}; use std::collections::{BTreeSet, HashMap}; use tracing::instrument; +use typenum::Unsigned; use types::{ ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, - EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, - ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, Vector, + EthSpec, ExitCache, ForkName, ParticipationFlags, PendingDeposit, ProgressiveBalancesCache, + RelativeEpoch, Validator, consts::altair::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, - milhouse::Cow, }; pub struct SinglePassConfig { diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs 
b/consensus/state_processing/src/per_epoch_processing/slashings.rs index 47eb06e907..6008276d15 100644 --- a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -4,7 +4,8 @@ use crate::per_epoch_processing::{ single_pass::{SinglePassConfig, process_epoch_single_pass}, }; use safe_arith::{SafeArith, SafeArithIter}; -use types::{BeaconState, ChainSpec, EthSpec, Unsigned}; +use typenum::Unsigned; +use types::{BeaconState, ChainSpec, EthSpec}; /// Process slashings. pub fn process_slashings( diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 8695054e1e..0f8e5dc52d 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -3,6 +3,7 @@ use crate::upgrade::{ upgrade_to_electra, upgrade_to_fulu, upgrade_to_gloas, }; use crate::{per_epoch_processing::EpochProcessingSummary, *}; +use fixed_bytes::FixedBytesExtended; use safe_arith::{ArithError, SafeArith}; use tracing::instrument; use types::*; diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs index 4d38e7797e..19b21dad19 100644 --- a/consensus/state_processing/src/state_advance.rs +++ b/consensus/state_processing/src/state_advance.rs @@ -5,7 +5,8 @@ //! duplication and protect against some easy-to-make mistakes when performing state advances. 
use crate::*; -use types::{BeaconState, ChainSpec, EthSpec, FixedBytesExtended, Hash256, Slot}; +use fixed_bytes::FixedBytesExtended; +use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot}; #[derive(Debug, PartialEq)] pub enum Error { diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 3006da25ae..022175ff99 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -2,11 +2,12 @@ use crate::common::update_progressive_balances_cache::initialize_progressive_bal use crate::common::{ attesting_indices_base::get_attesting_indices, get_attestation_participation_flag_indices, }; +use milhouse::List; use std::mem; use std::sync::Arc; use types::{ BeaconState, BeaconStateAltair, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, List, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, + Fork, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, }; /// Translate the participation information from the epoch prior to the fork into Altair's format. diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index ae0dbde767..948fa511b7 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,7 +1,8 @@ +use milhouse::List; use std::mem; use types::{ BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, List, + Fork, }; /// Transform a `Bellatrix` state into an `Capella` state. 
diff --git a/consensus/state_processing/src/upgrade/fulu.rs b/consensus/state_processing/src/upgrade/fulu.rs index c2aced7047..c14c1edbec 100644 --- a/consensus/state_processing/src/upgrade/fulu.rs +++ b/consensus/state_processing/src/upgrade/fulu.rs @@ -1,8 +1,7 @@ +use milhouse::Vector; use safe_arith::SafeArith; use std::mem; -use types::{ - BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork, Vector, -}; +use types::{BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork}; /// Transform a `Electra` state into an `Fulu` state. pub fn upgrade_to_fulu( diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 559a181948..78c6f871cb 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -65,6 +65,7 @@ test_random_derive = { path = "../../common/test_random_derive" } tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 814001d966..397c33163e 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -1,10 +1,11 @@ use criterion::{BatchSize, BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use fixed_bytes::FixedBytesExtended; use milhouse::List; use rayon::prelude::*; use ssz::Encode; use std::sync::Arc; use types::{ - BeaconState, Epoch, Eth1Data, EthSpec, FixedBytesExtended, Hash256, MainnetEthSpec, Validator, + BeaconState, Epoch, Eth1Data, EthSpec, Hash256, MainnetEthSpec, Validator, test_utils::generate_deterministic_keypair, }; diff --git a/consensus/types/src/block/beacon_block.rs b/consensus/types/src/block/beacon_block.rs index c2f361eb4b..a4e7e800bc 100644 --- a/consensus/types/src/block/beacon_block.rs +++ b/consensus/types/src/block/beacon_block.rs @@ -7,11 +7,12 @@ use 
fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; -use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum::Unsigned}; +use ssz_types::{BitList, BitVector, FixedVector, VariableList}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use typenum::Unsigned; use crate::{ attestation::{AttestationBase, AttestationData, IndexedAttestationBase}, diff --git a/consensus/types/src/core/eth_spec.rs b/consensus/types/src/core/eth_spec.rs index 11857e678c..72fd1ebc9e 100644 --- a/consensus/types/src/core/eth_spec.rs +++ b/consensus/types/src/core/eth_spec.rs @@ -5,7 +5,7 @@ use std::{ use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; -use ssz_types::typenum::{ +use typenum::{ U0, U1, U2, U4, U8, U16, U17, U32, U64, U128, U256, U512, U625, U1024, U2048, U4096, U8192, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, U1073741824, U1099511627776, UInt, Unsigned, bit::B0, @@ -625,7 +625,7 @@ impl EthSpec for GnosisEthSpec { #[cfg(test)] mod test { use crate::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; - use ssz_types::typenum::Unsigned; + use typenum::Unsigned; fn assert_valid_spec() { let spec = E::default_spec(); diff --git a/consensus/types/src/core/preset.rs b/consensus/types/src/core/preset.rs index b436fafd3a..75d2d8df6b 100644 --- a/consensus/types/src/core/preset.rs +++ b/consensus/types/src/core/preset.rs @@ -1,5 +1,5 @@ use serde::{Deserialize, Serialize}; -use ssz_types::typenum::Unsigned; +use typenum::Unsigned; use crate::core::{ChainSpec, Epoch, EthSpec}; diff --git a/consensus/types/src/deposit/deposit.rs b/consensus/types/src/deposit/deposit.rs index 67f8572def..0b08bd6509 100644 --- a/consensus/types/src/deposit/deposit.rs +++ b/consensus/types/src/deposit/deposit.rs @@ -1,9 +1,10 @@ use context_deserialize::context_deserialize; 
use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::{FixedVector, typenum::U33}; +use ssz_types::FixedVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use typenum::U33; use crate::{core::Hash256, deposit::DepositData, fork::ForkName, test_utils::TestRandom}; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index a8a78f8cfb..cd9252bde8 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -165,14 +165,3 @@ pub mod application_domain { pub use crate::kzg_ext::consts::VERSIONED_HASH_VERSION_KZG; pub use crate::light_client::LightClientError as LightClientUpdateError; pub use crate::state::BeaconStateError as Error; - -pub use bls::{ - AggregatePublicKey, AggregateSignature, Error as BlsError, Keypair, PUBLIC_KEY_BYTES_LEN, - PublicKey, PublicKeyBytes, SIGNATURE_BYTES_LEN, SecretKey, Signature, SignatureBytes, - get_withdrawal_credentials, -}; -pub use context_deserialize::{ContextDeserialize, context_deserialize}; -pub use fixed_bytes::FixedBytesExtended; -pub use milhouse::{self, List, Vector}; -pub use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum, typenum::Unsigned}; -pub use superstruct::superstruct; diff --git a/consensus/types/src/light_client/light_client_update.rs b/consensus/types/src/light_client/light_client_update.rs index 7fc2c36239..aa7b800cc8 100644 --- a/consensus/types/src/light_client/light_client_update.rs +++ b/consensus/types/src/light_client/light_client_update.rs @@ -9,10 +9,10 @@ use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; use ssz_types::FixedVector; -use ssz_types::typenum::{U4, U5, U6, U7}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use typenum::{U4, U5, U6, U7}; use crate::{ block::SignedBlindedBeaconBlock, @@ -574,7 +574,7 @@ fn compute_sync_committee_period_at_slot( mod tests { use super::*; use crate::light_client::consts::*; - 
use ssz_types::typenum::Unsigned; + use typenum::Unsigned; // `ssz_tests!` can only be defined once per namespace #[cfg(test)] diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 948899c98d..f36c02ce6b 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -13,13 +13,14 @@ use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError, Encode, ssz_encode}; use ssz_derive::{Decode, Encode}; -use ssz_types::{BitVector, FixedVector, typenum::Unsigned}; +use ssz_types::{BitVector, FixedVector}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use typenum::Unsigned; use crate::{ attestation::{ diff --git a/consensus/types/src/state/iter.rs b/consensus/types/src/state/iter.rs index d761a6bd85..63f28d74c4 100644 --- a/consensus/types/src/state/iter.rs +++ b/consensus/types/src/state/iter.rs @@ -56,6 +56,7 @@ impl Iterator for BlockRootsIter<'_, E> { #[cfg(test)] mod test { use crate::*; + use fixed_bytes::FixedBytesExtended; type E = MinimalEthSpec; diff --git a/consensus/types/src/sync_committee/sync_selection_proof.rs b/consensus/types/src/sync_committee/sync_selection_proof.rs index 7efc6c4c76..723f0c06c9 100644 --- a/consensus/types/src/sync_committee/sync_selection_proof.rs +++ b/consensus/types/src/sync_committee/sync_selection_proof.rs @@ -5,7 +5,7 @@ use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; -use ssz_types::typenum::Unsigned; +use typenum::Unsigned; use crate::{ core::{ @@ -112,8 +112,9 @@ impl From for SyncSelectionProof { #[cfg(test)] mod test { use super::*; - use crate::{FixedBytesExtended, MainnetEthSpec}; + use crate::MainnetEthSpec; use eth2_interop_keypairs::keypair; + 
use fixed_bytes::FixedBytesExtended; #[test] fn proof_sign_and_verify() { diff --git a/consensus/types/src/sync_committee/sync_subnet_id.rs b/consensus/types/src/sync_committee/sync_subnet_id.rs index fb58146178..6cb11f6b03 100644 --- a/consensus/types/src/sync_committee/sync_subnet_id.rs +++ b/consensus/types/src/sync_committee/sync_subnet_id.rs @@ -8,7 +8,7 @@ use std::{ use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; -use ssz_types::typenum::Unsigned; +use typenum::Unsigned; use crate::core::{EthSpec, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT}; diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index 3bc0d37c62..762f41eb34 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -1,5 +1,6 @@ use smallvec::smallvec; -use ssz_types::{BitList, BitVector, typenum::Unsigned}; +use ssz_types::{BitList, BitVector}; +use typenum::Unsigned; use crate::test_utils::TestRandom; diff --git a/consensus/types/src/test_utils/test_random/test_random.rs b/consensus/types/src/test_utils/test_random/test_random.rs index f31be97c03..101fbec51b 100644 --- a/consensus/types/src/test_utils/test_random/test_random.rs +++ b/consensus/types/src/test_utils/test_random/test_random.rs @@ -3,7 +3,8 @@ use std::{marker::PhantomData, sync::Arc}; use rand::{RngCore, SeedableRng}; use rand_xorshift::XorShiftRng; use smallvec::{SmallVec, smallvec}; -use ssz_types::{VariableList, typenum::Unsigned}; +use ssz_types::VariableList; +use typenum::Unsigned; pub fn test_random_instance() -> T { let mut rng = XorShiftRng::from_seed([0x42; 16]); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 04eb41960b..43e361b60d 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -26,6 +26,7 @@ eth2_wallet = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = 
true } +fixed_bytes = { workspace = true } hex = { workspace = true } lighthouse_network = { workspace = true } lighthouse_version = { workspace = true } diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 71186904d0..620539a95f 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -1,4 +1,5 @@ use clap::ArgMatches; +use fixed_bytes::FixedBytesExtended; use lighthouse_network::{ NETWORK_KEY_FILENAME, NetworkConfig, discovery::{CombinedKey, ENR_FILENAME, build_enr}, @@ -9,7 +10,7 @@ use std::io::Write; use std::path::PathBuf; use std::{fs, net::Ipv4Addr}; use std::{fs::File, num::NonZeroU16}; -use types::{ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, Hash256}; +use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 0b945bcb2d..9bfcae85e5 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -18,6 +18,7 @@ use account_utils::{ eth2_keystore::KeystoreBuilder, validator_definitions::{SigningDefinition, ValidatorDefinition, ValidatorDefinitions}, }; +use bls::{Keypair, PublicKey}; use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use std::env; use std::fs::{self, File}; @@ -26,7 +27,6 @@ use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output, Stdio}; use std::str::from_utf8; use tempfile::{TempDir, tempdir}; -use types::{Keypair, PublicKey}; use validator_dir::ValidatorDir; use zeroize::Zeroizing; diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index 99afa7b682..d6d720a561 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use eth2::SensitiveUrl; 
use serde::de::DeserializeOwned; use std::fs; diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 94d048ef72..a068b2e885 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -14,11 +14,13 @@ portable = ["types/portable"] [dependencies] bincode = { workspace = true } +bls = { workspace = true } byteorder = { workspace = true } educe = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } filesystem = { workspace = true } +fixed_bytes = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } @@ -38,6 +40,7 @@ strum = { workspace = true } tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/slasher/src/attester_record.rs b/slasher/src/attester_record.rs index 67145193ac..db326a9d80 100644 --- a/slasher/src/attester_record.rs +++ b/slasher/src/attester_record.rs @@ -1,5 +1,7 @@ use crate::{Error, database::IndexedAttestationId}; +use bls::AggregateSignature; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use std::borrow::Cow; use std::sync::{ Arc, @@ -7,7 +9,7 @@ use std::sync::{ }; use tree_hash::TreeHash as _; use tree_hash_derive::TreeHash; -use types::{AggregateSignature, EthSpec, Hash256, IndexedAttestation, VariableList}; +use types::{EthSpec, Hash256, IndexedAttestation}; #[derive(Debug, Clone, Copy)] pub struct AttesterRecord { diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 2df2849612..80d073a81c 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -7,6 +7,7 @@ use crate::{ AttesterRecord, AttesterSlashingStatus, 
CompactAttesterRecord, Config, Database, Error, ProposerSlashingStatus, metrics, }; +use bls::AggregateSignature; use byteorder::{BigEndian, ByteOrder}; use interface::{Environment, OpenDatabases, RwTransaction}; use lru::LruCache; @@ -14,15 +15,16 @@ use parking_lot::Mutex; use serde::de::DeserializeOwned; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use std::borrow::{Borrow, Cow}; use std::marker::PhantomData; use std::sync::Arc; use tracing::info; use tree_hash::TreeHash; use types::{ - AggregateSignature, AttestationData, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, + AttestationData, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, ProposerSlashing, SignedBeaconBlockHeader, - Slot, VariableList, + Slot, }; /// Current database schema version, to check compatibility of on-disk DB with software. @@ -860,7 +862,8 @@ impl SlasherDB { #[cfg(test)] mod test { use super::*; - use types::{Checkpoint, ForkName, MainnetEthSpec, Unsigned}; + use typenum::Unsigned; + use types::{Checkpoint, ForkName, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index bbbadac761..20d1ee9217 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -1,10 +1,11 @@ +use bls::{AggregateSignature, Signature}; +use fixed_bytes::FixedBytesExtended; use std::collections::HashSet; use std::sync::Arc; use types::{ - AggregateSignature, AttestationData, AttesterSlashing, AttesterSlashingBase, - AttesterSlashingElectra, BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, Hash256, IndexedAttestation, MainnetEthSpec, Signature, - SignedBeaconBlockHeader, Slot, + AttestationData, AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, + BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, + MainnetEthSpec, SignedBeaconBlockHeader, Slot, 
indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectra}, }; diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 581785e2a9..cef201ee91 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -27,14 +27,17 @@ fs2 = { workspace = true } hex = { workspace = true } kzg = { workspace = true } logging = { workspace = true } +milhouse = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } snap = { workspace = true } +ssz_types = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 1103d2fe82..52f5333df1 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -1,11 +1,12 @@ use super::*; use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; +use ssz_types::FixedVector; use tree_hash::Hash256; +use typenum::Unsigned; use types::{ BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, - BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconState, FixedVector, FullPayload, Unsigned, - light_client_update, + BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconState, FullPayload, light_client_update, }; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 8742f8a140..1dd37a22ee 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -5,12 +5,14 @@ use crate::cases::common::{DecimalU128, DecimalU256, 
SszStaticType}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::{context_yaml_decode_file, log_file_access, snappy_decode_file}; use context_deserialize::{ContextDeserialize, context_deserialize}; +use milhouse::Vector; use serde::{Deserialize, Deserializer, de::Error as SerdeError}; use ssz_derive::{Decode, Encode}; +use ssz_types::{BitList, BitVector, FixedVector, VariableList}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use types::typenum::*; -use types::{BitList, BitVector, FixedVector, ForkName, VariableList, Vector}; +use typenum::*; +use types::ForkName; #[derive(Debug, Clone, Deserialize)] #[context_deserialize(ForkName)] diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 089e4464cd..0cec69c97e 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,6 +1,7 @@ #![cfg(feature = "ef_tests")] use ef_tests::*; +use typenum::Unsigned; use types::*; // Check that the hand-computed multiplications on EthSpec are correctly computed. 
diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 78ed266fb2..034b6c5c8a 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -13,8 +13,10 @@ alloy-provider = { workspace = true } alloy-rpc-types-eth = { workspace = true } alloy-signer-local = { workspace = true } async-channel = { workspace = true } +bls = { workspace = true } deposit_contract = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } fork_choice = { workspace = true } futures = { workspace = true } hex = { workspace = true } @@ -26,4 +28,5 @@ serde_json = { workspace = true } task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 57501c6ee2..8413da4c5e 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -6,11 +6,13 @@ use alloy_network::{EthereumWallet, TransactionBuilder}; use alloy_primitives::Address as AlloyAddress; use alloy_provider::{Provider, ProviderBuilder}; use alloy_signer_local::PrivateKeySigner; +use bls::PublicKeyBytes; use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadParameters, PayloadStatus, }; +use fixed_bytes::FixedBytesExtended; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{Client, header::CONTENT_TYPE}; use sensitive_url::SensitiveUrl; @@ -22,8 +24,9 @@ use tokio::time::sleep; use types::payload::BlockProductionVersion; use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, - FixedBytesExtended, ForkName, Hash256, 
MainnetEthSpec, PublicKeyBytes, Slot, Uint256, + ForkName, Hash256, MainnetEthSpec, Slot, Uint256, }; + const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(60); const TEST_FORK: ForkName = ForkName::Capella; diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs index fe36a1bf67..8cd63ce307 100644 --- a/testing/execution_engine_integration/src/transactions.rs +++ b/testing/execution_engine_integration/src/transactions.rs @@ -1,8 +1,10 @@ use alloy_network::TransactionBuilder; use alloy_primitives::{Address, U256}; use alloy_rpc_types_eth::{AccessList, TransactionRequest}; +use bls::{Keypair, Signature}; use deposit_contract::{BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, encode_eth1_tx_data}; -use types::{DepositData, EthSpec, FixedBytesExtended, Hash256, Keypair, Signature}; +use fixed_bytes::FixedBytesExtended; +use types::{DepositData, EthSpec, Hash256}; /// Hardcoded deposit contract address based on sender address and nonce pub const DEPOSIT_CONTRACT_ADDRESS: &str = "64f43BEc7F86526686C931d65362bB8698872F90"; diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 54035f2e82..a1b1b6f95d 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -20,4 +20,5 @@ serde_json = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 1240785121..35200692c3 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,8 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, FinalityCheckpointsData, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Slot, Unsigned}; +use typenum::Unsigned; +use types::{Epoch, EthSpec, 
ExecPayload, ExecutionBlockHash, Slot}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 66376f0a51..437aa539f4 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -10,7 +10,9 @@ portable = ["beacon_chain/portable"] [dependencies] beacon_chain = { workspace = true } +bls = { workspace = true } ethereum_ssz = { workspace = true } +fixed_bytes = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } types = { workspace = true } diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index 4a829b6803..80c30489b7 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -3,6 +3,8 @@ mod macros; mod exit; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; use ssz::Encode; use std::env; use std::fs::{self, File}; @@ -10,10 +12,8 @@ use std::io::Write; use std::path::{Path, PathBuf}; use std::process::exit; use std::sync::LazyLock; -use types::{ - BeaconState, EthSpec, Keypair, SignedBeaconBlock, test_utils::generate_deterministic_keypairs, -}; -use types::{FixedBytesExtended, Hash256, MainnetEthSpec, Slot}; +use types::{BeaconState, EthSpec, SignedBeaconBlock, test_utils::generate_deterministic_keypairs}; +use types::{Hash256, MainnetEthSpec, Slot}; type E = MainnetEthSpec; diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index b4637b4030..3ef2e0f7f7 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -9,10 +9,12 @@ edition = { workspace = true } [dev-dependencies] account_utils = { workspace = true } async-channel = { workspace = true } +bls = { workspace = 
true } environment = { workspace = true } eth2 = { workspace = true } eth2_keystore = { workspace = true } eth2_network_config = { workspace = true } +fixed_bytes = { workspace = true } futures = { workspace = true } initialized_validators = { workspace = true } lighthouse_validator_store = { workspace = true } @@ -24,6 +26,7 @@ serde_json = { workspace = true } serde_yaml = { workspace = true } slashing_protection = { workspace = true } slot_clock = { workspace = true } +ssz_types = { workspace = true } task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 15ec745e3f..541f9b2b4a 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -20,9 +20,11 @@ mod tests { use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, }; + use bls::{AggregateSignature, Keypair, PublicKeyBytes, SecretKey, Signature}; use eth2::types::FullBlockContents; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; + use fixed_bytes::FixedBytesExtended; use initialized_validators::{ InitializedValidators, load_pem_certificate, load_pkcs12_identity, }; @@ -32,6 +34,7 @@ mod tests { use serde::Serialize; use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use slot_clock::{SlotClock, TestingSlotClock}; + use ssz_types::BitList; use std::env; use std::fmt::Debug; use std::fs::{self, File}; diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml index 5fe2af4cb0..481aece48b 100644 --- a/validator_client/beacon_node_fallback/Cargo.toml +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -9,6 +9,7 @@ name = "beacon_node_fallback" path = "src/lib.rs" [dependencies] +bls = { workspace = true } clap = { workspace = true } eth2 = { workspace = true } 
futures = { workspace = true } diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 0f13d8c8b7..6abcd44cc9 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -773,12 +773,13 @@ impl ApiTopic { mod tests { use super::*; use crate::beacon_node_health::BeaconNodeHealthTier; + use bls::Signature; use eth2::SensitiveUrl; use eth2::Timeouts; use slot_clock::TestingSlotClock; use strum::VariantNames; use types::{BeaconBlockDeneb, MainnetEthSpec, Slot}; - use types::{EmptyBlock, Signature, SignedBeaconBlockDeneb, SignedBlindedBeaconBlock}; + use types::{EmptyBlock, SignedBeaconBlockDeneb, SignedBlindedBeaconBlock}; use validator_test_rig::mock_beacon_node::MockBeaconNode; type E = MainnetEthSpec; diff --git a/validator_client/doppelganger_service/Cargo.toml b/validator_client/doppelganger_service/Cargo.toml index e5b183570d..66b27eb39d 100644 --- a/validator_client/doppelganger_service/Cargo.toml +++ b/validator_client/doppelganger_service/Cargo.toml @@ -6,6 +6,7 @@ authors = ["Sigma Prime "] [dependencies] beacon_node_fallback = { workspace = true } +bls = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } logging = { workspace = true } diff --git a/validator_client/doppelganger_service/src/lib.rs b/validator_client/doppelganger_service/src/lib.rs index b0ed78e996..600ae82c54 100644 --- a/validator_client/doppelganger_service/src/lib.rs +++ b/validator_client/doppelganger_service/src/lib.rs @@ -30,6 +30,7 @@ //! Doppelganger protection is a best-effort, last-line-of-defence mitigation. Do not rely upon it. 
use beacon_node_fallback::BeaconNodeFallback; +use bls::PublicKeyBytes; use environment::RuntimeContext; use eth2::types::LivenessResponseData; use logging::crit; @@ -41,7 +42,7 @@ use std::sync::Arc; use task_executor::ShutdownReason; use tokio::time::sleep; use tracing::{error, info}; -use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Slot}; use validator_store::{DoppelgangerStatus, ValidatorStore}; struct LivenessResponses { diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index bb624ea988..2bd57867ac 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -20,6 +20,7 @@ eth2 = { workspace = true, features = ["lighthouse"] } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } +fixed_bytes = { workspace = true } graffiti_file = { workspace = true } health_metrics = { workspace = true } initialized_validators = { workspace = true } @@ -41,6 +42,7 @@ tempfile = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tracing = { workspace = true } +typenum = { workspace = true } types = { workspace = true } url = { workspace = true } validator_dir = { workspace = true } @@ -54,3 +56,4 @@ zeroize = { workspace = true } futures = { workspace = true } itertools = { workspace = true } rand = { workspace = true, features = ["small_rng"] } +ssz_types = { workspace = true } diff --git a/validator_client/http_api/src/keystores.rs b/validator_client/http_api/src/keystores.rs index c0f918f9bb..18accf0d5a 100644 --- a/validator_client/http_api/src/keystores.rs +++ b/validator_client/http_api/src/keystores.rs @@ -1,5 +1,6 @@ //! Implementation of the standard keystore management API. 
use account_utils::validator_definitions::PasswordStorage; +use bls::PublicKeyBytes; use eth2::lighthouse_vc::{ std_types::{ DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, @@ -18,7 +19,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; use tracing::{info, warn}; -use types::{EthSpec, PublicKeyBytes}; +use types::EthSpec; use validator_dir::{Builder as ValidatorDirBuilder, keystore_password_path}; use warp::Rejection; use warp_utils::reject::{custom_bad_request, custom_server_error}; diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index 4494fca957..a35b4ec6c6 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -22,6 +22,7 @@ use account_utils::{ }; pub use api_secret::ApiSecret; use beacon_node_fallback::CandidateInfo; +use bls::{PublicKey, PublicKeyBytes}; use core::convert::Infallible; use create_validator::{ create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, @@ -30,8 +31,8 @@ use directory::{DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_VALIDATOR_D use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{ - self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, PublicKey, - PublicKeyBytes, SetGraffitiRequest, UpdateCandidatesRequest, UpdateCandidatesResponse, + self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, SetGraffitiRequest, + UpdateCandidatesRequest, UpdateCandidatesResponse, }, }; use health_metrics::observe::Observe; diff --git a/validator_client/http_api/src/remotekeys.rs b/validator_client/http_api/src/remotekeys.rs index 5aa63baac3..987e1b8740 100644 --- a/validator_client/http_api/src/remotekeys.rs +++ b/validator_client/http_api/src/remotekeys.rs @@ -2,6 +2,7 @@ use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, Web3SignerDefinition, }; +use 
bls::PublicKeyBytes; use eth2::lighthouse_vc::std_types::{ DeleteRemotekeyStatus, DeleteRemotekeysRequest, DeleteRemotekeysResponse, ImportRemotekeyStatus, ImportRemotekeysRequest, ImportRemotekeysResponse, @@ -14,7 +15,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; use tracing::{info, warn}; -use types::{EthSpec, PublicKeyBytes}; +use types::EthSpec; use url::Url; use warp::Rejection; use warp_utils::reject::custom_server_error; diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 9a8784f202..f83d9f4d52 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -4,6 +4,7 @@ use account_utils::validator_definitions::ValidatorDefinitions; use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, }; +use bls::Keypair; use deposit_contract::decode_eth1_tx_data; use doppelganger_service::DoppelgangerService; use eth2::{ diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index b0780e7427..5cb631983c 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -11,6 +11,7 @@ use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, random_password_string, validator_definitions::ValidatorDefinitions, }; +use bls::{Keypair, PublicKeyBytes}; use deposit_contract::decode_eth1_tx_data; use eth2::{ Error as ApiError, diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index dd2266e3f6..eeb3cd94de 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -1,19 +1,23 @@ use super::*; use account_utils::random_password_string; use bls::PublicKeyBytes; +use bls::{AggregateSignature, PublicKey}; use 
eth2::lighthouse_vc::types::UpdateFeeRecipientRequest; use eth2::lighthouse_vc::{ http_client::ValidatorClientHttpClient as HttpClient, std_types::{KeystoreJsonStr as Keystore, *}, types::Web3SignerValidatorRequest, }; +use fixed_bytes::FixedBytesExtended; use itertools::Itertools; use lighthouse_validator_store::DEFAULT_GAS_LIMIT; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; +use ssz_types::BitList; use std::{collections::HashMap, path::Path}; use tokio::runtime::Handle; +use typenum::Unsigned; use types::{Address, attestation::AttestationBase}; use validator_store::ValidatorStore; use zeroize::Zeroizing; diff --git a/validator_client/initialized_validators/src/lib.rs b/validator_client/initialized_validators/src/lib.rs index 4d61bd4ed8..db6d03174d 100644 --- a/validator_client/initialized_validators/src/lib.rs +++ b/validator_client/initialized_validators/src/lib.rs @@ -15,6 +15,7 @@ use account_utils::{ Web3SignerDefinition, }, }; +use bls::{Keypair, PublicKey, PublicKeyBytes}; use eth2_keystore::Keystore; use lockfile::{Lockfile, LockfileError}; use metrics::set_gauge; @@ -30,7 +31,7 @@ use std::sync::Arc; use std::time::Duration; use tracing::{debug, error, info, warn}; use types::graffiti::GraffitiString; -use types::{Address, Graffiti, Keypair, PublicKey, PublicKeyBytes}; +use types::{Address, Graffiti}; use url::{ParseError, Url}; use validator_dir::Builder as ValidatorDirBuilder; use zeroize::Zeroizing; diff --git a/validator_client/lighthouse_validator_store/Cargo.toml b/validator_client/lighthouse_validator_store/Cargo.toml index 0f8220bdc9..01c7616be1 100644 --- a/validator_client/lighthouse_validator_store/Cargo.toml +++ b/validator_client/lighthouse_validator_store/Cargo.toml @@ -7,6 +7,7 @@ authors = ["Sigma Prime "] [dependencies] account_utils = { workspace = true } beacon_node_fallback = { workspace = true } +bls = { workspace = true } doppelganger_service = { 
workspace = true } either = { workspace = true } environment = { workspace = true } diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index dc8fb07b65..3bea21a05d 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -1,4 +1,5 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; +use bls::{PublicKeyBytes, Signature}; use doppelganger_service::DoppelgangerService; use eth2::types::PublishBlockRequest; use initialized_validators::InitializedValidators; @@ -19,9 +20,9 @@ use tracing::{error, info, instrument, warn}; use types::{ AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, - PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, - SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, - Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, + SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, + SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, graffiti::GraffitiString, }; diff --git a/validator_client/signing_method/Cargo.toml b/validator_client/signing_method/Cargo.toml index 2defd25caa..cb321c2d49 100644 --- a/validator_client/signing_method/Cargo.toml +++ b/validator_client/signing_method/Cargo.toml @@ -5,6 +5,7 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] +bls = { workspace = true } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } lockfile = { workspace = true } diff --git 
a/validator_client/signing_method/src/lib.rs b/validator_client/signing_method/src/lib.rs index 7e0f2c02f7..d0d9868952 100644 --- a/validator_client/signing_method/src/lib.rs +++ b/validator_client/signing_method/src/lib.rs @@ -3,6 +3,7 @@ //! - Via a local `Keypair`. //! - Via a remote signer (Web3Signer) +use bls::{Keypair, PublicKey, Signature}; use eth2_keystore::Keystore; use lockfile::Lockfile; use parking_lot::Mutex; diff --git a/validator_client/signing_method/src/web3signer.rs b/validator_client/signing_method/src/web3signer.rs index 99fad10303..246d9e9e09 100644 --- a/validator_client/signing_method/src/web3signer.rs +++ b/validator_client/signing_method/src/web3signer.rs @@ -1,6 +1,7 @@ //! Contains the types required to make JSON requests to Web3Signer servers. use super::Error; +use bls::{PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; use types::*; diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 6a778c5de3..b80da6c786 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -11,9 +11,11 @@ portable = ["types/portable"] [dependencies] arbitrary = { workspace = true, features = ["derive"] } +bls = { workspace = true } eip_3076 = { workspace = true, features = ["json"] } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } +fixed_bytes = { workspace = true } r2d2 = { workspace = true } r2d2_sqlite = "0.21.0" rusqlite = { workspace = true } diff --git a/validator_client/slashing_protection/src/attestation_tests.rs b/validator_client/slashing_protection/src/attestation_tests.rs index 37766f271b..d16c961336 100644 --- a/validator_client/slashing_protection/src/attestation_tests.rs +++ b/validator_client/slashing_protection/src/attestation_tests.rs @@ -2,7 +2,8 @@ use crate::test_utils::*; use crate::*; -use types::{AttestationData, Checkpoint, Epoch, FixedBytesExtended, Slot}; +use 
fixed_bytes::FixedBytesExtended; +use types::{AttestationData, Checkpoint, Epoch, Slot}; pub fn build_checkpoint(epoch_num: u64) -> Checkpoint { Checkpoint { diff --git a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs index dfda7983f7..df1c63f37d 100644 --- a/validator_client/slashing_protection/src/bin/test_generator.rs +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -1,11 +1,12 @@ use eip_3076::{Interchange, InterchangeData, InterchangeMetadata, SignedAttestation, SignedBlock}; +use fixed_bytes::FixedBytesExtended; use slashing_protection::SUPPORTED_INTERCHANGE_FORMAT_VERSION; use slashing_protection::interchange_test::{MultiTestCase, TestCase}; use slashing_protection::test_utils::{DEFAULT_GENESIS_VALIDATORS_ROOT, pubkey}; use std::fs::{self, File}; use std::io::Write; use std::path::Path; -use types::{Epoch, FixedBytesExtended, Hash256, Slot}; +use types::{Epoch, Hash256, Slot}; fn metadata(genesis_validators_root: Hash256) -> InterchangeMetadata { InterchangeMetadata { diff --git a/validator_client/slashing_protection/src/block_tests.rs b/validator_client/slashing_protection/src/block_tests.rs index b3273015f4..2531f52d8c 100644 --- a/validator_client/slashing_protection/src/block_tests.rs +++ b/validator_client/slashing_protection/src/block_tests.rs @@ -2,7 +2,8 @@ use super::*; use crate::test_utils::*; -use types::{BeaconBlockHeader, FixedBytesExtended, Slot}; +use fixed_bytes::FixedBytesExtended; +use types::{BeaconBlockHeader, Slot}; pub fn block(slot: u64) -> BeaconBlockHeader { BeaconBlockHeader { diff --git a/validator_client/slashing_protection/src/extra_interchange_tests.rs b/validator_client/slashing_protection/src/extra_interchange_tests.rs index 0f88ec8b1d..18457720e4 100644 --- a/validator_client/slashing_protection/src/extra_interchange_tests.rs +++ b/validator_client/slashing_protection/src/extra_interchange_tests.rs @@ -2,8 +2,8 @@ use 
crate::test_utils::pubkey; use crate::*; +use fixed_bytes::FixedBytesExtended; use tempfile::tempdir; -use types::FixedBytesExtended; #[test] fn export_non_existent_key() { diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index ebe0105f24..0dfcda204d 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -2,11 +2,13 @@ use crate::{ SigningRoot, SlashingDatabase, test_utils::{DEFAULT_GENESIS_VALIDATORS_ROOT, pubkey}, }; +use bls::PublicKeyBytes; use eip_3076::{Interchange, SignedAttestation, SignedBlock}; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use tempfile::tempdir; -use types::{Epoch, FixedBytesExtended, Hash256, PublicKeyBytes, Slot}; +use types::{Epoch, Hash256, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index 917d51d38b..f8580e7315 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -19,10 +19,11 @@ pub use crate::slashing_database::{ InterchangeError, InterchangeImportOutcome, SUPPORTED_INTERCHANGE_FORMAT_VERSION, SlashingDatabase, }; +use bls::PublicKeyBytes; use rusqlite::Error as SQLError; use std::fmt::Display; use std::io::{Error as IOError, ErrorKind}; -use types::{Hash256, PublicKeyBytes}; +use types::Hash256; /// The filename within the `validators` directory that contains the slashing protection DB. 
pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; @@ -133,7 +134,7 @@ impl Display for NotSafe { #[cfg(test)] mod test { - use types::FixedBytesExtended; + use fixed_bytes::FixedBytesExtended; use super::*; diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 00677212a3..67e1234ac5 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -1,6 +1,7 @@ use crate::signed_attestation::InvalidAttestation; use crate::signed_block::InvalidBlock; use crate::{NotSafe, Safe, SignedAttestation, SignedBlock, SigningRoot, signing_root_from_row}; +use bls::PublicKeyBytes; use eip_3076::{ Interchange, InterchangeData, InterchangeMetadata, SignedAttestation as InterchangeAttestation, SignedBlock as InterchangeBlock, @@ -12,7 +13,7 @@ use std::fs::File; use std::path::Path; use std::time::Duration; use tracing::instrument; -use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, PublicKeyBytes, SignedRoot, Slot}; +use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, SignedRoot, Slot}; type Pool = r2d2::Pool; diff --git a/validator_client/slashing_protection/tests/migration.rs b/validator_client/slashing_protection/tests/migration.rs index 3d4ec7ea9a..14bf0d63f9 100644 --- a/validator_client/slashing_protection/tests/migration.rs +++ b/validator_client/slashing_protection/tests/migration.rs @@ -1,10 +1,11 @@ //! Tests for upgrading a previous version of the database to the latest schema. 
+use fixed_bytes::FixedBytesExtended; use slashing_protection::{NotSafe, SlashingDatabase}; use std::collections::HashMap; use std::fs; use std::path::{Path, PathBuf}; use tempfile::tempdir; -use types::{FixedBytesExtended, Hash256}; +use types::Hash256; fn test_data_dir() -> PathBuf { Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap()).join("migration-tests") diff --git a/validator_client/validator_services/src/block_service.rs b/validator_client/validator_services/src/block_service.rs index 8ec53d3f40..23658af03f 100644 --- a/validator_client/validator_services/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -1,4 +1,5 @@ use beacon_node_fallback::{ApiTopic, BeaconNodeFallback, Error as FallbackError, Errors}; +use bls::PublicKeyBytes; use eth2::{BeaconNodeHttpClient, StatusCode}; use graffiti_file::{GraffitiFile, determine_graffiti}; use logging::crit; @@ -11,7 +12,7 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; -use types::{BlockType, ChainSpec, EthSpec, Graffiti, PublicKeyBytes, Slot}; +use types::{BlockType, ChainSpec, EthSpec, Graffiti, Slot}; use validator_store::{Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore}; #[derive(Debug)] diff --git a/validator_client/validator_services/src/duties_service.rs b/validator_client/validator_services/src/duties_service.rs index 7569d3946a..c2378181ef 100644 --- a/validator_client/validator_services/src/duties_service.rs +++ b/validator_client/validator_services/src/duties_service.rs @@ -10,6 +10,7 @@ use crate::block_service::BlockServiceNotification; use crate::sync::SyncDutiesMap; use crate::sync::poll_sync_committee_duties; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; +use bls::PublicKeyBytes; use eth2::types::{ AttesterData, BeaconCommitteeSelection, BeaconCommitteeSubscription, DutiesResponse, ProposerData, StateId, 
ValidatorId, @@ -29,7 +30,7 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::{sync::mpsc::Sender, time::sleep}; use tracing::{debug, error, info, warn}; -use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; +use types::{ChainSpec, Epoch, EthSpec, Hash256, SelectionProof, Slot}; use validator_metrics::{ATTESTATION_DUTY, get_int_gauge, set_int_gauge}; use validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; diff --git a/validator_client/validator_services/src/sync.rs b/validator_client/validator_services/src/sync.rs index 77032ed15b..0f456a7050 100644 --- a/validator_client/validator_services/src/sync.rs +++ b/validator_client/validator_services/src/sync.rs @@ -1,4 +1,5 @@ use crate::duties_service::{DutiesService, Error, SelectionProofConfig}; +use bls::PublicKeyBytes; use eth2::types::SyncCommitteeSelection; use futures::future::join_all; use futures::stream::{FuturesUnordered, StreamExt}; @@ -8,7 +9,7 @@ use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use tracing::{debug, error, info, warn}; -use types::{ChainSpec, EthSpec, PublicKeyBytes, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId}; +use types::{ChainSpec, EthSpec, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId}; use validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; /// Top-level data-structure containing sync duty information. 
diff --git a/validator_client/validator_services/src/sync_committee_service.rs b/validator_client/validator_services/src/sync_committee_service.rs index 5f6b1cb710..28c3d1caad 100644 --- a/validator_client/validator_services/src/sync_committee_service.rs +++ b/validator_client/validator_services/src/sync_committee_service.rs @@ -1,5 +1,6 @@ use crate::duties_service::DutiesService; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; +use bls::PublicKeyBytes; use eth2::types::BlockId; use futures::future::FutureExt; use futures::future::join_all; @@ -13,8 +14,8 @@ use task_executor::TaskExecutor; use tokio::time::{Duration, Instant, sleep, sleep_until}; use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; use types::{ - ChainSpec, EthSpec, Hash256, PublicKeyBytes, Slot, SyncCommitteeSubscription, - SyncContributionData, SyncDuty, SyncSelectionProof, SyncSubnetId, + ChainSpec, EthSpec, Hash256, Slot, SyncCommitteeSubscription, SyncContributionData, SyncDuty, + SyncSelectionProof, SyncSubnetId, }; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; diff --git a/validator_client/validator_store/Cargo.toml b/validator_client/validator_store/Cargo.toml index 8c5451b2d0..8b1879c837 100644 --- a/validator_client/validator_store/Cargo.toml +++ b/validator_client/validator_store/Cargo.toml @@ -5,6 +5,7 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] +bls = { workspace = true } eth2 = { workspace = true } slashing_protection = { workspace = true } types = { workspace = true } diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 6fd2e27064..2b472799d2 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -1,3 +1,4 @@ +use bls::{PublicKeyBytes, Signature}; use eth2::types::{FullBlockContents, PublishBlockRequest}; use slashing_protection::NotSafe; use std::fmt::Debug; @@ -5,9 +6,9 @@ use 
std::future::Future; use std::sync::Arc; use types::{ Address, Attestation, AttestationError, BlindedBeaconBlock, Epoch, EthSpec, Graffiti, Hash256, - PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBlindedBeaconBlock, - SignedContributionAndProof, SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + SelectionProof, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedContributionAndProof, + SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, SyncCommitteeMessage, + SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; #[derive(Debug, PartialEq, Clone)] diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 6ef179fbe9..16ce1e023f 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -6,6 +6,7 @@ edition = { workspace = true } [dependencies] account_utils = { workspace = true } +bls = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } educe = { workspace = true } diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs index 0e93b25773..a95d2a1fd6 100644 --- a/validator_manager/src/common.rs +++ b/validator_manager/src/common.rs @@ -1,5 +1,6 @@ pub use account_utils::STDIN_INPUTS_FLAG; use account_utils::strip_off_newlines; +use bls::{Keypair, PublicKeyBytes, SignatureBytes}; use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr}; use eth2::{ SensitiveUrl, diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index 19f78be2ea..8682705956 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -1,6 +1,7 @@ use super::common::*; use crate::DumpConfig; use account_utils::{random_password_string, read_mnemonic_from_cli, read_password_from_user}; +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, 
ArgMatches, Command}; use clap_utils::FLAG_HEADER; use eth2::{ @@ -586,6 +587,7 @@ async fn run(config: CreateConfig, spec: &ChainSpec) -> Result<(), S #[cfg(test)] pub mod tests { use super::*; + use bls::SignatureBytes; use eth2_network_config::Eth2NetworkConfig; use regex::Regex; use std::path::Path; diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs index 3ff0c9529d..2421b002aa 100644 --- a/validator_manager/src/delete_validators.rs +++ b/validator_manager/src/delete_validators.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::{ SensitiveUrl, @@ -5,7 +6,6 @@ use eth2::{ }; use serde::{Deserialize, Serialize}; use std::path::PathBuf; -use types::PublicKeyBytes; use crate::{DumpConfig, common::vc_http_client}; diff --git a/validator_manager/src/exit_validators.rs b/validator_manager/src/exit_validators.rs index 4a398793ce..b53d9c0a16 100644 --- a/validator_manager/src/exit_validators.rs +++ b/validator_manager/src/exit_validators.rs @@ -1,5 +1,6 @@ use crate::{DumpConfig, common::vc_http_client}; +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use eth2::types::{ConfigAndPreset, Epoch, StateId, ValidatorId, ValidatorStatus}; @@ -10,7 +11,7 @@ use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::fs::write; use std::path::PathBuf; use std::time::Duration; -use types::{ChainSpec, EthSpec, PublicKeyBytes}; +use types::{ChainSpec, EthSpec}; pub const CMD: &str = "exit"; pub const BEACON_URL_FLAG: &str = "beacon-node"; diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs index 082894a995..f7a09f8d8e 100644 --- a/validator_manager/src/list_validators.rs +++ b/validator_manager/src/list_validators.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::lighthouse_vc::types::SingleKeystoreResponse; use 
eth2::types::{ConfigAndPreset, StateId, ValidatorId, ValidatorStatus}; @@ -5,7 +6,7 @@ use eth2::{BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use serde::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; -use types::{ChainSpec, EthSpec, PublicKeyBytes}; +use types::{ChainSpec, EthSpec}; use crate::exit_validators::get_current_epoch; use crate::{DumpConfig, common::vc_http_client}; diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 08b50eb929..ace1d1941f 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -1,6 +1,7 @@ use super::common::*; use crate::DumpConfig; use account_utils::read_password_from_user; +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::{ SensitiveUrl, @@ -18,7 +19,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; use tokio::time::sleep; -use types::{Address, PublicKeyBytes}; +use types::Address; use zeroize::Zeroizing; pub const MOVE_DIR_NAME: &str = "lighthouse-validator-move"; From d9ddb72f5bcf3289b7650cc11e227b12a8688bd7 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 11 Dec 2025 03:56:51 -0300 Subject: [PATCH 18/26] Fix testnet script (#8557) Fix an issue where a kurtosis testnet script was failing because no supernodes were provided ``` There was an error interpreting Starlark code Evaluation error: fail: Fulu fork is enabled (epoch: 0) but no supernodes are configured, no nodes have 128 or more validators, and perfect_peerdas_enabled is not enabled. Either configure a supernode, ensure at least one node has 128+ validators, or enable perfect_peerdas_enabled in network_params with 16 participants. 
at [github.com/ethpandaops/ethereum-package/main.star:83:57]: run at [github.com/ethpandaops/ethereum-package/src/package_io/input_parser.star:377:17]: input_parser at [0:0]: fail ``` Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Pawan Dhananjay --- scripts/tests/genesis-sync-config-electra.yaml | 3 +-- scripts/tests/genesis-sync-config-fulu.yaml | 3 +-- scripts/tests/network_params.yaml | 3 ++- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/scripts/tests/genesis-sync-config-electra.yaml b/scripts/tests/genesis-sync-config-electra.yaml index 153f754c94..1d1ed4d315 100644 --- a/scripts/tests/genesis-sync-config-electra.yaml +++ b/scripts/tests/genesis-sync-config-electra.yaml @@ -6,15 +6,14 @@ participants: # nodes without validators, used for testing sync. - cl_type: lighthouse cl_image: lighthouse:local - supernode: true # no supernode in Electra, this is for future proof validator_count: 0 - cl_type: lighthouse cl_image: lighthouse:local - supernode: false validator_count: 0 network_params: seconds_per_slot: 6 electra_fork_epoch: 0 + fulu_fork_epoch: 100000 # a really big number so this test stays in electra preset: "minimal" additional_services: - tx_fuzz diff --git a/scripts/tests/genesis-sync-config-fulu.yaml b/scripts/tests/genesis-sync-config-fulu.yaml index 98dc8751d6..6d2c2647a9 100644 --- a/scripts/tests/genesis-sync-config-fulu.yaml +++ b/scripts/tests/genesis-sync-config-fulu.yaml @@ -21,8 +21,7 @@ participants: validator_count: 0 network_params: seconds_per_slot: 6 - electra_fork_epoch: 0 - fulu_fork_epoch: 1 + fulu_fork_epoch: 0 preset: "minimal" additional_services: - tx_fuzz diff --git a/scripts/tests/network_params.yaml b/scripts/tests/network_params.yaml index 0fda1aa34b..35916ac1e4 100644 --- a/scripts/tests/network_params.yaml +++ b/scripts/tests/network_params.yaml @@ -6,9 +6,10 @@ participants: cl_image: lighthouse:local cl_extra_params: - --target-peers=3 + supernode: true count: 4 network_params: - electra_fork_epoch: 0 + 
fulu_fork_epoch: 0 seconds_per_slot: 3 num_validator_keys_per_node: 20 global_log_level: debug From 5abbdb660af28c582d98635725347c28464523b0 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 11 Dec 2025 14:56:53 +0800 Subject: [PATCH 19/26] Do not request attestation data when attestation duty is empty (#8559) Co-Authored-By: Tan Chee Keong --- .../validator_services/src/attestation_service.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index b2b8bc81e2..587d4668b8 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -193,6 +193,12 @@ impl AttestationService = self.duties_service.attesters(slot).into_iter().collect(); + + // Return early if there is no attestation duties + if attestation_duties.is_empty() { + return Ok(()); + } + let attestation_service = self.clone(); let attestation_data_handle = self @@ -371,10 +377,6 @@ impl AttestationService Date: Fri, 12 Dec 2025 05:45:38 -0300 Subject: [PATCH 20/26] Rust 1.92 lints (#8567) Co-Authored-By: Eitan Seri-Levi --- beacon_node/beacon_chain/src/test_utils.rs | 1 - beacon_node/beacon_chain/tests/store_tests.rs | 2 -- beacon_node/execution_layer/src/engine_api.rs | 2 +- consensus/types/src/fork/fork_context.rs | 3 +-- lcli/src/http_sync.rs | 1 - 5 files changed, 2 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 3651512b85..54fa6b5ff0 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2923,7 +2923,6 @@ where let chain_dump = self.chain.chain_dump().unwrap(); chain_dump .iter() - .cloned() .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root) .filter(|block_hash| 
*block_hash != Hash256::zero()) .map(|hash| hash.into()) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index c1c53c014c..8de96adb2d 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5528,7 +5528,6 @@ fn get_finalized_epoch_boundary_blocks( dump: &[BeaconSnapshot>], ) -> HashSet { dump.iter() - .cloned() .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into()) .collect() } @@ -5537,7 +5536,6 @@ fn get_blocks( dump: &[BeaconSnapshot>], ) -> HashSet { dump.iter() - .cloned() .map(|checkpoint| checkpoint.beacon_block_root.into()) .collect() } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index f285640b21..b0cc4dd824 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -770,7 +770,7 @@ impl ClientVersionV1 { self.commit .0 .get(..4) - .map_or_else(|| self.commit.0.as_str(), |s| s) + .unwrap_or(self.commit.0.as_str()) .to_lowercase(), lighthouse_commit_prefix .0 diff --git a/consensus/types/src/fork/fork_context.rs b/consensus/types/src/fork/fork_context.rs index aec7276124..89f69bcbb6 100644 --- a/consensus/types/src/fork/fork_context.rs +++ b/consensus/types/src/fork/fork_context.rs @@ -63,8 +63,7 @@ impl ForkContext { let current_epoch = current_slot.epoch(E::slots_per_epoch()); let current_fork = epoch_to_forks .values() - .filter(|&fork| fork.fork_epoch <= current_epoch) - .next_back() + .rfind(|&fork| fork.fork_epoch <= current_epoch) .cloned() .expect("should match at least genesis epoch"); diff --git a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs index dd941cda74..6a0eb2a0e1 100644 --- a/lcli/src/http_sync.rs +++ b/lcli/src/http_sync.rs @@ -132,7 +132,6 @@ async fn get_block_from_source( let (kzg_proofs, blobs): (Vec<_>, Vec<_>) = blobs_from_source .iter() - .cloned() .map(|sidecar| 
(sidecar.kzg_proof, sidecar.blob.clone())) .unzip(); From cd0b1ef6485d05c8920d308d8c2470f0f5f3fceb Mon Sep 17 00:00:00 2001 From: gustavo Date: Mon, 15 Dec 2025 01:47:04 +0000 Subject: [PATCH 21/26] fix(bls): fix is_infinity when aggregating onto empty AggregateSignature (#8496) Co-Authored-By: figtracer <1gusredo@gmail.com> Co-Authored-By: Michael Sproul --- crypto/bls/src/generic_aggregate_signature.rs | 16 ++++++++++------ crypto/bls/tests/tests.rs | 11 +++++++++++ 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index 98a634ee11..35674394a0 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -124,13 +124,15 @@ where /// Aggregates a signature onto `self`. pub fn add_assign(&mut self, other: &GenericSignature) { if let Some(other_point) = other.point() { - self.is_infinity = self.is_infinity && other.is_infinity; if let Some(self_point) = &mut self.point { - self_point.add_assign(other_point) + self_point.add_assign(other_point); + self.is_infinity = self.is_infinity && other.is_infinity; } else { let mut self_point = AggSig::infinity(); self_point.add_assign(other_point); - self.point = Some(self_point) + self.point = Some(self_point); + // the result is infinity, if `other` is + self.is_infinity = other.is_infinity; } } } @@ -138,13 +140,15 @@ where /// Aggregates an aggregate signature onto `self`. 
pub fn add_assign_aggregate(&mut self, other: &Self) { if let Some(other_point) = other.point() { - self.is_infinity = self.is_infinity && other.is_infinity; if let Some(self_point) = &mut self.point { - self_point.add_assign_aggregate(other_point) + self_point.add_assign_aggregate(other_point); + self.is_infinity = self.is_infinity && other.is_infinity; } else { let mut self_point = AggSig::infinity(); self_point.add_assign_aggregate(other_point); - self.point = Some(self_point) + self.point = Some(self_point); + // the result is infinity, if `other` is + self.is_infinity = other.is_infinity; } } } diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs index 00f82bfcec..1827ea9921 100644 --- a/crypto/bls/tests/tests.rs +++ b/crypto/bls/tests/tests.rs @@ -356,6 +356,17 @@ macro_rules! test_suite { .assert_single_message_verify(true) } + #[test] + fn empty_aggregate_plus_infinity_should_be_infinity() { + let mut agg = AggregateSignature::empty(); + let infinity_sig = Signature::deserialize(&INFINITY_SIGNATURE).unwrap(); + agg.add_assign(&infinity_sig); + assert!( + agg.is_infinity(), + "is_infinity flag should be true after adding infinity to empty" + ); + } + #[test] fn deserialize_infinity_public_key() { PublicKey::deserialize(&bls::INFINITY_PUBLIC_KEY).unwrap_err(); From 49e1112da2af379ed5845249de0a9008a8a5be38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20David=20Ram=C3=ADrez=20Chiquillo?= Date: Sun, 14 Dec 2025 21:33:29 -0500 Subject: [PATCH 22/26] Add regression test for unaligned checkpoint sync with payload pruning (#8458) Closes #8426 Added a new regression test: `reproduction_unaligned_checkpoint_sync_pruned_payload`. This test reproduces the bug where unaligned checkpoint syncs (skipped slots at epoch boundaries) fail to import the anchor block's execution payload when `prune_payloads` is enabled. The test simulates the failure mode by: - Skipping if execution payloads are not applicable. 
- Creating a harness with an unaligned checkpoint (gap of 3 slots). - Configuring the client with prune_payloads = true. It asserts that the Beacon Chain builds successfully (previously it panicked with `MissingFullBlockExecutionPayloadPruned`), confirming the fix logic in `try_get_full_block`. Co-Authored-By: Andrurachi Co-Authored-By: Michael Sproul --- beacon_node/beacon_chain/tests/store_tests.rs | 152 ++++++++++++++++++ 1 file changed, 152 insertions(+) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 8de96adb2d..cc49f87184 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2782,6 +2782,158 @@ async fn weak_subjectivity_sync_without_blobs() { weak_subjectivity_sync_test(slots, checkpoint_slot, None, false).await } +// Ensures that an unaligned checkpoint sync (the block is older than the state) +// works correctly even when `prune_payloads` is enabled. +// +// Previously, the `HotColdDB` would refuse to load the execution payload for the +// anchor block because it was considered "pruned", causing the node to fail startup. +#[tokio::test] +async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { + let spec = test_spec::(); + + // Requires Execution Payloads. + let Some(_) = spec.deneb_fork_epoch else { + return; + }; + + // Create an unaligned checkpoint with a gap of 3 slots. 
+ let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9 - 3); + + let slots = (1..num_initial_slots) + .map(Slot::new) + .filter(|&slot| slot <= checkpoint_slot || slot > checkpoint_slot + 3) + .collect::>(); + + let temp1 = tempdir().unwrap(); + let full_store = get_store_generic(&temp1, StoreConfig::default(), spec.clone()); + + let harness = get_harness_import_all_data_columns(full_store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + ) + .await; + + // Extract snapshot data from the harness. + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + + let wss_block = harness + .chain + .store + .get_full_block(&wss_block_root) + .unwrap() + .unwrap(); + + // The test premise requires the anchor block to have a payload. + assert!(wss_block.message().execution_payload().is_ok()); + + let wss_blobs_opt = harness + .chain + .get_or_reconstruct_blobs(&wss_block_root) + .unwrap(); + + let wss_state = full_store + .get_state(&wss_state_root, Some(checkpoint_slot), CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); + + // Configure the client with `prune_payloads = true`. + // This triggers the path where `try_get_full_block` must explicitly handle the anchor block. 
+ let temp2 = tempdir().unwrap(); + let store_config = StoreConfig { + prune_payloads: true, + ..StoreConfig::default() + }; + + let store = get_store_generic(&temp2, store_config, spec.clone()); + + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(spec.seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + + let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }; + + let trusted_setup = get_kzg(&spec); + let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); + let mock = mock_execution_layer_from_parts( + harness.spec.clone(), + harness.runtime.task_executor.clone(), + ); + let all_custody_columns = (0..spec.number_of_custody_groups).collect::>(); + + // Attempt to build the BeaconChain. + // If the bug is present, this will panic with `MissingFullBlockExecutionPayloadPruned`. + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, trusted_setup) + .chain_config(chain_config) + .store(store.clone()) + .custom_spec(spec.clone().into()) + .task_executor(harness.chain.task_executor.clone()) + .weak_subjectivity_state( + wss_state, + wss_block.clone(), + wss_blobs_opt.clone(), + genesis_state, + ) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .event_handler(Some(ServerSentEventHandler::new_with_capacity(1))) + .execution_layer(Some(mock.el)) + .ordered_custody_column_indices(all_custody_columns) + .rng(Box::new(StdRng::seed_from_u64(42))) + .build(); + + assert!( + beacon_chain.is_ok(), + "Beacon Chain failed to build. The anchor payload may have been incorrectly pruned. 
Error: {:?}", + beacon_chain.err() + ); + + let chain = beacon_chain.as_ref().unwrap(); + let wss_block_slot = wss_block.slot(); + + assert_ne!( + wss_block_slot, + chain.head_snapshot().beacon_state.slot(), + "Test invalid: Checkpoint was aligned (Slot {} == Slot {}). The test did not trigger the unaligned edge case.", + wss_block_slot, + chain.head_snapshot().beacon_state.slot() + ); + + let payload_exists = chain + .store + .execution_payload_exists(&wss_block_root) + .unwrap_or(false); + + assert!( + payload_exists, + "Split block payload must exist in the new node's store after checkpoint sync" + ); +} + async fn weak_subjectivity_sync_test( slots: Vec, checkpoint_slot: Slot, From 6a3a32515f1a96b7cba543add2c75a021881d87f Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 15 Dec 2025 07:20:10 +0400 Subject: [PATCH 23/26] Update `strum` to `0.27` (#8564) #8547 Update our `strum` dependency to `0.27`. This unifies our strum dependencies and removes our duplication of `strum` (and by extension, `strum_macros`). 
Co-Authored-By: Mac L Co-Authored-By: Michael Sproul --- Cargo.lock | 64 ++++++------------- Cargo.toml | 2 +- beacon_node/store/src/config.rs | 4 +- database_manager/src/lib.rs | 4 +- slasher/src/config.rs | 4 +- .../beacon_node_fallback/src/lib.rs | 4 +- 6 files changed, 27 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 413596beeb..7383d4e1fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -138,7 +138,7 @@ checksum = "4bc32535569185cbcb6ad5fa64d989a47bccb9a08e27284b1f2a3ccf16e6d010" dependencies = [ "alloy-primitives", "num_enum", - "strum 0.27.2", + "strum", ] [[package]] @@ -533,7 +533,7 @@ checksum = "6d792e205ed3b72f795a8044c52877d2e6b6e9b1d13f431478121d8d4eaa9028" dependencies = [ "alloy-sol-macro-input", "const-hex", - "heck 0.5.0", + "heck", "indexmap 2.12.0", "proc-macro-error2", "proc-macro2", @@ -551,7 +551,7 @@ checksum = "0bd1247a8f90b465ef3f1207627547ec16940c35597875cdc09c49d58b19693c" dependencies = [ "const-hex", "dunce", - "heck 0.5.0", + "heck", "macro-string", "proc-macro2", "quote", @@ -1273,7 +1273,7 @@ dependencies = [ "ssz_types", "state_processing", "store", - "strum 0.24.1", + "strum", "superstruct", "task_executor", "tempfile", @@ -1314,7 +1314,7 @@ dependencies = [ "serde_json", "slasher", "store", - "strum 0.24.1", + "strum", "task_executor", "tracing", "types", @@ -1332,7 +1332,7 @@ dependencies = [ "sensitive_url", "serde", "slot_clock", - "strum 0.24.1", + "strum", "task_executor", "tokio", "tracing", @@ -1355,7 +1355,7 @@ dependencies = [ "parking_lot", "serde", "slot_clock", - "strum 0.24.1", + "strum", "task_executor", "tokio", "tokio-util", @@ -1868,7 +1868,7 @@ version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.110", @@ -2524,7 +2524,7 @@ dependencies = [ "hex", "serde", "store", - "strum 0.24.1", + "strum", "tracing", 
"types", ] @@ -3071,7 +3071,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.110", @@ -3427,7 +3427,7 @@ dependencies = [ "slot_clock", "ssz_types", "state_processing", - "strum 0.24.1", + "strum", "superstruct", "task_executor", "tempfile", @@ -4057,12 +4057,6 @@ dependencies = [ "psutil", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -5355,7 +5349,7 @@ version = "0.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd297cf53f0cb3dee4d2620bb319ae47ef27c702684309f682bdb7e55a18ae9c" dependencies = [ - "heck 0.5.0", + "heck", "quote", "syn 2.0.110", ] @@ -5550,7 +5544,7 @@ dependencies = [ "smallvec", "snap", "ssz_types", - "strum 0.24.1", + "strum", "superstruct", "task_executor", "tempfile", @@ -6268,7 +6262,7 @@ dependencies = [ "smallvec", "ssz_types", "store", - "strum 0.24.1", + "strum", "task_executor", "tokio", "tokio-stream", @@ -8457,7 +8451,7 @@ dependencies = [ "safe_arith", "serde", "ssz_types", - "strum 0.24.1", + "strum", "tempfile", "tracing", "tree_hash", @@ -8683,7 +8677,7 @@ dependencies = [ "smallvec", "ssz_types", "state_processing", - "strum 0.24.1", + "strum", "superstruct", "tempfile", "tracing", @@ -8706,35 +8700,13 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" -[[package]] -name = "strum" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" -dependencies = [ - "strum_macros 0.24.3", -] - [[package]] 
name = "strum" version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" dependencies = [ - "strum_macros 0.27.2", -] - -[[package]] -name = "strum_macros" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "rustversion", - "syn 1.0.109", + "strum_macros", ] [[package]] @@ -8743,7 +8715,7 @@ version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.110", diff --git a/Cargo.toml b/Cargo.toml index aea8fd1b8d..c2094a46ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -235,7 +235,7 @@ snap = "1" ssz_types = { version = "0.14.0", features = ["context_deserialize", "runtime_types"] } state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" } -strum = { version = "0.24", features = ["derive"] } +strum = { version = "0.27", features = ["derive"] } superstruct = "0.10" swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } syn = "1" diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 05aa016ec1..0aa00e659b 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -5,7 +5,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::io::{Read, Write}; use std::num::NonZeroUsize; -use strum::{Display, EnumString, EnumVariantNames}; +use strum::{Display, EnumString, VariantNames}; use superstruct::superstruct; use types::EthSpec; use types::non_zero_usize::new_non_zero_usize; @@ -267,7 +267,7 @@ mod test { } #[derive( - Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Display, EnumString, 
EnumVariantNames, + Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Display, EnumString, VariantNames, )] #[strum(serialize_all = "lowercase")] pub enum DatabaseBackend { diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 6bb7531493..608400fa7e 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -21,7 +21,7 @@ use store::{ errors::Error, metadata::{CURRENT_SCHEMA_VERSION, SchemaVersion}, }; -use strum::{EnumString, EnumVariantNames}; +use strum::{EnumString, VariantNames}; use tracing::{info, warn}; use types::{BeaconState, EthSpec, Slot}; @@ -80,7 +80,7 @@ pub fn display_db_version( } #[derive( - Debug, PartialEq, Eq, Clone, EnumString, Deserialize, Serialize, EnumVariantNames, ValueEnum, + Debug, PartialEq, Eq, Clone, EnumString, Deserialize, Serialize, VariantNames, ValueEnum, )] pub enum InspectTarget { #[strum(serialize = "sizes")] diff --git a/slasher/src/config.rs b/slasher/src/config.rs index a8194bed49..144016efd2 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -2,7 +2,7 @@ use crate::Error; use serde::{Deserialize, Serialize}; use std::num::NonZeroUsize; use std::path::PathBuf; -use strum::{Display, EnumString, EnumVariantNames}; +use strum::{Display, EnumString, VariantNames}; use types::non_zero_usize::new_non_zero_usize; use types::{Epoch, EthSpec, IndexedAttestation}; @@ -59,7 +59,7 @@ pub struct DiskConfig { } #[derive( - Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Display, EnumString, EnumVariantNames, + Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Display, EnumString, VariantNames, )] #[strum(serialize_all = "lowercase")] pub enum DatabaseBackend { diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 6abcd44cc9..2d75df2fa3 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -20,7 +20,7 @@ use std::future::Future; use 
std::sync::Arc; use std::time::{Duration, Instant}; use std::vec::Vec; -use strum::EnumVariantNames; +use strum::VariantNames; use task_executor::TaskExecutor; use tokio::{sync::RwLock, time::sleep}; use tracing::{debug, error, warn}; @@ -752,7 +752,7 @@ async fn sort_nodes_by_health(nodes: &mut Vec) { } /// Serves as a cue for `BeaconNodeFallback` to tell which requests need to be broadcasted. -#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, EnumVariantNames, ValueEnum)] +#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, VariantNames, ValueEnum)] #[strum(serialize_all = "kebab-case")] pub enum ApiTopic { None, From 32f7615cc897b9b15c7587c45ffbc1174d8b3ec9 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 15 Dec 2025 07:20:12 +0400 Subject: [PATCH 24/26] Update `syn` to `2.0.110` (#8563) #8547 We are currently using an older version of `syn` in `test_random_derive`. Updating this removes one of the sources of `syn` `1.0.109` in our dependency tree. Co-Authored-By: Mac L Co-Authored-By: Michael Sproul --- Cargo.lock | 2 +- Cargo.toml | 2 +- common/test_random_derive/src/lib.rs | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7383d4e1fe..6ed7bfd0b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8920,7 +8920,7 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn 1.0.109", + "syn 2.0.110", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c2094a46ba..d5d1687c76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -238,7 +238,7 @@ store = { path = "beacon_node/store" } strum = { version = "0.27", features = ["derive"] } superstruct = "0.10" swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } -syn = "1" +syn = "2" sysinfo = "0.26" system_health = { path = "common/system_health" } task_executor = { path = "common/task_executor" } diff --git a/common/test_random_derive/src/lib.rs b/common/test_random_derive/src/lib.rs index 3017936f1a..bf57d79aaa 100644 --- 
a/common/test_random_derive/src/lib.rs +++ b/common/test_random_derive/src/lib.rs @@ -8,7 +8,8 @@ use syn::{DeriveInput, parse_macro_input}; /// The field attribute is: `#[test_random(default)]` fn should_use_default(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { - attr.path.is_ident("test_random") && attr.tokens.to_string().replace(' ', "") == "(default)" + attr.path().is_ident("test_random") + && matches!(&attr.meta, syn::Meta::List(list) if list.tokens.to_string().replace(' ', "") == "default") }) } From ac8d77369d52e6fd9ffcb4bfa0e8ae11dd65d7ed Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Mon, 15 Dec 2025 14:08:21 +0900 Subject: [PATCH 25/26] Fix Makefile to avoid git describe error in CI (#8513) Fixes the error `fatal: No names found, cannot describe anything.` that occurs when running `make` commands in CI (GitHub Actions). https://github.com/sigp/lighthouse/actions/runs/19839541042/job/56844781126#step:5:13 > fatal: No names found, cannot describe anything. Changed the `GIT_TAG` variable assignment in the Makefile from immediate evaluation to lazy evaluation: ```diff - GIT_TAG := $(shell git describe --tags --candidates 1) + GIT_TAG = $(shell git describe --tags --candidates 1) ``` This change ensures that git describe is only executed when `GIT_TAG` is actually used (in the `build-release-tarballs` target), rather than on every Makefile invocation. 
Co-Authored-By: ackintosh --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a6891b682f..c14f1d712a 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ EF_TESTS = "testing/ef_tests" STATE_TRANSITION_VECTORS = "testing/state_transition_vectors" EXECUTION_ENGINE_INTEGRATION = "testing/execution_engine_integration" -GIT_TAG := $(shell git describe --tags --candidates 1) +GIT_TAG = $(shell git describe --tags --candidates 1) BIN_DIR = "bin" X86_64_TAG = "x86_64-unknown-linux-gnu" From afa6457acf2a38c90b89eb0434135e1cf3646f77 Mon Sep 17 00:00:00 2001 From: 0xMushow <105550256+0xMushow@users.noreply.github.com> Date: Mon, 15 Dec 2025 06:14:15 +0100 Subject: [PATCH 26/26] fix visual bug on visualize_batch_state leading to a non-wanted comma (#8499) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Which issue # does this PR address? None The `visualize_batch_state`  functions uses the following loop `for mut batch_index in 0..BATCH_BUFFER_SIZE`, making it from `0` to `BATCH_BUFFER_SIZE - 1` (behind the scenes). 
Hence we would never hit the following condition: ```rust if batch_index != BATCH_BUFFER_SIZE { visualization_string.push(','); } ``` Replacing `!=` with `<` & `BATCH_BUFFER_SIZE -1` allows for the following change: `[A,B,C,D,E,]` to become: `[A,B,C,D,E]` Co-Authored-By: Antoine James --- beacon_node/network/src/sync/range_sync/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 014d728ffe..4ce10e23ca 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1331,7 +1331,7 @@ impl SyncingChain { .get(&(self.processing_target + batch_index as u64 * EPOCHS_PER_BATCH)) { visualization_string.push(batch.visualize()); - if batch_index != BATCH_BUFFER_SIZE { + if batch_index < BATCH_BUFFER_SIZE - 1 { // Add a comma in between elements visualization_string.push(','); }