diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index a94a19900c..d86abd0721 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -196,7 +196,7 @@ jobs: - name: Run network tests for all known forks run: make test-network env: - TEST_FEATURES: portable,ci_logger + TEST_FEATURES: portable CI_LOGGER_DIR: ${{ runner.temp }}/network_test_logs - name: Upload logs uses: actions/upload-artifact@v4 diff --git a/Cargo.lock b/Cargo.lock index 1cf523e3e6..30be5fa233 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1665,6 +1665,26 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "context_deserialize" +version = "0.1.0" +dependencies = [ + "milhouse", + "serde", + "ssz_types", +] + +[[package]] +name = "context_deserialize_derive" +version = "0.1.0" +dependencies = [ + "context_deserialize", + "quote", + "serde", + "serde_json", + "syn 1.0.109", +] + [[package]] name = "convert_case" version = "0.4.0" @@ -8384,6 +8404,7 @@ dependencies = [ "sensitive_url", "serde_json", "tokio", + "tracing", "tracing-subscriber", "types", ] @@ -9389,6 +9410,8 @@ dependencies = [ "bls", "compare_fields", "compare_fields_derive", + "context_deserialize", + "context_deserialize_derive", "criterion", "derivative", "eth2_interop_keypairs", @@ -9781,6 +9804,7 @@ dependencies = [ name = "validator_store" version = "0.1.0" dependencies = [ + "eth2", "slashing_protection", "types", ] @@ -10036,6 +10060,7 @@ dependencies = [ "account_utils", "async-channel 1.9.0", "environment", + "eth2", "eth2_keystore", "eth2_network_config", "futures", diff --git a/Cargo.toml b/Cargo.toml index fa1cd9a0fd..9d7407d9ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,6 @@ [workspace] members = [ "account_manager", - "beacon_node", "beacon_node/beacon_chain", "beacon_node/beacon_processor", @@ -17,9 +16,7 @@ members = [ 
"beacon_node/operation_pool", "beacon_node/store", "beacon_node/timer", - "boot_node", - "common/account_utils", "common/clap_utils", "common/compare_fields", @@ -52,7 +49,8 @@ members = [ "common/validator_dir", "common/warp_utils", "common/workspace_members", - + "consensus/context_deserialize", + "consensus/context_deserialize_derive", "consensus/fixed_bytes", "consensus/fork_choice", "consensus/int_to_bytes", @@ -62,23 +60,17 @@ members = [ "consensus/state_processing", "consensus/swap_or_not_shuffle", "consensus/types", - "crypto/bls", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", "crypto/kzg", - "database_manager", - "lcli", - "lighthouse", "lighthouse/environment", - "slasher", "slasher/service", - "testing/ef_tests", "testing/eth1_test_rig", "testing/execution_engine_integration", @@ -87,8 +79,6 @@ members = [ "testing/state_transition_vectors", "testing/validator_test_rig", "testing/web3signer_tests", - - "validator_client", "validator_client/beacon_node_fallback", "validator_client/doppelganger_service", @@ -101,7 +91,6 @@ members = [ "validator_client/slashing_protection", "validator_client/validator_metrics", "validator_client/validator_services", - "validator_manager", ] resolver = "2" @@ -110,61 +99,107 @@ resolver = "2" edition = "2021" [workspace.dependencies] +account_utils = { path = "common/account_utils" } +alloy-consensus = "0.3.0" alloy-primitives = { version = "0.8", features = ["rlp", "getrandom"] } alloy-rlp = "0.3.4" -alloy-consensus = "0.3.0" anyhow = "1" arbitrary = { version = "1", features = ["derive"] } async-channel = "1.9.0" axum = "0.7.7" +beacon_chain = { path = "beacon_node/beacon_chain" } +beacon_node = { path = "beacon_node" } +beacon_node_fallback = { path = "validator_client/beacon_node_fallback" } +beacon_processor = { path = "beacon_node/beacon_processor" } bincode = "1" bitvec = "1" +bls = { path = "crypto/bls" } byteorder = "1" bytes = "1" -cargo_metadata = "0.19" -clap = { version = "4.5.4", 
features = ["derive", "cargo", "wrap_help"] } # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. c-kzg = { version = "1", default-features = false } +cargo_metadata = "0.19" +clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } +clap_utils = { path = "common/clap_utils" } +compare_fields = { path = "common/compare_fields" } compare_fields_derive = { path = "common/compare_fields_derive" } +context_deserialize = { path = "consensus/context_deserialize" } +context_deserialize_derive = { path = "consensus/context_deserialize_derive" } criterion = "0.5" delay_map = "0.4" +deposit_contract = { path = "common/deposit_contract" } derivative = "2" +directory = { path = "common/directory" } dirs = "3" -either = "1.9" -rust_eth_kzg = "0.5.4" discv5 = { version = "0.9", features = ["libp2p"] } +doppelganger_service = { path = "validator_client/doppelganger_service" } +either = "1.9" env_logger = "0.9" +environment = { path = "lighthouse/environment" } +eth1 = { path = "beacon_node/eth1" } +eth1_test_rig = { path = "testing/eth1_test_rig" } +eth2 = { path = "common/eth2" } +eth2_config = { path = "common/eth2_config" } +eth2_key_derivation = { path = "crypto/eth2_key_derivation" } +eth2_keystore = { path = "crypto/eth2_keystore" } +eth2_network_config = { path = "common/eth2_network_config" } +eth2_wallet = { path = "crypto/eth2_wallet" } ethereum_hashing = "0.7.0" ethereum_serde_utils = "0.7" ethereum_ssz = "0.8.2" ethereum_ssz_derive = "0.8.2" ethers-core = "1" +ethers-middleware = { version = "1", default-features = false } ethers-providers = { version = "1", default-features = false } ethers-signers = { version = "1", default-features = false } -ethers-middleware = { version = "1", default-features = false } +execution_layer = { path = "beacon_node/execution_layer" } exit-future = "0.2" +filesystem = { path = "common/filesystem" } +fixed_bytes = { path = 
"consensus/fixed_bytes" } fnv = "1" +fork_choice = { path = "consensus/fork_choice" } fs2 = "0.4" futures = "0.3" -graffiti_file = { path = "validator_client/graffiti_file" } +genesis = { path = "beacon_node/genesis" } gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", rev = "61b2820" } -hex = "0.4" +graffiti_file = { path = "validator_client/graffiti_file" } hashlink = "0.9.0" +health_metrics = { path = "common/health_metrics" } +hex = "0.4" +http_api = { path = "beacon_node/http_api" } hyper = "1" +initialized_validators = { path = "validator_client/initialized_validators" } +int_to_bytes = { path = "consensus/int_to_bytes" } itertools = "0.10" +kzg = { path = "crypto/kzg" } libsecp256k1 = "0.7" +lighthouse_network = { path = "beacon_node/lighthouse_network" } +lighthouse_validator_store = { path = "validator_client/lighthouse_validator_store" } +lighthouse_version = { path = "common/lighthouse_version" } +lockfile = { path = "common/lockfile" } log = "0.4" -logroller = "0.1.4" +logging = { path = "common/logging" } +logroller = "0.1.8" lru = "0.12" +lru_cache = { path = "common/lru_cache" } +malloc_utils = { path = "common/malloc_utils" } maplit = "1" +merkle_proof = { path = "consensus/merkle_proof" } +metrics = { path = "common/metrics" } milhouse = "0.5" mockito = "1.5.0" +monitoring_api = { path = "common/monitoring_api" } +network = { path = "beacon_node/network" } +node_test_rig = { path = "testing/node_test_rig" } num_cpus = "1" once_cell = "1.17.1" +operation_pool = { path = "beacon_node/operation_pool" } parking_lot = "0.12" paste = "1" +pretty_reqwest_error = { path = "common/pretty_reqwest_error" } prometheus = { version = "0.13", default-features = false } +proto_array = { path = "consensus/proto_array" } quickcheck = "1" quickcheck_macros = "1" quote = "1" @@ -182,18 +217,30 @@ reqwest = { version = "0.11", default-features = false, features = [ ring = "0.17" rpds = "0.11" rusqlite = { version = "0.28", 
features = ["bundled"] } +rust_eth_kzg = "0.5.4" +safe_arith = { path = "consensus/safe_arith" } +sensitive_url = { path = "common/sensitive_url" } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_repr = "0.1" serde_yaml = "0.9" sha2 = "0.9" +signing_method = { path = "validator_client/signing_method" } +slasher = { path = "slasher", default-features = false } +slashing_protection = { path = "validator_client/slashing_protection" } +slot_clock = { path = "common/slot_clock" } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" ssz_types = "0.10" +state_processing = { path = "consensus/state_processing" } +store = { path = "beacon_node/store" } strum = { version = "0.24", features = ["derive"] } superstruct = "0.8" +swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } syn = "1" sysinfo = "0.26" +system_health = { path = "common/system_health" } +task_executor = { path = "common/task_executor" } tempfile = "3" tokio = { version = "1", features = [ "rt-multi-thread", @@ -210,72 +257,10 @@ tracing-log = "0.2" tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } tree_hash = "0.9" tree_hash_derive = "0.9" -url = "2" -uuid = { version = "0.8", features = ["serde", "v4"] } -warp = { version = "0.3.7", default-features = false, features = ["tls"] } -zeroize = { version = "1", features = ["zeroize_derive", "serde"] } -zip = "0.6" - -# Local crates. 
-account_utils = { path = "common/account_utils" } -beacon_chain = { path = "beacon_node/beacon_chain" } -beacon_node = { path = "beacon_node" } -beacon_node_fallback = { path = "validator_client/beacon_node_fallback" } -beacon_processor = { path = "beacon_node/beacon_processor" } -bls = { path = "crypto/bls" } -clap_utils = { path = "common/clap_utils" } -compare_fields = { path = "common/compare_fields" } -deposit_contract = { path = "common/deposit_contract" } -directory = { path = "common/directory" } -doppelganger_service = { path = "validator_client/doppelganger_service" } -environment = { path = "lighthouse/environment" } -eth1 = { path = "beacon_node/eth1" } -eth1_test_rig = { path = "testing/eth1_test_rig" } -eth2 = { path = "common/eth2" } -eth2_config = { path = "common/eth2_config" } -eth2_key_derivation = { path = "crypto/eth2_key_derivation" } -eth2_keystore = { path = "crypto/eth2_keystore" } -eth2_network_config = { path = "common/eth2_network_config" } -eth2_wallet = { path = "crypto/eth2_wallet" } -execution_layer = { path = "beacon_node/execution_layer" } -fixed_bytes = { path = "consensus/fixed_bytes" } -filesystem = { path = "common/filesystem" } -fork_choice = { path = "consensus/fork_choice" } -genesis = { path = "beacon_node/genesis" } -health_metrics = { path = "common/health_metrics" } -http_api = { path = "beacon_node/http_api" } -initialized_validators = { path = "validator_client/initialized_validators" } -int_to_bytes = { path = "consensus/int_to_bytes" } -kzg = { path = "crypto/kzg" } -metrics = { path = "common/metrics" } -lighthouse_network = { path = "beacon_node/lighthouse_network" } -lighthouse_validator_store = { path = "validator_client/lighthouse_validator_store" } -lighthouse_version = { path = "common/lighthouse_version" } -workspace_members = { path = "common/workspace_members" } -lockfile = { path = "common/lockfile" } -logging = { path = "common/logging" } -lru_cache = { path = "common/lru_cache" } -malloc_utils = { path 
= "common/malloc_utils" } -merkle_proof = { path = "consensus/merkle_proof" } -monitoring_api = { path = "common/monitoring_api" } -network = { path = "beacon_node/network" } -node_test_rig = { path = "testing/node_test_rig" } -operation_pool = { path = "beacon_node/operation_pool" } -pretty_reqwest_error = { path = "common/pretty_reqwest_error" } -proto_array = { path = "consensus/proto_array" } -safe_arith = { path = "consensus/safe_arith" } -sensitive_url = { path = "common/sensitive_url" } -signing_method = { path = "validator_client/signing_method" } -slasher = { path = "slasher", default-features = false } -slashing_protection = { path = "validator_client/slashing_protection" } -slot_clock = { path = "common/slot_clock" } -state_processing = { path = "consensus/state_processing" } -store = { path = "beacon_node/store" } -swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } -system_health = { path = "common/system_health" } -task_executor = { path = "common/task_executor" } types = { path = "consensus/types" } unused_port = { path = "common/unused_port" } +url = "2" +uuid = { version = "0.8", features = ["serde", "v4"] } validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } validator_http_api = { path = "validator_client/http_api" } @@ -284,8 +269,12 @@ validator_metrics = { path = "validator_client/validator_metrics" } validator_services = { path = "validator_client/validator_services" } validator_store = { path = "validator_client/validator_store" } validator_test_rig = { path = "testing/validator_test_rig" } +warp = { version = "0.3.7", default-features = false, features = ["tls"] } warp_utils = { path = "common/warp_utils" } +workspace_members = { path = "common/workspace_members" } xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "4db64086bb02e9febb584ba93b9d16bb2ae3825a" } +zeroize = { version = "1", features = ["zeroize_derive", "serde"] } +zip = "0.6" zstd = "0.13" [profile.maxperf] 
@@ -294,5 +283,12 @@ lto = "fat" codegen-units = 1 incremental = false +[profile.reproducible] +inherits = "release" +debug = false +panic = "abort" +codegen-units = 1 +overflow-checks = true + [patch.crates-io] quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "681f413312404ab6e51f0b46f39b0075c6f4ebfd" } diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible new file mode 100644 index 0000000000..df57616874 --- /dev/null +++ b/Dockerfile.reproducible @@ -0,0 +1,44 @@ +# Define the Rust image as an argument with a default to x86_64 Rust 1.82 image based on Debian Bullseye +ARG RUST_IMAGE="rust:1.82-bullseye@sha256:ac7fe7b0c9429313c0fe87d3a8993998d1fe2be9e3e91b5e2ec05d3a09d87128" +FROM ${RUST_IMAGE} AS builder + +# Install specific version of the build dependencies +RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 cmake=3.18.4-2+deb11u1 + +# Add target architecture argument with default value +ARG RUST_TARGET="x86_64-unknown-linux-gnu" + +# Copy the project to the container +COPY . /app +WORKDIR /app + +# Get the latest commit timestamp and set SOURCE_DATE_EPOCH (default it to 0 if not passed) +ARG SOURCE_DATE=0 + +# Set environment variables for reproducibility +ARG RUSTFLAGS="-C link-arg=-Wl,--build-id=none -C metadata='' --remap-path-prefix $(pwd)=." 
+ENV SOURCE_DATE_EPOCH=$SOURCE_DATE \ + CARGO_INCREMENTAL=0 \ + LC_ALL=C \ + TZ=UTC \ + RUSTFLAGS="${RUSTFLAGS}" + +# Set the default features if not provided +ARG FEATURES="gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc" + +# Set the default profile if not provided +ARG PROFILE="reproducible" + +# Build the project with the reproducible settings +RUN cargo build --bin lighthouse \ + --features "${FEATURES}" \ + --profile "${PROFILE}" \ + --locked \ + --target "${RUST_TARGET}" + +RUN mv /app/target/${RUST_TARGET}/${PROFILE}/lighthouse /lighthouse + +# Create a minimal final image with just the binary +FROM gcr.io/distroless/cc-debian12:nonroot-6755e21ccd99ddead6edc8106ba03888cbeed41a +COPY --from=builder /lighthouse /lighthouse +ENTRYPOINT [ "/lighthouse" ] diff --git a/Makefile b/Makefile index 03bf33a6d8..fe5dfbe551 100644 --- a/Makefile +++ b/Makefile @@ -82,6 +82,37 @@ build-lcli-aarch64: build-lcli-riscv64: cross build --bin lcli --target riscv64gc-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked +# extracts the current source date for reproducible builds +SOURCE_DATE := $(shell git log -1 --pretty=%ct) + +# Default image for x86_64 +RUST_IMAGE_AMD64 ?= rust:1.82-bullseye@sha256:ac7fe7b0c9429313c0fe87d3a8993998d1fe2be9e3e91b5e2ec05d3a09d87128 + +# Reproducible build for x86_64 +build-reproducible-x86_64: + DOCKER_BUILDKIT=1 docker build \ + --build-arg RUST_TARGET="x86_64-unknown-linux-gnu" \ + --build-arg RUST_IMAGE=$(RUST_IMAGE_AMD64) \ + --build-arg SOURCE_DATE=$(SOURCE_DATE) \ + -f Dockerfile.reproducible \ + -t lighthouse:reproducible-amd64 . 
+ +# Default image for arm64 +RUST_IMAGE_ARM64 ?= rust:1.82-bullseye@sha256:3c1b8b6487513ad4e753d008b960260f5bcc81bf110883460f6ed3cd72bf439b + +# Reproducible build for aarch64 +build-reproducible-aarch64: + DOCKER_BUILDKIT=1 docker build \ + --platform linux/arm64 \ + --build-arg RUST_TARGET="aarch64-unknown-linux-gnu" \ + --build-arg RUST_IMAGE=$(RUST_IMAGE_ARM64) \ + --build-arg SOURCE_DATE=$(SOURCE_DATE) \ + -f Dockerfile.reproducible \ + -t lighthouse:reproducible-arm64 . + +# Build both architectures +build-reproducible-all: build-reproducible-x86_64 build-reproducible-aarch64 + # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary cp $(1)/lighthouse $(BIN_DIR)/lighthouse diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 30d6846964..596419c33e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -11,9 +11,6 @@ edition = { workspace = true } name = "beacon_node" path = "src/lib.rs" -[dev-dependencies] -node_test_rig = { path = "../testing/node_test_rig" } - [features] write_ssz_files = [ "beacon_chain/write_ssz_files", @@ -45,3 +42,6 @@ task_executor = { workspace = true } tracing = { workspace = true } types = { workspace = true } unused_port = { workspace = true } + +[dev-dependencies] +node_test_rig = { path = "../testing/node_test_rig" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 18b40cab7e..bbe7fad6af 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -5,10 +5,6 @@ authors = ["Paul Hauner ", "Age Manning ), } impl From for Error { fn from(e: BeaconChainError) -> Self { - Self::BeaconChainError(e) + Self::BeaconChainError(Box::new(e)) } } @@ -525,7 +525,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { .observed_attestations .write() .is_known_subset(attestation, observed_attestation_key_root) - .map_err(|e| Error::BeaconChainError(e.into()))? 
+ .map_err(|e| Error::BeaconChainError(Box::new(e.into())))? { metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); return Err(Error::AttestationSupersetKnown( @@ -628,7 +628,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { if !SelectionProof::from(selection_proof) .is_aggregator(committee.committee.len(), &chain.spec) - .map_err(|e| Error::BeaconChainError(e.into()))? + .map_err(|e| Error::BeaconChainError(Box::new(e.into())))? { return Err(Error::InvalidSelectionProof { aggregator_index }); } @@ -698,7 +698,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { .observed_attestations .write() .observe_item(attestation, Some(observed_attestation_key_root)) - .map_err(|e| Error::BeaconChainError(e.into()))? + .map_err(|e| Error::BeaconChainError(Box::new(e.into())))? { metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); return Err(Error::AttestationSupersetKnown( diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b4eb848ec6..c1d30253a3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2733,7 +2733,7 @@ impl BeaconChain { pub fn filter_chain_segment( self: &Arc, chain_segment: Vec>, - ) -> Result>, ChainSegmentResult> { + ) -> Result>, Box> { // This function will never import any blocks. let imported_blocks = vec![]; let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len()); @@ -2750,10 +2750,10 @@ impl BeaconChain { for (i, block) in chain_segment.into_iter().enumerate() { // Ensure the block is the correct structure for the fork at `block.slot()`. 
if let Err(e) = block.as_block().fork_name(&self.spec) { - return Err(ChainSegmentResult::Failed { + return Err(Box::new(ChainSegmentResult::Failed { imported_blocks, error: BlockError::InconsistentFork(e), - }); + })); } let block_root = block.block_root(); @@ -2765,18 +2765,18 @@ impl BeaconChain { // Without this check it would be possible to have a block verified using the // incorrect shuffling. That would be bad, mmkay. if block_root != *child_parent_root { - return Err(ChainSegmentResult::Failed { + return Err(Box::new(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearParentRoots, - }); + })); } // Ensure that the slots are strictly increasing throughout the chain segment. if *child_slot <= block.slot() { - return Err(ChainSegmentResult::Failed { + return Err(Box::new(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearSlots, - }); + })); } } @@ -2807,18 +2807,18 @@ impl BeaconChain { // The block has a known parent that does not descend from the finalized block. // There is no need to process this block or any children. Err(BlockError::NotFinalizedDescendant { block_parent_root }) => { - return Err(ChainSegmentResult::Failed { + return Err(Box::new(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NotFinalizedDescendant { block_parent_root }, - }); + })); } // If there was an error whilst determining if the block was invalid, return that // error. Err(BlockError::BeaconChainError(e)) => { - return Err(ChainSegmentResult::Failed { + return Err(Box::new(ChainSegmentResult::Failed { imported_blocks, error: BlockError::BeaconChainError(e), - }); + })); } // If the block was decided to be irrelevant for any other reason, don't include // this block or any of it's children in the filtered chain segment. 
@@ -2863,11 +2863,11 @@ impl BeaconChain { ); let mut filtered_chain_segment = match filtered_chain_segment_future.await { Ok(Ok(filtered_segment)) => filtered_segment, - Ok(Err(segment_result)) => return segment_result, + Ok(Err(segment_result)) => return *segment_result, Err(error) => { return ChainSegmentResult::Failed { imported_blocks, - error: BlockError::BeaconChainError(error), + error: BlockError::BeaconChainError(error.into()), } } }; @@ -2906,7 +2906,7 @@ impl BeaconChain { Err(error) => { return ChainSegmentResult::Failed { imported_blocks, - error: BlockError::BeaconChainError(error), + error: BlockError::BeaconChainError(error.into()), }; } }; @@ -3444,20 +3444,23 @@ impl BeaconChain { Ok(status) } - Err(e @ BlockError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { - debug!( - error = ?e, - "Beacon block processing cancelled" - ); - Err(e) - } - // There was an error whilst attempting to verify and import the block. The block might - // be partially verified or partially imported. Err(BlockError::BeaconChainError(e)) => { - crit!( - error = ?e, - "Beacon block processing error" - ); + match e.as_ref() { + BeaconChainError::TokioJoin(e) => { + debug!( + error = ?e, + "Beacon block processing cancelled" + ); + } + _ => { + // There was an error whilst attempting to verify and import the block. The block might + // be partially verified or partially imported. + crit!( + error = ?e, + "Beacon block processing error" + ); + } + }; Err(BlockError::BeaconChainError(e)) } // The block failed verification. 
@@ -3589,7 +3592,7 @@ impl BeaconChain { header.message.proposer_index, block_root, ) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; if let Some(slasher) = self.slasher.as_ref() { slasher.accept_block_header(header); } @@ -3674,7 +3677,7 @@ impl BeaconChain { header.message.proposer_index, block_root, ) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; if let Some(slasher) = self.slasher.as_ref() { slasher.accept_block_header(header.clone()); } @@ -3857,7 +3860,7 @@ impl BeaconChain { payload_verification_status, &self.spec, ) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; } // If the block is recent enough and it was not optimistically imported, check to see if it @@ -4070,7 +4073,7 @@ impl BeaconChain { warning = "The database is likely corrupt now, consider --purge-db", "No stored fork choice found to restore from" ); - Err(BlockError::BeaconChainError(e)) + Err(BlockError::BeaconChainError(Box::new(e))) } else { Ok(()) } @@ -4125,9 +4128,9 @@ impl BeaconChain { Provided block root is not a checkpoint.", )) .map_err(|err| { - BlockError::BeaconChainError( + BlockError::BeaconChainError(Box::new( BeaconChainError::WeakSubjectivtyShutdownError(err), - ) + )) })?; return Err(BlockError::WeakSubjectivityConflict); } @@ -4901,7 +4904,7 @@ impl BeaconChain { canonical_forkchoice_params: ForkchoiceUpdateParameters, ) -> Result { self.overridden_forkchoice_update_params_or_failure_reason(&canonical_forkchoice_params) - .or_else(|e| match e { + .or_else(|e| match *e { ProposerHeadError::DoNotReOrg(reason) => { trace!( %reason, @@ -4916,19 +4919,19 @@ impl BeaconChain { pub fn overridden_forkchoice_update_params_or_failure_reason( &self, canonical_forkchoice_params: &ForkchoiceUpdateParameters, - ) -> Result> { + ) -> Result>> { let _timer = 
metrics::start_timer(&metrics::FORK_CHOICE_OVERRIDE_FCU_TIMES); // Never override if proposer re-orgs are disabled. let re_org_head_threshold = self .config .re_org_head_threshold - .ok_or(DoNotReOrg::ReOrgsDisabled)?; + .ok_or(Box::new(DoNotReOrg::ReOrgsDisabled.into()))?; let re_org_parent_threshold = self .config .re_org_parent_threshold - .ok_or(DoNotReOrg::ReOrgsDisabled)?; + .ok_or(Box::new(DoNotReOrg::ReOrgsDisabled.into()))?; let head_block_root = canonical_forkchoice_params.head_root; @@ -4969,7 +4972,7 @@ impl BeaconChain { false }; if !current_slot_ok { - return Err(DoNotReOrg::HeadDistance.into()); + return Err(Box::new(DoNotReOrg::HeadDistance.into())); } // Only attempt a re-org if we have a proposer registered for the re-org slot. @@ -4992,7 +4995,7 @@ impl BeaconChain { decision_root = ?shuffling_decision_root, "Fork choice override proposer shuffling miss" ); - DoNotReOrg::NotProposing + Box::new(DoNotReOrg::NotProposing.into()) })? .index as u64; @@ -5002,7 +5005,7 @@ impl BeaconChain { .has_proposer_preparation_data_blocking(proposer_index) }; if !proposing_at_re_org_slot { - return Err(DoNotReOrg::NotProposing.into()); + return Err(Box::new(DoNotReOrg::NotProposing.into())); } // If the current slot is already equal to the proposal slot (or we are in the tail end of @@ -5017,18 +5020,22 @@ impl BeaconChain { (true, true) }; if !head_weak { - return Err(DoNotReOrg::HeadNotWeak { - head_weight: info.head_node.weight, - re_org_head_weight_threshold: info.re_org_head_weight_threshold, - } - .into()); + return Err(Box::new( + DoNotReOrg::HeadNotWeak { + head_weight: info.head_node.weight, + re_org_head_weight_threshold: info.re_org_head_weight_threshold, + } + .into(), + )); } if !parent_strong { - return Err(DoNotReOrg::ParentNotStrong { - parent_weight: info.parent_node.weight, - re_org_parent_weight_threshold: info.re_org_parent_weight_threshold, - } - .into()); + return Err(Box::new( + DoNotReOrg::ParentNotStrong { + parent_weight: 
info.parent_node.weight, + re_org_parent_weight_threshold: info.re_org_parent_weight_threshold, + } + .into(), + )); } // Check that the head block arrived late and is vulnerable to a re-org. This check is only @@ -5039,7 +5046,7 @@ impl BeaconChain { let head_block_late = self.block_observed_after_attestation_deadline(head_block_root, head_slot); if !head_block_late { - return Err(DoNotReOrg::HeadNotLate.into()); + return Err(Box::new(DoNotReOrg::HeadNotLate.into())); } let parent_head_hash = info.parent_node.execution_status.block_hash(); @@ -5253,16 +5260,16 @@ impl BeaconChain { .validators() .get(proposer_index as usize) .map(|v| v.pubkey) - .ok_or(BlockProductionError::BeaconChain( + .ok_or(BlockProductionError::BeaconChain(Box::new( BeaconChainError::ValidatorIndexUnknown(proposer_index as usize), - ))?; + )))?; let builder_params = BuilderParams { pubkey, slot: state.slot(), chain_health: self .is_healthy(&parent_root) - .map_err(BlockProductionError::BeaconChain)?, + .map_err(|e| BlockProductionError::BeaconChain(Box::new(e)))?, }; // If required, start the process of loading an execution payload from the EL early. This @@ -6108,7 +6115,7 @@ impl BeaconChain { payload_attributes: payload_attributes.into(), }, metadata: Default::default(), - version: Some(self.spec.fork_name_at_slot::(prepare_slot)), + version: self.spec.fork_name_at_slot::(prepare_slot), })); } } diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index fe9d8c6bfc..6fe710f41a 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -42,7 +42,7 @@ pub enum GossipBlobError { /// /// We were unable to process this blob due to an internal error. It's /// unclear if the blob is valid. - BeaconChainError(BeaconChainError), + BeaconChainError(Box), /// The `BlobSidecar` was gossiped over an incorrect subnet. 
/// @@ -147,13 +147,13 @@ impl std::fmt::Display for GossipBlobError { impl From for GossipBlobError { fn from(e: BeaconChainError) -> Self { - GossipBlobError::BeaconChainError(e) + GossipBlobError::BeaconChainError(e.into()) } } impl From for GossipBlobError { fn from(e: BeaconStateError) -> Self { - GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e)) + GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e).into()) } } @@ -446,7 +446,7 @@ pub fn validate_blob_sidecar_for_gossip( .observed_blob_sidecars .write() .observe_sidecar(blob_sidecar) - .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? + .map_err(|e| GossipBlobError::BeaconChainError(Box::new(e.into())))? { return Err(GossipBlobError::RepeatBlob { proposer: blob_sidecar.block_proposer_index(), diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index 69eecc89b8..0809ce34ef 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -1,6 +1,8 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; -use operation_pool::{AttMaxCover, MaxCover, RewardCache, SplitAttestation}; +use operation_pool::{ + AttMaxCover, MaxCover, RewardCache, SplitAttestation, PROPOSER_REWARD_DENOMINATOR, +}; use state_processing::{ common::get_attesting_indices_from_state, per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards, @@ -65,13 +67,10 @@ impl BeaconChain { let mut curr_epoch_total = 0; for cover in &per_attestation_rewards { - for &reward in cover.fresh_validators_rewards.values() { - if cover.att.data.slot.epoch(T::EthSpec::slots_per_epoch()) == state.current_epoch() - { - curr_epoch_total += reward; - } else { - prev_epoch_total += reward; - } + if cover.att.data.slot.epoch(T::EthSpec::slots_per_epoch()) == state.current_epoch() { + curr_epoch_total += cover.score() as 
u64; + } else { + prev_epoch_total += cover.score() as u64; } } @@ -80,7 +79,16 @@ impl BeaconChain { // Drop the covers. let per_attestation_rewards = per_attestation_rewards .into_iter() - .map(|cover| cover.fresh_validators_rewards) + .map(|cover| { + // Divide each reward numerator by the denominator. This can lead to the total being + // less than the sum of the individual rewards due to the fact that integer division + // does not distribute over addition. + let mut rewards = cover.fresh_validators_rewards; + rewards + .values_mut() + .for_each(|reward| *reward /= PROPOSER_REWARD_DENOMINATOR); + rewards + }) .collect(); // Add the attestation data if desired. diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 074ae93a79..26bf872392 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -252,7 +252,7 @@ pub enum BlockError { /// /// We were unable to process this block due to an internal error. It's unclear if the block is /// valid. - BeaconChainError(BeaconChainError), + BeaconChainError(Box), /// There was an error whilst verifying weak subjectivity. This block conflicts with the /// configured weak subjectivity checkpoint and was not imported. 
/// @@ -475,38 +475,40 @@ impl From for BlockError { block, local_shuffling, }, - e => BlockError::BeaconChainError(BeaconChainError::BlockSignatureVerifierError(e)), + e => BlockError::BeaconChainError( + BeaconChainError::BlockSignatureVerifierError(e).into(), + ), } } } impl From for BlockError { fn from(e: BeaconChainError) -> Self { - BlockError::BeaconChainError(e) + BlockError::BeaconChainError(e.into()) } } impl From for BlockError { fn from(e: BeaconStateError) -> Self { - BlockError::BeaconChainError(BeaconChainError::BeaconStateError(e)) + BlockError::BeaconChainError(BeaconChainError::BeaconStateError(e).into()) } } impl From for BlockError { fn from(e: SlotProcessingError) -> Self { - BlockError::BeaconChainError(BeaconChainError::SlotProcessingError(e)) + BlockError::BeaconChainError(BeaconChainError::SlotProcessingError(e).into()) } } impl From for BlockError { fn from(e: DBError) -> Self { - BlockError::BeaconChainError(BeaconChainError::DBError(e)) + BlockError::BeaconChainError(BeaconChainError::DBError(e).into()) } } impl From for BlockError { fn from(e: ArithError) -> Self { - BlockError::BeaconChainError(BeaconChainError::ArithError(e)) + BlockError::BeaconChainError(BeaconChainError::ArithError(e).into()) } } @@ -1000,7 +1002,7 @@ impl GossipVerifiedBlock { .observed_slashable .write() .observe_slashable(block.slot(), block.message().proposer_index(), block_root) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; // Now the signature is valid, store the proposal so we don't accept another from this // validator and slot. // @@ -1010,7 +1012,7 @@ impl GossipVerifiedBlock { .observed_block_producers .write() .observe_proposal(block_root, block.message()) - .map_err(|e| BlockError::BeaconChainError(e.into()))? + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))? 
{ SeenBlock::Slashable => { return Err(BlockError::Slashable); @@ -1321,13 +1323,13 @@ impl ExecutionPendingBlock { .observed_slashable .write() .observe_slashable(block.slot(), block.message().proposer_index(), block_root) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; chain .observed_block_producers .write() .observe_proposal(block_root, block.message()) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?; if let Some(parent) = chain .canonical_head @@ -1651,7 +1653,7 @@ impl ExecutionPendingBlock { // Ignore invalid attestations whilst importing attestations from a block. The // block might be very old and therefore the attestations useless to fork choice. Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()), - Err(e) => Err(BlockError::BeaconChainError(e.into())), + Err(e) => Err(BlockError::BeaconChainError(Box::new(e.into()))), }?; } drop(fork_choice); @@ -1743,7 +1745,7 @@ pub fn check_block_is_finalized_checkpoint_or_descendant< if chain .store .block_exists(&block.parent_root()) - .map_err(|e| BlockError::BeaconChainError(e.into()))? + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))? { Err(BlockError::NotFinalizedDescendant { block_parent_root: block.parent_root(), @@ -1888,7 +1890,7 @@ fn load_parent>( let root = block.parent_root(); let parent_block = chain .get_blinded_block(&block.parent_root()) - .map_err(BlockError::BeaconChainError)? + .map_err(|e| BlockError::BeaconChainError(Box::new(e)))? .ok_or_else(|| { // Return a `MissingBeaconBlock` error instead of a `ParentUnknown` error since // we've already checked fork choice for this block. 
diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index d11c112812..b43b259cf6 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -32,7 +32,7 @@ pub enum GossipDataColumnError { /// /// We were unable to process this data column due to an internal error. It's /// unclear if the data column is valid. - BeaconChainError(BeaconChainError), + BeaconChainError(Box), /// The proposal signature in invalid. /// /// ## Peer scoring @@ -162,13 +162,13 @@ pub enum GossipDataColumnError { impl From for GossipDataColumnError { fn from(e: BeaconChainError) -> Self { - GossipDataColumnError::BeaconChainError(e) + GossipDataColumnError::BeaconChainError(e.into()) } } impl From for GossipDataColumnError { fn from(e: BeaconStateError) -> Self { - GossipDataColumnError::BeaconChainError(BeaconChainError::BeaconStateError(e)) + GossipDataColumnError::BeaconChainError(BeaconChainError::BeaconStateError(e).into()) } } @@ -460,7 +460,7 @@ pub fn validate_data_column_sidecar_for_gossip( .observed_column_sidecars .read() .proposer_is_known(data_column) - .map_err(|e| GossipDataColumnError::BeaconChainError(e.into()))? + .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? { return Err(GossipDataColumnError::PriorKnown { proposer: data_column.block_proposer_index(), @@ -616,7 +616,7 @@ fn verify_proposer_and_signature( let (parent_state_root, mut parent_state) = chain .store .get_advanced_hot_state(block_parent_root, column_slot, parent_block.state_root) - .map_err(|e| GossipDataColumnError::BeaconChainError(e.into()))? + .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? 
.ok_or_else(|| { BeaconChainError::DBInconsistent(format!( "Missing state for parent block {block_parent_root:?}", @@ -748,7 +748,7 @@ pub fn observe_gossip_data_column( .observed_column_sidecars .write() .observe_sidecar(data_column_sidecar) - .map_err(|e| GossipDataColumnError::BeaconChainError(e.into()))? + .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? { return Err(GossipDataColumnError::PriorKnown { proposer: data_column_sidecar.block_proposer_index(), diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 8509c52c8a..2e6de463cc 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -296,7 +296,7 @@ pub enum BlockProductionError { MissingExecutionPayload, MissingKzgCommitment(String), TokioJoin(JoinError), - BeaconChain(BeaconChainError), + BeaconChain(Box), InvalidPayloadFork, InvalidBlockVariant(String), KzgError(kzg::Error), diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs index 0b9d19e156..8c3bb8c483 100644 --- a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -100,22 +100,13 @@ impl CheckpointMap { /// This cache stores `Eth1CacheData` that could potentially be finalized within 4 /// future epochs. +#[derive(Default)] pub struct Eth1FinalizationCache { by_checkpoint: CheckpointMap, pending_eth1: BTreeMap, last_finalized: Option, } -impl Default for Eth1FinalizationCache { - fn default() -> Self { - Self { - by_checkpoint: CheckpointMap::new(), - pending_eth1: BTreeMap::new(), - last_finalized: None, - } - } -} - /// Provides a cache of `Eth1CacheData` at epoch boundaries. This is used to /// finalize deposits when a new epoch is finalized. 
/// diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 1da8cb413b..aa98310c12 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -319,9 +319,9 @@ pub fn validate_execution_payload_for_gossip( .slot_clock .start_of(block.slot()) .map(|d| d.as_secs()) - .ok_or(BlockError::BeaconChainError( + .ok_or(BlockError::BeaconChainError(Box::new( BeaconChainError::UnableToComputeTimeAtSlot, - ))?; + )))?; // The block's execution payload timestamp is correct with respect to the slot if execution_payload.timestamp() != expected_timestamp { @@ -504,7 +504,7 @@ where "prepare_execution_payload_forkchoice_update_params", ) .await - .map_err(BlockProductionError::BeaconChain)?; + .map_err(|e| BlockProductionError::BeaconChain(Box::new(e)))?; let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index 3b576da1c7..d91f103b9d 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -49,7 +49,7 @@ pub enum EngineGetBlobsOutput { #[derive(Debug)] pub enum FetchEngineBlobError { BeaconStateError(BeaconStateError), - BeaconChainError(BeaconChainError), + BeaconChainError(Box), BlobProcessingError(BlockError), BlobSidecarError(BlobSidecarError), DataColumnSidecarError(DataColumnSidecarError), @@ -320,7 +320,7 @@ async fn compute_and_publish_data_columns( "compute_and_publish_data_columns", ) .await - .map_err(FetchEngineBlobError::BeaconChainError) + .map_err(|e| FetchEngineBlobError::BeaconChainError(Box::new(e))) .and_then(|r| r) } diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 94fa0a1890..03c468a35e 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -572,7 
+572,8 @@ impl, Cold: ItemStore> BackgroundMigrator 1 { warn!( state_summaries_dag_roots = ?state_summaries_dag_roots, - "Prune state summaries dag found more than one root" + error = "summaries dag found more than one root", + "Notify the devs your hot DB has some inconsistency. Pruning will fix it but devs want to know about it", ); } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs index 0b64fdbe08..a995f9d6b4 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs @@ -41,7 +41,7 @@ pub fn upgrade_to_v22( db: Arc>, genesis_state_root: Option, ) -> Result<(), Error> { - info!("Upgrading from v21 to v22"); + info!("Upgrading DB schema from v21 to v22"); let old_anchor = db.get_anchor_info(); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs index e66178df53..d0f8202679 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs @@ -8,6 +8,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; +use tracing::{debug, info}; use types::{Hash256, Slot}; /// Dummy value to use for the canonical head block root, see below. @@ -16,6 +17,8 @@ pub const DUMMY_CANONICAL_HEAD_BLOCK_ROOT: Hash256 = Hash256::repeat_byte(0xff); pub fn upgrade_to_v23( db: Arc>, ) -> Result, Error> { + info!("Upgrading DB schema from v22 to v23"); + // 1) Set the head-tracker to empty let Some(persisted_beacon_chain_v22) = db.get_item::(&BEACON_CHAIN_DB_KEY)? 
@@ -37,10 +40,24 @@ pub fn upgrade_to_v23( .hot_db .iter_column_keys::(DBColumn::BeaconStateTemporary) { + let state_root = state_root_result?; + debug!( + ?state_root, + "Deleting temporary state flag on v23 schema migration" + ); ops.push(KeyValueStoreOp::DeleteKey( DBColumn::BeaconStateTemporary, - state_root_result?.as_slice().to_vec(), + state_root.as_slice().to_vec(), )); + // Here we SHOULD delete the items for key `state_root` in columns `BeaconState` and + // `BeaconStateSummary`. However, in the event we have dangling temporary states at the time + // of the migration, the first pruning routine will prune them. They will be a tree branch / + // root not part of the finalized tree and trigger a warning log once. + // + // We believe there may be race conditions concerning temporary flags where a necessary + // canonical state is marked as temporary. In current stable, a restart with that DB will + // corrupt the DB. In the unlikely case this happens we choose to leave the states and + // allow pruning to clean them. } Ok(ops) diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 9135c3ce88..f206405f67 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -44,7 +44,7 @@ const MAX_FORK_CHOICE_DISTANCE: u64 = 256; #[derive(Debug)] enum Error { - BeaconChain(BeaconChainError), + BeaconChain(Box), // We don't use the inner value directly, but it's used in the Debug impl. 
HeadMissingFromSnapshotCache(#[allow(dead_code)] Hash256), BeaconState(#[allow(dead_code)] BeaconStateError), @@ -64,7 +64,7 @@ enum Error { impl From for Error { fn from(e: BeaconChainError) -> Self { - Self::BeaconChain(e) + Self::BeaconChain(e.into()) } } diff --git a/beacon_node/beacon_chain/src/summaries_dag.rs b/beacon_node/beacon_chain/src/summaries_dag.rs index ab379d1eb2..8dff2ac7be 100644 --- a/beacon_node/beacon_chain/src/summaries_dag.rs +++ b/beacon_node/beacon_chain/src/summaries_dag.rs @@ -43,13 +43,6 @@ pub enum Error { state_root: Hash256, latest_block_root: Hash256, }, - StateSummariesNotContiguous { - state_root: Hash256, - state_slot: Slot, - latest_block_root: Hash256, - parent_block_root: Box, - parent_block_latest_state_summary: Box>, - }, MissingChildStateRoot(Hash256), RequestedSlotAboveSummary { starting_state_root: Hash256, @@ -163,34 +156,17 @@ impl StateSummariesDAG { **state_root } else { // Common case: not a skipped slot. + // + // If we can't find a state summmary for the parent block and previous slot, + // then there is some amount of disjointedness in the DAG. We set the parent + // state root to 0x0 in this case, and will prune any dangling states. let parent_block_root = summary.block_parent_root; - if let Some(parent_block_summaries) = - state_summaries_by_block_root.get(&parent_block_root) - { - *parent_block_summaries - .get(&previous_slot) - // Should never error: summaries are contiguous, so if there's an - // entry it must contain at least one summary at the previous slot. - .ok_or(Error::StateSummariesNotContiguous { - state_root: *state_root, - state_slot: summary.slot, - latest_block_root: summary.latest_block_root, - parent_block_root: parent_block_root.into(), - parent_block_latest_state_summary: parent_block_summaries - .iter() - .max_by(|a, b| a.0.cmp(b.0)) - .map(|(slot, (state_root, _))| (*slot, **state_root)) - .into(), - })? - .0 - } else { - // We don't know of any summary with this parent block root. 
We'll - // consider this summary to be a root of `state_summaries_v22` - // collection and mark it as zero. - // The test store_tests::finalizes_non_epoch_start_slot manages to send two - // disjoint trees on its second migration. - Hash256::ZERO - } + state_summaries_by_block_root + .get(&parent_block_root) + .and_then(|parent_block_summaries| { + parent_block_summaries.get(&previous_slot) + }) + .map_or(Hash256::ZERO, |(parent_state_root, _)| **parent_state_root) } }; diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index e1a5de56d1..768c971f94 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -189,7 +189,7 @@ pub enum Error { /// /// We were unable to process this sync committee message due to an internal error. It's unclear if the /// sync committee message is valid. - BeaconChainError(BeaconChainError), + BeaconChainError(Box), /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. /// /// ## Peer scoring @@ -232,7 +232,7 @@ pub enum Error { impl From for Error { fn from(e: BeaconChainError) -> Self { - Error::BeaconChainError(e) + Error::BeaconChainError(e.into()) } } @@ -334,7 +334,7 @@ impl VerifiedSyncContribution { .observed_sync_contributions .write() .is_known_subset(contribution, contribution_data_root) - .map_err(|e| Error::BeaconChainError(e.into()))? + .map_err(|e| Error::BeaconChainError(Box::new(e.into())))? { metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); @@ -363,7 +363,7 @@ impl VerifiedSyncContribution { if !selection_proof .is_aggregator::() - .map_err(|e| Error::BeaconChainError(e.into()))? + .map_err(|e| Error::BeaconChainError(Box::new(e.into())))? 
{ return Err(Error::InvalidSelectionProof { aggregator_index }); } @@ -395,7 +395,7 @@ impl VerifiedSyncContribution { .observed_sync_contributions .write() .observe_item(contribution, Some(contribution_data_root)) - .map_err(|e| Error::BeaconChainError(e.into()))? + .map_err(|e| Error::BeaconChainError(Box::new(e.into())))? { metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index ca083f0572..d3689f7068 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -69,8 +69,6 @@ use types::{typenum::U4294967296, *}; pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // Environment variable to read if `fork_from_env` feature is enabled. pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; -// Environment variable to read if `ci_logger` feature is enabled. -pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR"; // Pre-computed data column sidecar using a single static blob from: // `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz` @@ -2671,10 +2669,7 @@ where mut latest_block_hash: Option, sync_committee_strategy: SyncCommitteeStrategy, ) -> AddBlocksResult { - assert!( - slots.windows(2).all(|w| w[0] <= w[1]), - "Slots have to be sorted" - ); // slice.is_sorted() isn't stabilized at the moment of writing this + assert!(slots.is_sorted(), "Slots have to be in ascending order"); let mut block_hash_from_slot: HashMap = HashMap::new(); let mut state_hash_from_slot: HashMap = HashMap::new(); for slot in slots { @@ -2714,10 +2709,7 @@ where mut latest_block_hash: Option, sync_committee_strategy: SyncCommitteeStrategy, ) -> AddBlocksResult { - assert!( - slots.windows(2).all(|w| w[0] <= w[1]), - "Slots have to be sorted" - ); // slice.is_sorted() isn't stabilized at the moment of writing this + 
assert!(slots.is_sorted(), "Slots have to be in ascending order"); let mut block_hash_from_slot: HashMap = HashMap::new(); let mut state_hash_from_slot: HashMap = HashMap::new(); for slot in slots { diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index c6fc3416e0..6b9ff9d6ed 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -508,13 +508,11 @@ async fn justified_checkpoint_becomes_invalid() { let is_valid = Payload::Invalid { latest_valid_hash: Some(parent_hash_of_justified), }; - rig.import_block_parametric(is_valid, is_valid, None, |error| { - matches!( - error, - // The block import should fail since the beacon chain knows the justified payload - // is invalid. - BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) - ) + rig.import_block_parametric(is_valid, is_valid, None, |error| match error { + BlockError::BeaconChainError(e) => { + matches!(e.as_ref(), BeaconChainError::JustifiedPayloadInvalid { .. 
}) + } + _ => false, }) .await; diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 6d82542cef..d193eaf1d8 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,7 +1,7 @@ +use eth2::types::beacon_response::EmptyMetadata; use eth2::types::builder_bid::SignedBuilderBid; -use eth2::types::fork_versioned_response::EmptyMetadata; use eth2::types::{ - ContentType, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, ForkVersionDeserialize, + ContentType, ContextDeserialize, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, ForkVersionedResponse, PublicKeyBytes, SignedValidatorRegistrationData, Slot, }; use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock}; @@ -119,7 +119,7 @@ impl BuilderHttpClient { } async fn get_with_header< - T: DeserializeOwned + ForkVersionDecode + ForkVersionDeserialize, + T: DeserializeOwned + ForkVersionDecode + for<'de> ContextDeserialize<'de, ForkName>, U: IntoUrl, >( &self, @@ -147,7 +147,7 @@ impl BuilderHttpClient { self.ssz_available.store(true, Ordering::SeqCst); T::from_ssz_bytes_by_fork(&response_bytes, fork_name) .map(|data| ForkVersionedResponse { - version: Some(fork_name), + version: fork_name, metadata: EmptyMetadata {}, data, }) @@ -155,7 +155,15 @@ impl BuilderHttpClient { } ContentType::Json => { self.ssz_available.store(false, Ordering::SeqCst); - serde_json::from_slice(&response_bytes).map_err(Error::InvalidJson) + let mut de = serde_json::Deserializer::from_slice(&response_bytes); + let data = + T::context_deserialize(&mut de, fork_name).map_err(Error::InvalidJson)?; + + Ok(ForkVersionedResponse { + version: fork_name, + metadata: EmptyMetadata {}, + data, + }) } } } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 195c53c4a0..379b46b4b1 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -4,12 +4,6 @@ version = "0.2.0" authors = ["Sigma Prime "] 
edition = { workspace = true } -[dev-dependencies] -operation_pool = { workspace = true } -serde_yaml = { workspace = true } -state_processing = { workspace = true } -tokio = { workspace = true } - [dependencies] beacon_chain = { workspace = true } beacon_processor = { workspace = true } @@ -46,3 +40,9 @@ tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } types = { workspace = true } + +[dev-dependencies] +operation_pool = { workspace = true } +serde_yaml = { workspace = true } +state_processing = { workspace = true } +tokio = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 3cb7b33aae..a581d5c128 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -310,8 +310,10 @@ where .map_err(|e| format!("Unable to read system time: {e:}"))? .as_secs(); let genesis_time = genesis_state.genesis_time(); - let deneb_time = - genesis_time + (deneb_fork_epoch.as_u64() * spec.seconds_per_slot); + let deneb_time = genesis_time + + (deneb_fork_epoch.as_u64() + * E::slots_per_epoch() + * spec.seconds_per_slot); // Shrink the blob availability window so users don't start // a sync right before blobs start to disappear from the P2P @@ -461,12 +463,12 @@ where let blobs = if block.message().body().has_blobs() { debug!("Downloading finalized blobs"); if let Some(response) = remote - .get_blobs::(BlockId::Root(block_root), None) + .get_blobs::(BlockId::Root(block_root), None, &spec) .await .map_err(|e| format!("Error fetching finalized blobs from remote: {e:?}"))? 
{ debug!("Downloaded finalized blobs"); - Some(response.data) + Some(response.into_data()) } else { warn!( block_root = %block_root, diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index fa08364251..f834ad7eef 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -4,11 +4,6 @@ version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } -[dev-dependencies] -environment = { workspace = true } -eth1_test_rig = { workspace = true } -serde_yaml = { workspace = true } - [dependencies] eth2 = { workspace = true } ethereum_ssz = { workspace = true } @@ -28,3 +23,8 @@ tokio = { workspace = true } tracing = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } + +[dev-dependencies] +environment = { workspace = true } +eth1_test_rig = { workspace = true } +serde_yaml = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bf4c391a8d..c79036ba61 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -1242,6 +1242,10 @@ impl HttpJsonRpc { } else { let engine_version = self.get_client_version_v1().await?; *lock = Some(CachedResponse::new(engine_version.clone())); + if !engine_version.is_empty() { + // reset metric gauge when there's a fresh fetch + crate::metrics::reset_execution_layer_info_gauge(); + } Ok(engine_version) } } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index b9e030703d..c46a94c5af 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -26,8 +26,8 @@ const CACHED_RESPONSE_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minu /// Stores the remembered state of a engine. 
#[derive(Copy, Clone, PartialEq, Debug, Eq, Default)] enum EngineStateInternal { - Synced, #[default] + Synced, Offline, Syncing, AuthFailed, @@ -403,12 +403,17 @@ mod tests { async fn test_state_notifier() { let mut state = State::default(); let initial_state: EngineState = state.state.into(); - assert_eq!(initial_state, EngineState::Offline); - state.update(EngineStateInternal::Synced); + // default state is online + assert_eq!(initial_state, EngineState::Online); // a watcher that arrives after the first update. let mut watcher = state.watch(); let new_state = watcher.next().await.expect("Last state is always present"); assert_eq!(new_state, EngineState::Online); + + // update to offline + state.update(EngineStateInternal::Offline); + let new_state = watcher.next().await.expect("Last state is always present"); + assert_eq!(new_state, EngineState::Offline); } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index bbdf1a054b..4761c47d41 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -129,8 +129,7 @@ impl TryFrom> for ProvenancedPayload ExecutionLayer { &self, age_limit: Option, ) -> Result, Error> { - self.engine() + let versions = self + .engine() .request(|engine| engine.get_engine_version(age_limit)) .await - .map_err(Into::into) + .map_err(Into::::into)?; + metrics::expose_execution_layer_info(&versions); + + Ok(versions) } /// Used during block production to determine if the merge has been triggered. 
@@ -1980,7 +1983,7 @@ enum InvalidBuilderPayload { expected: Option, }, Fork { - payload: Option, + payload: ForkName, expected: ForkName, }, Signature { @@ -2013,7 +2016,7 @@ impl fmt::Display for InvalidBuilderPayload { write!(f, "payload block number was {} not {:?}", payload, expected) } InvalidBuilderPayload::Fork { payload, expected } => { - write!(f, "payload fork was {:?} not {}", payload, expected) + write!(f, "payload fork was {} not {}", payload, expected) } InvalidBuilderPayload::Signature { signature, pubkey } => write!( f, @@ -2116,7 +2119,7 @@ fn verify_builder_bid( payload: header.block_number(), expected: block_number, })) - } else if bid.version != Some(current_fork) { + } else if bid.version != current_fork { Err(Box::new(InvalidBuilderPayload::Fork { payload: bid.version, expected: current_fork, diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index ab1a22677f..aba8434c8e 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -116,3 +116,29 @@ pub static EXECUTION_LAYER_PAYLOAD_BIDS: LazyLock> = LazyLoc &["source"] ) }); +pub static EXECUTION_LAYER_INFO: LazyLock> = LazyLock::new(|| { + try_create_int_gauge_vec( + "execution_layer_info", + "The build of the execution layer connected to lighthouse", + &["code", "name", "version", "commit"], + ) +}); + +pub fn reset_execution_layer_info_gauge() { + let _ = EXECUTION_LAYER_INFO.as_ref().map(|gauge| gauge.reset()); +} + +pub fn expose_execution_layer_info(els: &Vec) { + for el in els { + set_gauge_vec( + &EXECUTION_LAYER_INFO, + &[ + &el.code.to_string(), + &el.name, + &el.version, + &el.commit.to_string(), + ], + 1, + ); + } +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 87ea8642be..3704bcc592 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ 
b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -743,7 +743,7 @@ impl MockBuilder { .await .map_err(|_| "couldn't get head".to_string())? .ok_or_else(|| "missing head block".to_string())? - .data; + .into_data(); let head_block_root = head_block_root.unwrap_or(head.canonical_root()); @@ -761,7 +761,7 @@ impl MockBuilder { .await .map_err(|_| "couldn't get finalized block".to_string())? .ok_or_else(|| "missing finalized block".to_string())? - .data + .data() .message() .body() .execution_payload() @@ -774,7 +774,7 @@ impl MockBuilder { .await .map_err(|_| "couldn't get justified block".to_string())? .ok_or_else(|| "missing justified block".to_string())? - .data + .data() .message() .body() .execution_payload() @@ -815,7 +815,7 @@ impl MockBuilder { .await .map_err(|_| "couldn't get state".to_string())? .ok_or_else(|| "missing state".to_string())? - .data; + .into_data(); let prev_randao = head_state .get_randao_mix(head_state.current_epoch()) @@ -980,7 +980,7 @@ pub fn serve( .await .map_err(|e| warp::reject::custom(Custom(e)))?; let resp: ForkVersionedResponse<_> = ForkVersionedResponse { - version: Some(fork_name), + version: fork_name, metadata: Default::default(), data: payload, }; @@ -1040,7 +1040,7 @@ pub fn serve( ), eth2::types::Accept::Json | eth2::types::Accept::Any => { let resp: ForkVersionedResponse<_> = ForkVersionedResponse { - version: Some(fork_name), + version: fork_name, metadata: Default::default(), data: signed_bid, }; diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 6ba8998a01..f752b888a7 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -4,11 +4,6 @@ version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } -[dev-dependencies] -eth1_test_rig = { workspace = true } -logging = { workspace = true } -sensitive_url = { workspace = true } - [dependencies] environment = { workspace = true } eth1 = { workspace = true } @@ -23,3 +18,8 @@ tokio = { 
workspace = true } tracing = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } + +[dev-dependencies] +eth1_test_rig = { workspace = true } +logging = { workspace = true } +sensitive_url = { workspace = true } diff --git a/beacon_node/http_api/src/aggregate_attestation.rs b/beacon_node/http_api/src/aggregate_attestation.rs index 23af5b0cb5..809f381139 100644 --- a/beacon_node/http_api/src/aggregate_attestation.rs +++ b/beacon_node/http_api/src/aggregate_attestation.rs @@ -4,7 +4,7 @@ use crate::version::{add_consensus_version_header, V1, V2}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::types::{self, EndpointVersion, Hash256, Slot}; use std::sync::Arc; -use types::fork_versioned_response::EmptyMetadata; +use types::beacon_response::EmptyMetadata; use types::{CommitteeIndex, ForkVersionedResponse}; use warp::{ hyper::{Body, Response}, @@ -52,7 +52,7 @@ pub fn get_aggregate_attestation( if endpoint_version == V2 { let fork_versioned_response = ForkVersionedResponse { - version: Some(fork_name), + version: fork_name, metadata: EmptyMetadata {}, data: aggregate_attestation, }; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 386d9fe33a..2eaa33a964 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -34,7 +34,7 @@ mod validators; mod version; use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; -use crate::version::fork_versioned_response; +use crate::version::beacon_response; use beacon_chain::{ attestation_verification::VerifiedAttestation, observed_operations::ObservationOutcome, validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, @@ -47,9 +47,9 @@ use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use either::Either; use eth2::types::{ - self as api_types, BroadcastValidation, 
EndpointVersion, ForkChoice, ForkChoiceNode, - LightClientUpdatesQuery, PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, - ValidatorStatus, ValidatorsRequestBody, + self as api_types, BroadcastValidation, ContextDeserialize, EndpointVersion, ForkChoice, + ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, ValidatorBalancesRequestBody, + ValidatorId, ValidatorStatus, ValidatorsRequestBody, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; @@ -89,18 +89,17 @@ use tokio_stream::{ use tracing::{debug, error, info, warn}; use types::AttestationData; use types::{ - fork_versioned_response::EmptyMetadata, Attestation, AttestationShufflingId, AttesterSlashing, - BeaconStateError, ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, - ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, - RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncCommitteeMessage, SyncContributionData, + Attestation, AttestationShufflingId, AttesterSlashing, BeaconStateError, ChainSpec, Checkpoint, + CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, ProposerPreparationData, + ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, + SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ add_consensus_version_header, add_ssz_content_type_header, - execution_optimistic_finalized_fork_versioned_response, inconsistent_fork_rejection, - unsupported_version_rejection, V1, V2, V3, + execution_optimistic_finalized_beacon_response, inconsistent_fork_rejection, + unsupported_version_rejection, 
ResponseIncludesVersion, V1, V2, V3, }; use warp::http::StatusCode; use warp::hyper::Body; @@ -710,7 +709,7 @@ pub fn serve( .clone() .and(warp::path("validator_balances")) .and(warp::path::end()) - .and(warp_utils::json::json()) + .and(warp_utils::json::json_no_body()) .then( |state_id: StateId, task_spawner: TaskSpawner, @@ -1153,8 +1152,8 @@ pub fn serve( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id .map_state_and_execution_optimistic_and_finalized( &chain, |state, execution_optimistic, finalized| { @@ -1164,15 +1163,23 @@ pub fn serve( )); }; - Ok((deposits.clone(), execution_optimistic, finalized)) + Ok(( + deposits.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) }, )?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); @@ -1186,8 +1193,8 @@ pub fn serve( |state_id: StateId, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id .map_state_and_execution_optimistic_and_finalized( &chain, |state, execution_optimistic, finalized| { @@ -1197,15 +1204,23 @@ pub fn serve( )); }; - Ok((withdrawals.clone(), execution_optimistic, finalized)) + Ok(( + withdrawals.clone(), + execution_optimistic, + 
finalized, + state.fork_name_unchecked(), + )) }, )?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); @@ -1405,21 +1420,30 @@ pub fn serve( .and(warp::path("beacon")) .and(warp::path("blocks")) .and(warp::path::end()) - .and(warp_utils::json::json()) + .and(warp::body::json()) + .and(consensus_version_header_filter) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) .then( - move |block_contents: PublishBlockRequest, + move |value: serde_json::Value, + consensus_version: ForkName, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, network_globals: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let request = PublishBlockRequest::::context_deserialize( + &value, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) + })?; publish_blocks::publish_block( None, - ProvenancedBlock::local_from_publish_request(block_contents), + ProvenancedBlock::local_from_publish_request(request), chain, &network_tx, BroadcastValidation::default(), @@ -1475,22 +1499,32 @@ pub fn serve( .and(warp::path("blocks")) .and(warp::query::()) .and(warp::path::end()) - .and(warp_utils::json::json()) + .and(warp::body::json()) + .and(consensus_version_header_filter) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, - block_contents: PublishBlockRequest, + value: serde_json::Value, + consensus_version: ForkName, task_spawner: 
TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, network_globals: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let request = PublishBlockRequest::::context_deserialize( + &value, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) + })?; + publish_blocks::publish_block( None, - ProvenancedBlock::local_from_publish_request(block_contents), + ProvenancedBlock::local_from_publish_request(request), chain, &network_tx, validation_level.broadcast_validation, @@ -1723,6 +1757,12 @@ pub fn serve( .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) @@ -1734,9 +1774,8 @@ pub fn serve( e )) }), - _ => execution_optimistic_finalized_fork_versioned_response( - endpoint_version, - fork_name, + _ => execution_optimistic_finalized_beacon_response( + require_version, execution_optimistic, finalized, block, @@ -1796,9 +1835,15 @@ pub fn serve( .attestations() .map(|att| att.clone_as_attestation()) .collect::>(); - let res = execution_optimistic_finalized_fork_versioned_response( - endpoint_version, - fork_name, + + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + + let res = execution_optimistic_finalized_beacon_response( + require_version, execution_optimistic, finalized, &atts, @@ -1845,9 +1890,8 @@ pub fn serve( }), _ => { // Post as a V2 endpoint so we return the fork version. 
- execution_optimistic_finalized_fork_versioned_response( - V2, - fork_name, + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), execution_optimistic, finalized, block, @@ -1901,9 +1945,8 @@ pub fn serve( }), _ => { // Post as a V2 endpoint so we return the fork version. - let res = execution_optimistic_finalized_fork_versioned_response( - V2, - fork_name, + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), execution_optimistic, finalized, &blob_sidecar_list_filtered, @@ -2063,7 +2106,13 @@ pub fn serve( }) .collect::>(); - let res = fork_versioned_response(endpoint_version, fork_name, &attestations)?; + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + + let res = beacon_response(require_version, &attestations); Ok(add_consensus_version_header( warp::reply::json(&res).into_response(), fork_name, @@ -2152,7 +2201,13 @@ pub fn serve( }) .collect::>(); - let res = fork_versioned_response(endpoint_version, fork_name, &slashings)?; + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + + let res = beacon_response(require_version, &slashings); Ok(add_consensus_version_header( warp::reply::json(&res).into_response(), fork_name, @@ -2588,7 +2643,7 @@ pub fn serve( let fork_name = chain .spec - .fork_name_at_slot::(*update.signature_slot()); + .fork_name_at_slot::(update.get_slot()); match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) @@ -2600,11 +2655,10 @@ pub fn serve( e )) }), - _ => Ok(warp::reply::json(&ForkVersionedResponse { - version: Some(fork_name), - metadata: EmptyMetadata {}, - data: update, - }) + _ => 
Ok(warp::reply::json(&beacon_response( + ResponseIncludesVersion::Yes(fork_name), + update, + )) .into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) @@ -2649,11 +2703,10 @@ pub fn serve( e )) }), - _ => Ok(warp::reply::json(&ForkVersionedResponse { - version: Some(fork_name), - metadata: EmptyMetadata {}, - data: update, - }) + _ => Ok(warp::reply::json(&beacon_response( + ResponseIncludesVersion::Yes(fork_name), + update, + )) .into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) @@ -2845,7 +2898,7 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |endpoint_version: EndpointVersion, + |_endpoint_version: EndpointVersion, state_id: StateId, accept_header: Option, task_spawner: TaskSpawner, @@ -2889,9 +2942,8 @@ pub fn serve( let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - let res = execution_optimistic_finalized_fork_versioned_response( - endpoint_version, - fork_name, + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), execution_optimistic, finalized, &state, @@ -3241,13 +3293,14 @@ pub fn serve( let direction = dir.into(); let state = peer_info.connection_status().clone().into(); - let state_matches = query.state.as_ref().is_none_or(|states| { - states.iter().any(|state_param| *state_param == state) - }); - let direction_matches = - query.direction.as_ref().is_none_or(|directions| { - directions.iter().any(|dir_param| *dir_param == direction) - }); + let state_matches = query + .state + .as_ref() + .is_none_or(|states| states.contains(&state)); + let direction_matches = query + .direction + .as_ref() + .is_none_or(|directions| directions.contains(&direction)); if state_matches && direction_matches { peers.push(api_types::PeerData { @@ -3371,7 +3424,7 @@ pub fn serve( if endpoint_version == V3 { produce_block_v3(accept_header, chain, slot, query).await } else { - 
produce_block_v2(endpoint_version, accept_header, chain, slot, query).await + produce_block_v2(accept_header, chain, slot, query).await } }) }, @@ -3401,8 +3454,7 @@ pub fn serve( chain: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { not_synced_filter?; - produce_blinded_block_v2(EndpointVersion(2), accept_header, chain, slot, query) - .await + produce_blinded_block_v2(accept_header, chain, slot, query).await }) }, ); diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index 2d0a5d09a1..24b1338a72 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -1,14 +1,15 @@ use crate::version::{ - add_consensus_version_header, add_ssz_content_type_header, fork_versioned_response, V1, + add_consensus_version_header, add_ssz_content_type_header, beacon_response, + ResponseIncludesVersion, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{ - self as api_types, ChainSpec, ForkVersionedResponse, LightClientUpdate, - LightClientUpdateResponseChunk, LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, + self as api_types, ChainSpec, LightClientUpdate, LightClientUpdateResponseChunk, + LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, }; use ssz::Encode; use std::sync::Arc; -use types::{ForkName, Hash256, LightClientBootstrap}; +use types::{BeaconResponse, ForkName, Hash256, LightClientBootstrap}; use warp::{ hyper::{Body, Response}, reply::Reply, @@ -52,7 +53,7 @@ pub fn get_light_client_updates( let fork_versioned_response = light_client_updates .iter() .map(|update| map_light_client_update_to_json_response::(&chain, update.clone())) - .collect::>>, Rejection>>()?; + .collect::>>>(); Ok(warp::reply::json(&fork_versioned_response).into_response()) } } @@ -88,10 +89,8 @@ pub fn get_light_client_bootstrap( warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) }), _ => { - 
let fork_versioned_response = map_light_client_bootstrap_to_json_response::( - fork_name, - light_client_bootstrap, - )?; + let fork_versioned_response = + map_light_client_bootstrap_to_json_response::(fork_name, light_client_bootstrap); Ok(warp::reply::json(&fork_versioned_response).into_response()) } } @@ -177,17 +176,20 @@ fn map_light_client_update_to_ssz_chunk( fn map_light_client_bootstrap_to_json_response( fork_name: ForkName, light_client_bootstrap: LightClientBootstrap, -) -> Result>, Rejection> { - fork_versioned_response(V1, fork_name, light_client_bootstrap) +) -> BeaconResponse> { + beacon_response( + ResponseIncludesVersion::Yes(fork_name), + light_client_bootstrap, + ) } fn map_light_client_update_to_json_response( chain: &BeaconChain, light_client_update: LightClientUpdate, -) -> Result>, Rejection> { +) -> BeaconResponse> { let fork_name = chain .spec .fork_name_at_slot::(*light_client_update.signature_slot()); - fork_versioned_response(V1, fork_name, light_client_update) + beacon_response(ResponseIncludesVersion::Yes(fork_name), light_client_update) } diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 22d6f0e7ae..db82ff214c 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -3,15 +3,14 @@ use crate::{ version::{ add_consensus_block_value_header, add_consensus_version_header, add_execution_payload_blinded_header, add_execution_payload_value_header, - add_ssz_content_type_header, fork_versioned_response, inconsistent_fork_rejection, + add_ssz_content_type_header, beacon_response, inconsistent_fork_rejection, + ResponseIncludesVersion, }, }; use beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, }; -use eth2::types::{ - self as api_types, EndpointVersion, ProduceBlockV3Metadata, SkipRandaoVerification, -}; +use eth2::types::{self as api_types, ProduceBlockV3Metadata, SkipRandaoVerification}; 
use ssz::Encode; use std::sync::Arc; use types::{payload::BlockProductionVersion, *}; @@ -115,7 +114,7 @@ pub fn build_response_v3( warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) }), _ => Ok(warp::reply::json(&ForkVersionedResponse { - version: Some(fork_name), + version: fork_name, metadata, data: block_contents, }) @@ -129,7 +128,6 @@ pub fn build_response_v3( } pub async fn produce_blinded_block_v2( - endpoint_version: EndpointVersion, accept_header: Option, chain: Arc>, slot: Slot, @@ -155,11 +153,10 @@ pub async fn produce_blinded_block_v2( .await .map_err(warp_utils::reject::unhandled_error)?; - build_response_v2(chain, block_response_type, endpoint_version, accept_header) + build_response_v2(chain, block_response_type, accept_header) } pub async fn produce_block_v2( - endpoint_version: EndpointVersion, accept_header: Option, chain: Arc>, slot: Slot, @@ -186,13 +183,12 @@ pub async fn produce_block_v2( .await .map_err(warp_utils::reject::unhandled_error)?; - build_response_v2(chain, block_response_type, endpoint_version, accept_header) + build_response_v2(chain, block_response_type, accept_header) } pub fn build_response_v2( chain: Arc>, block_response: BeaconBlockResponseWrapper, - endpoint_version: EndpointVersion, accept_header: Option, ) -> Result, warp::Rejection> { let fork_name = block_response @@ -210,8 +206,10 @@ pub fn build_response_v2( .map_err(|e| { warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) }), - _ => fork_versioned_response(endpoint_version, fork_name, block_contents) - .map(|response| warp::reply::json(&response).into_response()) - .map(|res| add_consensus_version_header(res, fork_name)), + _ => Ok(warp::reply::json(&beacon_response( + ResponseIncludesVersion::Yes(fork_name), + block_contents, + )) + .into_response()), } } diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index cd5e912bdf..db85b8f205 
100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -60,13 +60,13 @@ use types::{Attestation, EthSpec, ForkName, SingleAttestation}; pub enum Error { Validation(AttestationError), Publication, - ForkChoice(#[allow(dead_code)] BeaconChainError), + ForkChoice(#[allow(dead_code)] Box), AggregationPool(#[allow(dead_code)] AttestationError), ReprocessDisabled, ReprocessFull, ReprocessTimeout, InvalidJson(#[allow(dead_code)] serde_json::Error), - FailedConversion(#[allow(dead_code)] BeaconChainError), + FailedConversion(#[allow(dead_code)] Box), } enum PublishAttestationResult { @@ -164,7 +164,7 @@ fn verify_and_publish_attestation( } if let Err(e) = fc_result { - Err(Error::ForkChoice(e)) + Err(Error::ForkChoice(Box::new(e))) } else if let Err(e) = naive_aggregation_result { Err(Error::AggregationPool(e)) } else { @@ -213,7 +213,7 @@ fn convert_to_attestation<'a, T: BeaconChainTypes>( beacon_block_root, })) } - Err(e) => Err(Error::FailedConversion(e)), + Err(e) => Err(Error::FailedConversion(Box::new(e))), } } } diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index b613cf8467..9b1a3f8677 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -123,8 +123,9 @@ pub async fn publish_block>( "Signed block published to network via HTTP API" ); - crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())) - .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; + crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())).map_err( + |_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)), + )?; Ok(()) }; @@ -506,7 +507,7 @@ fn publish_blob_sidecars( ) -> Result<(), BlockError> { let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob()))); crate::publish_pubsub_message(sender_clone, 
pubsub_message) - .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) + .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) } fn publish_column_sidecars( @@ -536,7 +537,7 @@ fn publish_column_sidecars( }) .collect::>(); crate::publish_pubsub_messages(sender_clone, pubsub_messages) - .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) + .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) } async fn post_block_import_logging_and_response( @@ -593,7 +594,9 @@ async fn post_block_import_logging_and_response( Err(warp_utils::reject::custom_bad_request(msg)) } } - Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { + Err(BlockError::BeaconChainError(e)) + if matches!(e.as_ref(), BeaconChainError::UnableToPublish) => + { Err(warp_utils::reject::custom_server_error( "unable to publish to network channel".to_string(), )) @@ -789,7 +792,7 @@ fn check_slashable( block_clone.message().proposer_index(), block_root, ) - .map_err(|e| BlockError::BeaconChainError(e.into()))? + .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))? { warn!( slot = %block_clone.slot(), diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 9ca1a2401a..aa126bbc82 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -59,7 +59,7 @@ pub fn sync_committee_duties( } let duties = duties_from_state_load(request_epoch, request_indices, altair_fork_epoch, chain) - .map_err(|e| match e { + .map_err(|e| match *e { BeaconChainError::SyncDutiesError(BeaconStateError::SyncCommitteeNotKnown { current_epoch, .. 
@@ -81,7 +81,7 @@ fn duties_from_state_load( request_indices: &[u64], altair_fork_epoch: Epoch, chain: &BeaconChain, -) -> Result, BeaconStateError>>, BeaconChainError> { +) -> Result, BeaconStateError>>, Box> { // Determine what the current epoch would be if we fast-forward our system clock by // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. // @@ -92,11 +92,17 @@ fn duties_from_state_load( let tolerant_current_epoch = chain .slot_clock .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) - .ok_or(BeaconChainError::UnableToReadSlot)? + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(Box::new)? .epoch(T::EthSpec::slots_per_epoch()); - let max_sync_committee_period = tolerant_current_epoch.sync_committee_period(&chain.spec)? + 1; - let sync_committee_period = request_epoch.sync_committee_period(&chain.spec)?; + let max_sync_committee_period = tolerant_current_epoch + .sync_committee_period(&chain.spec) + .map_err(|e| Box::new(e.into()))? + + 1; + let sync_committee_period = request_epoch + .sync_committee_period(&chain.spec) + .map_err(|e| Box::new(e.into()))?; if tolerant_current_epoch < altair_fork_epoch { // Empty response if the epoch is pre-Altair. @@ -119,13 +125,14 @@ fn duties_from_state_load( state .get_sync_committee_duties(request_epoch, request_indices, &chain.spec) .map_err(BeaconChainError::SyncDutiesError) + .map_err(Box::new) } else { - Err(BeaconChainError::SyncDutiesError( + Err(Box::new(BeaconChainError::SyncDutiesError( BeaconStateError::SyncCommitteeNotKnown { current_epoch, epoch: request_epoch, }, - )) + ))) } } diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs index baa41e33ed..25b0feb99e 100644 --- a/beacon_node/http_api/src/validator.rs +++ b/beacon_node/http_api/src/validator.rs @@ -7,9 +7,10 @@ pub fn pubkey_to_validator_index( chain: &BeaconChain, state: &BeaconState, pubkey: &PublicKeyBytes, -) -> Result, BeaconChainError> { +) -> Result, Box> { chain - .validator_index(pubkey)? 
+ .validator_index(pubkey) + .map_err(Box::new)? .filter(|&index| { state .validators() diff --git a/beacon_node/http_api/src/validators.rs b/beacon_node/http_api/src/validators.rs index f3d78e6fcd..90ddd1ee8f 100644 --- a/beacon_node/http_api/src/validators.rs +++ b/beacon_node/http_api/src/validators.rs @@ -81,8 +81,13 @@ pub fn get_beacon_state_validator_balances( .map_state_and_execution_optimistic_and_finalized( &chain, |state, execution_optimistic, finalized| { - let ids_filter_set: Option> = - optional_ids.map(|f| HashSet::from_iter(f.iter())); + let ids_filter_set: Option> = match optional_ids { + // if optional_ids (the request data body) is [], returns a `None`, so that later when calling .is_none_or() will return True + // Hence, all validators will pass through .filter(), and balances of all validators are returned, in accordance to the spec + Some([]) => None, + Some(ids) => Some(HashSet::from_iter(ids.iter())), + None => None, + }; Ok(( state diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 59816cb897..361e8e78ea 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -5,10 +5,11 @@ use eth2::{ }; use serde::Serialize; use types::{ - fork_versioned_response::{ - ExecutionOptimisticFinalizedForkVersionedResponse, ExecutionOptimisticFinalizedMetadata, + beacon_response::{ + ExecutionOptimisticFinalizedBeaconResponse, ExecutionOptimisticFinalizedMetadata, }, - ForkName, ForkVersionedResponse, InconsistentFork, Uint256, + BeaconResponse, ForkName, ForkVersionedResponse, InconsistentFork, Uint256, + UnversionedResponse, }; use warp::reply::{self, Reply, Response}; @@ -16,47 +17,54 @@ pub const V1: EndpointVersion = EndpointVersion(1); pub const V2: EndpointVersion = EndpointVersion(2); pub const V3: EndpointVersion = EndpointVersion(3); -pub fn fork_versioned_response( - endpoint_version: EndpointVersion, - fork_name: ForkName, - data: T, -) -> Result, 
warp::reject::Rejection> { - let fork_name = if endpoint_version == V1 { - None - } else if endpoint_version == V2 || endpoint_version == V3 { - Some(fork_name) - } else { - return Err(unsupported_version_rejection(endpoint_version)); - }; - Ok(ForkVersionedResponse { - version: fork_name, - metadata: Default::default(), - data, - }) +#[derive(Debug, PartialEq, Clone, Serialize)] +pub enum ResponseIncludesVersion { + Yes(ForkName), + No, } -pub fn execution_optimistic_finalized_fork_versioned_response( - endpoint_version: EndpointVersion, - fork_name: ForkName, +pub fn beacon_response( + require_version: ResponseIncludesVersion, + data: T, +) -> BeaconResponse { + match require_version { + ResponseIncludesVersion::Yes(fork_name) => { + BeaconResponse::ForkVersioned(ForkVersionedResponse { + version: fork_name, + metadata: Default::default(), + data, + }) + } + ResponseIncludesVersion::No => BeaconResponse::Unversioned(UnversionedResponse { + metadata: Default::default(), + data, + }), + } +} + +pub fn execution_optimistic_finalized_beacon_response( + require_version: ResponseIncludesVersion, execution_optimistic: bool, finalized: bool, data: T, -) -> Result, warp::reject::Rejection> { - let fork_name = if endpoint_version == V1 { - None - } else if endpoint_version == V2 { - Some(fork_name) - } else { - return Err(unsupported_version_rejection(endpoint_version)); +) -> Result, warp::reject::Rejection> { + let metadata = ExecutionOptimisticFinalizedMetadata { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), }; - Ok(ExecutionOptimisticFinalizedForkVersionedResponse { - version: fork_name, - metadata: ExecutionOptimisticFinalizedMetadata { - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }, - data, - }) + match require_version { + ResponseIncludesVersion::Yes(fork_name) => { + Ok(BeaconResponse::ForkVersioned(ForkVersionedResponse { + version: fork_name, + metadata, + data, + })) + } + 
ResponseIncludesVersion::No => Ok(BeaconResponse::Unversioned(UnversionedResponse { + metadata, + data, + })), + } } /// Add the 'Content-Type application/octet-stream` header to a response. diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index bb3086945b..4f3cd6c828 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -115,10 +115,10 @@ async fn state_by_root_pruned_from_fork_choice() { .unwrap() .unwrap(); - assert!(response.metadata.finalized.unwrap()); - assert!(!response.metadata.execution_optimistic.unwrap()); + assert!(response.metadata().finalized.unwrap()); + assert!(!response.metadata().execution_optimistic.unwrap()); - let mut state = response.data; + let mut state = response.into_data(); assert_eq!(state.update_tree_hash_cache().unwrap(), state_root); } } @@ -846,7 +846,7 @@ pub async fn fork_choice_before_proposal() { .get_validator_blocks::(slot_d, &randao_reveal, None) .await .unwrap() - .data + .into_data() .deconstruct() .0; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 5c9504d4a5..a5a21fd985 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -682,7 +682,7 @@ impl ApiTester { .await .unwrap() .unwrap() - .metadata + .metadata() .finalized .unwrap(); @@ -719,7 +719,7 @@ impl ApiTester { .await .unwrap() .unwrap() - .metadata + .metadata() .finalized .unwrap(); @@ -757,7 +757,7 @@ impl ApiTester { .await .unwrap() .unwrap() - .metadata + .metadata() .finalized .unwrap(); @@ -927,18 +927,32 @@ impl ApiTester { .map(|res| res.data); let expected = state_opt.map(|(state, _execution_optimistic, _finalized)| { - let mut validators = Vec::with_capacity(validator_indices.len()); + // If validator_indices is empty, return balances for all validators + if validator_indices.is_empty() { + state + .balances() + .iter() + .enumerate() + 
.map(|(index, balance)| ValidatorBalanceData { + index: index as u64, + balance: *balance, + }) + .collect() + } else { + // Same behaviour as before for the else branch + let mut validators = Vec::with_capacity(validator_indices.len()); - for i in validator_indices { - if i < state.balances().len() as u64 { - validators.push(ValidatorBalanceData { - index: i, - balance: *state.balances().get(i as usize).unwrap(), - }); + for i in validator_indices { + if i < state.balances().len() as u64 { + validators.push(ValidatorBalanceData { + index: i, + balance: *state.balances().get(i as usize).unwrap(), + }); + } } - } - validators + validators + } }); assert_eq!(result_index_ids, expected, "{:?}", state_id); @@ -1597,9 +1611,9 @@ impl ApiTester { let json_result = self.client.get_beacon_blocks(block_id.0).await.unwrap(); if let (Some(json), Some(expected)) = (&json_result, &expected) { - assert_eq!(&json.data, expected.as_ref(), "{:?}", block_id); + assert_eq!(json.data(), expected.as_ref(), "{:?}", block_id); assert_eq!( - json.version, + json.version(), Some(expected.fork_name(&self.chain.spec).unwrap()) ); } else { @@ -1623,8 +1637,8 @@ impl ApiTester { // Check that the legacy v1 API still works but doesn't return a version field. 
let v1_result = self.client.get_beacon_blocks_v1(block_id.0).await.unwrap(); if let (Some(v1_result), Some(expected)) = (&v1_result, &expected) { - assert_eq!(v1_result.version, None); - assert_eq!(&v1_result.data, expected.as_ref()); + assert_eq!(v1_result.version(), None); + assert_eq!(v1_result.data(), expected.as_ref()); } else { assert_eq!(v1_result, None); assert_eq!(expected, None); @@ -1685,9 +1699,9 @@ impl ApiTester { .unwrap(); if let (Some(json), Some(expected)) = (&json_result, &expected) { - assert_eq!(&json.data, expected, "{:?}", block_id); + assert_eq!(json.data(), expected, "{:?}", block_id); assert_eq!( - json.version, + json.version(), Some(expected.fork_name(&self.chain.spec).unwrap()) ); } else { @@ -1750,10 +1764,14 @@ impl ApiTester { }; let result = match self .client - .get_blobs::(CoreBlockId::Root(block_root), blob_indices.as_deref()) + .get_blobs::( + CoreBlockId::Root(block_root), + blob_indices.as_deref(), + &self.chain.spec, + ) .await { - Ok(result) => result.unwrap().data, + Ok(result) => result.unwrap().into_data(), Err(e) => panic!("query failed incorrectly: {e:?}"), }; @@ -1806,13 +1824,13 @@ impl ApiTester { match self .client - .get_blobs::(CoreBlockId::Slot(test_slot), None) + .get_blobs::(CoreBlockId::Slot(test_slot), None, &self.chain.spec) .await { Ok(result) => { if zero_blobs { assert_eq!( - &result.unwrap().data[..], + &result.unwrap().into_data()[..], &[], "empty blobs are always available" ); @@ -1844,7 +1862,7 @@ impl ApiTester { match self .client - .get_blobs::(CoreBlockId::Slot(test_slot), None) + .get_blobs::(CoreBlockId::Slot(test_slot), None, &self.chain.spec) .await { Ok(result) => panic!("queries for pre-Deneb slots should fail. 
got: {result:?}"), @@ -1861,7 +1879,7 @@ impl ApiTester { .get_beacon_blocks_attestations_v2(block_id.0) .await .unwrap() - .map(|res| res.data); + .map(|res| res.into_data()); let expected = block_id.full_block(&self.chain).await.ok().map( |(block, _execution_optimistic, _finalized)| { @@ -2071,7 +2089,7 @@ impl ApiTester { .get_light_client_bootstrap(&self.chain.store, &block_root, 1u64, &self.chain.spec); assert!(expected.is_ok()); - assert_eq!(result.unwrap().data, expected.unwrap().unwrap().0); + assert_eq!(result.unwrap().data(), &expected.unwrap().unwrap().0); self } @@ -2083,7 +2101,7 @@ impl ApiTester { .get_beacon_light_client_optimistic_update::() .await { - Ok(result) => result.map(|res| res.data), + Ok(result) => result.map(|res| res.into_data()), Err(e) => panic!("query failed incorrectly: {e:?}"), }; @@ -2102,7 +2120,7 @@ impl ApiTester { .get_beacon_light_client_finality_update::() .await { - Ok(result) => result.map(|res| res.data), + Ok(result) => result.map(|res| res.into_data()), Err(e) => panic!("query failed incorrectly: {e:?}"), }; @@ -2133,7 +2151,7 @@ impl ApiTester { .get_beacon_pool_attestations_v2(None, None) .await .unwrap() - .data; + .into_data(); assert_eq!(result, expected); @@ -2195,7 +2213,7 @@ impl ApiTester { .get_beacon_pool_attestations_v2(None, Some(0)) .await .unwrap() - .data; + .into_data(); let mut expected = self.chain.op_pool.get_all_attestations(); expected.extend(self.chain.naive_aggregation_pool.read().iter().cloned()); let expected_committee_index_filtered = expected @@ -2311,7 +2329,7 @@ impl ApiTester { .get_beacon_pool_attester_slashings_v2() .await .unwrap() - .data; + .into_data(); assert_eq!(result, expected); self @@ -2475,7 +2493,7 @@ impl ApiTester { is_syncing: false, is_optimistic: false, // these tests run without the Bellatrix fork enabled - el_offline: true, + el_offline: false, head_slot, sync_distance, }; @@ -2539,11 +2557,11 @@ impl ApiTester { pub async fn test_get_node_health(self) -> Self { let 
status = self.client.get_node_health().await; match status { - Ok(_) => { - panic!("should return 503 error status code"); + Ok(status) => { + assert_eq!(status, 200); } - Err(e) => { - assert_eq!(e.status().unwrap(), 503); + Err(_) => { + panic!("should return valid status"); } } self @@ -2649,9 +2667,9 @@ impl ApiTester { expected.as_mut().map(|state| state.drop_all_caches()); if let (Some(json), Some(expected)) = (&result_json, &expected) { - assert_eq!(json.data, *expected, "{:?}", state_id); + assert_eq!(json.data(), expected, "{:?}", state_id); assert_eq!( - json.version, + json.version(), Some(expected.fork_name(&self.chain.spec).unwrap()) ); } else { @@ -3157,7 +3175,7 @@ impl ApiTester { .get_validator_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .deconstruct() .0; @@ -3254,7 +3272,7 @@ impl ApiTester { ) { // Compare fork name to ForkVersionedResponse rather than metadata consensus_version, which // is deserialized to a dummy value. - assert_eq!(Some(metadata.consensus_version), response.version); + assert_eq!(metadata.consensus_version, response.version); assert_eq!(ForkName::Base, response.metadata.consensus_version); assert_eq!( metadata.execution_payload_blinded, @@ -3380,7 +3398,7 @@ impl ApiTester { ) .await .unwrap() - .data + .into_data() .deconstruct() .0; assert_eq!(block.slot(), slot); @@ -3494,7 +3512,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data; + .into_data(); let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); @@ -3509,7 +3527,7 @@ impl ApiTester { .await .unwrap() .unwrap() - .data; + .into_data(); assert_eq!(head_block.clone_as_blinded(), signed_block); @@ -3582,7 +3600,7 @@ impl ApiTester { .await .unwrap() .unwrap() - .data; + .into_data(); let signed_block = signed_block_contents.signed_block(); assert_eq!(head_block, **signed_block); @@ -3605,7 +3623,7 @@ impl ApiTester { ) .await .unwrap() - .data; + 
.into_data(); assert_eq!(blinded_block.slot(), slot); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } @@ -3749,7 +3767,7 @@ impl ApiTester { .await .unwrap() .unwrap() - .data; + .into_data(); let expected = attestation; assert_eq!(result, expected); @@ -4323,7 +4341,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -4369,7 +4387,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -4413,7 +4431,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -4487,7 +4505,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -4573,7 +4591,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -4665,7 +4683,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -4755,7 +4773,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -4844,7 +4862,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -4919,7 +4937,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -4982,7 +5000,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() 
.body() .execution_payload() .unwrap() @@ -5058,7 +5076,7 @@ impl ApiTester { .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -5089,7 +5107,7 @@ impl ApiTester { .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -5197,7 +5215,7 @@ impl ApiTester { .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -5238,7 +5256,7 @@ impl ApiTester { .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -5354,7 +5372,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -5435,7 +5453,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -5503,7 +5521,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -5571,7 +5589,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -5638,7 +5656,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() @@ -5709,7 +5727,7 @@ impl ApiTester { .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() - .data + .into_data() .body() .execution_payload() .unwrap() diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 4f1825af20..3ee967eeee 100644 --- 
a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -4,6 +4,9 @@ version = "0.2.0" authors = ["Sigma Prime "] edition = { workspace = true } +[features] +libp2p-websocket = [] + [dependencies] alloy-primitives = { workspace = true } alloy-rlp = { workspace = true } @@ -53,7 +56,21 @@ unused_port = { workspace = true } [dependencies.libp2p] version = "0.55" default-features = false -features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] +features = [ + "identify", + "yamux", + "noise", + "dns", + "tcp", + "tokio", + "plaintext", + "secp256k1", + "macros", + "ecdsa", + "metrics", + "quic", + "upnp", +] [dev-dependencies] async-channel = { workspace = true } @@ -61,6 +78,3 @@ logging = { workspace = true } quickcheck = { workspace = true } quickcheck_macros = { workspace = true } tempfile = { workspace = true } - -[features] -libp2p-websocket = [] diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 9b43e8b581..72d7aa0074 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -12,7 +12,7 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::Runtime; use tokio::time::sleep; -use tracing::{debug, error, warn}; +use tracing::{debug, error, info_span, warn, Instrument}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec, EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MinimalEthSpec, @@ -55,7 +55,7 @@ fn bellatrix_block_large(spec: &ChainSpec) -> BeaconBlock { fn test_tcp_status_rpc() { // Set up the logging. 
let log_level = "debug"; - let enable_logging = false; + let enable_logging = true; build_tracing_subscriber(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); @@ -117,7 +117,8 @@ fn test_tcp_status_rpc() { _ => {} } } - }; + } + .instrument(info_span!("Sender")); // build the receiver future let receiver_future = async { @@ -141,7 +142,8 @@ fn test_tcp_status_rpc() { _ => {} // Ignore other events } } - }; + } + .instrument(info_span!("Receiver")); tokio::select! { _ = sender_future => {} @@ -159,7 +161,7 @@ fn test_tcp_status_rpc() { fn test_tcp_blocks_by_range_chunked_rpc() { // Set up the logging. let log_level = "debug"; - let enable_logging = false; + let enable_logging = true; build_tracing_subscriber(log_level, enable_logging); let messages_to_send = 6; @@ -245,7 +247,8 @@ fn test_tcp_blocks_by_range_chunked_rpc() { _ => {} // Ignore other behaviour events } } - }; + } + .instrument(info_span!("Sender")); // build the receiver future let receiver_future = async { @@ -286,7 +289,8 @@ fn test_tcp_blocks_by_range_chunked_rpc() { _ => {} // Ignore other events } } - }; + } + .instrument(info_span!("Receiver")); tokio::select! { _ = sender_future => {} @@ -304,7 +308,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { fn test_blobs_by_range_chunked_rpc() { // Set up the logging. let log_level = "debug"; - let enable_logging = false; + let enable_logging = true; build_tracing_subscriber(log_level, enable_logging); let slot_count = 32; @@ -373,7 +377,8 @@ fn test_blobs_by_range_chunked_rpc() { _ => {} // Ignore other behaviour events } } - }; + } + .instrument(info_span!("Sender")); // build the receiver future let receiver_future = async { @@ -407,7 +412,8 @@ fn test_blobs_by_range_chunked_rpc() { _ => {} // Ignore other events } } - }; + } + .instrument(info_span!("Receiver")); tokio::select! { _ = sender_future => {} @@ -425,7 +431,7 @@ fn test_blobs_by_range_chunked_rpc() { fn test_tcp_blocks_by_range_over_limit() { // Set up the logging. 
let log_level = "debug"; - let enable_logging = false; + let enable_logging = true; build_tracing_subscriber(log_level, enable_logging); let messages_to_send = 5; @@ -479,7 +485,8 @@ fn test_tcp_blocks_by_range_over_limit() { _ => {} // Ignore other behaviour events } } - }; + } + .instrument(info_span!("Sender")); // build the receiver future let receiver_future = async { @@ -512,7 +519,8 @@ fn test_tcp_blocks_by_range_over_limit() { _ => {} // Ignore other events } } - }; + } + .instrument(info_span!("Receiver")); tokio::select! { _ = sender_future => {} @@ -529,7 +537,7 @@ fn test_tcp_blocks_by_range_over_limit() { fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { // Set up the logging. let log_level = "debug"; - let enable_logging = false; + let enable_logging = true; build_tracing_subscriber(log_level, enable_logging); let messages_to_send = 10; @@ -601,7 +609,8 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { _ => {} // Ignore other behaviour events } } - }; + } + .instrument(info_span!("Sender")); // determine messages to send (PeerId, RequestId). If some, indicates we still need to send // messages @@ -648,7 +657,8 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { } } } - }; + } + .instrument(info_span!("Receiver")); tokio::select! { _ = sender_future => {} @@ -666,7 +676,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { fn test_tcp_blocks_by_range_single_empty_rpc() { // Set up the logging. 
let log_level = "trace"; - let enable_logging = false; + let enable_logging = true; build_tracing_subscriber(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); @@ -734,7 +744,8 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { _ => {} // Ignore other behaviour events } } - }; + } + .instrument(info_span!("Sender")); // build the receiver future let receiver_future = async { @@ -767,7 +778,8 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { _ => {} // Ignore other events } } - }; + } + .instrument(info_span!("Receiver")); tokio::select! { _ = sender_future => {} _ = receiver_future => {} @@ -787,7 +799,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { fn test_tcp_blocks_by_root_chunked_rpc() { // Set up the logging. let log_level = "debug"; - let enable_logging = false; + let enable_logging = true; build_tracing_subscriber(log_level, enable_logging); let messages_to_send = 6; @@ -877,7 +889,8 @@ fn test_tcp_blocks_by_root_chunked_rpc() { _ => {} // Ignore other behaviour events } } - }; + } + .instrument(info_span!("Sender")); // build the receiver future let receiver_future = async { @@ -916,7 +929,8 @@ fn test_tcp_blocks_by_root_chunked_rpc() { _ => {} // Ignore other events } } - }; + } + .instrument(info_span!("Receiver")); tokio::select! { _ = sender_future => {} _ = receiver_future => {} @@ -932,7 +946,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { // Set up the logging. let log_level = "debug"; - let enable_logging = false; + let enable_logging = true; build_tracing_subscriber(log_level, enable_logging); let messages_to_send: u64 = 10; @@ -1015,7 +1029,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { _ => {} // Ignore other behaviour events } } - }; + } + .instrument(info_span!("Sender")); // determine messages to send (PeerId, RequestId). 
If some, indicates we still need to send // messages @@ -1062,7 +1077,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { } } } - }; + } + .instrument(info_span!("Receiver")); tokio::select! { _ = sender_future => {} @@ -1115,7 +1131,8 @@ fn goodbye_test(log_level: &str, enable_logging: bool, protocol: Protocol) { _ => {} // Ignore other RPC messages } } - }; + } + .instrument(info_span!("Sender")); // build the receiver future let receiver_future = async { @@ -1125,7 +1142,8 @@ fn goodbye_test(log_level: &str, enable_logging: bool, protocol: Protocol) { return; } } - }; + } + .instrument(info_span!("Receiver")); let total_future = futures::future::join(sender_future, receiver_future); @@ -1143,7 +1161,7 @@ fn goodbye_test(log_level: &str, enable_logging: bool, protocol: Protocol) { #[allow(clippy::single_match)] fn tcp_test_goodbye_rpc() { let log_level = "debug"; - let enabled_logging = false; + let enabled_logging = true; goodbye_test(log_level, enabled_logging, Protocol::Tcp); } @@ -1152,13 +1170,15 @@ fn tcp_test_goodbye_rpc() { #[allow(clippy::single_match)] fn quic_test_goodbye_rpc() { let log_level = "debug"; - let enabled_logging = false; + let enabled_logging = true; goodbye_test(log_level, enabled_logging, Protocol::Quic); } // Test that the receiver delays the responses during response rate-limiting. #[test] fn test_delayed_rpc_response() { + // Set up the logging. + build_tracing_subscriber("debug", true); let rt = Arc::new(Runtime::new().unwrap()); let spec = Arc::new(E::default_spec()); @@ -1214,7 +1234,7 @@ fn test_delayed_rpc_response() { app_request_id: _, response, } => { - debug!(%request_id, "Sender received"); + debug!(%request_id, elapsed = ?request_sent_at.elapsed(), "Sender received response"); assert_eq!(response, rpc_response); match request_id { @@ -1289,6 +1309,8 @@ fn test_delayed_rpc_response() { // once, thanks to the self-limiter on the sender side. #[test] fn test_active_requests() { + // Set up the logging. 
+ build_tracing_subscriber("debug", true); let rt = Arc::new(Runtime::new().unwrap()); let spec = Arc::new(E::default_spec()); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 4e36953880..cdb6ba7a83 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -4,17 +4,12 @@ version = "0.2.0" authors = ["Sigma Prime "] edition = { workspace = true } -[dev-dependencies] -bls = { workspace = true } -eth2 = { workspace = true } -eth2_network_config = { workspace = true } -genesis = { workspace = true } -gossipsub = { workspace = true } -k256 = "0.13.4" -kzg = { workspace = true } -matches = "0.1.8" -rand_chacha = "0.3.1" -serde_json = { workspace = true } +[features] +# NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill +disable-backfill = [] +fork_from_env = ["beacon_chain/fork_from_env"] +portable = ["beacon_chain/portable"] +test_logger = [] [dependencies] alloy-primitives = { workspace = true } @@ -51,10 +46,14 @@ tracing = { workspace = true } tracing-subscriber = { workspace = true } types = { workspace = true } -[features] -# NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill -disable-backfill = [] -fork_from_env = ["beacon_chain/fork_from_env"] -portable = ["beacon_chain/portable"] -test_logger = [] -ci_logger = [] +[dev-dependencies] +bls = { workspace = true } +eth2 = { workspace = true } +eth2_network_config = { workspace = true } +genesis = { workspace = true } +gossipsub = { workspace = true } +k256 = "0.13.4" +kzg = { workspace = true } +matches = "0.1.8" +rand_chacha = "0.3.1" +serde_json = { workspace = true } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 2995a4d7e8..638f9e4824 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ 
b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -537,7 +537,7 @@ impl NetworkBeaconProcessor { attestation: single_attestation, }, None, - AttnError::BeaconChainError(error), + AttnError::BeaconChainError(Box::new(error)), seen_timestamp, ); } @@ -2734,41 +2734,57 @@ impl NetworkBeaconProcessor { "attn_to_finalized_block", ); } - AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError( - HotColdDBError::FinalizedStateNotInHotDatabase { .. }, - ))) => { - debug!(%peer_id, "Attestation for finalized state"); - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - } - e @ AttnError::BeaconChainError(BeaconChainError::MaxCommitteePromises(_)) => { - debug!( - target_root = ?failed_att.attestation_data().target.root, - ?beacon_block_root, - slot = ?failed_att.attestation_data().slot, - ?attestation_type, - error = ?e, - %peer_id, - "Dropping attestation" - ); - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - } AttnError::BeaconChainError(e) => { - /* - * Lighthouse hit an unexpected error whilst processing the attestation. It - * should be impossible to trigger a `BeaconChainError` from the network, - * so we have a bug. - * - * It's not clear if the message is invalid/malicious. - */ - error!( - ?beacon_block_root, - slot = ?failed_att.attestation_data().slot, - ?attestation_type, - %peer_id, - error = ?e, - "Unable to validate attestation" - ); - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + match e.as_ref() { + BeaconChainError::DBError(Error::HotColdDBError( + HotColdDBError::FinalizedStateNotInHotDatabase { .. 
}, + )) => { + debug!(%peer_id, "Attestation for finalized state"); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + BeaconChainError::MaxCommitteePromises(e) => { + debug!( + target_root = ?failed_att.attestation_data().target.root, + ?beacon_block_root, + slot = ?failed_att.attestation_data().slot, + ?attestation_type, + error = ?e, + %peer_id, + "Dropping attestation" + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + _ => { + /* + * Lighthouse hit an unexpected error whilst processing the attestation. It + * should be impossible to trigger a `BeaconChainError` from the network, + * so we have a bug. + * + * It's not clear if the message is invalid/malicious. + */ + error!( + ?beacon_block_root, + slot = ?failed_att.attestation_data().slot, + ?attestation_type, + %peer_id, + error = ?e, + "Unable to validate attestation" + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } } } diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 96d5bc8181..7c3c854ed8 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -66,7 +66,7 @@ impl NetworkBeaconProcessor { fn check_peer_relevance( &self, remote: &StatusMessage, - ) -> Result, BeaconChainError> { + ) -> Result, Box> { let local = self.chain.status_message(); let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); @@ -112,7 +112,8 @@ impl NetworkBeaconProcessor { if self .chain .block_root_at_slot(remote_finalized_slot, WhenSlotSkipped::Prev) - .map(|root_opt| root_opt != Some(remote.finalized_root))? + .map(|root_opt| root_opt != Some(remote.finalized_root)) + .map_err(Box::new)? 
{ Some("Different finalized chain".to_string()) } else { diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 5863091cf0..38095ec434 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -107,6 +107,8 @@ impl TestRig { // deterministic seed let rng = ChaCha20Rng::from_seed([0u8; 32]); + init_tracing(); + TestRig { beacon_processor_rx, beacon_processor_rx_queue: vec![], diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index ec24ddb036..3dca457108 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -9,9 +9,14 @@ use beacon_processor::WorkEvent; use lighthouse_network::NetworkGlobals; use rand_chacha::ChaCha20Rng; use slot_clock::ManualSlotClock; -use std::sync::Arc; +use std::fs::OpenOptions; +use std::io::Write; +use std::sync::{Arc, Once}; use store::MemoryStore; use tokio::sync::mpsc; +use tracing_subscriber::fmt::MakeWriter; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; use types::{ChainSpec, ForkName, MinimalEthSpec as E}; mod lookups; @@ -65,3 +70,55 @@ struct TestRig { fork_name: ForkName, spec: Arc, } + +// Environment variable to read if `fork_from_env` feature is enabled. +pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; +// Environment variable specifying the log output directory in CI. +pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR"; + +static INIT_TRACING: Once = Once::new(); + +pub fn init_tracing() { + INIT_TRACING.call_once(|| { + if std::env::var(CI_LOGGER_DIR_ENV_VAR).is_ok() { + // Enable logging to log files for each test and each fork. + tracing_subscriber::registry() + .with( + tracing_subscriber::fmt::layer() + .with_ansi(false) + .with_writer(CILogWriter), + ) + .init(); + } + }); +} + +// CILogWriter writes logs to separate files for each test and each fork. 
+struct CILogWriter; + +impl<'a> MakeWriter<'a> for CILogWriter { + type Writer = Box; + + // fmt::Layer calls this method each time an event is recorded. + fn make_writer(&'a self) -> Self::Writer { + let log_dir = std::env::var(CI_LOGGER_DIR_ENV_VAR).unwrap(); + let fork_name = std::env::var(FORK_NAME_ENV_VAR) + .map(|s| format!("{s}_")) + .unwrap_or_default(); + + // The current test name can be got via the thread name. + let test_name = std::thread::current() + .name() + .unwrap_or("unnamed") + .replace(|c: char| !c.is_alphanumeric(), "_"); + + let file_path = format!("{log_dir}/{fork_name}{test_name}.log"); + let file = OpenOptions::new() + .append(true) + .create(true) + .open(&file_path) + .expect("failed to open a log file"); + + Box::new(file) + } +} diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 570b74226c..beaf818882 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -4,6 +4,9 @@ version = "0.2.0" authors = ["Michael Sproul "] edition = { workspace = true } +[features] +portable = ["beacon_chain/portable"] + [dependencies] bitvec = { workspace = true } derivative = { workspace = true } @@ -23,6 +26,3 @@ types = { workspace = true } beacon_chain = { workspace = true } maplit = { workspace = true } tokio = { workspace = true } - -[features] -portable = ["beacon_chain/portable"] diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 97d0583e34..78280278e0 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -7,15 +7,18 @@ use state_processing::common::{ use std::collections::HashMap; use types::{ beacon_state::BeaconStateBase, - consts::altair::{PARTICIPATION_FLAG_WEIGHTS, WEIGHT_DENOMINATOR}, + consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, Attestation, BeaconState, BitList, ChainSpec, EthSpec, }; +pub const 
PROPOSER_REWARD_DENOMINATOR: u64 = + (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR / PROPOSER_WEIGHT; + #[derive(Debug, Clone)] pub struct AttMaxCover<'a, E: EthSpec> { /// Underlying attestation. pub att: CompactAttestationRef<'a, E>, - /// Mapping of validator indices and their rewards. + /// Mapping of validator indices and their reward *numerators*. pub fresh_validators_rewards: HashMap, } @@ -30,7 +33,7 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { if let BeaconState::Base(ref base_state) = state { Self::new_for_base(att, state, base_state, total_active_balance, spec) } else { - Self::new_for_altair_deneb(att, state, reward_cache, spec) + Self::new_for_altair_or_later(att, state, reward_cache, spec) } } @@ -68,7 +71,7 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { } /// Initialise an attestation cover object for Altair or later. - pub fn new_for_altair_deneb( + pub fn new_for_altair_or_later( att: CompactAttestationRef<'a, E>, state: &BeaconState, reward_cache: &'a RewardCache, @@ -103,10 +106,7 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { } } - let proposer_reward = proposer_reward_numerator - .checked_div(WEIGHT_DENOMINATOR.checked_mul(spec.proposer_reward_quotient)?)?; - - Some((index, proposer_reward)).filter(|_| proposer_reward != 0) + Some((index, proposer_reward_numerator)).filter(|_| proposer_reward_numerator != 0) }) .collect(); @@ -163,7 +163,7 @@ impl<'a, E: EthSpec> MaxCover for AttMaxCover<'a, E> { } fn score(&self) -> usize { - self.fresh_validators_rewards.values().sum::() as usize + (self.fresh_validators_rewards.values().sum::() / PROPOSER_REWARD_DENOMINATOR) as usize } } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index ec8c6640b1..7481aa896a 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -9,7 +9,7 @@ mod reward_cache; mod sync_aggregate_id; pub use crate::bls_to_execution_changes::ReceivedPreCapella; -pub use 
attestation::{earliest_attestation_validators, AttMaxCover}; +pub use attestation::{earliest_attestation_validators, AttMaxCover, PROPOSER_REWARD_DENOMINATOR}; pub use attestation_storage::{CompactAttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ @@ -1402,7 +1402,8 @@ mod release_tests { .retain(|validator_index, _| !seen_indices.contains(validator_index)); // Check that rewards are in decreasing order - let rewards = fresh_validators_rewards.values().sum(); + let rewards = + fresh_validators_rewards.values().sum::() / PROPOSER_REWARD_DENOMINATOR; assert!(prev_reward >= rewards); prev_reward = rewards; seen_indices.extend(fresh_validators_rewards.keys()); diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 908f0759a9..13df83efab 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -9,12 +9,6 @@ default = ["leveldb"] leveldb = ["dep:leveldb"] redb = ["dep:redb"] -[dev-dependencies] -beacon_chain = { workspace = true } -criterion = { workspace = true } -rand = { workspace = true, features = ["small_rng"] } -tempfile = { workspace = true } - [dependencies] bls = { workspace = true } db-key = "0.0.5" @@ -40,6 +34,12 @@ types = { workspace = true } xdelta3 = { workspace = true } zstd = { workspace = true } +[dev-dependencies] +beacon_chain = { workspace = true } +criterion = { workspace = true } +rand = { workspace = true, features = ["small_rng"] } +tempfile = { workspace = true } + [[bench]] name = "hdiff" harness = false diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index ed6154da80..cff08bc655 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -57,7 +57,7 @@ pub enum Error { #[cfg(feature = "leveldb")] LevelDbError(LevelDBError), #[cfg(feature = "redb")] - RedbError(redb::Error), + RedbError(Box), CacheBuildError(EpochCacheError), RandaoMixOutOfBounds, MilhouseError(milhouse::Error), @@ -161,49 +161,49 @@ impl 
From for Error { #[cfg(feature = "redb")] impl From for Error { fn from(e: redb::Error) -> Self { - Error::RedbError(e) + Error::RedbError(Box::new(e)) } } #[cfg(feature = "redb")] impl From for Error { fn from(e: redb::TableError) -> Self { - Error::RedbError(e.into()) + Error::RedbError(Box::new(e.into())) } } #[cfg(feature = "redb")] impl From for Error { fn from(e: redb::TransactionError) -> Self { - Error::RedbError(e.into()) + Error::RedbError(Box::new(e.into())) } } #[cfg(feature = "redb")] impl From for Error { fn from(e: redb::DatabaseError) -> Self { - Error::RedbError(e.into()) + Error::RedbError(Box::new(e.into())) } } #[cfg(feature = "redb")] impl From for Error { fn from(e: redb::StorageError) -> Self { - Error::RedbError(e.into()) + Error::RedbError(Box::new(e.into())) } } #[cfg(feature = "redb")] impl From for Error { fn from(e: redb::CommitError) -> Self { - Error::RedbError(e.into()) + Error::RedbError(Box::new(e.into())) } } #[cfg(feature = "redb")] impl From for Error { fn from(e: redb::CompactionError) -> Self { - Error::RedbError(e.into()) + Error::RedbError(Box::new(e.into())) } } diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index 0d448e6c06..ab78b65ae9 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -41,7 +41,7 @@ fn http_server_genesis_state() { .block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0)))) .expect("should fetch state from http api") .unwrap() - .data; + .into_data(); let mut db_state = node .client diff --git a/book/.markdownlint.yml b/book/.markdownlint.yml index 4f7d113364..a40a2f5dbd 100644 --- a/book/.markdownlint.yml +++ b/book/.markdownlint.yml @@ -25,4 +25,8 @@ MD036: false # MD040 code blocks should have a language specified: https://github.com/DavidAnson/markdownlint/blob/main/doc/md040.md # Set to false as the help_x.md files are code blocks without a language specified, which is fine and does not need to change -MD040: false \ No newline at end of file 
+MD040: false + +# MD059 Link text should be descriptive: https://github.com/DavidAnson/markdownlint/blob/main/doc/md059.md +# Set to false because it is too strict +MD059: false diff --git a/book/src/advanced_database_migrations.md b/book/src/advanced_database_migrations.md index 3c56fcadc1..e9954e2ad9 100644 --- a/book/src/advanced_database_migrations.md +++ b/book/src/advanced_database_migrations.md @@ -125,7 +125,7 @@ Several conditions need to be met in order to run `lighthouse db`: 2. The command must run as the user that owns the beacon node database. If you are using systemd then your beacon node might run as a user called `lighthousebeacon`. 3. The `--datadir` flag must be set to the location of the Lighthouse data directory. -4. The `--network` flag must be set to the correct network, e.g. `mainnet`, `holesky` or `sepolia`. +4. The `--network` flag must be set to the correct network, e.g. `mainnet`, `hoodi` or `sepolia`. The general form for a `lighthouse db` command is: diff --git a/book/src/advanced_release_candidates.md b/book/src/advanced_release_candidates.md index 9f00da9ae9..f5aee05ede 100644 --- a/book/src/advanced_release_candidates.md +++ b/book/src/advanced_release_candidates.md @@ -40,4 +40,4 @@ There can also be a scenario that a bug has been found and requires an urgent fi ## When *not* to use a release candidate -Other than the above scenarios, it is generally not recommended to use release candidates for any critical tasks on mainnet (e.g., staking). To test new release candidate features, try one of the testnets (e.g., Holesky). +Other than the above scenarios, it is generally not recommended to use release candidates for any critical tasks on mainnet (e.g., staking). To test new release candidate features, try one of the testnets (e.g., Hoodi). 
diff --git a/book/src/advanced_web3signer.md b/book/src/advanced_web3signer.md index 6145fd4a71..4280d58500 100644 --- a/book/src/advanced_web3signer.md +++ b/book/src/advanced_web3signer.md @@ -56,3 +56,11 @@ SSL client authentication with the "self-signed" certificate in `/home/paul/my-k > with a new timeout in milliseconds. This is the timeout before requests to Web3Signer are > considered to be failures. Setting a value that is too long may create contention and late duties > in the VC. Setting it too short will result in failed signatures and therefore missed duties. + +## Slashing protection database + +Web3Signer can be configured with its own slashing protection database. This makes the local slashing protection database maintained by Lighthouse redundant. To disable the Lighthouse slashing protection database for Web3Signer keys, use the flag `--disable-slashing-protection-web3signer` on the validator client. + +> Note: DO NOT use this flag unless you are certain that slashing protection is enabled on Web3Signer. + +The `--init-slashing-protection` flag is also required to initialize the slashing protection database locally. diff --git a/book/src/api_vc_auth_header.md b/book/src/api_vc_auth_header.md index f792ee870e..3e536cf3c8 100644 --- a/book/src/api_vc_auth_header.md +++ b/book/src/api_vc_auth_header.md @@ -32,7 +32,7 @@ When starting the validator client it will output a log message containing the p to the file containing the api token.
```text -Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/holesky/validators/api-token.txt", listen_address: 127.0.0.1:5062 +Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/hoodi/validators/api-token.txt", listen_address: 127.0.0.1:5062 ``` The _path_ to the API token may also be fetched from the HTTP API itself (this endpoint is the only @@ -46,7 +46,7 @@ Response: ```json { - "token_path": "/home/karlm/.lighthouse/holesky/validators/api-token.txt" + "token_path": "/home/karlm/.lighthouse/hoodi/validators/api-token.txt" } ``` diff --git a/book/src/api_vc_endpoints.md b/book/src/api_vc_endpoints.md index e51f5d29ae..87c9a517a5 100644 --- a/book/src/api_vc_endpoints.md +++ b/book/src/api_vc_endpoints.md @@ -226,26 +226,33 @@ Example Response Body ```json { "data": { - "CONFIG_NAME": "holesky", + "CONFIG_NAME": "hoodi", "PRESET_BASE": "mainnet", - "TERMINAL_TOTAL_DIFFICULTY": "10790000", + "TERMINAL_TOTAL_DIFFICULTY": "0", "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH": "18446744073709551615", "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT": "16384", - "MIN_GENESIS_TIME": "1614588812", - "GENESIS_FORK_VERSION": "0x00001020", - "GENESIS_DELAY": "1919188", - "ALTAIR_FORK_VERSION": "0x01001020", - "ALTAIR_FORK_EPOCH": "36660", - "BELLATRIX_FORK_VERSION": "0x02001020", - "BELLATRIX_FORK_EPOCH": "112260", - "CAPELLA_FORK_VERSION": "0x03001020", - "CAPELLA_FORK_EPOCH": "162304", + "MIN_GENESIS_TIME": "1742212800", + "GENESIS_FORK_VERSION": "0x10000910", + "GENESIS_DELAY": "600", + "ALTAIR_FORK_VERSION": "0x20000910", + "ALTAIR_FORK_EPOCH": "0", + "BELLATRIX_FORK_VERSION": "0x30000910", + "BELLATRIX_FORK_EPOCH": "0", + "CAPELLA_FORK_VERSION": "0x40000910", + "CAPELLA_FORK_EPOCH": "0", + "DENEB_FORK_VERSION": "0x50000910", + "DENEB_FORK_EPOCH": "0", + "ELECTRA_FORK_VERSION": "0x60000910", + "ELECTRA_FORK_EPOCH": "2048", + "FULU_FORK_VERSION": "0x70000910", + 
"FULU_FORK_EPOCH": "18446744073709551615", "SECONDS_PER_SLOT": "12", - "SECONDS_PER_ETH1_BLOCK": "14", + "SECONDS_PER_ETH1_BLOCK": "12", "MIN_VALIDATOR_WITHDRAWABILITY_DELAY": "256", "SHARD_COMMITTEE_PERIOD": "256", "ETH1_FOLLOW_DISTANCE": "2048", + "SUBNETS_PER_NODE": "2", "INACTIVITY_SCORE_BIAS": "4", "INACTIVITY_SCORE_RECOVERY_RATE": "16", "EJECTION_BALANCE": "16000000000", @@ -253,9 +260,36 @@ Example Response Body "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT": "8", "CHURN_LIMIT_QUOTIENT": "65536", "PROPOSER_SCORE_BOOST": "40", - "DEPOSIT_CHAIN_ID": "5", - "DEPOSIT_NETWORK_ID": "5", - "DEPOSIT_CONTRACT_ADDRESS": "0xff50ed3d0ec03ac01d4c79aad74928bff48a7b2b", + "DEPOSIT_CHAIN_ID": "560048", + "DEPOSIT_NETWORK_ID": "560048", + "DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa", + "GAS_LIMIT_ADJUSTMENT_FACTOR": "1024", + "MAX_PAYLOAD_SIZE": "10485760", + "MAX_REQUEST_BLOCKS": "1024", + "MIN_EPOCHS_FOR_BLOCK_REQUESTS": "33024", + "TTFB_TIMEOUT": "5", + "RESP_TIMEOUT": "10", + "ATTESTATION_PROPAGATION_SLOT_RANGE": "32", + "MAXIMUM_GOSSIP_CLOCK_DISPARITY_MILLIS": "500", + "MESSAGE_DOMAIN_INVALID_SNAPPY": "0x00000000", + "MESSAGE_DOMAIN_VALID_SNAPPY": "0x01000000", + "ATTESTATION_SUBNET_PREFIX_BITS": "6", + "MAX_REQUEST_BLOCKS_DENEB": "128", + "MAX_REQUEST_BLOB_SIDECARS": "768", + "MAX_REQUEST_DATA_COLUMN_SIDECARS": "16384", + "MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS": "4096", + "BLOB_SIDECAR_SUBNET_COUNT": "6", + "MAX_BLOBS_PER_BLOCK": "6", + "MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA": "128000000000", + "MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT": "256000000000", + "MAX_BLOBS_PER_BLOCK_ELECTRA": "9", + "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA": "9", + "MAX_REQUEST_BLOB_SIDECARS_ELECTRA": "1152", + "NUMBER_OF_COLUMNS": "128", + "NUMBER_OF_CUSTODY_GROUPS": "128", + "DATA_COLUMN_SIDECAR_SUBNET_COUNT": "128", + "SAMPLES_PER_SLOT": "8", + "CUSTODY_REQUIREMENT": "4", "MAX_COMMITTEES_PER_SLOT": "64", "TARGET_COMMITTEE_SIZE": "128", "MAX_VALIDATORS_PER_COMMITTEE": "2048", @@ 
-304,23 +338,45 @@ Example Response Body "MAX_BLS_TO_EXECUTION_CHANGES": "16", "MAX_WITHDRAWALS_PER_PAYLOAD": "16", "MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP": "16384", - "DOMAIN_DEPOSIT": "0x03000000", - "BLS_WITHDRAWAL_PREFIX": "0x00", - "RANDOM_SUBNETS_PER_VALIDATOR": "1", - "DOMAIN_SYNC_COMMITTEE": "0x07000000", - "TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE": "16", - "DOMAIN_BEACON_ATTESTER": "0x01000000", - "DOMAIN_VOLUNTARY_EXIT": "0x04000000", - "DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF": "0x08000000", - "DOMAIN_CONTRIBUTION_AND_PROOF": "0x09000000", - "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION": "256", - "TARGET_AGGREGATORS_PER_COMMITTEE": "16", - "DOMAIN_APPLICATION_MASK": "0x00000001", - "DOMAIN_AGGREGATE_AND_PROOF": "0x06000000", - "DOMAIN_RANDAO": "0x02000000", - "DOMAIN_SELECTION_PROOF": "0x05000000", + "MAX_BLOB_COMMITMENTS_PER_BLOCK": "4096", + "FIELD_ELEMENTS_PER_BLOB": "4096", + "MIN_ACTIVATION_BALANCE": "32000000000", + "MAX_EFFECTIVE_BALANCE_ELECTRA": "2048000000000", + "MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA": "4096", + "WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA": "4096", + "PENDING_DEPOSITS_LIMIT": "134217728", + "PENDING_PARTIAL_WITHDRAWALS_LIMIT": "134217728", + "PENDING_CONSOLIDATIONS_LIMIT": "262144", + "MAX_ATTESTER_SLASHINGS_ELECTRA": "1", + "MAX_ATTESTATIONS_ELECTRA": "8", + "MAX_DEPOSIT_REQUESTS_PER_PAYLOAD": "8192", + "MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD": "16", + "MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD": "2", + "MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP": "8", + "MAX_PENDING_DEPOSITS_PER_EPOCH": "16", + "FIELD_ELEMENTS_PER_CELL": "64", + "FIELD_ELEMENTS_PER_EXT_BLOB": "8192", + "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH": "4", "DOMAIN_BEACON_PROPOSER": "0x00000000", - "SYNC_COMMITTEE_SUBNET_COUNT": "4" + "DOMAIN_CONTRIBUTION_AND_PROOF": "0x09000000", + "DOMAIN_DEPOSIT": "0x03000000", + "DOMAIN_SELECTION_PROOF": "0x05000000", + "VERSIONED_HASH_VERSION_KZG": "1", + "TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE": "16", + "DOMAIN_VOLUNTARY_EXIT": "0x04000000", + 
"BLS_WITHDRAWAL_PREFIX": "0x00", + "DOMAIN_APPLICATION_MASK": "0x00000001", + "DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF": "0x08000000", + "DOMAIN_SYNC_COMMITTEE": "0x07000000", + "COMPOUNDING_WITHDRAWAL_PREFIX": "0x02", + "TARGET_AGGREGATORS_PER_COMMITTEE": "16", + "SYNC_COMMITTEE_SUBNET_COUNT": "4", + "DOMAIN_BEACON_ATTESTER": "0x01000000", + "UNSET_DEPOSIT_REQUESTS_START_INDEX": "18446744073709551615", + "FULL_EXIT_REQUEST_AMOUNT": "0", + "DOMAIN_AGGREGATE_AND_PROOF": "0x06000000", + "ETH1_ADDRESS_WITHDRAWAL_PREFIX": "0x01", + "DOMAIN_RANDAO": "0x02000000" } } ``` @@ -352,7 +408,7 @@ Example Response Body ```json { - "token_path": "/home/karlm/.lighthouse/holesky/validators/api-token.txt" + "token_path": "/home/karlm/.lighthouse/hoodi/validators/api-token.txt" } ``` diff --git a/book/src/archived_key_management.md b/book/src/archived_key_management.md index 3f600794e0..d8b00e8352 100644 --- a/book/src/archived_key_management.md +++ b/book/src/archived_key_management.md @@ -75,21 +75,21 @@ mnemonic is encrypted with a password. It is the responsibility of the user to define a strong password. The password is only required for interacting with the wallet, it is not required for recovering keys from a mnemonic. -To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Holesky testnet named `wally` and saves it in `~/.lighthouse/holesky/wallets` with a randomly generated password saved +To create a wallet, use the `lighthouse account wallet` command. 
For example, if we wish to create a new wallet for the Hoodi testnet named `wally` and saves it in `~/.lighthouse/hoodi/wallets` with a randomly generated password saved to `./wallet.pass`: ```bash -lighthouse --network holesky account wallet create --name wally --password-file wally.pass +lighthouse --network hoodi account wallet create --name wally --password-file wally.pass ``` -Using the above command, a wallet will be created in `~/.lighthouse/holesky/wallets` with the name +Using the above command, a wallet will be created in `~/.lighthouse/hoodi/wallets` with the name `wally`. It is encrypted using the password defined in the `wally.pass` file. During the wallet creation process, a 24-word mnemonic will be displayed. Record the mnemonic because it allows you to recreate the files in the case of data loss. > Notes: > -> - When navigating to the directory `~/.lighthouse/holesky/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used. +> - When navigating to the directory `~/.lighthouse/hoodi/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used. > - The password is not `wally.pass`, it is the _content_ of the > `wally.pass` file. > - If `wally.pass` already exists, the wallet password will be set to the content @@ -100,18 +100,18 @@ During the wallet creation process, a 24-word mnemonic will be displayed. Record Validators are fundamentally represented by a BLS keypair. In Lighthouse, we use a wallet to generate these keypairs. Once a wallet exists, the `lighthouse account validator create` command can be used to generate the BLS keypair and all necessary information to submit a validator deposit. 
With the `wally` wallet created in [Step 1](#step-1-create-a-wallet-and-record-the-mnemonic), we can create a validator with the command: ```bash -lighthouse --network holesky account validator create --wallet-name wally --wallet-password wally.pass --count 1 +lighthouse --network hoodi account validator create --wallet-name wally --wallet-password wally.pass --count 1 ``` This command will: -- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/holesky/wallets`, updating it so that it generates a new key next time. -- Create a new directory `~/.lighthouse/holesky/validators` containing: +- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/hoodi/wallets`, updating it so that it generates a new key next time. +- Create a new directory `~/.lighthouse/hoodi/validators` containing: - An encrypted keystore file `voting-keystore.json` containing the validator's voting keypair. - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH`) which can be submitted to the deposit contract for the Goerli testnet. Other networks can be set via the `--network` parameter. -- Create a new directory `~/.lighthouse/holesky/secrets` which stores a password to the validator's voting keypair. +- Create a new directory `~/.lighthouse/hoodi/secrets` which stores a password to the validator's voting keypair. If you want to create another validator in the future, repeat [Step 2](#step-2-create-a-validator). The wallet keeps track of how many validators it has generated and ensures that a new validator is generated each time. The important thing is to keep the 24-word mnemonic safe so that it can be used to generate new validator keys if needed. diff --git a/book/src/faq.md b/book/src/faq.md index 62a93166b1..b97a82fcca 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -275,7 +275,7 @@ network configuration settings. 
Ensure that the network you wish to connect to is correct (the beacon node outputs the network it is connecting to in the initial boot-up log lines). On top of this, ensure that you are not using the same `datadir` as a previous network, i.e., if you have been running the -`Holesky` testnet and are now trying to join a new network but using the same +`Hoodi` testnet and are now trying to join a new network but using the same `datadir` (the `datadir` is also printed out in the beacon node's logs on boot-up). diff --git a/book/src/installation_docker.md b/book/src/installation_docker.md index 8ee0c56bb4..12ce4f690c 100644 --- a/book/src/installation_docker.md +++ b/book/src/installation_docker.md @@ -99,7 +99,7 @@ You can run a Docker beacon node with the following command: docker run -p 9000:9000/tcp -p 9000:9000/udp -p 9001:9001/udp -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse --network mainnet beacon --http --http-address 0.0.0.0 ``` -> To join the Holesky testnet, use `--network holesky` instead. +> To join the Hoodi testnet, use `--network hoodi` instead. > The `-v` (Volumes) and `-p` (Ports) and values are described below. diff --git a/book/src/mainnet_validator.md b/book/src/mainnet_validator.md index ba35ba6f12..8da8b98f89 100644 --- a/book/src/mainnet_validator.md +++ b/book/src/mainnet_validator.md @@ -12,7 +12,7 @@ managing servers. You'll also need at least 32 ETH! Being educated is critical to a validator's success. Before submitting your mainnet deposit, we recommend: -- Thoroughly exploring the [Staking Launchpad][launchpad] website, try running through the deposit process using a testnet launchpad such as the [Holesky staking launchpad](https://holesky.launchpad.ethereum.org/en/). +- Thoroughly exploring the [Staking Launchpad][launchpad] website, try running through the deposit process using a testnet launchpad such as the [Hoodi staking launchpad](https://hoodi.launchpad.ethereum.org/en/). 
- Running a testnet validator. - Reading through this documentation, especially the [Slashing Protection][slashing] section. - Performing a web search and doing your own research. @@ -38,7 +38,7 @@ There are five primary steps to become a validator: > **Important note**: The guide below contains both mainnet and testnet instructions. We highly recommend *all* users to **run a testnet validator** prior to staking mainnet ETH. By far, the best technical learning experience is to run a testnet validator. You can get hands-on experience with all the tools and it's a great way to test your staking hardware. 32 ETH is a significant outlay and joining a testnet is a great way to "try before you buy". -> **Never use real ETH to join a testnet!** Testnet such as the Holesky testnet uses Holesky ETH which is worthless. This allows experimentation without real-world costs. +> **Never use real ETH to join a testnet!** Testnet such as the Hoodi testnet uses Hoodi ETH which is worthless. This allows experimentation without real-world costs. ### Step 1. Create validator keys @@ -48,7 +48,7 @@ The Ethereum Foundation provides the [staking-deposit-cli](https://github.com/et ./deposit new-mnemonic ``` -and follow the instructions to generate the keys. When prompted for a network, select `mainnet` if you want to run a mainnet validator, or select `holesky` if you want to run a Holesky testnet validator. A new mnemonic will be generated in the process. +and follow the instructions to generate the keys. When prompted for a network, select `mainnet` if you want to run a mainnet validator, or select `hoodi` if you want to run a Hoodi testnet validator. A new mnemonic will be generated in the process. > **Important note:** A mnemonic (or seed phrase) is a 24-word string randomly generated in the process. It is highly recommended to write down the mnemonic and keep it safe offline. 
It is important to ensure that the mnemonic is never stored in any digital form (computers, mobile phones, etc) connected to the internet. Please also make one or more backups of the mnemonic to ensure your ETH is not lost in the case of data loss. It is very important to keep your mnemonic private as it represents the ultimate control of your ETH. @@ -71,10 +71,10 @@ Mainnet: lighthouse --network mainnet account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` -Holesky testnet: +Hoodi testnet: ```bash -lighthouse --network holesky account validator import --directory $HOME/staking-deposit-cli/validator_keys +lighthouse --network hoodi account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` > Note: The user must specify the consensus client network that they are importing the keys by using the `--network` flag. @@ -132,10 +132,10 @@ Mainnet: lighthouse vc --network mainnet --suggested-fee-recipient YourFeeRecipientAddress ``` -Holesky testnet: +Hoodi testnet: ```bash -lighthouse vc --network holesky --suggested-fee-recipient YourFeeRecipientAddress +lighthouse vc --network hoodi --suggested-fee-recipient YourFeeRecipientAddress ``` The `validator client` manages validators using data obtained from the beacon node via a HTTP API. You are highly recommended to enter a fee-recipient by changing `YourFeeRecipientAddress` to an Ethereum address under your control. @@ -153,7 +153,7 @@ by the protocol. ### Step 5: Submit deposit (a minimum of 32ETH to activate one validator) -After you have successfully run and synced the execution client, beacon node and validator client, you can now proceed to submit the deposit. Go to the mainnet [Staking launchpad](https://launchpad.ethereum.org/en/) (or [Holesky staking launchpad](https://holesky.launchpad.ethereum.org/en/) for testnet validator) and carefully go through the steps to becoming a validator. 
Once you are ready, you can submit the deposit by sending ETH to the deposit contract. Upload the `deposit_data-*.json` file generated in [Step 1](#step-1-create-validator-keys) to the Staking launchpad. +After you have successfully run and synced the execution client, beacon node and validator client, you can now proceed to submit the deposit. Go to the mainnet [Staking launchpad](https://launchpad.ethereum.org/en/) (or [Hoodi staking launchpad](https://hoodi.launchpad.ethereum.org/en/) for testnet validator) and carefully go through the steps to becoming a validator. Once you are ready, you can submit the deposit by sending ETH to the deposit contract. Upload the `deposit_data-*.json` file generated in [Step 1](#step-1-create-validator-keys) to the Staking launchpad. > **Important note:** Double check that the deposit contract for mainnet is `0x00000000219ab540356cBB839Cbe05303d7705Fa` before you confirm the transaction. diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index 15567497e5..6c43ef5e32 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -54,7 +54,7 @@ Notable flags: - `--network` flag, which selects a network: - `lighthouse` (no flag): Mainnet. - `lighthouse --network mainnet`: Mainnet. - - `lighthouse --network holesky`: Holesky (testnet). + - `lighthouse --network hoodi`: Hoodi (testnet). - `lighthouse --network sepolia`: Sepolia (testnet). - `lighthouse --network chiado`: Chiado (testnet). - `lighthouse --network gnosis`: Gnosis chain. diff --git a/book/src/ui_installation.md b/book/src/ui_installation.md index 0bd14f6183..df0522f07a 100644 --- a/book/src/ui_installation.md +++ b/book/src/ui_installation.md @@ -38,7 +38,7 @@ We recommend running Siren's container next to your beacon node (on the same ser specified as well as the validator clients `API_TOKEN`, which can be obtained from the [`Validator Client Authorization Header`](./api_vc_auth_header.md). 
Note that the HTTP API ports must be accessible from within docker and cannot just be listening on localhost. This means using the - `--http-address 0.0.0.0` flag on the beacon node and validator client. + `--http-address 0.0.0.0` flag on the beacon node, and both `--http-address 0.0.0.0` and `--unencrypted-http-transport` flags on the validator client. 1. Run the containers with docker compose diff --git a/book/src/validator_voluntary_exit.md b/book/src/validator_voluntary_exit.md index c17c0f4fc4..d5d1722d59 100644 --- a/book/src/validator_voluntary_exit.md +++ b/book/src/validator_voluntary_exit.md @@ -27,13 +27,13 @@ After validating the password, the user will be prompted to enter a special exit The exit phrase is the following: > Exit my validator -Below is an example for initiating a voluntary exit on the Holesky testnet. +Below is an example for initiating a voluntary exit on the Hoodi testnet. ``` -$ lighthouse --network holesky account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 +$ lighthouse --network hoodi account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 -Running account manager for Holesky network -validator-dir path: ~/.lighthouse/holesky/validators +Running account manager for Hoodi network +validator-dir path: ~/.lighthouse/hoodi/validators Enter the keystore password for validator in 0xabcd @@ -58,6 +58,27 @@ Please keep your validator running till exit epoch Exit epoch in approximately 1920 secs ``` +## Generate pre-signed exit message without broadcasting + +You can also generate a pre-signed exit message without broadcasting it to the network.
To do so, use the `--presign` flag: + +```bash +lighthouse account validator exit --network hoodi --keystore /path/to/keystore --presign +``` + +It will prompt for the keystore password, which, upon entering the correct password, will generate a pre-signed exit message: + +``` +Successfully pre-signed voluntary exit for validator 0x[redacted]. Not publishing. +{ + "message": { + "epoch": "12959", + "validator_index": "123456" + }, + "signature": "0x97deafb740cd56eaf55b671efb35d0ce15cd1835cbcc52e20ee9cdc11e1f4ab8a5f228c378730437eb544ae70e1987cd0d2f925aa3babe686b66df823c90ac4027ef7a06d12c56d536d9bcd3a1d15f02917b170c0aa97ab102d67602a586333f" +} +``` + ## Exit via the execution layer The voluntary exit above is via the consensus layer. With the [Pectra](https://ethereum.org/en/history/#pectra) upgrade, validators with 0x01 and 0x02 withdrawal credentials can also exit their validators via the execution layer by sending a transaction using the withdrawal address. You can use [Siren](./ui.md) or the [staking launchpad](https://launchpad.ethereum.org/en/) to send an exit transaction. @@ -97,7 +118,7 @@ There are two types of withdrawal credentials, `0x00` and `0x01`. To check which - A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. -- A varying time of "validator sweep" that can take up to _n_ days with _n_ listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set. +- A varying time of "validator sweep" that can take up to _n_ days with _n_ listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH). 
Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set.
diff --git a/common/compare_fields/Cargo.toml b/common/compare_fields/Cargo.toml index 9972ca75ca..50e7e5f21d 100644 --- a/common/compare_fields/Cargo.toml +++ b/common/compare_fields/Cargo.toml @@ -4,11 +4,11 @@ version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } +[package.metadata.cargo-udeps.ignore] +development = ["compare_fields_derive"] # used in doc-tests + [dependencies] itertools = { workspace = true } [dev-dependencies] compare_fields_derive = { workspace = true } - -[package.metadata.cargo-udeps.ignore] -development = ["compare_fields_derive"] # used in doc-tests diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 953fde1af7..767f67b853 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -6,14 +6,14 @@ edition = { workspace = true } build = "build.rs" -[build-dependencies] -hex = { workspace = true } -reqwest = { workspace = true } -serde_json = { workspace = true } -sha2 = { workspace = true } - [dependencies] ethabi = "16.0.0" ethereum_ssz = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } + +[build-dependencies] +hex = { workspace = true } +reqwest = { workspace = true } +serde_json = { workspace = true } +sha2 = { workspace = true } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 5d0ad1f45e..81666a6421 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -4,6 +4,10 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } +[features] +default = ["lighthouse"] +lighthouse = [] + [dependencies] derivative = { workspace = true } either = { workspace = true } @@ -33,7 +37,3 @@ zeroize = { workspace = true } [dev-dependencies] tokio = { workspace = true } - -[features] -default = ["lighthouse"] -lighthouse = [] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 5bfa8ddd6f..2ea8929fd9 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ 
-16,7 +16,7 @@ pub mod types; use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; -use ::types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; +use ::types::beacon_response::ExecutionOptimisticFinalizedBeaconResponse; use derivative::Derivative; use either::Either; use futures::Stream; @@ -56,7 +56,7 @@ pub enum Error { /// The `reqwest` client raised an error. HttpClient(PrettyReqwestError), /// The `reqwest_eventsource` client raised an error. - SseClient(reqwest_eventsource::Error), + SseClient(Box), /// The server returned an error message where the body was able to be parsed. ServerMessage(ErrorMessage), /// The server returned an error message with an array of errors. @@ -99,7 +99,7 @@ impl Error { match self { Error::HttpClient(error) => error.inner().status(), Error::SseClient(error) => { - if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error { + if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error.as_ref() { Some(*status) } else { None @@ -287,6 +287,54 @@ impl BeaconNodeHttpClient { } } + pub async fn get_fork_contextual( + &self, + url: U, + ctx_constructor: impl Fn(ForkName) -> Ctx, + ) -> Result>, Error> + where + U: IntoUrl, + T: ContextDeserialize<'static, Ctx>, + Meta: DeserializeOwned, + Ctx: Clone, + { + let response = self + .get_response(url, |b| b.accept(Accept::Json)) + .await + .optional()?; + + let Some(resp) = response else { + return Ok(None); + }; + + let bytes = resp.bytes().await?; + + #[derive(serde::Deserialize)] + struct Helper { + // TODO: remove this default once checkpointz follows the spec + #[serde(default = "ForkName::latest_stable")] + version: ForkName, + #[serde(flatten)] + metadata: serde_json::Value, + data: serde_json::Value, + } + + let helper: Helper = serde_json::from_slice(&bytes).map_err(Error::InvalidJson)?; + + let metadata: Meta = serde_json::from_value(helper.metadata).map_err(Error::InvalidJson)?; + + 
let ctx = ctx_constructor(helper.version); + + let data: T = ContextDeserialize::context_deserialize(helper.data, ctx) + .map_err(Error::InvalidJson)?; + + Ok(Some(ForkVersionedResponse { + version: helper.version, + metadata, + data, + })) + } + /// Perform a HTTP GET request using an 'accept' header, returning `None` on a 404 error. pub async fn get_bytes_opt_accept_header( &self, @@ -854,7 +902,7 @@ impl BeaconNodeHttpClient { &self, start_period: u64, count: u64, - ) -> Result>>>, Error> { + ) -> Result>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -869,7 +917,14 @@ impl BeaconNodeHttpClient { path.query_pairs_mut() .append_pair("count", &count.to_string()); - self.get_opt(path).await + self.get_opt(path).await.map(|opt| { + opt.map(|updates: Vec<_>| { + updates + .into_iter() + .map(BeaconResponse::ForkVersioned) + .collect() + }) + }) } /// `GET beacon/light_client/bootstrap` @@ -878,7 +933,7 @@ impl BeaconNodeHttpClient { pub async fn get_light_client_bootstrap( &self, block_root: Hash256, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -888,7 +943,9 @@ impl BeaconNodeHttpClient { .push("bootstrap") .push(&format!("{:?}", block_root)); - self.get_opt(path).await + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET beacon/light_client/optimistic_update` @@ -896,7 +953,7 @@ impl BeaconNodeHttpClient { /// Returns `Ok(None)` on a 404 error. 
pub async fn get_beacon_light_client_optimistic_update( &self, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -905,7 +962,9 @@ impl BeaconNodeHttpClient { .push("light_client") .push("optimistic_update"); - self.get_opt(path).await + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET beacon/light_client/finality_update` @@ -913,7 +972,7 @@ impl BeaconNodeHttpClient { /// Returns `Ok(None)` on a 404 error. pub async fn get_beacon_light_client_finality_update( &self, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -922,7 +981,9 @@ impl BeaconNodeHttpClient { .push("light_client") .push("finality_update"); - self.get_opt(path).await + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET beacon/headers?slot,parent_root` @@ -985,8 +1046,14 @@ impl BeaconNodeHttpClient { .push("beacon") .push("blocks"); - self.post_with_timeout(path, block_contents, self.timeouts.proposal) - .await?; + let fork_name = block_contents.signed_block().fork_name_unchecked(); + self.post_generic_with_consensus_version( + path, + block_contents, + Some(self.timeouts.proposal), + fork_name, + ) + .await?; Ok(()) } @@ -1204,16 +1271,12 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks( &self, block_id: BlockId, - ) -> Result< - Option>>, - Error, - > { + ) -> Result>>, Error> + { let path = self.get_beacon_blocks_path(block_id)?; - let Some(response) = self.get_response(path, |b| b).await.optional()? 
else { - return Ok(None); - }; - - Ok(Some(response.json().await?)) + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET v1/beacon/blob_sidecars/{block_id}` @@ -1223,8 +1286,8 @@ impl BeaconNodeHttpClient { &self, block_id: BlockId, indices: Option<&[u64]>, - ) -> Result>>, Error> - { + spec: &ChainSpec, + ) -> Result>>, Error> { let mut path = self.get_blobs_path(block_id)?; if let Some(indices) = indices { let indices_string = indices @@ -1236,11 +1299,11 @@ impl BeaconNodeHttpClient { .append_pair("indices", &indices_string); } - let Some(response) = self.get_response(path, |b| b).await.optional()? else { - return Ok(None); - }; - - Ok(Some(response.json().await?)) + self.get_fork_contextual(path, |fork| { + (fork, spec.max_blobs_per_block_by_fork(fork) as usize) + }) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET v1/beacon/blinded_blocks/{block_id}` @@ -1250,15 +1313,13 @@ impl BeaconNodeHttpClient { &self, block_id: BlockId, ) -> Result< - Option>>, + Option>>, Error, > { let path = self.get_beacon_blinded_blocks_path(block_id)?; - let Some(response) = self.get_response(path, |b| b).await.optional()? 
else { - return Ok(None); - }; - - Ok(Some(response.json().await?)) + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET v1/beacon/blocks` (LEGACY) @@ -1267,7 +1328,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_v1( &self, block_id: BlockId, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1276,7 +1337,9 @@ impl BeaconNodeHttpClient { .push("blocks") .push(&block_id.to_string()); - self.get_opt(path).await + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::Unversioned)) } /// `GET beacon/blocks` as SSZ @@ -1357,7 +1420,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_attestations_v2( &self, block_id: BlockId, - ) -> Result>>>, Error> + ) -> Result>>>, Error> { let mut path = self.eth_path(V2)?; @@ -1368,7 +1431,9 @@ impl BeaconNodeHttpClient { .push(&block_id.to_string()) .push("attestations"); - self.get_opt(path).await + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `POST v1/beacon/pool/attestations` @@ -1460,7 +1525,7 @@ impl BeaconNodeHttpClient { &self, slot: Option, committee_index: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V2)?; path.path_segments_mut() @@ -1479,7 +1544,7 @@ impl BeaconNodeHttpClient { .append_pair("committee_index", &index.to_string()); } - self.get(path).await + self.get(path).await.map(BeaconResponse::ForkVersioned) } /// `POST v1/beacon/pool/attester_slashings` @@ -1538,7 +1603,7 @@ impl BeaconNodeHttpClient { /// `GET v2/beacon/pool/attester_slashings` pub async fn get_beacon_pool_attester_slashings_v2( &self, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V2)?; path.path_segments_mut() @@ -1547,7 +1612,7 @@ impl BeaconNodeHttpClient { .push("pool") .push("attester_slashings"); - self.get(path).await + self.get(path).await.map(BeaconResponse::ForkVersioned) } /// 
`POST beacon/pool/proposer_slashings` @@ -1968,10 +2033,11 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states( &self, state_id: StateId, - ) -> Result>>, Error> - { + ) -> Result>>, Error> { let path = self.get_debug_beacon_states_path(state_id)?; - self.get_opt(path).await + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET debug/beacon/states/{state_id}` @@ -2055,7 +2121,7 @@ impl BeaconNodeHttpClient { slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blocks_modular(slot, randao_reveal, graffiti, SkipRandaoVerification::No) .await } @@ -2067,12 +2133,12 @@ impl BeaconNodeHttpClient { randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + ) -> Result>, Error> { let path = self .get_validator_blocks_path::(slot, randao_reveal, graffiti, skip_randao_verification) .await?; - self.get(path).await + self.get(path).await.map(BeaconResponse::ForkVersioned) } /// returns `GET v2/validator/blocks/{slot}` URL path @@ -2328,7 +2394,7 @@ impl BeaconNodeHttpClient { slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blinded_blocks_modular( slot, randao_reveal, @@ -2377,7 +2443,7 @@ impl BeaconNodeHttpClient { randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + ) -> Result>, Error> { let path = self .get_validator_blinded_blocks_path::( slot, @@ -2387,7 +2453,7 @@ impl BeaconNodeHttpClient { ) .await?; - self.get(path).await + self.get(path).await.map(BeaconResponse::ForkVersioned) } /// `GET v2/validator/blinded_blocks/{slot}` in ssz format @@ -2476,7 +2542,7 @@ impl BeaconNodeHttpClient { slot: Slot, attestation_data_root: Hash256, committee_index: CommitteeIndex, - ) -> 
Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V2)?; path.path_segments_mut() @@ -2494,6 +2560,7 @@ impl BeaconNodeHttpClient { self.get_opt_with_timeout(path, self.timeouts.attestation) .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET validator/sync_committee_contribution` @@ -2697,7 +2764,7 @@ impl BeaconNodeHttpClient { while let Some(event) = es.next().await { match event { Ok(Event::Open) => break, - Err(err) => return Err(Error::SseClient(err)), + Err(err) => return Err(Error::SseClient(err.into())), // This should never happen as we are guaranteed to get the // Open event before any message starts coming through. Ok(Event::Message(_)) => continue, @@ -2709,7 +2776,7 @@ impl BeaconNodeHttpClient { Ok(Event::Message(message)) => { Some(EventKind::from_sse_bytes(&message.event, &message.data)) } - Err(err) => Some(Err(Error::SseClient(err))), + Err(err) => Some(Err(Error::SseClient(err.into()))), } }))) } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 06c983b1a3..00c43e3dc3 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -10,7 +10,6 @@ use mediatype::{names, MediaType, MediaTypeList}; use multiaddr::Multiaddr; use reqwest::header::HeaderMap; use serde::{Deserialize, Deserializer, Serialize}; -use serde_json::Value; use serde_utils::quoted_u64::Quoted; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; @@ -689,7 +688,7 @@ pub struct ValidatorBalancesQuery { pub id: Option>, } -#[derive(Clone, Serialize, Deserialize)] +#[derive(Clone, Default, Serialize, Deserialize)] #[serde(transparent)] pub struct ValidatorBalancesRequestBody { pub ids: Vec, @@ -1067,51 +1066,56 @@ pub struct SseExtendedPayloadAttributesGeneric { pub type SseExtendedPayloadAttributes = SseExtendedPayloadAttributesGeneric; pub type VersionedSsePayloadAttributes = ForkVersionedResponse; -impl ForkVersionDeserialize for SsePayloadAttributes { - fn deserialize_by_fork<'de, D: 
serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { - match fork_name { - ForkName::Bellatrix => serde_json::from_value(value) - .map(Self::V1) - .map_err(serde::de::Error::custom), - ForkName::Capella => serde_json::from_value(value) - .map(Self::V2) - .map_err(serde::de::Error::custom), - ForkName::Deneb => serde_json::from_value(value) - .map(Self::V3) - .map_err(serde::de::Error::custom), - ForkName::Electra => serde_json::from_value(value) - .map(Self::V3) - .map_err(serde::de::Error::custom), - ForkName::Fulu => serde_json::from_value(value) - .map(Self::V3) - .map_err(serde::de::Error::custom), - ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( - "SsePayloadAttributes deserialization for {fork_name} not implemented" - ))), - } +impl<'de> ContextDeserialize<'de, ForkName> for SsePayloadAttributes { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + let convert_err = |e| { + serde::de::Error::custom(format!( + "SsePayloadAttributes failed to deserialize: {:?}", + e + )) + }; + Ok(match context { + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "SsePayloadAttributes failed to deserialize: unsupported fork '{}'", + context + ))) + } + ForkName::Bellatrix => { + Self::V1(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Capella => { + Self::V2(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => { + Self::V3(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
+ } + }) } } -impl ForkVersionDeserialize for SseExtendedPayloadAttributes { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { - let helper: SseExtendedPayloadAttributesGeneric = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; +impl<'de> ContextDeserialize<'de, ForkName> for SseExtendedPayloadAttributes { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + let helper = + SseExtendedPayloadAttributesGeneric::::deserialize(deserializer)?; + Ok(Self { proposal_slot: helper.proposal_slot, proposer_index: helper.proposer_index, parent_block_root: helper.parent_block_root, parent_block_number: helper.parent_block_number, parent_block_hash: helper.parent_block_hash, - payload_attributes: SsePayloadAttributes::deserialize_by_fork::( + payload_attributes: SsePayloadAttributes::context_deserialize( helper.payload_attributes, - fork_name, - )?, + context, + ) + .map_err(serde::de::Error::custom)?, }) } } @@ -1129,8 +1133,8 @@ pub enum EventKind { ChainReorg(SseChainReorg), ContributionAndProof(Box>), LateHead(SseLateHead), - LightClientFinalityUpdate(Box>), - LightClientOptimisticUpdate(Box>), + LightClientFinalityUpdate(Box>>), + LightClientOptimisticUpdate(Box>>), #[cfg(feature = "lighthouse")] BlockReward(BlockReward), PayloadAttributes(VersionedSsePayloadAttributes), @@ -1210,22 +1214,24 @@ impl EventKind { ServerError::InvalidServerSentEvent(format!("Payload Attributes: {:?}", e)) })?, )), - "light_client_finality_update" => Ok(EventKind::LightClientFinalityUpdate( - serde_json::from_str(data).map_err(|e| { + "light_client_finality_update" => Ok(EventKind::LightClientFinalityUpdate(Box::new( + BeaconResponse::ForkVersioned(serde_json::from_str(data).map_err(|e| { ServerError::InvalidServerSentEvent(format!( "Light Client Finality Update: {:?}", e )) - })?, - )), - "light_client_optimistic_update" => 
Ok(EventKind::LightClientOptimisticUpdate( - serde_json::from_str(data).map_err(|e| { - ServerError::InvalidServerSentEvent(format!( - "Light Client Optimistic Update: {:?}", - e - )) - })?, - )), + })?), + ))), + "light_client_optimistic_update" => { + Ok(EventKind::LightClientOptimisticUpdate(Box::new( + BeaconResponse::ForkVersioned(serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!( + "Light Client Optimistic Update: {:?}", + e + )) + })?), + ))) + } #[cfg(feature = "lighthouse")] "block_reward" => Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), @@ -1614,7 +1620,7 @@ mod tests { } } -#[derive(Debug, Encode, Serialize, Deserialize)] +#[derive(Debug, Encode, Serialize)] #[serde(untagged)] #[serde(bound = "E: EthSpec")] #[ssz(enum_behaviour = "transparent")] @@ -1627,7 +1633,7 @@ pub type JsonProduceBlockV3Response = ForkVersionedResponse, ProduceBlockV3Metadata>; /// A wrapper over a [`BeaconBlock`] or a [`BlockContents`]. 
-#[derive(Debug, Encode, Serialize, Deserialize)] +#[derive(Debug, Encode, Serialize)] #[serde(untagged)] #[serde(bound = "E: EthSpec")] #[ssz(enum_behaviour = "transparent")] @@ -1741,18 +1747,18 @@ impl FullBlockContents { } } -impl ForkVersionDeserialize for FullBlockContents { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { - if fork_name.deneb_enabled() { +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for FullBlockContents { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + if context.deneb_enabled() { Ok(FullBlockContents::BlockContents( - BlockContents::deserialize_by_fork::<'de, D>(value, fork_name)?, + BlockContents::context_deserialize::(deserializer, context)?, )) } else { Ok(FullBlockContents::Block( - BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, + BeaconBlock::context_deserialize::(deserializer, context)?, )) } } @@ -1819,7 +1825,7 @@ impl TryFrom<&HeaderMap> for ProduceBlockV3Metadata { } /// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBlockContents`]. 
-#[derive(Clone, Debug, Encode, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Encode, Serialize)] #[serde(untagged)] #[serde(bound = "E: EthSpec")] #[ssz(enum_behaviour = "transparent")] @@ -1828,6 +1834,26 @@ pub enum PublishBlockRequest { Block(Arc>), } +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for PublishBlockRequest { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + let value = + serde_json::Value::deserialize(deserializer).map_err(serde::de::Error::custom)?; + + SignedBlockContents::::context_deserialize(&value, context) + .map(PublishBlockRequest::BlockContents) + .or_else(|_| { + Arc::>::context_deserialize(&value, context) + .map(PublishBlockRequest::Block) + }) + .map_err(|_| { + serde::de::Error::custom("could not match any variant of PublishBlockRequest") + }) + } +} + impl PublishBlockRequest { pub fn new( block: Arc>, @@ -1904,7 +1930,7 @@ impl From> for PublishBlockRequest { } } -#[derive(Debug, Clone, Serialize, Deserialize, Encode)] +#[derive(Debug, Clone, PartialEq, Serialize, Encode)] #[serde(bound = "E: EthSpec")] pub struct SignedBlockContents { pub signed_block: Arc>, @@ -1913,7 +1939,33 @@ pub struct SignedBlockContents { pub blobs: BlobsList, } -#[derive(Debug, Clone, Serialize, Deserialize, Encode)] +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for SignedBlockContents { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(bound = "E: EthSpec")] + struct Helper { + signed_block: serde_json::Value, + kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + blobs: BlobsList, + } + let helper = Helper::::deserialize(deserializer).map_err(serde::de::Error::custom)?; + + let block = SignedBeaconBlock::context_deserialize(helper.signed_block, context) + .map_err(serde::de::Error::custom)?; + + Ok(Self { + signed_block: 
Arc::new(block), + kzg_proofs: helper.kzg_proofs, + blobs: helper.blobs, + }) + } +} + +#[derive(Debug, Clone, Serialize, Encode)] #[serde(bound = "E: EthSpec")] pub struct BlockContents { pub block: BeaconBlock, @@ -1922,11 +1974,11 @@ pub struct BlockContents { pub blobs: BlobsList, } -impl ForkVersionDeserialize for BlockContents { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for BlockContents { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { #[derive(Deserialize)] #[serde(bound = "E: EthSpec")] struct Helper { @@ -1935,10 +1987,13 @@ impl ForkVersionDeserialize for BlockContents { #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] blobs: BlobsList, } - let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + let helper = Helper::::deserialize(deserializer).map_err(serde::de::Error::custom)?; + + let block = BeaconBlock::context_deserialize(helper.block, context) + .map_err(serde::de::Error::custom)?; Ok(Self { - block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?, + block, kzg_proofs: helper.kzg_proofs, blobs: helper.blobs, }) @@ -2010,22 +2065,22 @@ impl FullPayloadContents { } } -impl ForkVersionDeserialize for FullPayloadContents { - fn deserialize_by_fork<'de, D: Deserializer<'de>>( - value: Value, - fork_name: ForkName, - ) -> Result { - if fork_name.deneb_enabled() { - ExecutionPayloadAndBlobs::deserialize_by_fork::<'de, D>(value, fork_name) +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for FullPayloadContents { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + if context.deneb_enabled() { + ExecutionPayloadAndBlobs::context_deserialize::(deserializer, context) .map(Self::PayloadAndBlobs) 
.map_err(serde::de::Error::custom) - } else if fork_name.bellatrix_enabled() { - ExecutionPayload::deserialize_by_fork::<'de, D>(value, fork_name) + } else if context.bellatrix_enabled() { + ExecutionPayload::context_deserialize::(deserializer, context) .map(Self::Payload) .map_err(serde::de::Error::custom) } else { Err(serde::de::Error::custom(format!( - "FullPayloadContents deserialization for {fork_name} not implemented" + "FullPayloadContents deserialization for {context} not implemented" ))) } } @@ -2038,23 +2093,25 @@ pub struct ExecutionPayloadAndBlobs { pub blobs_bundle: BlobsBundle, } -impl ForkVersionDeserialize for ExecutionPayloadAndBlobs { - fn deserialize_by_fork<'de, D: Deserializer<'de>>( - value: Value, - fork_name: ForkName, - ) -> Result { +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for ExecutionPayloadAndBlobs { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { #[derive(Deserialize)] #[serde(bound = "E: EthSpec")] struct Helper { execution_payload: serde_json::Value, blobs_bundle: BlobsBundle, } - let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + let helper = Helper::::deserialize(deserializer).map_err(serde::de::Error::custom)?; + Ok(Self { - execution_payload: ExecutionPayload::deserialize_by_fork::<'de, D>( + execution_payload: ExecutionPayload::context_deserialize( helper.execution_payload, - fork_name, - )?, + context, + ) + .map_err(serde::de::Error::custom)?, blobs_bundle: helper.blobs_bundle, }) } @@ -2203,6 +2260,73 @@ mod test { assert_eq!(serde_json::to_string(&y).unwrap(), pubkey_str); } + #[test] + fn test_publish_block_request_context_deserialize() { + let round_trip_test = |request: PublishBlockRequest| { + let fork_name = request.signed_block().fork_name_unchecked(); + let json_str = serde_json::to_string(&request).unwrap(); + let mut de = serde_json::Deserializer::from_str(&json_str); + let deserialized_request = + 
PublishBlockRequest::::context_deserialize(&mut de, fork_name) + .unwrap(); + assert_eq!(request, deserialized_request); + }; + + let rng = &mut XorShiftRng::from_seed([42; 16]); + for fork_name in ForkName::list_all() { + let signed_beacon_block = + map_fork_name!(fork_name, SignedBeaconBlock, <_>::random_for_test(rng)); + let request = if fork_name.deneb_enabled() { + let kzg_proofs = KzgProofs::::random_for_test(rng); + let blobs = BlobsList::::random_for_test(rng); + let block_contents = SignedBlockContents { + signed_block: Arc::new(signed_beacon_block), + kzg_proofs, + blobs, + }; + PublishBlockRequest::BlockContents(block_contents) + } else { + PublishBlockRequest::Block(Arc::new(signed_beacon_block)) + }; + round_trip_test(request); + println!("fork_name: {:?} PASSED", fork_name); + } + } + + #[test] + fn test_signed_block_contents_context_deserialize() { + let round_trip_test = |contents: SignedBlockContents| { + let fork_name = contents.signed_block.fork_name_unchecked(); + let json_str = serde_json::to_string(&contents).unwrap(); + let mut de = serde_json::Deserializer::from_str(&json_str); + let deserialized_contents = + SignedBlockContents::::context_deserialize(&mut de, fork_name) + .unwrap(); + assert_eq!(contents, deserialized_contents); + }; + + let mut fork_name = ForkName::Deneb; + let rng = &mut XorShiftRng::from_seed([42; 16]); + loop { + let signed_beacon_block = + map_fork_name!(fork_name, SignedBeaconBlock, <_>::random_for_test(rng)); + let kzg_proofs = KzgProofs::::random_for_test(rng); + let blobs = BlobsList::::random_for_test(rng); + let block_contents = SignedBlockContents { + signed_block: Arc::new(signed_beacon_block), + kzg_proofs, + blobs, + }; + round_trip_test(block_contents); + println!("fork_name: {:?} PASSED", fork_name); + if let Some(next_fork_name) = fork_name.next_fork() { + fork_name = next_fork_name; + } else { + break; + } + } + } + #[test] fn test_execution_payload_execution_payload_deserialize_by_fork() { let rng = 
&mut XorShiftRng::from_seed([42; 16]); @@ -2295,14 +2419,13 @@ mod test { fn generic_deserialize_by_fork< 'de, D: Deserializer<'de>, - O: ForkVersionDeserialize + PartialEq + Debug, + O: ContextDeserialize<'de, ForkName> + PartialEq + Debug, >( deserializer: D, original: O, fork_name: ForkName, ) { - let val = Value::deserialize(deserializer).unwrap(); - let roundtrip = O::deserialize_by_fork::<'de, D>(val, fork_name).unwrap(); + let roundtrip = O::context_deserialize::(deserializer, fork_name).unwrap(); assert_eq!(original, roundtrip); } } diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index da6c4dfd95..ec5b0cc1d7 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -6,15 +6,6 @@ edition = { workspace = true } build = "build.rs" -[build-dependencies] -eth2_config = { workspace = true } -zip = { workspace = true } - -[dev-dependencies] -ethereum_ssz = { workspace = true } -tempfile = { workspace = true } -tokio = { workspace = true } - [dependencies] bytes = { workspace = true } discv5 = { workspace = true } @@ -28,3 +19,12 @@ sha2 = { workspace = true } tracing = { workspace = true } types = { workspace = true } url = { workspace = true } + +[build-dependencies] +eth2_config = { workspace = true } +zip = { workspace = true } + +[dev-dependencies] +ethereum_ssz = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } diff --git a/common/health_metrics/Cargo.toml b/common/health_metrics/Cargo.toml index 08591471b2..20a8c6e4e4 100644 --- a/common/health_metrics/Cargo.toml +++ b/common/health_metrics/Cargo.toml @@ -8,5 +8,5 @@ eth2 = { workspace = true } metrics = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] -psutil = "3.3.0" procfs = "0.15.1" +psutil = "3.3.0" diff --git a/common/logging/src/tracing_libp2p_discv5_logging_layer.rs b/common/logging/src/tracing_libp2p_discv5_logging_layer.rs index 90033d11ad..ef472ddc52 100644 
--- a/common/logging/src/tracing_libp2p_discv5_logging_layer.rs +++ b/common/logging/src/tracing_libp2p_discv5_logging_layer.rs @@ -59,6 +59,7 @@ impl tracing_core::field::Visit for LogMessageExtractor { pub fn create_libp2p_discv5_tracing_layer( base_tracing_log_path: Option, max_log_size: u64, + file_mode: u32, ) -> Option { if let Some(mut tracing_log_path) = base_tracing_log_path { // Ensure that `tracing_log_path` only contains directories. @@ -75,12 +76,14 @@ pub fn create_libp2p_discv5_tracing_layer( let libp2p_writer = LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log")) .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) - .max_keep_files(1); + .max_keep_files(1) + .file_mode(file_mode); let discv5_writer = LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("discv5.log")) .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) - .max_keep_files(1); + .max_keep_files(1) + .file_mode(file_mode); let libp2p_writer = match libp2p_writer.build() { Ok(writer) => writer, diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 64fb7b9aad..89973493b4 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -4,23 +4,23 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } +[features] +mallinfo2 = [] +jemalloc = ["tikv-jemallocator", "tikv-jemalloc-ctl"] +jemalloc-profiling = ["tikv-jemallocator/profiling"] + [dependencies] libc = "0.2.79" metrics = { workspace = true } parking_lot = { workspace = true } tikv-jemalloc-ctl = { version = "0.6.0", optional = true, features = ["stats"] } +[target.'cfg(not(target_os = "linux"))'.dependencies] +tikv-jemallocator = { version = "0.6.0", optional = true, features = ["stats"] } + # Jemalloc's background_threads feature requires Linux (pthreads). 
[target.'cfg(target_os = "linux")'.dependencies] tikv-jemallocator = { version = "0.6.0", optional = true, features = [ "stats", "background_threads", ] } - -[target.'cfg(not(target_os = "linux"))'.dependencies] -tikv-jemallocator = { version = "0.6.0", optional = true, features = ["stats"] } - -[features] -mallinfo2 = [] -jemalloc = ["tikv-jemallocator", "tikv-jemalloc-ctl"] -jemalloc-profiling = ["tikv-jemallocator/profiling"] diff --git a/common/warp_utils/src/json.rs b/common/warp_utils/src/json.rs index 6ee5e77261..bc7d61557b 100644 --- a/common/warp_utils/src/json.rs +++ b/common/warp_utils/src/json.rs @@ -31,3 +31,27 @@ pub fn json() -> impl Filter( +) -> impl Filter + Copy { + warp::header::optional::(CONTENT_TYPE_HEADER) + .and(warp::body::bytes()) + .and_then(|header: Option, bytes: Bytes| async move { + if let Some(header) = header { + if header == SSZ_CONTENT_TYPE_HEADER { + return Err(reject::unsupported_media_type( + "The request's content-type is not supported".to_string(), + )); + } + } + + // Handle the case when the HTTP request has no body, i.e., without the -d header + if bytes.is_empty() { + return Ok(T::default()); + } + + Json::decode(bytes) + .map_err(|err| reject::custom_deserialize_error(format!("{:?}", err))) + }) +} diff --git a/consensus/context_deserialize/Cargo.toml b/consensus/context_deserialize/Cargo.toml new file mode 100644 index 0000000000..30dae76136 --- /dev/null +++ b/consensus/context_deserialize/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "context_deserialize" +version = "0.1.0" +edition = "2021" + +[dependencies] +milhouse = { workspace = true } +serde = { workspace = true } +ssz_types = { workspace = true } diff --git a/consensus/context_deserialize/src/impls.rs b/consensus/context_deserialize/src/impls.rs new file mode 100644 index 0000000000..803619365f --- /dev/null +++ b/consensus/context_deserialize/src/impls.rs @@ -0,0 +1,103 @@ +use crate::ContextDeserialize; +use serde::de::{Deserialize, DeserializeSeed, 
Deserializer, SeqAccess, Visitor}; +use std::marker::PhantomData; +use std::sync::Arc; + +impl<'de, C, T> ContextDeserialize<'de, T> for Arc +where + C: ContextDeserialize<'de, T>, +{ + fn context_deserialize(deserializer: D, context: T) -> Result + where + D: Deserializer<'de>, + { + Ok(Arc::new(C::context_deserialize(deserializer, context)?)) + } +} + +impl<'de, T, C> ContextDeserialize<'de, C> for Vec +where + T: ContextDeserialize<'de, C>, + C: Clone, +{ + fn context_deserialize(deserializer: D, context: C) -> Result + where + D: Deserializer<'de>, + { + // Our Visitor, which owns one copy of the context T + struct ContextVisitor { + context: T, + _marker: PhantomData, + } + + impl<'de, C, T> Visitor<'de> for ContextVisitor + where + C: ContextDeserialize<'de, T>, + T: Clone, + { + type Value = Vec; + + fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_str("a sequence of context‐deserialized elements") + } + + fn visit_seq(self, mut seq: A) -> Result, A::Error> + where + A: SeqAccess<'de>, + { + let mut out = Vec::with_capacity(seq.size_hint().unwrap_or(0)); + // for each element, we clone the context and hand it to the seed + while let Some(elem) = seq.next_element_seed(ContextSeed { + context: self.context.clone(), + _marker: PhantomData, + })? { + out.push(elem); + } + Ok(out) + } + } + + // A little seed that hands the deserializer + context into C::context_deserialize + struct ContextSeed { + context: C, + _marker: PhantomData, + } + + impl<'de, T, C> DeserializeSeed<'de> for ContextSeed + where + T: ContextDeserialize<'de, C>, + C: Clone, + { + type Value = T; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + T::context_deserialize(deserializer, self.context) + } + } + + deserializer.deserialize_seq(ContextVisitor { + context, + _marker: PhantomData, + }) + } +} + +macro_rules! trivial_deserialize { + ($($t:ty),* $(,)?) 
=> { + $( + impl<'de, T> ContextDeserialize<'de, T> for $t { + fn context_deserialize(deserializer: D, _context: T) -> Result + where + D: Deserializer<'de>, + { + <$t>::deserialize(deserializer) + } + } + )* + }; +} + +trivial_deserialize!(bool, u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, f32, f64); diff --git a/consensus/context_deserialize/src/lib.rs b/consensus/context_deserialize/src/lib.rs new file mode 100644 index 0000000000..9de819247b --- /dev/null +++ b/consensus/context_deserialize/src/lib.rs @@ -0,0 +1,13 @@ +pub mod impls; +pub mod milhouse; +pub mod ssz_impls; + +extern crate serde; +use serde::de::Deserializer; + +/// General-purpose deserialization trait that accepts extra context `C`. +pub trait ContextDeserialize<'de, C>: Sized { + fn context_deserialize(deserializer: D, context: C) -> Result + where + D: Deserializer<'de>; +} diff --git a/consensus/context_deserialize/src/milhouse.rs b/consensus/context_deserialize/src/milhouse.rs new file mode 100644 index 0000000000..3b86f067a3 --- /dev/null +++ b/consensus/context_deserialize/src/milhouse.rs @@ -0,0 +1,45 @@ +use crate::ContextDeserialize; +use milhouse::{List, Value, Vector}; +use serde::de::Deserializer; +use ssz_types::typenum::Unsigned; + +impl<'de, C, T, N> ContextDeserialize<'de, C> for List +where + T: ContextDeserialize<'de, C> + Value, + N: Unsigned, + C: Clone, +{ + fn context_deserialize(deserializer: D, context: C) -> Result + where + D: Deserializer<'de>, + { + // First deserialize as a Vec. + // This is not the most efficient implementation as it allocates a temporary Vec. In future + // we could write a more performant implementation using `List::builder()`. + let vec = Vec::::context_deserialize(deserializer, context)?; + + // Then convert to List, which will check the length. 
+ List::new(vec) + .map_err(|e| serde::de::Error::custom(format!("Failed to create List: {:?}", e))) + } +} + +impl<'de, C, T, N> ContextDeserialize<'de, C> for Vector +where + T: ContextDeserialize<'de, C> + Value, + N: Unsigned, + C: Clone, +{ + fn context_deserialize(deserializer: D, context: C) -> Result + where + D: Deserializer<'de>, + { + // First deserialize as a List + let list = List::::context_deserialize(deserializer, context)?; + + // Then convert to Vector, which will check the length + Vector::try_from(list).map_err(|e| { + serde::de::Error::custom(format!("Failed to convert List to Vector: {:?}", e)) + }) + } +} diff --git a/consensus/context_deserialize/src/ssz_impls.rs b/consensus/context_deserialize/src/ssz_impls.rs new file mode 100644 index 0000000000..e989d67b29 --- /dev/null +++ b/consensus/context_deserialize/src/ssz_impls.rs @@ -0,0 +1,48 @@ +use crate::serde::de::Error; +use crate::ContextDeserialize; +use serde::de::Deserializer; +use serde::Deserialize; +use ssz_types::length::{Fixed, Variable}; +use ssz_types::typenum::Unsigned; +use ssz_types::{Bitfield, FixedVector}; + +impl<'de, C, T, N> ContextDeserialize<'de, C> for FixedVector +where + T: ContextDeserialize<'de, C>, + N: Unsigned, + C: Clone, +{ + fn context_deserialize(deserializer: D, context: C) -> Result + where + D: Deserializer<'de>, + { + let vec = Vec::::context_deserialize(deserializer, context)?; + FixedVector::new(vec).map_err(|e| D::Error::custom(format!("{:?}", e))) + } +} + +impl<'de, C, N> ContextDeserialize<'de, C> for Bitfield> +where + N: Unsigned + Clone, +{ + fn context_deserialize(deserializer: D, _context: C) -> Result + where + D: Deserializer<'de>, + { + Bitfield::>::deserialize(deserializer) + .map_err(|e| D::Error::custom(format!("{:?}", e))) + } +} + +impl<'de, C, N> ContextDeserialize<'de, C> for Bitfield> +where + N: Unsigned + Clone, +{ + fn context_deserialize(deserializer: D, _context: C) -> Result + where + D: Deserializer<'de>, + { + 
Bitfield::>::deserialize(deserializer) + .map_err(|e| D::Error::custom(format!("{:?}", e))) + } +} diff --git a/consensus/context_deserialize_derive/Cargo.toml b/consensus/context_deserialize_derive/Cargo.toml new file mode 100644 index 0000000000..eedae30cdf --- /dev/null +++ b/consensus/context_deserialize_derive/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "context_deserialize_derive" +version = "0.1.0" +edition = "2021" + +[lib] +proc-macro = true + +[dependencies] +quote = { workspace = true } +syn = { workspace = true } + +[dev-dependencies] +context_deserialize = { path = "../context_deserialize" } +serde = { workspace = true } +serde_json = "1.0" diff --git a/consensus/context_deserialize_derive/src/lib.rs b/consensus/context_deserialize_derive/src/lib.rs new file mode 100644 index 0000000000..0b73a43b0a --- /dev/null +++ b/consensus/context_deserialize_derive/src/lib.rs @@ -0,0 +1,118 @@ +extern crate proc_macro; +extern crate quote; +extern crate syn; + +use proc_macro::TokenStream; +use quote::quote; +use syn::{ + parse_macro_input, AttributeArgs, DeriveInput, GenericParam, LifetimeDef, Meta, NestedMeta, + WhereClause, +}; + +#[proc_macro_attribute] +pub fn context_deserialize(attr: TokenStream, item: TokenStream) -> TokenStream { + let args = parse_macro_input!(attr as AttributeArgs); + let input = parse_macro_input!(item as DeriveInput); + let ident = &input.ident; + + let mut ctx_types = Vec::new(); + let mut explicit_where: Option = None; + + for meta in args { + match meta { + NestedMeta::Meta(Meta::Path(p)) => { + ctx_types.push(p); + } + NestedMeta::Meta(Meta::NameValue(nv)) if nv.path.is_ident("bound") => { + if let syn::Lit::Str(lit_str) = &nv.lit { + let where_string = format!("where {}", lit_str.value()); + match syn::parse_str::(&where_string) { + Ok(where_clause) => { + explicit_where = Some(where_clause); + } + Err(err) => { + return syn::Error::new_spanned( + lit_str, + format!("Invalid where clause '{}': {}", lit_str.value(), err), + ) 
+ .to_compile_error() + .into(); + } + } + } else { + return syn::Error::new_spanned( + &nv, + "Expected a string literal for `bound` value", + ) + .to_compile_error() + .into(); + } + } + _ => { + return syn::Error::new_spanned( + &meta, + "Expected paths or `bound = \"...\"` in #[context_deserialize(...)]", + ) + .to_compile_error() + .into(); + } + } + } + + if ctx_types.is_empty() { + return quote! { + compile_error!("Usage: #[context_deserialize(Type1, Type2, ..., bound = \"...\")]"); + } + .into(); + } + + let original_generics = input.generics.clone(); + + // Clone and clean generics for impl use (remove default params) + let mut impl_generics = input.generics.clone(); + for param in impl_generics.params.iter_mut() { + if let GenericParam::Type(ty) = param { + ty.eq_token = None; + ty.default = None; + } + } + + // Ensure 'de lifetime exists in impl generics + let has_de = impl_generics + .lifetimes() + .any(|LifetimeDef { lifetime, .. }| lifetime.ident == "de"); + + if !has_de { + impl_generics.params.insert(0, syn::parse_quote! { 'de }); + } + + let (_, ty_generics, _) = original_generics.split_for_impl(); + let (impl_gens, _, _) = impl_generics.split_for_impl(); + + // Generate: no `'de` applied to the type name + let mut impls = quote! {}; + for ctx in ctx_types { + impls.extend(quote! { + impl #impl_gens context_deserialize::ContextDeserialize<'de, #ctx> + for #ident #ty_generics + #explicit_where + { + fn context_deserialize( + deserializer: D, + _context: #ctx, + ) -> Result + where + D: serde::de::Deserializer<'de>, + { + ::deserialize(deserializer) + } + } + }); + } + + quote! 
{ + #input + #impls + } + .into() +} diff --git a/consensus/context_deserialize_derive/tests/context_deserialize_derive.rs b/consensus/context_deserialize_derive/tests/context_deserialize_derive.rs new file mode 100644 index 0000000000..d6883400e0 --- /dev/null +++ b/consensus/context_deserialize_derive/tests/context_deserialize_derive.rs @@ -0,0 +1,94 @@ +use context_deserialize::ContextDeserialize; +use context_deserialize_derive::context_deserialize; +use serde::{Deserialize, Serialize}; + +#[test] +fn test_context_deserialize_derive() { + type TestContext = (); + + #[context_deserialize(TestContext)] + #[derive(Debug, PartialEq, Serialize, Deserialize)] + struct Test { + field: String, + } + + let test = Test { + field: "test".to_string(), + }; + let serialized = serde_json::to_string(&test).unwrap(); + let deserialized = + Test::context_deserialize(&mut serde_json::Deserializer::from_str(&serialized), ()) + .unwrap(); + assert_eq!(test, deserialized); +} + +#[test] +fn test_context_deserialize_derive_multiple_types() { + #[allow(dead_code)] + struct TestContext1(u64); + #[allow(dead_code)] + struct TestContext2(String); + + // This will derive: + // - ContextDeserialize for Test + // - ContextDeserialize for Test + // by just leveraging the Deserialize impl + #[context_deserialize(TestContext1, TestContext2)] + #[derive(Debug, PartialEq, Serialize, Deserialize)] + struct Test { + field: String, + } + + let test = Test { + field: "test".to_string(), + }; + let serialized = serde_json::to_string(&test).unwrap(); + let deserialized = Test::context_deserialize( + &mut serde_json::Deserializer::from_str(&serialized), + TestContext1(1), + ) + .unwrap(); + assert_eq!(test, deserialized); + + let deserialized = Test::context_deserialize( + &mut serde_json::Deserializer::from_str(&serialized), + TestContext2("2".to_string()), + ) + .unwrap(); + + assert_eq!(test, deserialized); +} + +#[test] +fn test_context_deserialize_derive_bound() { + use std::fmt::Debug; + + 
struct TestContext; + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + struct Inner { + value: u64, + } + + #[context_deserialize( + TestContext, + bound = "T: Serialize + for<'a> Deserialize<'a> + Debug + PartialEq" + )] + #[derive(Debug, PartialEq, Serialize, Deserialize)] + struct Wrapper { + inner: T, + } + + let val = Wrapper { + inner: Inner { value: 42 }, + }; + + let serialized = serde_json::to_string(&val).unwrap(); + let deserialized = Wrapper::::context_deserialize( + &mut serde_json::Deserializer::from_str(&serialized), + TestContext, + ) + .unwrap(); + + assert_eq!(val, deserialized); +} diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 2f721d917b..d750c05406 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -4,6 +4,9 @@ version = "0.2.0" authors = ["Michael Sproul "] edition = { workspace = true } +[features] +arbitrary = ["alloy-primitives/arbitrary"] + [dependencies] alloy-primitives = { workspace = true } ethereum_hashing = { workspace = true } @@ -13,6 +16,3 @@ safe_arith = { workspace = true } [dev-dependencies] quickcheck = { workspace = true } quickcheck_macros = { workspace = true } - -[features] -arbitrary = ["alloy-primitives/arbitrary"] diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 502ffe3cf6..7ada4488f2 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -4,10 +4,18 @@ version = "0.2.0" authors = ["Paul Hauner ", "Michael Sproul "] edition = { workspace = true } -[dev-dependencies] -beacon_chain = { workspace = true } -env_logger = { workspace = true } -tokio = { workspace = true } +[features] +default = ["legacy-arith"] +fake_crypto = ["bls/fake_crypto"] +legacy-arith = ["types/legacy-arith"] +arbitrary-fuzz = [ + "types/arbitrary-fuzz", + "merkle_proof/arbitrary", + "ethereum_ssz/arbitrary", + "ssz_types/arbitrary", + "tree_hash/arbitrary", +] +portable = 
["bls/supranational-portable"] [dependencies] arbitrary = { workspace = true } @@ -30,15 +38,7 @@ test_random_derive = { path = "../../common/test_random_derive" } tree_hash = { workspace = true } types = { workspace = true } -[features] -default = ["legacy-arith"] -fake_crypto = ["bls/fake_crypto"] -legacy-arith = ["types/legacy-arith"] -arbitrary-fuzz = [ - "types/arbitrary-fuzz", - "merkle_proof/arbitrary", - "ethereum_ssz/arbitrary", - "ssz_types/arbitrary", - "tree_hash/arbitrary", -] -portable = ["bls/supranational-portable"] +[dev-dependencies] +beacon_chain = { workspace = true } +env_logger = { workspace = true } +tokio = { workspace = true } diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index 5fcd147b2e..dc4dbe7cbc 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -84,7 +84,7 @@ pub fn process_epoch( Ok(EpochProcessingSummary::Altair { progressive_balances: current_epoch_progressive_balances, current_epoch_total_active_balance, - participation: participation_summary, + participation: participation_summary.into(), sync_committee, }) } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 5508b80807..b2228a5a1d 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -17,7 +17,7 @@ pub enum EpochProcessingSummary { Altair { progressive_balances: ProgressiveBalancesCache, current_epoch_total_active_balance: u64, - participation: ParticipationEpochSummary, + participation: Box>, sync_committee: Arc>, }, } diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index 
dac83e7553..b6fdc1a728 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -4,17 +4,17 @@ version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } -[[bench]] -name = "benches" -harness = false - -[dev-dependencies] -criterion = { workspace = true } +[features] +arbitrary = ["alloy-primitives/arbitrary"] [dependencies] alloy-primitives = { workspace = true } ethereum_hashing = { workspace = true } fixed_bytes = { workspace = true } -[features] -arbitrary = ["alloy-primitives/arbitrary"] +[dev-dependencies] +criterion = { workspace = true } + +[[bench]] +name = "benches" +harness = false diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 013230f158..ec6835defc 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -4,9 +4,15 @@ version = "0.2.1" authors = ["Paul Hauner ", "Age Manning "] edition = { workspace = true } -[[bench]] -name = "benches" -harness = false +[features] +default = ["sqlite", "legacy-arith"] +# Allow saturating arithmetic on slots and epochs. Enabled by default, but deprecated. +legacy-arith = [] +sqlite = ["dep:rusqlite"] +# The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. +# For simplicity `Arbitrary` is now derived regardless of the feature's presence. 
+arbitrary-fuzz = [] +portable = ["bls/supranational-portable"] [dependencies] alloy-primitives = { workspace = true } @@ -17,6 +23,8 @@ arbitrary = { workspace = true, features = ["derive"] } bls = { workspace = true, features = ["arbitrary"] } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } +context_deserialize = { workspace = true } +context_deserialize_derive = { workspace = true } derivative = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum_hashing = { workspace = true } @@ -60,12 +68,6 @@ paste = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } -[features] -default = ["sqlite", "legacy-arith"] -# Allow saturating arithmetic on slots and epochs. Enabled by default, but deprecated. -legacy-arith = [] -sqlite = ["dep:rusqlite"] -# The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. -# For simplicity `Arbitrary` is now derived regardless of the feature's presence. 
-arbitrary-fuzz = [] -portable = ["bls/supranational-portable"] +[[bench]] +name = "benches" +harness = false diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 6edd8d3892..a280afeaae 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -1,8 +1,9 @@ use super::{AttestationBase, AttestationElectra, AttestationRef}; use super::{ - ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, SelectionProof, Signature, - SignedRoot, + ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, SecretKey, SelectionProof, + Signature, SignedRoot, }; +use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::Attestation; use serde::{Deserialize, Serialize}; @@ -26,6 +27,7 @@ use tree_hash_derive::TreeHash; TestRandom, TreeHash, ), + context_deserialize(ForkName), serde(bound = "E: EthSpec"), arbitrary(bound = "E: EthSpec"), ), diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index e2973132b0..286e4622f8 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -1,8 +1,9 @@ +use crate::context_deserialize; use crate::slot_data::SlotData; use crate::{test_utils::TestRandom, Hash256, Slot}; -use crate::{Checkpoint, ForkVersionDeserialize}; +use crate::{Checkpoint, ContextDeserialize, ForkName}; use derivative::Derivative; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::BitVector; use std::collections::HashSet; @@ -47,6 +48,7 @@ impl From for Error { arbitrary::Arbitrary, TreeHash, ), + context_deserialize(ForkName), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec"), @@ -532,45 +534,44 @@ impl<'a, E: EthSpec> From> for AttestationRef<'a, E> } } -impl ForkVersionDeserialize for Attestation { - fn 
deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::Value, - fork_name: crate::ForkName, - ) -> Result { - if fork_name.electra_enabled() { - let attestation: AttestationElectra = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; - Ok(Attestation::Electra(attestation)) +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for Attestation { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + if context.electra_enabled() { + AttestationElectra::::deserialize(deserializer) + .map_err(serde::de::Error::custom) + .map(Attestation::Electra) } else { - let attestation: AttestationBase = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; - Ok(Attestation::Base(attestation)) + AttestationBase::::deserialize(deserializer) + .map_err(serde::de::Error::custom) + .map(Attestation::Base) } } } -impl ForkVersionDeserialize for Vec> { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::Value, - fork_name: crate::ForkName, - ) -> Result { - if fork_name.electra_enabled() { - let attestations: Vec> = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; - Ok(attestations - .into_iter() - .map(Attestation::Electra) - .collect::>()) +/* +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for Vec> { + fn context_deserialize( + deserializer: D, + context: ForkName, + ) -> Result + where + D: Deserializer<'de>, + { + if context.electra_enabled() { + >>::deserialize(deserializer) + .map_err(serde::de::Error::custom) + .map(|vec| vec.into_iter().map(Attestation::Electra).collect::>()) } else { - let attestations: Vec> = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; - Ok(attestations - .into_iter() - .map(Attestation::Base) - .collect::>()) + >>::deserialize(deserializer) + .map_err(serde::de::Error::custom) + .map(|vec| vec.into_iter().map(Attestation::Base).collect::>()) } } } +*/ #[derive( Debug, @@ 
-585,6 +586,7 @@ impl ForkVersionDeserialize for Vec> { TreeHash, PartialEq, )] +#[context_deserialize(ForkName)] pub struct SingleAttestation { #[serde(with = "serde_utils::quoted_u64")] pub committee_index: u64, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 7578981f51..d0d4dcc553 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -1,12 +1,11 @@ -use crate::test_utils::TestRandom; -use crate::{Checkpoint, Hash256, SignedRoot, Slot}; - use crate::slot_data::SlotData; +use crate::test_utils::TestRandom; +use crate::{Checkpoint, ForkName, Hash256, SignedRoot, Slot}; +use context_deserialize_derive::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; - /// The data upon which an attestation is based. /// /// Spec v0.12.1 @@ -25,6 +24,7 @@ use tree_hash_derive::TreeHash; TestRandom, Default, )] +#[context_deserialize(ForkName)] pub struct AttestationData { pub slot: Slot, #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index f6aa654d44..8fb5862f21 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -1,10 +1,12 @@ +use crate::context_deserialize; use crate::indexed_attestation::{ IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, }; use crate::{test_utils::TestRandom, EthSpec}; +use crate::{ContextDeserialize, ForkName}; use derivative::Derivative; use rand::{Rng, RngCore}; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; @@ -25,6 +27,7 @@ use tree_hash_derive::TreeHash; TestRandom, arbitrary::Arbitrary ), + context_deserialize(ForkName), 
derivative(PartialEq, Eq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec"), arbitrary(bound = "E: EthSpec") @@ -171,25 +174,27 @@ impl TestRandom for AttesterSlashing { } } -impl crate::ForkVersionDeserialize for Vec> { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::Value, - fork_name: crate::ForkName, - ) -> Result { - if fork_name.electra_enabled() { - let slashings: Vec> = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; - Ok(slashings - .into_iter() - .map(AttesterSlashing::Electra) - .collect::>()) +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for Vec> { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + if context.electra_enabled() { + >>::deserialize(deserializer) + .map_err(serde::de::Error::custom) + .map(|vec| { + vec.into_iter() + .map(AttesterSlashing::Electra) + .collect::>() + }) } else { - let slashings: Vec> = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; - Ok(slashings - .into_iter() - .map(AttesterSlashing::Base) - .collect::>()) + >>::deserialize(deserializer) + .map_err(serde::de::Error::custom) + .map(|vec| { + vec.into_iter() + .map(AttesterSlashing::Base) + .collect::>() + }) } } } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 6ea897cf1a..385cd0fcf5 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -2,7 +2,7 @@ use crate::attestation::AttestationBase; use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; use std::fmt; @@ -765,23 +765,21 @@ impl From>> } } -impl> ForkVersionDeserialize +impl<'de, E: EthSpec, Payload: AbstractExecPayload> ContextDeserialize<'de, ForkName> for BeaconBlock { - fn deserialize_by_fork<'de, D: 
serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { Ok(map_fork_name!( - fork_name, + context, Self, - serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( - "BeaconBlock failed to deserialize: {:?}", - e - )))? + serde::Deserialize::deserialize(deserializer)? )) } } + pub enum BlockImportSource { Gossip, Lookup, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 10c1a11ede..4440c5cf25 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -3,7 +3,7 @@ use crate::*; use derivative::Derivative; use merkle_proof::{MerkleTree, MerkleTreeError}; use metastruct::metastruct; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use superstruct::superstruct; @@ -48,6 +48,7 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; deny_unknown_fields ), arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), + context_deserialize(ForkName), ), specific_variant_attributes( Base(metastruct(mappings(beacon_block_body_base_fields(groups(fields))))), @@ -61,10 +62,11 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, Derivative, arbitrary::Arbitrary)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative, TreeHash, arbitrary::Arbitrary)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] #[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] +#[tree_hash(enum_behaviour = "transparent")] #[arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload")] pub struct BeaconBlockBody = 
FullPayload> { pub randao_reveal: Signature, @@ -980,6 +982,21 @@ impl From>> } } +impl<'de, E: EthSpec, Payload: AbstractExecPayload> ContextDeserialize<'de, ForkName> + for BeaconBlockBody +{ + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + Ok(map_fork_name!( + context, + Self, + serde::Deserialize::deserialize(deserializer)? + )) + } +} + /// Util method helpful for logging. pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index b382359313..8416f975db 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; +use context_deserialize_derive::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -24,6 +25,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct BeaconBlockHeader { pub slot: Slot, #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/beacon_response.rs b/consensus/types/src/beacon_response.rs new file mode 100644 index 0000000000..2e45854364 --- /dev/null +++ b/consensus/types/src/beacon_response.rs @@ -0,0 +1,239 @@ +use crate::{ContextDeserialize, ForkName}; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::value::Value; + +pub trait ForkVersionDecode: Sized { + /// SSZ decode with explicit fork variant. + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; +} + +/// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than +/// version. 
If you *do* care about adding other fields you can mix in any type that implements +/// `Deserialize`. +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ForkVersionedResponse { + pub version: ForkName, + #[serde(flatten)] + pub metadata: M, + pub data: T, +} + +// Used for responses to V1 endpoints that don't have a version field. +/// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than +/// version. If you *do* care about adding other fields you can mix in any type that implements +/// `Deserialize`. +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct UnversionedResponse { + pub metadata: M, + pub data: T, +} + +#[derive(Debug, PartialEq, Clone, Serialize)] +#[serde(untagged)] +pub enum BeaconResponse { + ForkVersioned(ForkVersionedResponse), + Unversioned(UnversionedResponse), +} + +impl BeaconResponse { + pub fn version(&self) -> Option { + match self { + BeaconResponse::ForkVersioned(response) => Some(response.version), + BeaconResponse::Unversioned(_) => None, + } + } + + pub fn data(&self) -> &T { + match self { + BeaconResponse::ForkVersioned(response) => &response.data, + BeaconResponse::Unversioned(response) => &response.data, + } + } + + pub fn metadata(&self) -> &M { + match self { + BeaconResponse::ForkVersioned(response) => &response.metadata, + BeaconResponse::Unversioned(response) => &response.metadata, + } + } +} + +/// Metadata type similar to unit (i.e. `()`) but deserializes from a map (`serde_json::Value`). +/// +/// Unfortunately the braces are semantically significant, i.e. `struct EmptyMetadata;` does not +/// work. +#[derive(Debug, PartialEq, Clone, Default, Deserialize, Serialize)] +pub struct EmptyMetadata {} + +/// Fork versioned response with extra information about finalization & optimistic execution. 
+pub type ExecutionOptimisticFinalizedBeaconResponse = + BeaconResponse; + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct ExecutionOptimisticFinalizedMetadata { + pub execution_optimistic: Option, + pub finalized: Option, +} + +impl<'de, T, M> Deserialize<'de> for ForkVersionedResponse +where + T: ContextDeserialize<'de, ForkName>, + M: DeserializeOwned, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + version: ForkName, + #[serde(flatten)] + metadata: Value, + data: Value, + } + + let helper = Helper::deserialize(deserializer)?; + + // Deserialize metadata + let metadata = serde_json::from_value(helper.metadata).map_err(serde::de::Error::custom)?; + + // Deserialize `data` using ContextDeserialize + let data = T::context_deserialize(helper.data, helper.version) + .map_err(serde::de::Error::custom)?; + + Ok(ForkVersionedResponse { + version: helper.version, + metadata, + data, + }) + } +} + +impl<'de, T, M> Deserialize<'de> for UnversionedResponse +where + T: DeserializeOwned, + M: DeserializeOwned, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + #[serde(flatten)] + metadata: M, + data: T, + } + + let helper = Helper::deserialize(deserializer)?; + + Ok(UnversionedResponse { + metadata: helper.metadata, + data: helper.data, + }) + } +} + +impl BeaconResponse { + pub fn map_data(self, f: impl FnOnce(T) -> U) -> BeaconResponse { + match self { + BeaconResponse::ForkVersioned(response) => { + BeaconResponse::ForkVersioned(response.map_data(f)) + } + BeaconResponse::Unversioned(response) => { + BeaconResponse::Unversioned(response.map_data(f)) + } + } + } + + pub fn into_data(self) -> T { + match self { + BeaconResponse::ForkVersioned(response) => response.data, + BeaconResponse::Unversioned(response) => response.data, + } + } +} + +impl UnversionedResponse { + pub fn map_data(self, f: 
impl FnOnce(T) -> U) -> UnversionedResponse { + let UnversionedResponse { metadata, data } = self; + UnversionedResponse { + metadata, + data: f(data), + } + } +} + +impl ForkVersionedResponse { + /// Apply a function to the inner `data`, potentially changing its type. + pub fn map_data(self, f: impl FnOnce(T) -> U) -> ForkVersionedResponse { + let ForkVersionedResponse { + version, + metadata, + data, + } = self; + ForkVersionedResponse { + version, + metadata, + data: f(data), + } + } +} + +impl From> for BeaconResponse { + fn from(response: ForkVersionedResponse) -> Self { + BeaconResponse::ForkVersioned(response) + } +} + +impl From> for BeaconResponse { + fn from(response: UnversionedResponse) -> Self { + BeaconResponse::Unversioned(response) + } +} + +#[cfg(test)] +mod fork_version_response_tests { + use crate::{ + ExecutionPayload, ExecutionPayloadBellatrix, ForkName, ForkVersionedResponse, + MainnetEthSpec, + }; + use serde_json::json; + + #[test] + fn fork_versioned_response_deserialize_correct_fork() { + type E = MainnetEthSpec; + + let response_json = + serde_json::to_string(&json!(ForkVersionedResponse::> { + version: ForkName::Bellatrix, + metadata: Default::default(), + data: ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix::default()), + })) + .unwrap(); + + let result: Result>, _> = + serde_json::from_str(&response_json); + + assert!(result.is_ok()); + } + + #[test] + fn fork_versioned_response_deserialize_incorrect_fork() { + type E = MainnetEthSpec; + + let response_json = + serde_json::to_string(&json!(ForkVersionedResponse::> { + version: ForkName::Capella, + metadata: Default::default(), + data: ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix::default()), + })) + .unwrap(); + + let result: Result>, _> = + serde_json::from_str(&response_json); + + assert!(result.is_err()); + } +} diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 4aed79898d..ce41eddc17 100644 --- 
a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1,6 +1,7 @@ use self::committee_cache::get_active_validator_indices; use crate::historical_summary::HistoricalSummary; use crate::test_utils::TestRandom; +use crate::ContextDeserialize; use crate::FixedBytesExtended; use crate::*; use compare_fields::CompareFields; @@ -11,7 +12,7 @@ use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use metastruct::{metastruct, NumFields}; pub use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::hash::Hash; @@ -2748,18 +2749,15 @@ impl CompareFields for BeaconState { } } -impl ForkVersionDeserialize for BeaconState { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for BeaconState { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { Ok(map_fork_name!( - fork_name, + context, Self, - serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( - "BeaconState failed to deserialize: {:?}", - e - )))? + serde::Deserialize::deserialize(deserializer)? 
)) } } diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index ff4555747c..f7a5725c5a 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,9 +1,10 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::{ beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, AbstractExecPayload, BeaconBlockHeader, - BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, FixedVector, ForkName, - ForkVersionDeserialize, Hash256, KzgProofs, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, VariableList, + BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, FixedVector, ForkName, Hash256, KzgProofs, + RuntimeFixedVector, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + VariableList, }; use bls::Signature; use derivative::Derivative; @@ -25,6 +26,7 @@ use tree_hash_derive::TreeHash; #[derive( Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash, )] +#[context_deserialize(ForkName)] pub struct BlobIdentifier { pub block_root: Hash256, pub index: u64, @@ -54,6 +56,7 @@ impl Ord for BlobIdentifier { Derivative, arbitrary::Arbitrary, )] +#[context_deserialize(ForkName)] #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] #[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] @@ -296,12 +299,3 @@ pub type BlobSidecarList = RuntimeVariableList>>; /// Alias for a non length-constrained list of `BlobSidecar`s. 
pub type FixedBlobSidecarList = RuntimeFixedVector>>>; pub type BlobsList = VariableList, ::MaxBlobCommitmentsPerBlock>; - -impl ForkVersionDeserialize for BlobSidecarList { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - _: ForkName, - ) -> Result { - serde_json::from_value::>(value).map_err(serde::de::Error::custom) - } -} diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index 07d71b360f..b333862220 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -19,6 +19,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct BlsToExecutionChange { #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 5c146c4154..f8d61a0da5 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,9 +1,10 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ - test_utils::TestRandom, ChainSpec, EthSpec, ExecutionPayloadHeaderBellatrix, - ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, - ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, - ExecutionRequests, ForkName, ForkVersionDecode, ForkVersionDeserialize, SignedRoot, Uint256, + test_utils::TestRandom, ChainSpec, ContextDeserialize, EthSpec, + ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderRef, + ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, SignedRoot, + Uint256, }; use bls::PublicKeyBytes; use bls::Signature; @@ -127,46 +128,58 @@ impl ForkVersionDecode for SignedBuilderBid { } } -impl ForkVersionDeserialize for BuilderBid { - fn 
deserialize_by_fork<'de, D: Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for BuilderBid { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { let convert_err = |e| serde::de::Error::custom(format!("BuilderBid failed to deserialize: {:?}", e)); - - Ok(match fork_name { + Ok(match context { ForkName::Bellatrix => { - Self::Bellatrix(serde_json::from_value(value).map_err(convert_err)?) + Self::Bellatrix(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Capella => { + Self::Capella(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Deneb => { + Self::Deneb(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Electra => { + Self::Electra(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Fulu => { + Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
} - ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), - ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), - ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), - ForkName::Fulu => Self::Fulu(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "BuilderBid failed to deserialize: unsupported fork '{}'", - fork_name + context ))); } }) } } -impl ForkVersionDeserialize for SignedBuilderBid { - fn deserialize_by_fork<'de, D: Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for SignedBuilderBid { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { #[derive(Deserialize)] struct Helper { - pub message: serde_json::Value, - pub signature: Signature, + message: serde_json::Value, + signature: Signature, } - let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; - Ok(Self { - message: BuilderBid::deserialize_by_fork::<'de, D>(helper.message, fork_name)?, + let helper = Helper::deserialize(deserializer)?; + + // Deserialize `data` using ContextDeserialize + let message = BuilderBid::::context_deserialize(helper.message, context) + .map_err(serde::de::Error::custom)?; + + Ok(SignedBuilderBid { + message, signature: helper.signature, }) } diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/checkpoint.rs index 044fc57f22..c3cb1d5c36 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/checkpoint.rs @@ -1,5 +1,6 @@ use crate::test_utils::TestRandom; -use crate::{Epoch, Hash256}; +use crate::{Epoch, ForkName, Hash256}; +use context_deserialize_derive::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use 
test_random_derive::TestRandom; @@ -24,6 +25,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct Checkpoint { pub epoch: Epoch, pub root: Hash256, diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation_request.rs index e2df0bb972..c7375dab84 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation_request.rs @@ -1,4 +1,5 @@ -use crate::{test_utils::TestRandom, Address, PublicKeyBytes, SignedRoot}; +use crate::context_deserialize; +use crate::{test_utils::TestRandom, Address, ForkName, PublicKeyBytes, SignedRoot}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -19,6 +20,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct ConsolidationRequest { pub source_address: Address, pub source_pubkey: PublicKeyBytes, diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 321c12d220..e918beacb0 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -1,7 +1,8 @@ use super::{ - ChainSpec, EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, SyncCommitteeContribution, - SyncSelectionProof, + ChainSpec, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, + SyncCommitteeContribution, SyncSelectionProof, }; +use crate::context_deserialize; use crate::test_utils::TestRandom; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -23,6 +24,7 @@ use tree_hash_derive::TreeHash; )] #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] +#[context_deserialize(ForkName)] pub struct ContributionAndProof { /// The index of the validator that created the sync contribution. 
#[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index d2802670b6..5ec2b28b2b 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,7 +1,8 @@ use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; +use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::{ - BeaconBlockHeader, BeaconStateError, Epoch, EthSpec, Hash256, RuntimeVariableList, + BeaconBlockHeader, BeaconStateError, Epoch, EthSpec, ForkName, Hash256, RuntimeVariableList, SignedBeaconBlockHeader, Slot, }; use bls::Signature; @@ -84,6 +85,7 @@ pub type DataColumnSidecarList = Vec>>; #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] #[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[context_deserialize(ForkName)] pub struct DataColumnSidecar { #[serde(with = "serde_utils::quoted_u64")] pub index: ColumnIndex, diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs index c818c7d808..8b4b6af95d 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -1,3 +1,4 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::*; use serde::{Deserialize, Serialize}; @@ -24,6 +25,7 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct Deposit { pub proof: FixedVector, pub data: DepositData, diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index f62829e795..d29e8c8d14 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -1,6 +1,5 @@ use crate::test_utils::TestRandom; use crate::*; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -22,6 +21,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] 
pub struct DepositData { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index 6184d0aeb3..5c2a0b7c2b 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -21,6 +21,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit_request.rs index a21760551b..141258b5ab 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit_request.rs @@ -1,5 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::{Hash256, PublicKeyBytes}; +use crate::{ForkName, Hash256, PublicKeyBytes}; use bls::SignatureBytes; use serde::{Deserialize, Serialize}; use ssz::Encode; @@ -20,6 +21,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct DepositRequest { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index e2c4e511ef..7bd0d3228d 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -1,6 +1,7 @@ use super::Hash256; +use crate::context_deserialize; use crate::test_utils::TestRandom; - +use crate::ForkName; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -24,6 +25,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct Eth1Data { pub deposit_root: Hash256, #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index 677b3d3408..6c031f6899 100644 --- 
a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -112,3 +112,22 @@ impl fmt::Display for ExecutionBlockHash { write!(f, "{}", self.0) } } + +impl From for ExecutionBlockHash { + fn from(hash: Hash256) -> Self { + Self(hash) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_from_hash256() { + let hash = Hash256::random(); + let ex_hash = ExecutionBlockHash::from(hash); + + assert_eq!(ExecutionBlockHash(hash), ex_hash); + } +} diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 5d756c8529..b4b0608150 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -30,6 +30,7 @@ pub type Withdrawals = VariableList::MaxWithdrawal Derivative, arbitrary::Arbitrary ), + context_deserialize(ForkName), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec") @@ -133,28 +134,35 @@ impl ExecutionPayload { } } -impl ForkVersionDeserialize for ExecutionPayload { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for ExecutionPayload { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { let convert_err = |e| { serde::de::Error::custom(format!("ExecutionPayload failed to deserialize: {:?}", e)) }; - - Ok(match fork_name { - ForkName::Bellatrix => { - Self::Bellatrix(serde_json::from_value(value).map_err(convert_err)?) 
- } - ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), - ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), - ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), - ForkName::Fulu => Self::Fulu(serde_json::from_value(value).map_err(convert_err)?), + Ok(match context { ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayload failed to deserialize: unsupported fork '{}'", - fork_name - ))); + context + ))) + } + ForkName::Bellatrix => { + Self::Bellatrix(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Capella => { + Self::Capella(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Deneb => { + Self::Deneb(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Electra => { + Self::Electra(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Fulu => { + Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
} }) } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 3012041b8b..a16f29819d 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -25,7 +25,8 @@ use tree_hash_derive::TreeHash; ), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), - arbitrary(bound = "E: EthSpec") + arbitrary(bound = "E: EthSpec"), + context_deserialize(ForkName), ), ref_attributes( derive(PartialEq, TreeHash, Debug), @@ -472,31 +473,38 @@ impl TryFrom> for ExecutionPayloadHeaderFu } } -impl ForkVersionDeserialize for ExecutionPayloadHeader { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for ExecutionPayloadHeader { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { let convert_err = |e| { serde::de::Error::custom(format!( "ExecutionPayloadHeader failed to deserialize: {:?}", e )) }; - - Ok(match fork_name { - ForkName::Bellatrix => { - Self::Bellatrix(serde_json::from_value(value).map_err(convert_err)?) 
- } - ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), - ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), - ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), - ForkName::Fulu => Self::Fulu(serde_json::from_value(value).map_err(convert_err)?), + Ok(match context { ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", - fork_name - ))); + context + ))) + } + ForkName::Bellatrix => { + Self::Bellatrix(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Capella => { + Self::Capella(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Deneb => { + Self::Deneb(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Electra => { + Self::Electra(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Fulu => { + Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
} }) } diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs index 223c6444cc..2fec3b5f66 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution_requests.rs @@ -1,5 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::{ConsolidationRequest, DepositRequest, EthSpec, Hash256, WithdrawalRequest}; +use crate::{ConsolidationRequest, DepositRequest, EthSpec, ForkName, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; use derivative::Derivative; use ethereum_hashing::{DynamicContext, Sha256Context}; @@ -33,6 +34,7 @@ pub type ConsolidationRequests = #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] #[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[context_deserialize(ForkName)] pub struct ExecutionRequests { pub deposits: DepositRequests, pub withdrawals: WithdrawalRequests, diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index b23113f436..239ffe33c0 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -1,5 +1,6 @@ use crate::test_utils::TestRandom; -use crate::Epoch; +use crate::{Epoch, ForkName}; +use context_deserialize_derive::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -23,6 +24,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct Fork { #[serde(with = "serde_utils::bytes_4_hex")] pub previous_version: [u8; 4], diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index 52ce57a2a9..1ac91084d2 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -1,5 +1,6 @@ use crate::test_utils::TestRandom; -use crate::{Hash256, SignedRoot}; +use crate::{ForkName, Hash256, SignedRoot}; +use context_deserialize_derive::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -22,6 +23,7 
@@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct ForkData { #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs deleted file mode 100644 index 7e4efd05d6..0000000000 --- a/consensus/types/src/fork_versioned_response.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::ForkName; -use serde::de::DeserializeOwned; -use serde::{Deserialize, Deserializer, Serialize}; -use serde_json::value::Value; -use std::sync::Arc; - -pub trait ForkVersionDecode: Sized { - /// SSZ decode with explicit fork variant. - fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; -} - -pub trait ForkVersionDeserialize: Sized + DeserializeOwned { - fn deserialize_by_fork<'de, D: Deserializer<'de>>( - value: Value, - fork_name: ForkName, - ) -> Result; -} - -/// Deserialize is only implemented for types that implement ForkVersionDeserialize. -/// -/// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than -/// version. If you *do* care about adding other fields you can mix in any type that implements -/// `Deserialize`. -#[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - #[serde(flatten)] - pub metadata: M, - pub data: T, -} - -/// Metadata type similar to unit (i.e. `()`) but deserializes from a map (`serde_json::Value`). -/// -/// Unfortunately the braces are semantically significant, i.e. `struct EmptyMetadata;` does not -/// work. -#[derive(Debug, PartialEq, Clone, Default, Deserialize, Serialize)] -pub struct EmptyMetadata {} - -/// Fork versioned response with extra information about finalization & optimistic execution. 
-pub type ExecutionOptimisticFinalizedForkVersionedResponse = - ForkVersionedResponse; - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct ExecutionOptimisticFinalizedMetadata { - pub execution_optimistic: Option, - pub finalized: Option, -} - -impl<'de, F, M> serde::Deserialize<'de> for ForkVersionedResponse -where - F: ForkVersionDeserialize, - M: DeserializeOwned, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - struct Helper { - version: Option, - #[serde(flatten)] - metadata: serde_json::Value, - data: serde_json::Value, - } - - let helper = Helper::deserialize(deserializer)?; - let data = match helper.version { - Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, - None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, - }; - let metadata = serde_json::from_value(helper.metadata).map_err(serde::de::Error::custom)?; - - Ok(ForkVersionedResponse { - version: helper.version, - metadata, - data, - }) - } -} - -impl ForkVersionDeserialize for Arc { - fn deserialize_by_fork<'de, D: Deserializer<'de>>( - value: Value, - fork_name: ForkName, - ) -> Result { - Ok(Arc::new(F::deserialize_by_fork::<'de, D>( - value, fork_name, - )?)) - } -} - -impl ForkVersionedResponse { - /// Apply a function to the inner `data`, potentially changing its type. 
- pub fn map_data(self, f: impl FnOnce(T) -> U) -> ForkVersionedResponse { - let ForkVersionedResponse { - version, - metadata, - data, - } = self; - ForkVersionedResponse { - version, - metadata, - data: f(data), - } - } -} - -#[cfg(test)] -mod fork_version_response_tests { - use crate::{ - ExecutionPayload, ExecutionPayloadBellatrix, ForkName, ForkVersionedResponse, - MainnetEthSpec, - }; - use serde_json::json; - - #[test] - fn fork_versioned_response_deserialize_correct_fork() { - type E = MainnetEthSpec; - - let response_json = - serde_json::to_string(&json!(ForkVersionedResponse::> { - version: Some(ForkName::Bellatrix), - metadata: Default::default(), - data: ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix::default()), - })) - .unwrap(); - - let result: Result>, _> = - serde_json::from_str(&response_json); - - assert!(result.is_ok()); - } - - #[test] - fn fork_versioned_response_deserialize_incorrect_fork() { - type E = MainnetEthSpec; - - let response_json = - serde_json::to_string(&json!(ForkVersionedResponse::> { - version: Some(ForkName::Capella), - metadata: Default::default(), - data: ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix::default()), - })) - .unwrap(); - - let result: Result>, _> = - serde_json::from_str(&response_json); - - assert!(result.is_err()); - } -} diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index 7bac9699eb..3a02810bba 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -22,6 +22,7 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, )] #[arbitrary(bound = "E: EthSpec")] +#[context_deserialize(ForkName)] pub struct HistoricalBatch { #[test_random(default)] pub block_roots: Vector, diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs index 8c82d52b81..7ad423dade 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/historical_summary.rs @@ -1,5 
+1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::{BeaconState, EthSpec, Hash256}; +use crate::{BeaconState, EthSpec, ForkName, Hash256}; use compare_fields_derive::CompareFields; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -28,6 +29,7 @@ use tree_hash_derive::TreeHash; Default, arbitrary::Arbitrary, )] +#[context_deserialize(ForkName)] pub struct HistoricalSummary { block_summary_root: Hash256, state_summary_root: Hash256, diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index f3243a9f05..ea65d78504 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -1,4 +1,7 @@ -use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, EthSpec, VariableList}; +use crate::context_deserialize; +use crate::{ + test_utils::TestRandom, AggregateSignature, AttestationData, EthSpec, ForkName, VariableList, +}; use core::slice::Iter; use derivative::Derivative; use serde::{Deserialize, Serialize}; @@ -29,6 +32,7 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, TreeHash, ), + context_deserialize(ForkName), derivative(PartialEq, Hash(bound = "E: EthSpec")), serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec"), diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 70f07f0109..f0555a06d6 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -22,6 +22,7 @@ pub mod beacon_block; pub mod beacon_block_body; pub mod beacon_block_header; pub mod beacon_committee; +pub mod beacon_response; pub mod beacon_state; pub mod bls_to_execution_change; pub mod builder_bid; @@ -44,7 +45,6 @@ pub mod execution_payload_header; pub mod fork; pub mod fork_data; pub mod fork_name; -pub mod fork_versioned_response; pub mod graffiti; pub mod historical_batch; pub mod historical_summary; @@ -138,6 +138,9 @@ pub use crate::beacon_block_body::{ }; pub 
use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; +pub use crate::beacon_response::{ + BeaconResponse, ForkVersionDecode, ForkVersionedResponse, UnversionedResponse, +}; pub use crate::beacon_state::{Error as BeaconStateError, *}; pub use crate::blob_sidecar::{BlobIdentifier, BlobSidecar, BlobSidecarList, BlobsList}; pub use crate::bls_to_execution_change::BlsToExecutionChange; @@ -178,9 +181,6 @@ pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::fork_versioned_response::{ - ForkVersionDecode, ForkVersionDeserialize, ForkVersionedResponse, -}; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::{ @@ -287,6 +287,8 @@ pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; +pub use context_deserialize::ContextDeserialize; +pub use context_deserialize_derive::context_deserialize; pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; pub use milhouse::{self, List, Vector}; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index aa0d8836d1..e82b34cc8c 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,12 +1,12 @@ +use crate::context_deserialize; use crate::{ - light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, EthSpec, FixedVector, - ForkName, ForkVersionDeserialize, Hash256, LightClientHeader, LightClientHeaderAltair, + light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, ContextDeserialize, + EthSpec, FixedVector, ForkName, Hash256, 
LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, SignedBlindedBeaconBlock, Slot, SyncCommittee, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; -use serde_json::Value; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; @@ -34,6 +34,7 @@ use tree_hash_derive::TreeHash; ), serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec"), + context_deserialize(ForkName), ) )] #[derive( @@ -213,20 +214,40 @@ impl LightClientBootstrap { } } -impl ForkVersionDeserialize for LightClientBootstrap { - fn deserialize_by_fork<'de, D: Deserializer<'de>>( - value: Value, - fork_name: ForkName, - ) -> Result { - if fork_name.altair_enabled() { - Ok(serde_json::from_value::>(value) - .map_err(serde::de::Error::custom))? - } else { - Err(serde::de::Error::custom(format!( - "LightClientBootstrap failed to deserialize: unsupported fork '{}'", - fork_name - ))) - } +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientBootstrap { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + let convert_err = |e| { + serde::de::Error::custom(format!( + "LightClientBootstrap failed to deserialize: {:?}", + e + )) + }; + Ok(match context { + ForkName::Base => { + return Err(serde::de::Error::custom(format!( + "LightClientBootstrap failed to deserialize: unsupported fork '{}'", + context + ))) + } + ForkName::Altair | ForkName::Bellatrix => { + Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Capella => { + Self::Capella(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Deneb => { + Self::Deneb(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Electra => { + Self::Electra(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
+ } + ForkName::Fulu => { + Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + }) } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index ee3b53c853..9189dcd0a0 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -1,13 +1,13 @@ use super::{EthSpec, FixedVector, Hash256, LightClientHeader, Slot, SyncAggregate}; +use crate::context_deserialize; use crate::ChainSpec; use crate::{ - light_client_update::*, test_utils::TestRandom, ForkName, ForkVersionDeserialize, + light_client_update::*, test_utils::TestRandom, ContextDeserialize, ForkName, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, SignedBlindedBeaconBlock, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; -use serde_json::Value; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; @@ -33,11 +33,10 @@ use tree_hash_derive::TreeHash; ), serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec"), + context_deserialize(ForkName), ) )] -#[derive( - Debug, Clone, Serialize, Encode, TreeHash, Deserialize, arbitrary::Arbitrary, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Encode, TreeHash, arbitrary::Arbitrary, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] @@ -233,20 +232,40 @@ impl LightClientFinalityUpdate { } } -impl ForkVersionDeserialize for LightClientFinalityUpdate { - fn deserialize_by_fork<'de, D: Deserializer<'de>>( - value: Value, - fork_name: ForkName, - ) -> Result { - if fork_name.altair_enabled() { - serde_json::from_value::>(value) - .map_err(serde::de::Error::custom) - } else { - Err(serde::de::Error::custom(format!( - "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", - fork_name - 
))) - } +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientFinalityUpdate { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + let convert_err = |e| { + serde::de::Error::custom(format!( + "LightClientFinalityUpdate failed to deserialize: {:?}", + e + )) + }; + Ok(match context { + ForkName::Base => { + return Err(serde::de::Error::custom(format!( + "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", + context + ))) + } + ForkName::Altair | ForkName::Bellatrix => { + Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Capella => { + Self::Capella(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Deneb => { + Self::Deneb(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Electra => { + Self::Electra(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Fulu => { + Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
+ } + }) } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 0be26a7036..36f2932ecd 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -1,6 +1,5 @@ +use crate::context_deserialize; use crate::ChainSpec; -use crate::ForkName; -use crate::ForkVersionDeserialize; use crate::{light_client_update::*, BeaconBlockBody}; use crate::{ test_utils::TestRandom, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, @@ -8,8 +7,9 @@ use crate::{ SignedBlindedBeaconBlock, }; use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; +use crate::{ContextDeserialize, ForkName}; use derivative::Derivative; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; @@ -35,11 +35,10 @@ use tree_hash_derive::TreeHash; ), serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec"), + context_deserialize(ForkName), ) )] -#[derive( - Debug, Clone, Serialize, TreeHash, Encode, Deserialize, arbitrary::Arbitrary, PartialEq, -)] +#[derive(Debug, Clone, Serialize, TreeHash, Encode, arbitrary::Arbitrary, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] @@ -334,31 +333,40 @@ impl Default for LightClientHeaderFulu { } } -impl ForkVersionDeserialize for LightClientHeader { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { - match fork_name { - ForkName::Altair | ForkName::Bellatrix => serde_json::from_value(value) - .map(|light_client_header| Self::Altair(light_client_header)) - .map_err(serde::de::Error::custom), - ForkName::Capella => serde_json::from_value(value) - .map(|light_client_header| Self::Capella(light_client_header)) - .map_err(serde::de::Error::custom), - ForkName::Deneb => 
serde_json::from_value(value) - .map(|light_client_header| Self::Deneb(light_client_header)) - .map_err(serde::de::Error::custom), - ForkName::Electra => serde_json::from_value(value) - .map(|light_client_header| Self::Electra(light_client_header)) - .map_err(serde::de::Error::custom), - ForkName::Fulu => serde_json::from_value(value) - .map(|light_client_header| Self::Fulu(light_client_header)) - .map_err(serde::de::Error::custom), - ForkName::Base => Err(serde::de::Error::custom(format!( - "LightClientHeader deserialization for {fork_name} not implemented" - ))), - } +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientHeader { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + let convert_err = |e| { + serde::de::Error::custom(format!( + "LightClientHeader failed to deserialize: {:?}", + e + )) + }; + Ok(match context { + ForkName::Base => { + return Err(serde::de::Error::custom(format!( + "LightClientHeader failed to deserialize: unsupported fork '{}'", + context + ))) + } + ForkName::Altair | ForkName::Bellatrix => { + Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Capella => { + Self::Capella(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Deneb => { + Self::Deneb(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Electra => { + Self::Electra(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Fulu => { + Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
+ } + }) } } diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index fcf357757b..5701ebd875 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -1,4 +1,5 @@ -use super::{EthSpec, ForkName, ForkVersionDeserialize, LightClientHeader, Slot, SyncAggregate}; +use super::{ContextDeserialize, EthSpec, ForkName, LightClientHeader, Slot, SyncAggregate}; +use crate::context_deserialize; use crate::test_utils::TestRandom; use crate::{ light_client_update::*, ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, @@ -7,7 +8,6 @@ use crate::{ }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; -use serde_json::Value; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; @@ -36,11 +36,10 @@ use tree_hash_derive::TreeHash; ), serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec"), + context_deserialize(ForkName), ) )] -#[derive( - Debug, Clone, Serialize, Encode, TreeHash, Deserialize, arbitrary::Arbitrary, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Encode, TreeHash, arbitrary::Arbitrary, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] @@ -206,22 +205,40 @@ impl LightClientOptimisticUpdate { } } -impl ForkVersionDeserialize for LightClientOptimisticUpdate { - fn deserialize_by_fork<'de, D: Deserializer<'de>>( - value: Value, - fork_name: ForkName, - ) -> Result { - if fork_name.altair_enabled() { - Ok( - serde_json::from_value::>(value) - .map_err(serde::de::Error::custom), - )? 
- } else { - Err(serde::de::Error::custom(format!( - "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", - fork_name - ))) - } +impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientOptimisticUpdate { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + let convert_err = |e| { + serde::de::Error::custom(format!( + "LightClientOptimisticUpdate failed to deserialize: {:?}", + e + )) + }; + Ok(match context { + ForkName::Base => { + return Err(serde::de::Error::custom(format!( + "LightClientOptimisticUpdate failed to deserialize: unsupported fork '{}'", + context + ))) + } + ForkName::Altair | ForkName::Bellatrix => { + Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Capella => { + Self::Capella(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Deneb => { + Self::Deneb(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Electra => { + Self::Electra(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Fulu => { + Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
+ } + }) } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index e1fce47975..92aeeb33bb 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,8 +1,9 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; +use crate::context_deserialize; use crate::light_client_header::LightClientHeaderElectra; use crate::LightClientHeader; use crate::{ - beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, + beacon_state, test_utils::TestRandom, ChainSpec, ContextDeserialize, Epoch, ForkName, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderFulu, SignedBlindedBeaconBlock, }; @@ -10,7 +11,6 @@ use derivative::Derivative; use safe_arith::ArithError; use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; -use serde_json::Value; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; @@ -117,11 +117,10 @@ impl From for Error { ), serde(bound = "E: EthSpec", deny_unknown_fields), arbitrary(bound = "E: EthSpec"), + context_deserialize(ForkName), ) )] -#[derive( - Debug, Clone, Serialize, Encode, TreeHash, Deserialize, arbitrary::Arbitrary, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Encode, TreeHash, arbitrary::Arbitrary, PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] @@ -180,19 +179,37 @@ pub struct LightClientUpdate { pub signature_slot: Slot, } -impl ForkVersionDeserialize for LightClientUpdate { - fn deserialize_by_fork<'de, D: Deserializer<'de>>( - value: Value, - fork_name: ForkName, - ) -> Result { - match fork_name { - ForkName::Base => Err(serde::de::Error::custom(format!( - "LightClientUpdate failed to deserialize: unsupported fork '{}'", - fork_name - ))), - _ => Ok(serde_json::from_value::>(value) - .map_err(serde::de::Error::custom))?, - } 
+impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientUpdate { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { + let convert_err = |e| { + serde::de::Error::custom(format!("LightClientUpdate failed to deserialize: {:?}", e)) + }; + Ok(match context { + ForkName::Base => { + return Err(serde::de::Error::custom(format!( + "LightClientUpdate failed to deserialize: unsupported fork '{}'", + context + ))) + } + ForkName::Altair | ForkName::Bellatrix => { + Self::Altair(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Capella => { + Self::Capella(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Deneb => { + Self::Deneb(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Electra => { + Self::Electra(Deserialize::deserialize(deserializer).map_err(convert_err)?) + } + ForkName::Fulu => { + Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
+ } + }) } } diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 0bccab5079..b7b4a19f4b 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -1,6 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::{AttestationData, BitList, EthSpec}; - +use crate::{AttestationData, BitList, EthSpec, ForkName}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -22,6 +22,7 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, )] #[arbitrary(bound = "E: EthSpec")] +#[context_deserialize(ForkName)] pub struct PendingAttestation { pub aggregation_bits: BitList, pub data: AttestationData, diff --git a/consensus/types/src/pending_consolidation.rs b/consensus/types/src/pending_consolidation.rs index 6e0b74a738..9a513f2744 100644 --- a/consensus/types/src/pending_consolidation.rs +++ b/consensus/types/src/pending_consolidation.rs @@ -1,4 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; +use crate::ForkName; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -18,6 +20,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct PendingConsolidation { #[serde(with = "serde_utils::quoted_u64")] pub source_index: u64, diff --git a/consensus/types/src/pending_deposit.rs b/consensus/types/src/pending_deposit.rs index 3bee86417d..970c326467 100644 --- a/consensus/types/src/pending_deposit.rs +++ b/consensus/types/src/pending_deposit.rs @@ -18,6 +18,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct PendingDeposit { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/pending_partial_withdrawal.rs index 
846dd97360..ca49032859 100644 --- a/consensus/types/src/pending_partial_withdrawal.rs +++ b/consensus/types/src/pending_partial_withdrawal.rs @@ -1,5 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::Epoch; +use crate::{Epoch, ForkName}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -19,6 +20,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct PendingPartialWithdrawal { #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index ee55d62c20..7b03dbb83e 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -1,5 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::SignedBeaconBlockHeader; +use crate::{ForkName, SignedBeaconBlockHeader}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -23,6 +24,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct ProposerSlashing { pub signed_header_1: SignedBeaconBlockHeader, pub signed_header_2: SignedBeaconBlockHeader, diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index d6b1c10e99..454c8b9e18 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -1,5 +1,7 @@ +use crate::ContextDeserialize; use derivative::Derivative; -use serde::{Deserialize, Serialize}; +use serde::de::Error as DeError; +use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_types::Error; use std::ops::{Deref, Index, IndexMut}; @@ -217,6 +219,28 @@ where } } +impl<'de, C, T> ContextDeserialize<'de, (C, usize)> for RuntimeVariableList +where + T: ContextDeserialize<'de, C>, + C: Clone, +{ + fn 
context_deserialize(deserializer: D, context: (C, usize)) -> Result + where + D: Deserializer<'de>, + { + // first parse out a Vec using the Vec impl you already have + let vec: Vec = Vec::context_deserialize(deserializer, context.0)?; + if vec.len() > context.1 { + return Err(DeError::custom(format!( + "RuntimeVariableList length {} exceeds max_len {}", + vec.len(), + context.1 + ))); + } + Ok(RuntimeVariableList::from_vec(vec, context.1)) + } +} + #[cfg(test)] mod test { use super::*; diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index 26eca19bf1..7b1f97e521 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -2,11 +2,11 @@ use super::{ AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, }; use super::{ - AttestationRef, ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, SelectionProof, - Signature, SignedRoot, + Attestation, AttestationRef, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, + SelectionProof, Signature, SignedRoot, }; +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::Attestation; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; @@ -32,6 +32,7 @@ use tree_hash_derive::TreeHash; TestRandom, TreeHash, ), + context_deserialize(ForkName), serde(bound = "E: EthSpec"), arbitrary(bound = "E: EthSpec"), ), diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 6379d73ec0..85bed35a19 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -3,7 +3,7 @@ use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; use merkle_proof::MerkleTree; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; use 
std::fmt; use superstruct::superstruct; @@ -703,20 +703,17 @@ impl SignedBeaconBlock { } } -impl> ForkVersionDeserialize +impl<'de, E: EthSpec, Payload: AbstractExecPayload> ContextDeserialize<'de, ForkName> for SignedBeaconBlock { - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { + fn context_deserialize(deserializer: D, context: ForkName) -> Result + where + D: Deserializer<'de>, + { Ok(map_fork_name!( - fork_name, + context, Self, - serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( - "SignedBeaconBlock failed to deserialize: {:?}", - e - )))? + serde::Deserialize::deserialize(deserializer)? )) } } diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index 3d4269a2ce..9106fa8372 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -1,5 +1,6 @@ +use crate::context_deserialize; use crate::{ - test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, Hash256, + test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, Signature, SignedRoot, }; use serde::{Deserialize, Serialize}; @@ -24,6 +25,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct SignedBeaconBlockHeader { pub message: BeaconBlockHeader, pub signature: Signature, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index a7bfd7c271..383663e36b 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -19,6 +19,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct SignedBlsToExecutionChange { pub message: BlsToExecutionChange, pub signature: Signature, 
diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/signed_contribution_and_proof.rs index 068fd980ae..42115bfbc0 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/signed_contribution_and_proof.rs @@ -1,7 +1,8 @@ use super::{ - ChainSpec, ContributionAndProof, Domain, EthSpec, Fork, Hash256, SecretKey, Signature, - SignedRoot, SyncCommitteeContribution, SyncSelectionProof, + ChainSpec, ContributionAndProof, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, + Signature, SignedRoot, SyncCommitteeContribution, SyncSelectionProof, }; +use crate::context_deserialize; use crate::test_utils::TestRandom; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -24,6 +25,7 @@ use tree_hash_derive::TreeHash; )] #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] +#[context_deserialize(ForkName)] pub struct SignedContributionAndProof { /// The `ContributionAndProof` that was signed. pub message: ContributionAndProof, diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 30eda11791..b6451d3ab5 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -1,4 +1,5 @@ -use crate::{test_utils::TestRandom, VoluntaryExit}; +use crate::context_deserialize; +use crate::{test_utils::TestRandom, ForkName, VoluntaryExit}; use bls::Signature; use serde::{Deserialize, Serialize}; @@ -22,6 +23,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct SignedVoluntaryExit { pub message: VoluntaryExit, pub signature: Signature, diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/signing_data.rs index f30d5fdfcb..aa25ecffd9 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/signing_data.rs @@ -1,5 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use 
crate::Hash256; +use crate::{ForkName, Hash256}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -19,6 +20,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct SigningData { pub object_root: Hash256, pub domain: Hash256, diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 12b91501ae..4f810db22a 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -1,6 +1,7 @@ use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::{AggregateSignature, BitVector, EthSpec, SyncCommitteeContribution}; +use crate::{AggregateSignature, BitVector, EthSpec, ForkName, SyncCommitteeContribution}; use derivative::Derivative; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; @@ -36,6 +37,7 @@ impl From for Error { #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] +#[context_deserialize(ForkName)] pub struct SyncAggregate { pub sync_committee_bits: BitVector, pub sync_committee_signature: AggregateSignature, diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index 3da130bb06..a61cd47d04 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -1,6 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::{SignedRoot, Slot}; - +use crate::{ForkName, SignedRoot, Slot}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -19,6 +19,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct SyncAggregatorSelectionData { pub slot: Slot, #[serde(with = "serde_utils::quoted_u64")] diff 
--git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index 032f0d61f9..c7ec7bdcc3 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -1,5 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::{EthSpec, FixedVector, SyncSubnetId}; +use crate::{EthSpec, FixedVector, ForkName, SyncSubnetId}; use bls::PublicKeyBytes; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; @@ -38,6 +39,7 @@ impl From for Error { )] #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] +#[context_deserialize(ForkName)] pub struct SyncCommittee { pub pubkeys: FixedVector, pub aggregate_pubkey: PublicKeyBytes, diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index e160332f45..e2ac414cfa 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -1,4 +1,5 @@ -use super::{AggregateSignature, EthSpec, SignedRoot}; +use super::{AggregateSignature, EthSpec, ForkName, SignedRoot}; +use crate::context_deserialize; use crate::slot_data::SlotData; use crate::{test_utils::TestRandom, BitVector, Hash256, Slot, SyncCommitteeMessage}; use serde::{Deserialize, Serialize}; @@ -28,6 +29,7 @@ pub enum Error { )] #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] +#[context_deserialize(ForkName)] pub struct SyncCommitteeContribution { pub slot: Slot, pub beacon_block_root: Hash256, diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index d7d309cd56..4b442b3053 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -1,7 +1,9 @@ -use crate::test_utils::TestRandom; -use crate::{ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, Slot}; - +use crate::context_deserialize; use 
crate::slot_data::SlotData; +use crate::test_utils::TestRandom; +use crate::{ + ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, Slot, +}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -20,6 +22,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct SyncCommitteeMessage { pub slot: Slot, pub beacon_block_root: Hash256, diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 027958b178..165f477ff4 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,3 +1,4 @@ +use crate::context_deserialize; use crate::{ test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, @@ -23,6 +24,7 @@ use tree_hash_derive::TreeHash; TestRandom, TreeHash, )] +#[context_deserialize(ForkName)] pub struct Validator { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 76574a89ab..75260add4b 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -1,3 +1,4 @@ +use crate::context_deserialize; use crate::{ test_utils::TestRandom, ChainSpec, Domain, Epoch, ForkName, Hash256, SecretKey, SignedRoot, SignedVoluntaryExit, @@ -24,6 +25,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. 
pub epoch: Epoch, @@ -57,7 +59,6 @@ impl VoluntaryExit { } else { spec.fork_version_for_name(fork_name) }; - spec.compute_domain(Domain::VoluntaryExit, fork_version, genesis_validators_root) } } diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 7f98ff1e60..9ca50fccfb 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -19,6 +19,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct Withdrawal { #[serde(with = "serde_utils::quoted_u64")] pub index: u64, diff --git a/consensus/types/src/withdrawal_request.rs b/consensus/types/src/withdrawal_request.rs index 1296426ac0..57c6e798eb 100644 --- a/consensus/types/src/withdrawal_request.rs +++ b/consensus/types/src/withdrawal_request.rs @@ -1,5 +1,6 @@ +use crate::context_deserialize; use crate::test_utils::TestRandom; -use crate::{Address, PublicKeyBytes}; +use crate::{Address, ForkName, PublicKeyBytes}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -20,6 +21,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] +#[context_deserialize(ForkName)] pub struct WithdrawalRequest { #[serde(with = "serde_utils::address_hex")] pub source_address: Address, diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index d02e01b80c..4661288679 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -4,6 +4,14 @@ version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } +[features] +arbitrary = [] +default = ["supranational"] +fake_crypto = [] +supranational = ["blst"] +supranational-portable = ["supranational", "blst/portable"] +supranational-force-adx = ["supranational", "blst/force-adx"] + [dependencies] alloy-primitives = { workspace = true } arbitrary = { workspace = true } @@ -18,11 +26,3 @@ safe_arith = { workspace = true } serde = { workspace = true } tree_hash = { workspace = true } zeroize = { workspace = true } - 
-[features] -arbitrary = [] -default = ["supranational"] -fake_crypto = [] -supranational = ["blst"] -supranational-portable = ["supranational", "blst/portable"] -supranational-force-adx = ["supranational", "blst/force-adx"] diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 9acbe2569c..b39feb5011 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -5,6 +5,9 @@ version = "7.1.0-beta.0" authors = ["Paul Hauner "] edition = { workspace = true } +[package.metadata.cargo-udeps.ignore] +normal = ["malloc_utils"] + [features] portable = ["bls/supranational-portable"] fake_crypto = ['bls/fake_crypto'] @@ -42,6 +45,3 @@ tracing-subscriber = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } validator_dir = { workspace = true } - -[package.metadata.cargo-udeps.ignore] -normal = ["malloc_utils"] diff --git a/lcli/src/block_root.rs b/lcli/src/block_root.rs index 80087fd6d4..3c07d4f9ef 100644 --- a/lcli/src/block_root.rs +++ b/lcli/src/block_root.rs @@ -79,7 +79,7 @@ pub fn run( .await .map_err(|e| format!("Failed to download block: {:?}", e))? .ok_or_else(|| format!("Unable to locate block at {:?}", block_id))? - .data; + .into_data(); Ok::<_, String>(block) }) .map_err(|e| format!("Failed to complete task: {:?}", e))? 
diff --git a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs index 1ef40e6397..cb6a9d2b1d 100644 --- a/lcli/src/http_sync.rs +++ b/lcli/src/http_sync.rs @@ -123,11 +123,11 @@ async fn get_block_from_source( .unwrap() .unwrap(); let blobs_from_source = source - .get_blobs::(block_id, None) + .get_blobs::(block_id, None, spec) .await .unwrap() .unwrap() - .data; + .into_data(); let (kzg_proofs, blobs): (Vec<_>, Vec<_>) = blobs_from_source .iter() diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 05f4900c46..105100aeb1 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -675,6 +675,7 @@ fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> extra_info: false, }, "", + 0o600, ); let env = env_builder diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 834123e939..9456f34570 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -102,7 +102,7 @@ pub fn run( }) .map_err(|e| format!("Failed to complete task: {:?}", e))? .ok_or_else(|| format!("Unable to locate state at {:?}", state_id))? - .data; + .into_data(); let state_root = match state_id { StateId::Root(root) => Some(root), _ => None, diff --git a/lcli/src/state_root.rs b/lcli/src/state_root.rs index b2308999d4..7b10ab9362 100644 --- a/lcli/src/state_root.rs +++ b/lcli/src/state_root.rs @@ -50,7 +50,7 @@ pub fn run( }) .map_err(|e| format!("Failed to complete task: {:?}", e))? .ok_or_else(|| format!("Unable to locate state at {:?}", state_id))? - .data + .into_data() } _ => return Err("must supply either --state-path or --beacon-url".into()), }; diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 4831f86491..2226105c34 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -154,7 +154,7 @@ pub fn run( .await .map_err(|e| format!("Failed to download block: {:?}", e))? .ok_or_else(|| format!("Unable to locate block at {:?}", block_id))? 
- .data; + .into_data(); if block.slot() == inner_spec.genesis_slot { return Err("Cannot run on the genesis block".to_string()); @@ -165,7 +165,7 @@ pub fn run( .await .map_err(|e| format!("Failed to download parent block: {:?}", e))? .ok_or_else(|| format!("Unable to locate parent block at {:?}", block_id))? - .data; + .into_data(); let state_root = parent_block.state_root(); let state_id = StateId::Root(state_root); @@ -174,7 +174,7 @@ pub fn run( .await .map_err(|e| format!("Failed to download state: {:?}", e))? .ok_or_else(|| format!("Unable to locate state at {:?}", state_id))? - .data; + .into_data(); Ok((pre_state, Some(state_root), block)) }) diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 04c8efcdba..cc17f638fd 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -6,6 +6,11 @@ edition = { workspace = true } autotests = false rust-version = "1.83.0" +# Prevent cargo-udeps from flagging the dummy package `target_check`, which exists only +# to assert properties of the compilation target. +[package.metadata.cargo-udeps.ignore] +normal = ["target_check"] + [features] default = ["slasher-lmdb", "beacon-node-leveldb"] # Writes debugging .ssz files to /tmp during block processing. @@ -32,12 +37,6 @@ beacon-node-redb = ["store/redb"] # Deprecated. This is now enabled by default on non windows targets. 
jemalloc = [] -[target.'cfg(not(target_os = "windows"))'.dependencies] -malloc_utils = { workspace = true, features = ["jemalloc"] } - -[target.'cfg(target_os = "windows")'.dependencies] -malloc_utils = { workspace = true } - [dependencies] account_manager = { "path" = "../account_manager" } account_utils = { workspace = true } @@ -69,6 +68,12 @@ unused_port = { workspace = true } validator_client = { workspace = true } validator_manager = { path = "../validator_manager" } +[target.'cfg(not(target_os = "windows"))'.dependencies] +malloc_utils = { workspace = true, features = ["jemalloc"] } + +[target.'cfg(target_os = "windows")'.dependencies] +malloc_utils = { workspace = true } + [dev-dependencies] beacon_node_fallback = { workspace = true } beacon_processor = { workspace = true } @@ -85,8 +90,3 @@ zeroize = { workspace = true } [[test]] name = "lighthouse_tests" path = "tests/main.rs" - -# Prevent cargo-udeps from flagging the dummy package `target_check`, which exists only -# to assert properties of the compilation target. 
-[package.metadata.cargo-udeps.ignore] -normal = ["target_check"] diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 9b0284e06d..a66b7e128f 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -26,14 +26,7 @@ use types::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; #[cfg(target_family = "unix")] use { futures::Future, - std::{ - fs::{read_dir, set_permissions, Permissions}, - os::unix::fs::PermissionsExt, - path::Path, - pin::Pin, - task::Context, - task::Poll, - }, + std::{pin::Pin, task::Context, task::Poll}, tokio::signal::unix::{signal, Signal, SignalKind}, }; @@ -208,6 +201,7 @@ impl EnvironmentBuilder { mut self, config: LoggerConfig, logfile_prefix: &str, + file_mode: u32, ) -> ( Self, LoggingLayer, @@ -220,9 +214,6 @@ impl EnvironmentBuilder { _ => logfile_prefix, }; - #[cfg(target_family = "unix")] - let file_mode = if config.is_restricted { 0o600 } else { 0o644 }; - let file_logging_layer = match config.path { None => { eprintln!("No logfile path provided, logging to file is disabled"); @@ -239,7 +230,8 @@ impl EnvironmentBuilder { .max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| { eprintln!("Failed to convert max_log_number to u64: {}", e); 10 - })); + })) + .file_mode(file_mode); if config.compression { appender = appender.compression(Compression::Gzip); @@ -247,9 +239,6 @@ impl EnvironmentBuilder { match appender.build() { Ok(file_appender) => { - #[cfg(target_family = "unix")] - set_logfile_permissions(&path, filename_prefix, file_mode); - let (writer, guard) = tracing_appender::non_blocking(file_appender); Some(LoggingLayer::new( writer, @@ -543,37 +532,3 @@ impl Future for SignalFuture { } } } - -#[cfg(target_family = "unix")] -fn set_logfile_permissions(log_dir: &Path, filename_prefix: &str, file_mode: u32) { - let newest = read_dir(log_dir) - .ok() - .into_iter() - .flat_map(|entries| entries.filter_map(Result::ok)) - .filter_map(|entry| { - 
let path = entry.path(); - let fname = path.file_name()?.to_string_lossy(); - if path.is_file() && fname.starts_with(filename_prefix) && fname.ends_with(".log") { - let modified = entry.metadata().ok()?.modified().ok()?; - Some((path, modified)) - } else { - None - } - }) - .max_by_key(|(_path, mtime)| *mtime); - - match newest { - Some((file, _mtime)) => { - if let Err(e) = set_permissions(&file, Permissions::from_mode(file_mode)) { - eprintln!("Failed to set permissions on {}: {}", file.display(), e); - } - } - None => { - eprintln!( - "Couldn't find a newly created logfile in {} matching prefix \"{}\".", - log_dir.display(), - filename_prefix - ); - } - } -} diff --git a/lighthouse/environment/src/tracing_common.rs b/lighthouse/environment/src/tracing_common.rs index dd9fe45cad..d78eb0d85a 100644 --- a/lighthouse/environment/src/tracing_common.rs +++ b/lighthouse/environment/src/tracing_common.rs @@ -33,11 +33,21 @@ pub fn construct_logger( let subcommand_name = matches.subcommand_name(); let logfile_prefix = subcommand_name.unwrap_or("lighthouse"); + let file_mode = if logger_config.is_restricted { + 0o600 + } else { + 0o644 + }; + let (builder, stdout_logging_layer, file_logging_layer, sse_logging_layer_opt) = - environment_builder.init_tracing(logger_config.clone(), logfile_prefix); + environment_builder.init_tracing(logger_config.clone(), logfile_prefix, file_mode); let libp2p_discv5_layer = if let Some(subcommand_name) = subcommand_name { - if subcommand_name == "beacon_node" || subcommand_name == "boot_node" { + if subcommand_name == "beacon_node" + || subcommand_name == "boot_node" + || subcommand_name == "basic-sim" + || subcommand_name == "fallback-sim" + { if logger_config.max_log_size == 0 || logger_config.max_log_number == 0 { // User has explicitly disabled logging to file. 
None @@ -45,6 +55,7 @@ pub fn construct_logger( create_libp2p_discv5_tracing_layer( logger_config.path.clone(), logger_config.max_log_size, + file_mode, ) } } else { diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 159c89badb..9d9844c4c4 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -83,3 +83,7 @@ The script comes with some CLI options, which can be viewed with `./start_local_ ```bash ./start_local_testnet.sh -b false ``` + +## Further reading about Kurtosis + +You may refer to [this article](https://ethpandaops.io/posts/kurtosis-deep-dive/) for information about Kurtosis. \ No newline at end of file diff --git a/scripts/mdlint.sh b/scripts/mdlint.sh index 5274f108d2..55d8d1f969 100755 --- a/scripts/mdlint.sh +++ b/scripts/mdlint.sh @@ -14,10 +14,10 @@ if [[ $exit_code == 0 ]]; then echo "All markdown files are properly formatted." exit 0 elif [[ $exit_code == 1 ]]; then - echo "Exiting with errors. Run 'make mdlint' locally and commit the changes. Note that not all errors can be fixed automatically, if there are still errors after running 'make mdlint', look for the errors and fix manually." + echo "Exiting with errors. Run 'make mdlint' locally and commit the changes. Note that not all errors can be fixed automatically, if there are still errors after running 'make mdlint', look for the errors and fix manually." docker run --rm -v ./book:/workdir ghcr.io/igorshubovych/markdownlint-cli:latest '**/*.md' --ignore node_modules --fix exit 1 else echo "Exiting with exit code >1. Check for the error logs and fix them accordingly." 
exit 1 -fi \ No newline at end of file +fi diff --git a/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs b/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs index de281d906c..b2afc047c5 100644 --- a/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs +++ b/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs @@ -3,8 +3,7 @@ use decode::ssz_decode_light_client_update; use serde::Deserialize; use types::{LightClientUpdate, Slot}; -#[derive(Debug, Clone, Deserialize)] -#[serde(deny_unknown_fields)] +#[derive(Debug, Clone)] pub struct LightClientVerifyIsBetterUpdate { light_client_updates: Vec>, } diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 711974dd43..b458b85fdd 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -22,7 +22,7 @@ pub struct MerkleProof { #[derive(Debug)] pub enum GenericMerkleProofValidity { - BeaconState(BeaconStateMerkleProofValidity), + BeaconState(Box>), BeaconBlockBody(Box>), } @@ -47,6 +47,7 @@ impl LoadCase for GenericMerkleProofValidity { if suite_name == "BeaconState" { BeaconStateMerkleProofValidity::load_from_dir(path, fork_name) + .map(Box::new) .map(GenericMerkleProofValidity::BeaconState) } else if suite_name == "BeaconBlockBody" { BeaconBlockBodyMerkleProofValidity::load_from_dir(path, fork_name) diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 55c42eb9d3..07d8d98f1d 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -3,6 +3,9 @@ name = "execution_engine_integration" version = "0.1.0" edition = { workspace = true } +[features] +portable = ["types/portable"] + [dependencies] async-channel = { workspace = true } deposit_contract = { workspace = true } @@ -23,6 +26,3 @@ tempfile = { workspace = 
true } tokio = { workspace = true } types = { workspace = true } unused_port = { workspace = true } - -[features] -portable = ["types/portable"] diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 12b0afcc75..cf0d03c24f 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -20,5 +20,6 @@ rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } serde_json = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } tracing-subscriber = { workspace = true } types = { workspace = true } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index f27fc7f875..1c27ca7792 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -17,10 +17,11 @@ use std::time::Duration; use environment::tracing_common; use tracing_subscriber::prelude::*; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; use logging::build_workspace_filter; use tokio::time::sleep; +use tracing::Level; use types::{Epoch, EthSpec, MinimalEthSpec}; const END_EPOCH: u64 = 16; @@ -36,12 +37,13 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = #[allow(clippy::large_stack_frames)] pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { - let node_count = matches + let (_name, subcommand_matches) = matches.subcommand().expect("subcommand"); + let node_count = subcommand_matches .get_one::("nodes") .expect("missing nodes default") .parse::() .expect("missing nodes default"); - let proposer_nodes = matches + let proposer_nodes = subcommand_matches .get_one::("proposer-nodes") .unwrap_or(&String::from("0")) .parse::() @@ -49,23 +51,25 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { // extra beacon node added with delay let extra_nodes: usize = 1; println!("PROPOSER-NODES: {}", proposer_nodes); - let 
validators_per_node = matches + let validators_per_node = subcommand_matches .get_one::("validators-per-node") .expect("missing validators-per-node default") .parse::() .expect("missing validators-per-node default"); - let speed_up_factor = matches + let speed_up_factor = subcommand_matches .get_one::("speed-up-factor") .expect("missing speed-up-factor default") .parse::() .expect("missing speed-up-factor default"); - let log_level = matches + let log_level = subcommand_matches .get_one::("debug-level") .expect("missing debug-level"); - let continue_after_checks = matches.get_flag("continue-after-checks"); - let log_dir = matches.get_one::("log-dir").map(PathBuf::from); - let disable_stdout_logging = matches.get_flag("disable-stdout-logging"); + let continue_after_checks = subcommand_matches.get_flag("continue-after-checks"); + let log_dir = subcommand_matches + .get_one::("log-dir") + .map(PathBuf::from); + let disable_stdout_logging = subcommand_matches.get_flag("disable-stdout-logging"); println!("Basic Simulator:"); println!(" nodes: {}", node_count); @@ -98,7 +102,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { stdout_logging_layer, file_logging_layer, _sse_logging_layer_opt, - _libp2p_discv5_layer, + libp2p_discv5_layer, ) = tracing_common::construct_logger( LoggerConfig { path: log_dir, @@ -138,6 +142,17 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { .boxed(), ); } + if let Some(libp2p_discv5_layer) = libp2p_discv5_layer { + logging_layers.push( + libp2p_discv5_layer + .with_filter( + EnvFilter::builder() + .with_default_directive(Level::DEBUG.into()) + .from_env_lossy(), + ) + .boxed(), + ); + } if let Err(e) = tracing_subscriber::registry() .with(logging_layers) diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index cd0e2e726e..1b2d4024d1 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -97,7 +97,7 @@ async fn verify_validator_count( let vc = 
remote_node .get_debug_beacon_states::(StateId::Head) .await - .map(|body| body.unwrap().data) + .map(|body| body.unwrap().into_data()) .map_err(|e| format!("Get state root via http failed: {:?}", e))? .validators() .len(); @@ -197,7 +197,7 @@ pub async fn verify_full_sync_aggregates_up_to( slot ) }) - .data + .data() .message() .body() .sync_aggregate() @@ -235,7 +235,7 @@ pub async fn verify_transition_block_finalized( let execution_block_hash: ExecutionBlockHash = remote_node .get_beacon_blocks::(BlockId::Finalized) .await - .map(|body| body.unwrap().data) + .map(|body| body.unwrap().into_data()) .map_err(|e| format!("Get state root via http failed: {:?}", e))? .message() .execution_payload() @@ -308,7 +308,7 @@ pub(crate) async fn verify_light_client_updates( .await .map_err(|e| format!("Error while getting light client updates: {:?}", e))? .ok_or(format!("Light client optimistic update not found {slot:?}"))? - .data + .data() .signature_slot(); let signature_slot_distance = slot - signature_slot; if signature_slot_distance > light_client_update_slot_tolerance { @@ -337,7 +337,7 @@ pub(crate) async fn verify_light_client_updates( .await .map_err(|e| format!("Error while getting light client updates: {:?}", e))? .ok_or(format!("Light client finality update not found {slot:?}"))? - .data + .data() .signature_slot(); let signature_slot_distance = slot - signature_slot; if signature_slot_distance > light_client_update_slot_tolerance { @@ -385,7 +385,7 @@ pub async fn ensure_node_synced_up_to_slot( .ok() .flatten() .ok_or(format!("No head block exists on node {node_index}"))? - .data; + .into_data(); // Check the head block is synced with the rest of the network. if head.slot() >= upto_slot { @@ -422,7 +422,7 @@ pub async fn verify_full_blob_production_up_to( // the `verify_full_block_production_up_to` function. 
if block.is_some() { remote_node - .get_blobs::(BlockId::Slot(Slot::new(slot)), None) + .get_blobs::(BlockId::Slot(Slot::new(slot)), None, &E::default_spec()) .await .map_err(|e| format!("Failed to get blobs at slot {slot:?}: {e:?}"))? .ok_or_else(|| format!("No blobs available at slot {slot:?}"))?; diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index b77efbfeae..2d0cacd941 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -5,6 +5,7 @@ use clap::ArgMatches; use crate::retry::with_retry; use environment::tracing_common; use futures::prelude::*; +use logging::build_workspace_filter; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, testing_validator_config, ValidatorFiles, @@ -15,8 +16,9 @@ use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; +use tracing::Level; use tracing_subscriber::prelude::*; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; use types::{Epoch, EthSpec, MinimalEthSpec}; const END_EPOCH: u64 = 16; const GENESIS_DELAY: u64 = 32; @@ -39,40 +41,43 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { - let vc_count = matches + let (_name, subcommand_matches) = matches.subcommand().expect("subcommand"); + let vc_count = subcommand_matches .get_one::("vc-count") .expect("missing vc-count default") .parse::() .expect("missing vc-count default"); - let validators_per_vc = matches + let validators_per_vc = subcommand_matches .get_one::("validators-per-vc") .expect("missing validators-per-vc default") .parse::() .expect("missing validators-per-vc default"); - let bns_per_vc = matches + let bns_per_vc = subcommand_matches .get_one::("bns-per-vc") .expect("missing bns-per-vc 
default") .parse::() .expect("missing bns-per-vc default"); assert!(bns_per_vc > 1); - let speed_up_factor = matches + let speed_up_factor = subcommand_matches .get_one::("speed-up-factor") .expect("missing speed-up-factor default") .parse::() .expect("missing speed-up-factor default"); - let log_level = matches + let log_level = subcommand_matches .get_one::("debug-level") .expect("missing debug-level default"); - let continue_after_checks = matches.get_flag("continue-after-checks"); + let continue_after_checks = subcommand_matches.get_flag("continue-after-checks"); - let log_dir = matches.get_one::("log-dir").map(PathBuf::from); + let log_dir = subcommand_matches + .get_one::("log-dir") + .map(PathBuf::from); - let disable_stdout_logging = matches.get_flag("disable-stdout-logging"); + let disable_stdout_logging = subcommand_matches.get_flag("disable-stdout-logging"); println!("Fallback Simulator:"); println!(" vc-count: {}", vc_count); @@ -104,7 +109,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { stdout_logging_layer, file_logging_layer, _sse_logging_layer_opt, - _libp2p_discv5_layer, + libp2p_discv5_layer, ) = tracing_common::construct_logger( LoggerConfig { path: log_dir, @@ -126,11 +131,13 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { EnvironmentBuilder::minimal(), ); + let workspace_filter = build_workspace_filter()?; let mut logging_layers = vec![]; if !disable_stdout_logging { logging_layers.push( stdout_logging_layer .with_filter(logger_config.debug_level) + .with_filter(workspace_filter.clone()) .boxed(), ); } @@ -138,6 +145,18 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { logging_layers.push( file_logging_layer .with_filter(logger_config.logfile_debug_level) + .with_filter(workspace_filter) + .boxed(), + ); + } + if let Some(libp2p_discv5_layer) = libp2p_discv5_layer { + logging_layers.push( + libp2p_discv5_layer + .with_filter( + EnvFilter::builder() + 
.with_default_directive(Level::DEBUG.into()) + .from_env_lossy(), + ) .boxed(), ); } diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index a259ac1133..1cc4a1779b 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -29,15 +29,15 @@ fn main() { Builder::from_env(Env::default()).init(); let matches = cli_app().get_matches(); - match matches.subcommand() { - Some(("basic-sim", matches)) => match basic_sim::run_basic_sim(matches) { + match matches.subcommand_name() { + Some("basic-sim") => match basic_sim::run_basic_sim(&matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); std::process::exit(1) } }, - Some(("fallback-sim", matches)) => match fallback_sim::run_fallback_sim(matches) { + Some("fallback-sim") => match fallback_sim::run_fallback_sim(&matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 7c29715346..66376f0a51 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -3,6 +3,9 @@ name = "state_transition_vectors" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } + +[features] +portable = ["beacon_chain/portable"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -11,6 +14,3 @@ ethereum_ssz = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } types = { workspace = true } - -[features] -portable = ["beacon_chain/portable"] diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index f68fa56e16..b4637b4030 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -10,6 +10,7 @@ edition = { workspace = true } 
account_utils = { workspace = true } async-channel = { workspace = true } environment = { workspace = true } +eth2 = { workspace = true } eth2_keystore = { workspace = true } eth2_network_config = { workspace = true } futures = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 8678eff0ee..4bc0f62346 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -20,6 +20,7 @@ mod tests { use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, }; + use eth2::types::FullBlockContents; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; use initialized_validators::{ @@ -45,7 +46,9 @@ mod tests { use tokio::time::sleep; use types::{attestation::AttestationBase, *}; use url::Url; - use validator_store::{Error as ValidatorStoreError, SignedBlock, ValidatorStore}; + use validator_store::{ + Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore, + }; /// If the we are unable to reach the Web3Signer HTTP API within this time out then we will /// assume it failed to start. 
@@ -595,8 +598,9 @@ mod tests { async move { let block = BeaconBlock::::Base(BeaconBlockBase::empty(&spec)); let block_slot = block.slot(); + let unsigned_block = UnsignedBlock::Full(FullBlockContents::Block(block)); validator_store - .sign_block(pubkey, block.into(), block_slot) + .sign_block(pubkey, unsigned_block, block_slot) .await .unwrap() } @@ -665,12 +669,10 @@ mod tests { async move { let mut altair_block = BeaconBlockAltair::empty(&spec); altair_block.slot = altair_fork_slot; + let unsigned_block = + UnsignedBlock::Full(FullBlockContents::Block(altair_block.into())); validator_store - .sign_block( - pubkey, - BeaconBlock::::Altair(altair_block).into(), - altair_fork_slot, - ) + .sign_block(pubkey, unsigned_block, altair_fork_slot) .await .unwrap() } @@ -752,12 +754,10 @@ mod tests { async move { let mut bellatrix_block = BeaconBlockBellatrix::empty(&spec); bellatrix_block.slot = bellatrix_fork_slot; + let unsigned_block = + UnsignedBlock::Full(FullBlockContents::Block(bellatrix_block.into())); validator_store - .sign_block( - pubkey, - BeaconBlock::::Bellatrix(bellatrix_block).into(), - bellatrix_fork_slot, - ) + .sign_block(pubkey, unsigned_block, bellatrix_fork_slot) .await .unwrap() } @@ -876,8 +876,9 @@ mod tests { .assert_signatures_match("first_block", |pubkey, validator_store| async move { let block = first_block(); let slot = block.slot(); + let unsigned_block = UnsignedBlock::Full(FullBlockContents::Block(block)); validator_store - .sign_block(pubkey, block.into(), slot) + .sign_block(pubkey, unsigned_block, slot) .await .unwrap() }) @@ -887,8 +888,9 @@ mod tests { move |pubkey, validator_store| async move { let block = double_vote_block(); let slot = block.slot(); + let unsigned_block = UnsignedBlock::Full(FullBlockContents::Block(block)); validator_store - .sign_block(pubkey, block.into(), slot) + .sign_block(pubkey, unsigned_block, slot) .await .map(|_| ()) }, diff --git a/validator_client/beacon_node_fallback/src/lib.rs 
b/validator_client/beacon_node_fallback/src/lib.rs index 8d022f8e75..e11cc97e79 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -482,12 +482,26 @@ impl BeaconNodeFallback { for (result, node) in results { if let Err(e) = result { - if *e != CandidateError::PreGenesis { - warn!( - error = ?e, - endpoint = %node, - "A connected beacon node errored during routine health check" - ); + match e { + // Avoid spamming warns before genesis. + CandidateError::PreGenesis => {} + // Uninitialized *should* only occur during start-up before the + // slot clock has been initialized. + // Seeing this log in any other circumstance would indicate a serious bug. + CandidateError::Uninitialized => { + debug!( + error = ?e, + endpoint = %node, + "A connected beacon node is uninitialized" + ); + } + _ => { + warn!( + error = ?e, + endpoint = %node, + "A connected beacon node errored during routine health check" + ); + } } } } diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index d07f95f11c..2cb6ba435e 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -1,5 +1,6 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; use doppelganger_service::DoppelgangerService; +use eth2::types::PublishBlockRequest; use initialized_validators::InitializedValidators; use logging::crit; use parking_lot::{Mutex, RwLock}; @@ -733,14 +734,18 @@ impl ValidatorStore for LighthouseValidatorS current_slot: Slot, ) -> Result, Error> { match block { - UnsignedBlock::Full(block) => self - .sign_abstract_block(validator_pubkey, block, current_slot) - .await - .map(SignedBlock::Full), + UnsignedBlock::Full(block) => { + let (block, blobs) = block.deconstruct(); + self.sign_abstract_block(validator_pubkey, block, current_slot) + .await + .map(|block| { + 
SignedBlock::Full(PublishBlockRequest::new(Arc::new(block), blobs)) + }) + } UnsignedBlock::Blinded(block) => self .sign_abstract_block(validator_pubkey, block, current_slot) .await - .map(SignedBlock::Blinded), + .map(|block| SignedBlock::Blinded(Arc::new(block))), } } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 88e6dd794d..3860af514d 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -5,9 +5,9 @@ authors = ["Michael Sproul ", "pscott AttestationService BlockService { ) -> Result<(), BlockError> { let signing_timer = validator_metrics::start_timer(&validator_metrics::BLOCK_SIGNING_TIMES); - let (block, maybe_blobs) = match unsigned_block { - UnsignedBlock::Full(block_contents) => { - let (block, maybe_blobs) = block_contents.deconstruct(); - (block.into(), maybe_blobs) - } - UnsignedBlock::Blinded(block) => (block.into(), None), - }; - let res = self .validator_store - .sign_block(*validator_pubkey, block, slot) - .await - .map(|block| match block { - validator_store::SignedBlock::Full(block) => { - SignedBlock::Full(PublishBlockRequest::new(Arc::new(block), maybe_blobs)) - } - validator_store::SignedBlock::Blinded(block) => { - SignedBlock::Blinded(Arc::new(block)) - } - }); + .sign_block(*validator_pubkey, unsigned_block, slot) + .await; let signed_block = match res { Ok(block) => block, @@ -398,12 +378,13 @@ impl BlockService { }) .await?; + let metadata = BlockMetadata::from(&signed_block); info!( - block_type = ?signed_block.block_type(), - deposits = signed_block.num_deposits(), - attestations = signed_block.num_attestations(), + block_type = ?metadata.block_type, + deposits = metadata.num_deposits, + attestations = metadata.num_attestations, graffiti = ?graffiti.map(|g| g.as_utf8_lossy()), - slot = signed_block.slot().as_u64(), + slot = metadata.slot.as_u64(), "Successfully published block" ); Ok(()) @@ -508,7 +489,6 @@ 
impl BlockService { signed_block: &SignedBlock, beacon_node: BeaconNodeHttpClient, ) -> Result<(), BlockError> { - let slot = signed_block.slot(); match signed_block { SignedBlock::Full(signed_block) => { let _post_timer = validator_metrics::start_timer_vec( @@ -518,7 +498,9 @@ impl BlockService { beacon_node .post_beacon_blocks_v2_ssz(signed_block, None) .await - .or_else(|e| handle_block_post_error(e, slot))? + .or_else(|e| { + handle_block_post_error(e, signed_block.signed_block().message().slot()) + })? } SignedBlock::Blinded(signed_block) => { let _post_timer = validator_metrics::start_timer_vec( @@ -528,7 +510,7 @@ impl BlockService { beacon_node .post_beacon_blinded_blocks_v2_ssz(signed_block, None) .await - .or_else(|e| handle_block_post_error(e, slot))? + .or_else(|e| handle_block_post_error(e, signed_block.message().slot()))? } } Ok::<_, BlockError>(()) @@ -557,13 +539,17 @@ impl BlockService { )) })?; - let unsigned_block = match block_response.data { - eth2::types::ProduceBlockV3Response::Full(block) => UnsignedBlock::Full(block), - eth2::types::ProduceBlockV3Response::Blinded(block) => UnsignedBlock::Blinded(block), + let (block_proposer, unsigned_block) = match block_response.data { + eth2::types::ProduceBlockV3Response::Full(block) => { + (block.block().proposer_index(), UnsignedBlock::Full(block)) + } + eth2::types::ProduceBlockV3Response::Blinded(block) => { + (block.proposer_index(), UnsignedBlock::Blinded(block)) + } }; info!(slot = slot.as_u64(), "Received unsigned block"); - if proposer_index != Some(unsigned_block.proposer_index()) { + if proposer_index != Some(block_proposer) { return Err(BlockError::Recoverable( "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), )); @@ -573,49 +559,30 @@ impl BlockService { } } -pub enum UnsignedBlock { - Full(FullBlockContents), - Blinded(BlindedBeaconBlock), +/// Wrapper for values we want to log about a block we signed, for easy extraction from the possible +/// variants. 
+struct BlockMetadata { + block_type: BlockType, + slot: Slot, + num_deposits: usize, + num_attestations: usize, } -impl UnsignedBlock { - pub fn proposer_index(&self) -> u64 { - match self { - UnsignedBlock::Full(block) => block.block().proposer_index(), - UnsignedBlock::Blinded(block) => block.proposer_index(), - } - } -} - -#[derive(Debug)] -pub enum SignedBlock { - Full(PublishBlockRequest), - Blinded(Arc>), -} - -impl SignedBlock { - pub fn block_type(&self) -> BlockType { - match self { - SignedBlock::Full(_) => BlockType::Full, - SignedBlock::Blinded(_) => BlockType::Blinded, - } - } - pub fn slot(&self) -> Slot { - match self { - SignedBlock::Full(block) => block.signed_block().message().slot(), - SignedBlock::Blinded(block) => block.message().slot(), - } - } - pub fn num_deposits(&self) -> usize { - match self { - SignedBlock::Full(block) => block.signed_block().message().body().deposits().len(), - SignedBlock::Blinded(block) => block.message().body().deposits().len(), - } - } - pub fn num_attestations(&self) -> usize { - match self { - SignedBlock::Full(block) => block.signed_block().message().body().attestations_len(), - SignedBlock::Blinded(block) => block.message().body().attestations_len(), +impl From<&SignedBlock> for BlockMetadata { + fn from(value: &SignedBlock) -> Self { + match value { + SignedBlock::Full(block) => BlockMetadata { + block_type: BlockType::Full, + slot: block.signed_block().message().slot(), + num_deposits: block.signed_block().message().body().deposits().len(), + num_attestations: block.signed_block().message().body().attestations_len(), + }, + SignedBlock::Blinded(block) => BlockMetadata { + block_type: BlockType::Blinded, + slot: block.message().slot(), + num_deposits: block.message().body().deposits().len(), + num_attestations: block.message().body().attestations_len(), + }, } } } diff --git a/validator_client/validator_store/Cargo.toml b/validator_client/validator_store/Cargo.toml index 91df9dc3ab..8c5451b2d0 100644 --- 
a/validator_client/validator_store/Cargo.toml +++ b/validator_client/validator_store/Cargo.toml @@ -5,5 +5,6 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] +eth2 = { workspace = true } slashing_protection = { workspace = true } types = { workspace = true } diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index f4ef941676..6fd2e27064 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -1,12 +1,13 @@ +use eth2::types::{FullBlockContents, PublishBlockRequest}; use slashing_protection::NotSafe; use std::fmt::Debug; use std::future::Future; +use std::sync::Arc; use types::{ - Address, Attestation, AttestationError, BeaconBlock, BlindedBeaconBlock, Epoch, EthSpec, - Graffiti, Hash256, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, - SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, SyncCommitteeMessage, - SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + Address, Attestation, AttestationError, BlindedBeaconBlock, Epoch, EthSpec, Graffiti, Hash256, + PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBlindedBeaconBlock, + SignedContributionAndProof, SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, + SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; #[derive(Debug, PartialEq, Clone)] @@ -171,40 +172,16 @@ pub trait ValidatorStore: Send + Sync { fn proposal_data(&self, pubkey: &PublicKeyBytes) -> Option; } -#[derive(Clone, Debug, PartialEq)] +#[derive(Debug)] pub enum UnsignedBlock { - Full(BeaconBlock), + Full(FullBlockContents), Blinded(BlindedBeaconBlock), } -impl From> for UnsignedBlock { - fn from(block: BeaconBlock) -> Self { - UnsignedBlock::Full(block) - } -} - -impl From> for UnsignedBlock { - fn from(block: BlindedBeaconBlock) 
-> Self { - UnsignedBlock::Blinded(block) - } -} - #[derive(Clone, Debug, PartialEq)] pub enum SignedBlock { - Full(SignedBeaconBlock), - Blinded(SignedBlindedBeaconBlock), -} - -impl From> for SignedBlock { - fn from(block: SignedBeaconBlock) -> Self { - SignedBlock::Full(block) - } -} - -impl From> for SignedBlock { - fn from(block: SignedBlindedBeaconBlock) -> Self { - SignedBlock::Blinded(block) - } + Full(PublishBlockRequest), + Blinded(Arc>), } /// A wrapper around `PublicKeyBytes` which encodes information about the status of a validator diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index b40fe61a82..07578033cd 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -84,7 +84,6 @@ pub fn cli_app() -> Command { .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to create, regardless of how many already exist") - .conflicts_with("at-most") .action(ArgAction::Set) .display_order(0), ) diff --git a/wordlist.txt b/wordlist.txt index 9feb07b67b..682fae0261 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -46,6 +46,7 @@ Goerli Grafana Holesky Homebrew +Hoodi Infura IPs IPv