diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c3119db378..f2ccaf438a 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -5,6 +5,7 @@ on: branches: - unstable - stable + - capella tags: - v* @@ -34,6 +35,11 @@ jobs: run: | echo "VERSION=latest" >> $GITHUB_ENV echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV + - name: Extract version (if capella) + if: github.event.ref == 'refs/heads/capella' + run: | + echo "VERSION=capella" >> $GITHUB_ENV + echo "VERSION_SUFFIX=" >> $GITHUB_ENV - name: Extract version (if tagged release) if: startsWith(github.event.ref, 'refs/tags') run: | diff --git a/Cargo.lock b/Cargo.lock index 12b70f58ee..1ce987816c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -211,9 +211,8 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "arbitrary" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e90af4de65aa7b293ef2d09daff88501eb254f58edde2e1ac02c82d873eadad" +version = "1.2.2" +source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba" dependencies = [ "derive_arbitrary", ] @@ -545,7 +544,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client?rev=7d5d8dad1648f771573f42585ad8080a45b05689#7d5d8dad1648f771573f42585ad8080a45b05689" +source = "git+https://github.com/ralexstokes/beacon-api-client#53690a711e33614d59d4d44fb09762b4699e2a4e" dependencies = [ "ethereum-consensus", "http", @@ -607,7 +606,7 @@ dependencies = [ "state_processing", "store", "strum", - "superstruct", + "superstruct 0.5.0", "task_executor", "tempfile", "tokio", @@ -1057,8 +1056,10 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lighthouse_network", + "logging", "monitoring_api", "network", + 
"operation_pool", "parking_lot 0.12.1", "sensitive_url", "serde", @@ -1068,6 +1069,7 @@ dependencies = [ "slasher_service", "slog", "slot_clock", + "state_processing", "store", "task_executor", "time 0.3.17", @@ -1678,10 +1680,10 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8beee4701e2e229e8098bbdecdca12449bc3e322f137d269182fa1291e20bd00" +version = "1.2.2" +source = "git+https://github.com/michaelsproul/arbitrary?rev=a572fd8743012a4f1ada5ee5968b1b3619c427ba#a572fd8743012a4f1ada5ee5968b1b3619c427ba" dependencies = [ + "darling 0.14.3", "proc-macro2", "quote", "syn", @@ -2066,7 +2068,7 @@ dependencies = [ "slog", "sloggers", "state_processing", - "superstruct", + "superstruct 0.5.0", "task_executor", "tokio", "tree_hash", @@ -2219,9 +2221,10 @@ dependencies = [ [[package]] name = "eth2_ssz_derive" -version = "0.3.0" +version = "0.3.1" dependencies = [ "darling 0.13.4", + "eth2_ssz", "proc-macro2", "quote", "syn", @@ -2332,7 +2335,7 @@ dependencies = [ [[package]] name = "ethereum-consensus" version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=a8110af76d97bf2bf27fb987a671808fcbdf1834#a8110af76d97bf2bf27fb987a671808fcbdf1834" +source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" dependencies = [ "async-stream", "blst", @@ -2341,6 +2344,7 @@ dependencies = [ "hex", "integer-sqrt", "multiaddr 0.14.0", + "multihash", "rand 0.8.5", "serde", "serde_json", @@ -2499,7 +2503,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lru 0.7.8", - "mev-build-rs", + "mev-rs", "parking_lot 0.12.1", "rand 0.8.5", "reqwest", @@ -2511,6 +2515,7 @@ dependencies = [ "ssz-rs", "state_processing", "strum", + "superstruct 0.6.0", "task_executor", "tempfile", "tokio", @@ -3213,6 +3218,7 @@ dependencies = [ "eth2_ssz", "execution_layer", "futures", 
+ "genesis", "hex", "lazy_static", "lighthouse_metrics", @@ -3221,6 +3227,7 @@ dependencies = [ "logging", "lru 0.7.8", "network", + "operation_pool", "parking_lot 0.12.1", "proto_array", "safe_arith", @@ -4399,13 +4406,15 @@ dependencies = [ "smallvec", "snap", "strum", - "superstruct", + "superstruct 0.5.0", "task_executor", "tempfile", "tiny-keccak", "tokio", "tokio-io-timeout", "tokio-util 0.6.10", + "tree_hash", + "tree_hash_derive", "types", "unsigned-varint 0.6.0", "unused_port", @@ -4663,18 +4672,19 @@ dependencies = [ ] [[package]] -name = "mev-build-rs" +name = "mev-rs" version = "0.2.1" -source = "git+https://github.com/ralexstokes/mev-rs?rev=6c99b0fbdc0427b1625469d2e575303ce08de5b8#6c99b0fbdc0427b1625469d2e575303ce08de5b8" +source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889" dependencies = [ "async-trait", "axum", "beacon-api-client", "ethereum-consensus", + "hyper", "serde", - "serde_json", "ssz-rs", "thiserror", + "tokio", "tracing", ] @@ -4997,6 +5007,7 @@ dependencies = [ "lru_cache", "matches", "num_cpus", + "operation_pool", "rand 0.8.5", "rlp", "slog", @@ -5332,6 +5343,7 @@ dependencies = [ "lighthouse_metrics", "maplit", "parking_lot 0.12.1", + "rand 0.8.5", "rayon", "serde", "serde_derive", @@ -6772,6 +6784,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18b20e7752957bbe9661cff4e0bb04d183d0948cdab2ea58cdb9df36a61dfe62" +dependencies = [ + "serde", + "serde_derive", +] + [[package]] name = "serde_array_query" version = "0.1.0" @@ -7277,11 +7299,10 @@ dependencies = [ [[package]] name = "ssz-rs" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +source = 
"git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" dependencies = [ "bitvec 1.0.1", "hex", - "lazy_static", "num-bigint", "serde", "sha2 0.9.9", @@ -7292,7 +7313,7 @@ dependencies = [ [[package]] name = "ssz-rs-derive" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1" dependencies = [ "proc-macro2", "quote", @@ -7451,6 +7472,20 @@ dependencies = [ "syn", ] +[[package]] +name = "superstruct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" +dependencies = [ + "darling 0.13.4", + "itertools", + "proc-macro2", + "quote", + "smallvec", + "syn", +] + [[package]] name = "swap_or_not_shuffle" version = "0.2.0" @@ -8266,6 +8301,7 @@ dependencies = [ "rusqlite", "safe_arith", "serde", + "serde-big-array", "serde_derive", "serde_json", "serde_with", @@ -8273,7 +8309,7 @@ dependencies = [ "slog", "smallvec", "state_processing", - "superstruct", + "superstruct 0.6.0", "swap_or_not_shuffle", "tempfile", "test_random_derive", diff --git a/Cargo.toml b/Cargo.toml index 251da36cbf..46852645eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,6 +100,14 @@ eth2_hashing = { path = "crypto/eth2_hashing" } tree_hash = { path = "consensus/tree_hash" } tree_hash_derive = { path = "consensus/tree_hash_derive" } eth2_serde_utils = { path = "consensus/serde_utils" } +arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" } + +[patch."https://github.com/ralexstokes/mev-rs"] +mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } 
+[patch."https://github.com/ralexstokes/ethereum-consensus"] +ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" } +[patch."https://github.com/ralexstokes/ssz-rs"] +ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" } [profile.maxperf] inherits = "release" diff --git a/Dockerfile b/Dockerfile index 72423b17c6..7a0602a221 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.62.1-bullseye AS builder +FROM rust:1.65.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES diff --git a/Makefile b/Makefile index 85872f016d..05c6c74d51 100644 --- a/Makefile +++ b/Makefile @@ -28,12 +28,15 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,jemalloc # Cargo profile for Cross builds. Default is for local builds, CI uses an override. CROSS_PROFILE ?= release +# List of features to use when running EF tests. +EF_TEST_FEATURES ?= + # Cargo profile for regular builds. PROFILE ?= release # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair merge +FORKS=phase0 altair merge capella # Builds the Lighthouse binary in release (optimized). # @@ -112,9 +115,9 @@ check-benches: # Runs only the ef-test vectors. 
run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt - cargo test --release -p ef_tests --features "ef_tests" - cargo test --release -p ef_tests --features "ef_tests,fake_crypto" - cargo test --release -p ef_tests --features "ef_tests,milagro" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" + cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index dd185ac757..5b85833048 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -33,7 +33,7 @@ slot_clock = { path = "../../common/slot_clock" } eth2_hashing = "0.3.0" eth2_ssz = "0.4.1" eth2_ssz_types = "0.2.2" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" state_processing = { path = "../../consensus/state_processing" } tree_hash = "0.4.1" types = { path = "../../consensus/types" } diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 3f186c37c1..786402c997 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -15,12 +15,12 @@ use store::{ consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, RelativeEpoch, }; -use types::{BeaconBlockRef, BeaconState, BeaconStateError, ExecPayload, Hash256}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Hash256}; type BeaconBlockSubRewardValue = u64; impl BeaconChain { - pub fn compute_beacon_block_reward>( + pub fn compute_beacon_block_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, @@ -97,7 
+97,7 @@ impl BeaconChain { }) } - fn compute_beacon_block_sync_aggregate_reward>( + fn compute_beacon_block_sync_aggregate_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &BeaconState, @@ -111,7 +111,7 @@ impl BeaconChain { } } - fn compute_beacon_block_proposer_slashing_reward>( + fn compute_beacon_block_proposer_slashing_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &BeaconState, @@ -132,7 +132,7 @@ impl BeaconChain { Ok(proposer_slashing_reward) } - fn compute_beacon_block_attester_slashing_reward>( + fn compute_beacon_block_attester_slashing_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &BeaconState, @@ -155,7 +155,7 @@ impl BeaconChain { Ok(attester_slashing_reward) } - fn compute_beacon_block_attestation_reward_base>( + fn compute_beacon_block_attestation_reward_base>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, @@ -173,7 +173,7 @@ impl BeaconChain { Ok(block_attestation_reward) } - fn compute_beacon_block_attestation_reward_altair>( + fn compute_beacon_block_attestation_reward_altair>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &mut BeaconState, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4ec13f8f53..9bcf8a0d6e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -12,6 +12,7 @@ use crate::block_verification::{ signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; +pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; @@ -58,8 +59,10 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{metrics, BeaconChainError, 
BeaconForkChoiceStore, BeaconSnapshot, CachedHead}; use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ - BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, + BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, + PayloadAttributes, PayloadStatus, }; +pub use fork_choice::CountUnrealized; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, @@ -67,7 +70,7 @@ use fork_choice::{ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; -use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; +use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; use parking_lot::{Mutex, RwLock}; use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; @@ -79,8 +82,8 @@ use state_processing::{ common::get_attesting_indices_from_state, per_block_processing, per_block_processing::{ - errors::AttestationValidationError, verify_attestation_for_block_inclusion, - VerifySignatures, + errors::AttestationValidationError, get_expected_withdrawals, + verify_attestation_for_block_inclusion, VerifySignatures, }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, @@ -103,9 +106,6 @@ use types::beacon_state::CloneConfig; use types::consts::merge::INTERVALS_PER_SLOT; use types::*; -pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; -pub use fork_choice::CountUnrealized; - pub type ForkChoiceError = fork_choice::Error; /// Alias to appease clippy. @@ -269,7 +269,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static { } /// Used internally to split block production into discrete functions. 
-struct PartialBeaconBlock { +struct PartialBeaconBlock> { state: BeaconState, slot: Slot, proposer_index: u64, @@ -283,7 +283,8 @@ struct PartialBeaconBlock { deposits: Vec, voluntary_exits: Vec, sync_aggregate: Option>, - prepare_payload_handle: Option>, + prepare_payload_handle: Option>, + bls_to_execution_changes: Vec, } pub type BeaconForkChoice = ForkChoice< @@ -360,6 +361,9 @@ pub struct BeaconChain { /// Maintains a record of which validators we've seen attester slashings for. pub(crate) observed_attester_slashings: Mutex, T::EthSpec>>, + /// Maintains a record of which validators we've seen BLS to execution changes for. + pub(crate) observed_bls_to_execution_changes: + Mutex>, /// The most recently validated light client finality update received on gossip. pub latest_seen_finality_update: Mutex>>, /// The most recently validated light client optimistic update received on gossip. @@ -959,21 +963,22 @@ impl BeaconChain { Some(DatabaseBlock::Blinded(block)) => block, None => return Ok(None), }; + let fork = blinded_block.fork_name(&self.spec)?; // If we only have a blinded block, load the execution payload from the EL. let block_message = blinded_block.message(); - let execution_payload_header = &block_message + let execution_payload_header = block_message .execution_payload() .map_err(|_| Error::BlockVariantLacksExecutionPayload(*block_root))? - .execution_payload_header; + .to_execution_payload_header(); - let exec_block_hash = execution_payload_header.block_hash; + let exec_block_hash = execution_payload_header.block_hash(); let execution_payload = self .execution_layer .as_ref() .ok_or(Error::ExecutionLayerMissing)? - .get_payload_by_block_hash(exec_block_hash) + .get_payload_by_block_hash(exec_block_hash, fork) .await .map_err(|e| { Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e)) @@ -981,9 +986,9 @@ impl BeaconChain { .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; // Verify payload integrity. 
- let header_from_payload = ExecutionPayloadHeader::from(&execution_payload); - if header_from_payload != *execution_payload_header { - for txn in &execution_payload.transactions { + let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref()); + if header_from_payload != execution_payload_header { + for txn in execution_payload.transactions() { debug!( self.log, "Reconstructed txn"; @@ -994,8 +999,8 @@ impl BeaconChain { return Err(Error::InconsistentPayloadReconstructed { slot: blinded_block.slot(), exec_block_hash, - canonical_transactions_root: execution_payload_header.transactions_root, - reconstructed_transactions_root: header_from_payload.transactions_root, + canonical_transactions_root: execution_payload_header.transactions_root(), + reconstructed_transactions_root: header_from_payload.transactions_root(), }); } @@ -2218,6 +2223,79 @@ impl BeaconChain { } } + /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. + pub fn verify_bls_to_execution_change_for_http_api( + &self, + bls_to_execution_change: SignedBlsToExecutionChange, + ) -> Result, Error> { + // Before checking the gossip duplicate filter, check that no prior change is already + // in our op pool. Ignore these messages: do not gossip, do not try to override the pool. + match self + .op_pool + .bls_to_execution_change_in_pool_equals(&bls_to_execution_change) + { + Some(true) => return Ok(ObservationOutcome::AlreadyKnown), + Some(false) => return Err(Error::BlsToExecutionConflictsWithPool), + None => (), + } + + // Use the head state to save advancing to the wall-clock slot unnecessarily. The message is + // signed with respect to the genesis fork version, and the slot check for gossip is applied + // separately. This `Arc` clone of the head is nice and cheap. 
+ let head_snapshot = self.head().snapshot; + let head_state = &head_snapshot.beacon_state; + + Ok(self + .observed_bls_to_execution_changes + .lock() + .verify_and_observe(bls_to_execution_change, head_state, &self.spec)?) + } + + /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. + pub fn verify_bls_to_execution_change_for_gossip( + &self, + bls_to_execution_change: SignedBlsToExecutionChange, + ) -> Result, Error> { + // Ignore BLS to execution changes on gossip prior to Capella. + if !self.current_slot_is_post_capella()? { + return Err(Error::BlsToExecutionPriorToCapella); + } + self.verify_bls_to_execution_change_for_http_api(bls_to_execution_change) + .or_else(|e| { + // On gossip treat conflicts the same as duplicates [IGNORE]. + match e { + Error::BlsToExecutionConflictsWithPool => Ok(ObservationOutcome::AlreadyKnown), + e => Err(e), + } + }) + } + + /// Check if the current slot is greater than or equal to the Capella fork epoch. + pub fn current_slot_is_post_capella(&self) -> Result { + let current_fork = self.spec.fork_name_at_slot::(self.slot()?); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { + Ok(false) + } else { + Ok(true) + } + } + + /// Import a BLS to execution change to the op pool. + /// + /// Return `true` if the change was added to the pool. + pub fn import_bls_to_execution_change( + &self, + bls_to_execution_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, + ) -> bool { + if self.eth1_chain.is_some() { + self.op_pool + .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) + } else { + false + } + } + /// Attempt to obtain sync committee duties from the head. pub fn sync_committee_duties_from_head( &self, @@ -3444,7 +3522,7 @@ impl BeaconChain { /// /// The produced block will not be inherently valid, it must be signed by a block producer. 
/// Block signing is out of the scope of this function and should be done by a separate program. - pub async fn produce_block>( + pub async fn produce_block + 'static>( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -3460,7 +3538,9 @@ impl BeaconChain { } /// Same as `produce_block` but allowing for configuration of RANDAO-verification. - pub async fn produce_block_with_verification>( + pub async fn produce_block_with_verification< + Payload: AbstractExecPayload + 'static, + >( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -3980,7 +4060,7 @@ impl BeaconChain { /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// performing a tree hash in some scenarios. - pub async fn produce_block_on_state>( + pub async fn produce_block_on_state + 'static>( self: &Arc, state: BeaconState, state_root_opt: Option, @@ -4015,16 +4095,20 @@ impl BeaconChain { // // Wait for the execution layer to return an execution payload (if one is required). let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); - let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { - let execution_payload = prepare_payload_handle - .await - .map_err(BlockProductionError::TokioJoin)? - .ok_or(BlockProductionError::ShuttingDown)??; - Some(execution_payload) + let block_contents = if let Some(prepare_payload_handle) = prepare_payload_handle { + Some( + prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? + .ok_or(BlockProductionError::ShuttingDown)??, + ) } else { None }; + //FIXME(sean) waiting for the BN<>EE api for this to stabilize + let kzg_commitments = vec![]; + // Part 3/3 (blocking) // // Perform the final steps of combining all the parts and computing the state root. 
@@ -4034,7 +4118,8 @@ impl BeaconChain { move || { chain.complete_partial_beacon_block( partial_beacon_block, - execution_payload, + block_contents, + kzg_commitments, verification, ) }, @@ -4045,7 +4130,7 @@ impl BeaconChain { .map_err(BlockProductionError::TokioJoin)? } - fn produce_partial_beacon_block>( + fn produce_partial_beacon_block + 'static>( self: &Arc, mut state: BeaconState, state_root_opt: Option, @@ -4105,7 +4190,7 @@ impl BeaconChain { // allows it to run concurrently with things like attestation packing. let prepare_payload_handle = match &state { BeaconState::Base(_) | BeaconState::Altair(_) => None, - BeaconState::Merge(_) => { + BeaconState::Merge(_) | BeaconState::Capella(_) | BeaconState::Eip4844(_) => { let prepare_payload_handle = get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; Some(prepare_payload_handle) @@ -4118,6 +4203,10 @@ impl BeaconChain { let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; + let bls_to_execution_changes = self + .op_pool + .get_bls_to_execution_changes(&state, &self.spec); + // Iterate through the naive aggregation pool and ensure all the attestations from there // are included in the operation pool. let unagg_import_timer = @@ -4276,13 +4365,15 @@ impl BeaconChain { voluntary_exits, sync_aggregate, prepare_payload_handle, + bls_to_execution_changes, }) } - fn complete_partial_beacon_block>( + fn complete_partial_beacon_block>( &self, partial_beacon_block: PartialBeaconBlock, - execution_payload: Option, + block_contents: Option>, + kzg_commitments: Vec, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { let PartialBeaconBlock { @@ -4303,6 +4394,7 @@ impl BeaconChain { // this function. We can assume that the handle has already been consumed in order to // produce said `execution_payload`. 
prepare_payload_handle: _, + bls_to_execution_changes, } = partial_beacon_block; let inner_block = match &state { @@ -4358,8 +4450,60 @@ impl BeaconChain { voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: execution_payload - .ok_or(BlockProductionError::MissingExecutionPayload)?, + execution_payload: block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + }, + }), + BeaconState::Capella(_) => BeaconBlock::Capella(BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + bls_to_execution_changes: bls_to_execution_changes.into(), + }, + }), + BeaconState::Eip4844(_) => BeaconBlock::Eip4844(BeaconBlockEip4844 { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? 
+ .to_payload() + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + bls_to_execution_changes: bls_to_execution_changes.into(), + blob_kzg_commitments: VariableList::from(kzg_commitments), }, }), }; @@ -4614,16 +4758,40 @@ impl BeaconChain { return Ok(()); } + let withdrawals = match self.spec.fork_name_at_slot::(prepare_slot) { + ForkName::Base | ForkName::Altair | ForkName::Merge => None, + ForkName::Capella | ForkName::Eip4844 => { + // We must use the advanced state because balances can change at epoch boundaries + // and balances affect withdrawals. + // FIXME(mark) + // Might implement caching here in the future.. + let prepare_state = self + .state_at_slot(prepare_slot, StateSkipConfig::WithoutStateRoots) + .map_err(|e| { + error!(self.log, "State advance for withdrawals failed"; "error" => ?e); + e + })?; + Some(get_expected_withdrawals(&prepare_state, &self.spec)) + } + } + .transpose() + .map_err(|e| { + error!(self.log, "Error preparing beacon proposer"; "error" => ?e); + e + }) + .map(|withdrawals_opt| withdrawals_opt.map(|w| w.into())) + .map_err(Error::PrepareProposerFailed)?; + let head_root = forkchoice_update_params.head_root; - let payload_attributes = PayloadAttributes { - timestamp: self - .slot_clock + let payload_attributes = PayloadAttributes::new( + self.slot_clock .start_of(prepare_slot) .ok_or(Error::InvalidSlot(prepare_slot))? 
.as_secs(), - prev_randao: pre_payload_attributes.prev_randao, - suggested_fee_recipient: execution_layer.get_suggested_fee_recipient(proposer).await, - }; + pre_payload_attributes.prev_randao, + execution_layer.get_suggested_fee_recipient(proposer).await, + withdrawals, + ); debug!( self.log, @@ -4772,7 +4940,7 @@ impl BeaconChain { { // We are a proposer, check for terminal_pow_block_hash if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp) + .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp()) .await .map_err(Error::ForkchoiceUpdate)? { @@ -4947,7 +5115,7 @@ impl BeaconChain { /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or has /// `ExecutionStatus::Invalid`. - pub fn is_optimistic_or_invalid_block>( + pub fn is_optimistic_or_invalid_block>( &self, block: &SignedBeaconBlock, ) -> Result { @@ -4973,7 +5141,7 @@ impl BeaconChain { /// /// There is a potential race condition when syncing where the block_root of `head_block` could /// be pruned from the fork choice store before being read. 
- pub fn is_optimistic_or_invalid_head_block>( + pub fn is_optimistic_or_invalid_head_block>( &self, head_block: &SignedBeaconBlock, ) -> Result { diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 0b789b8b61..b17613da0d 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, + AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, Slot, }; @@ -260,7 +260,7 @@ where self.time = slot } - fn on_verified_block>( + fn on_verified_block>( &mut self, _block: BeaconBlockRef, block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 8491622cb0..7d89df9829 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,20 +1,20 @@ use serde_derive::Serialize; use std::sync::Arc; use types::{ - beacon_state::CloneConfig, BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, + beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, SignedBeaconBlock, }; /// Represents some block and its associated state. Generally, this will be used for tracking the /// head, justified head and finalized head. #[derive(Clone, Serialize, PartialEq, Debug)] -pub struct BeaconSnapshot = FullPayload> { +pub struct BeaconSnapshot = FullPayload> { pub beacon_block: Arc>, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } -impl> BeaconSnapshot { +impl> BeaconSnapshot { /// Create a new checkpoint. 
pub fn new( beacon_block: Arc>, diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs new file mode 100644 index 0000000000..f792882033 --- /dev/null +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -0,0 +1,136 @@ +use derivative::Derivative; +use slot_clock::SlotClock; + +use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; +use crate::BeaconChainError; +use bls::PublicKey; +use types::{consts::eip4844::BLS_MODULUS, BeaconStateError, BlobsSidecar, Slot}; + +pub enum BlobError { + /// The blob sidecar is from a slot that is later than the current slot (with respect to the + /// gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + /// The blob sidecar is from a slot that is prior to the earliest permissible slot (with + /// respect to the gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + PastSlot { + message_slot: Slot, + earliest_permissible_slot: Slot, + }, + + /// The blob sidecar contains an incorrectly formatted `BLSFieldElement` > `BLS_MODULUS`. + /// + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + BlobOutOfRange { blob_index: usize }, + + /// The blob sidecar contains a KZGCommitment that is not a valid G1 point on + /// the bls curve. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + InvalidKZGCommitment, + /// The proposal signature in invalid. + /// + /// ## Peer scoring + /// + /// The signature on the blob sidecar invalid and the peer is faulty. + ProposalSignatureInvalid, + + /// A blob sidecar for this proposer and slot has already been observed. 
+ /// + /// ## Peer scoring + /// + /// The `proposer` has already proposed a sidecar at this slot. The existing sidecar may or may not + /// be equal to the given sidecar. + RepeatSidecar { proposer: u64, slot: Slot }, + + /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this sync committee message due to an internal error. It's unclear if the + /// sync committee message is valid. + BeaconChainError(BeaconChainError), +} + +impl From for BlobError { + fn from(e: BeaconChainError) -> Self { + BlobError::BeaconChainError(e) + } +} + +impl From for BlobError { + fn from(e: BeaconStateError) -> Self { + BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e)) + } +} + +/// A wrapper around a `BlobsSidecar` that indicates it has been verified w.r.t the corresponding +/// `SignedBeaconBlock`. +#[derive(Derivative)] +#[derivative(Debug(bound = "T: BeaconChainTypes"))] +pub struct VerifiedBlobsSidecar<'a, T: BeaconChainTypes> { + pub blob_sidecar: &'a BlobsSidecar, +} + +impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> { + pub fn verify( + blob_sidecar: &'a BlobsSidecar, + chain: &BeaconChain, + ) -> Result { + let blob_slot = blob_sidecar.beacon_block_slot; + // Do not gossip or process blobs from future or past slots. 
+ let latest_permissible_slot = chain + .slot_clock + .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .ok_or(BeaconChainError::UnableToReadSlot)?; + if blob_slot > latest_permissible_slot { + return Err(BlobError::FutureSlot { + message_slot: latest_permissible_slot, + latest_permissible_slot: blob_slot, + }); + } + + let earliest_permissible_slot = chain + .slot_clock + .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .ok_or(BeaconChainError::UnableToReadSlot)?; + if blob_slot > earliest_permissible_slot { + return Err(BlobError::PastSlot { + message_slot: earliest_permissible_slot, + earliest_permissible_slot: blob_slot, + }); + } + + // Verify that blobs are properly formatted + //TODO: add the check while constructing a Blob type from bytes instead of after + for (i, blob) in blob_sidecar.blobs.iter().enumerate() { + if blob.iter().any(|b| *b >= *BLS_MODULUS) { + return Err(BlobError::BlobOutOfRange { blob_index: i }); + } + } + + // Verify that the KZG proof is a valid G1 point + if PublicKey::deserialize(&blob_sidecar.kzg_aggregate_proof.0).is_err() { + return Err(BlobError::InvalidKZGCommitment); + } + + // TODO: Check that we have not already received a sidecar with a valid signature for this slot. 
+ + Ok(Self { blob_sidecar }) + } +} diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index 3bddd2a521..fd0cfc7e9b 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -5,10 +5,10 @@ use state_processing::{ common::get_attesting_indices_from_state, per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards, }; -use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, EthSpec, Hash256}; impl BeaconChain { - pub fn compute_block_reward>( + pub fn compute_block_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 4f65a05c56..7d5d350108 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -88,6 +88,7 @@ use std::time::Duration; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; +use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, @@ -1185,7 +1186,7 @@ impl ExecutionPendingBlock { .message() .body() .execution_payload() - .map(|full_payload| full_payload.execution_payload.block_hash); + .map(|full_payload| full_payload.block_hash()); // Ensure the block is a candidate for optimistic import. if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? @@ -1850,7 +1851,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( } /// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`. 
-fn get_validator_pubkey_cache( +pub fn get_validator_pubkey_cache( chain: &BeaconChain, ) -> Result>, BlockError> { chain diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 48419d46ed..0bff5aa075 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -800,6 +800,7 @@ where observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), + observed_bls_to_execution_changes: <_>::default(), latest_seen_finality_update: <_>::default(), latest_seen_optimistic_update: <_>::default(), eth1_chain: self.eth1_chain, diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index dd64e02edf..19eddf6026 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -930,8 +930,12 @@ impl BeaconChain { .execution_status .is_optimistic_or_invalid(); - self.op_pool - .prune_all(&new_snapshot.beacon_state, self.epoch()?); + self.op_pool.prune_all( + &new_snapshot.beacon_block, + &new_snapshot.beacon_state, + self.epoch()?, + &self.spec, + ); self.observed_block_producers.write().prune( new_view diff --git a/beacon_node/beacon_chain/src/capella_readiness.rs b/beacon_node/beacon_chain/src/capella_readiness.rs new file mode 100644 index 0000000000..bb729d8999 --- /dev/null +++ b/beacon_node/beacon_chain/src/capella_readiness.rs @@ -0,0 +1,122 @@ +//! Provides tools for checking if a node is ready for the Capella upgrade and following merge +//! transition. + +use crate::{BeaconChain, BeaconChainTypes}; +use execution_layer::http::{ + ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V2, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::time::Duration; +use types::*; + +/// The time before the Capella fork when we will start issuing warnings about preparation. 
+use super::merge_readiness::SECONDS_IN_A_WEEK; +pub const CAPELLA_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; +pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] +pub enum CapellaReadiness { + /// The execution engine is capella-enabled (as far as we can tell) + Ready, + /// We are connected to an execution engine which doesn't support the V2 engine api methods + V2MethodsNotSupported { error: String }, + /// The transition configuration with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration. + ExchangeCapabilitiesFailed { error: String }, + /// The user has not configured an execution endpoint + NoExecutionEndpoint, +} + +impl fmt::Display for CapellaReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + CapellaReadiness::Ready => { + write!(f, "This node appears ready for Capella.") + } + CapellaReadiness::ExchangeCapabilitiesFailed { error } => write!( + f, + "Could not exchange capabilities with the \ + execution endpoint: {}", + error + ), + CapellaReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement post-merge" + ), + CapellaReadiness::V2MethodsNotSupported { error } => write!( + f, + "Execution endpoint does not support Capella methods: {}", + error + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if capella epoch is set and Capella fork has occurred or will + /// occur within `CAPELLA_READINESS_PREPARATION_SECONDS` + pub fn is_time_to_prepare_for_capella(&self, current_slot: Slot) -> bool { + if let Some(capella_epoch) = self.spec.capella_fork_epoch { + let capella_slot = capella_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let capella_readiness_preparation_slots = + CAPELLA_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + // Return 
`true` if Capella has happened or is within the preparation time. + current_slot + capella_readiness_preparation_slots > capella_slot + } else { + // The Capella fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for capella. + pub async fn check_capella_readiness(&self) -> CapellaReadiness { + if let Some(el) = self.execution_layer.as_ref() { + match el + .get_engine_capabilities(Some(Duration::from_secs( + ENGINE_CAPABILITIES_REFRESH_INTERVAL, + ))) + .await + { + Err(e) => { + // The EL was either unreachable or responded with an error + CapellaReadiness::ExchangeCapabilitiesFailed { + error: format!("{:?}", e), + } + } + Ok(capabilities) => { + let mut missing_methods = String::from("Required Methods Unsupported:"); + let mut all_good = true; + if !capabilities.get_payload_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V2); + all_good = false; + } + if !capabilities.forkchoice_updated_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_FORKCHOICE_UPDATED_V2); + all_good = false; + } + if !capabilities.new_payload_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V2); + all_good = false; + } + + if all_good { + CapellaReadiness::Ready + } else { + CapellaReadiness::V2MethodsNotSupported { + error: missing_methods, + } + } + } + } + } else { + CapellaReadiness::NoExecutionEndpoint + } + } +} diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index cce2fbb971..2051a62369 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -91,6 +91,7 @@ impl Default for ChainConfig { count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, prepare_payload_lookahead: Duration::from_secs(4), + // This value isn't actually read except in tests. 
optimistic_finalized_sync: true, } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index e6f44f6654..102b0c06c1 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -17,8 +17,9 @@ use ssz_types::Error as SszTypesError; use state_processing::{ block_signature_verifier::Error as BlockSignatureVerifierError, per_block_processing::errors::{ - AttestationValidationError, AttesterSlashingValidationError, ExitValidationError, - ProposerSlashingValidationError, SyncCommitteeMessageValidationError, + AttestationValidationError, AttesterSlashingValidationError, + BlsExecutionChangeValidationError, ExitValidationError, ProposerSlashingValidationError, + SyncCommitteeMessageValidationError, }, signature_sets::Error as SignatureSetError, state_advance::Error as StateAdvanceError, @@ -69,6 +70,7 @@ pub enum BeaconChainError { ExitValidationError(ExitValidationError), ProposerSlashingValidationError(ProposerSlashingValidationError), AttesterSlashingValidationError(AttesterSlashingValidationError), + BlsExecutionChangeValidationError(BlsExecutionChangeValidationError), StateSkipTooLarge { start_slot: Slot, requested_slot: Slot, @@ -150,7 +152,7 @@ pub enum BeaconChainError { }, AddPayloadLogicError, ExecutionForkChoiceUpdateFailed(execution_layer::Error), - PrepareProposerBlockingFailed(execution_layer::Error), + PrepareProposerFailed(BlockProcessingError), ExecutionForkChoiceUpdateInvalid { status: PayloadStatus, }, @@ -204,6 +206,9 @@ pub enum BeaconChainError { MissingPersistedForkChoice, CommitteePromiseFailed(oneshot_broadcast::Error), MaxCommitteePromises(usize), + BlsToExecutionPriorToCapella, + BlsToExecutionConflictsWithPool, + InconsistentFork(InconsistentFork), ProposerHeadForkChoiceError(fork_choice::Error), } @@ -213,6 +218,7 @@ easy_from_to!(SyncCommitteeMessageValidationError, BeaconChainError); easy_from_to!(ExitValidationError, BeaconChainError); 
easy_from_to!(ProposerSlashingValidationError, BeaconChainError); easy_from_to!(AttesterSlashingValidationError, BeaconChainError); +easy_from_to!(BlsExecutionChangeValidationError, BeaconChainError); easy_from_to!(SszTypesError, BeaconChainError); easy_from_to!(OpPoolError, BeaconChainError); easy_from_to!(NaiveAggregationError, BeaconChainError); @@ -227,6 +233,7 @@ easy_from_to!(ForkChoiceStoreError, BeaconChainError); easy_from_to!(HistoricalBlockError, BeaconChainError); easy_from_to!(StateAdvanceError, BeaconChainError); easy_from_to!(BlockReplayError, BeaconChainError); +easy_from_to!(InconsistentFork, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { @@ -251,6 +258,11 @@ pub enum BlockProductionError { BlockingFailed(execution_layer::Error), TerminalPoWBlockLookupFailed(execution_layer::Error), GetPayloadFailed(execution_layer::Error), + GetBlobsFailed(execution_layer::Error), + BlobPayloadMismatch { + blob_block_hash: ExecutionBlockHash, + payload_block_hash: ExecutionBlockHash, + }, FailedToReadFinalizedBlock(store::Error), MissingFinalizedBlock(Hash256), BlockTooLarge(usize), @@ -259,6 +271,7 @@ pub enum BlockProductionError { MissingExecutionPayload, TokioJoin(tokio::task::JoinError), BeaconChain(BeaconChainError), + InvalidPayloadFork, } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 7435c3a8cc..825538d562 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -12,22 +12,23 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::{BuilderParams, PayloadStatus}; +use execution_layer::{BlockProposalContents, BuilderParams, PayloadAttributes, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as 
ProtoBlock, ExecutionStatus}; use slog::{debug, warn}; use slot_clock::SlotClock; use state_processing::per_block_processing::{ - compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, - partially_verify_execution_payload, + compute_timestamp_at_slot, get_expected_withdrawals, is_execution_enabled, + is_merge_transition_complete, partially_verify_execution_payload, }; use std::sync::Arc; use tokio::task::JoinHandle; use tree_hash::TreeHash; use types::*; -pub type PreparePayloadResult = Result; -pub type PreparePayloadHandle = JoinHandle>>; +pub type PreparePayloadResult = + Result, BlockProductionError>; +pub type PreparePayloadHandle = JoinHandle>>; #[derive(PartialEq)] pub enum AllowOptimisticImport { @@ -68,8 +69,13 @@ impl PayloadNotifier { // where we do not send the block to the EL at all. let block_message = block.message(); let payload = block_message.execution_payload()?; - partially_verify_execution_payload(state, block.slot(), payload, &chain.spec) - .map_err(BlockError::PerBlockProcessingError)?; + partially_verify_execution_payload::<_, FullPayload<_>>( + state, + block.slot(), + payload, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; match notify_execution_layer { NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => { @@ -81,7 +87,7 @@ impl PayloadNotifier { .ok_or(ExecutionPayloadError::NoExecutionConnection)?; if let Err(e) = - execution_layer.verify_payload_block_hash(&payload.execution_payload) + execution_layer.verify_payload_block_hash(payload.execution_payload_ref()) { warn!( chain.log, @@ -140,7 +146,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( .ok_or(ExecutionPayloadError::NoExecutionConnection)?; let new_payload_response = execution_layer - .notify_new_payload(&execution_payload.execution_payload) + .notify_new_payload(&execution_payload.into()) .await; match new_payload_response { @@ -158,7 +164,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( "Invalid 
execution payload"; "validation_error" => ?validation_error, "latest_valid_hash" => ?latest_valid_hash, - "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "execution_block_hash" => ?execution_payload.block_hash(), "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -191,7 +197,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( chain.log, "Invalid execution payload block hash"; "validation_error" => ?validation_error, - "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "execution_block_hash" => ?execution_payload.block_hash(), "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -344,7 +350,7 @@ pub fn validate_execution_payload_for_gossip( } }; - if is_merge_transition_complete || execution_payload != &<_>::default() { + if is_merge_transition_complete || !execution_payload.is_default_with_empty_roots() { let expected_timestamp = chain .slot_clock .start_of(block.slot()) @@ -382,13 +388,13 @@ pub fn validate_execution_payload_for_gossip( /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal pub fn get_execution_payload< T: BeaconChainTypes, - Payload: ExecPayload + Default + Send + 'static, + Payload: AbstractExecPayload + 'static, >( chain: Arc>, state: &BeaconState, proposer_index: u64, builder_params: BuilderParams, -) -> Result, BlockProductionError> { +) -> Result, BlockProductionError> { // Compute all required values from the `state` now to avoid needing to pass it into a spawned // task. 
let spec = &chain.spec; @@ -398,7 +404,15 @@ pub fn get_execution_payload< compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; let latest_execution_payload_header_block_hash = - state.latest_execution_payload_header()?.block_hash; + state.latest_execution_payload_header()?.block_hash(); + let withdrawals = match state { + &BeaconState::Capella(_) | &BeaconState::Eip4844(_) => { + Some(get_expected_withdrawals(state, spec)?.into()) + } + &BeaconState::Merge(_) => None, + // These shouldn't happen but they're here to make the pattern irrefutable + &BeaconState::Base(_) | &BeaconState::Altair(_) => None, + }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The // `join_handle` can be used to await the result of the function. @@ -415,6 +429,7 @@ pub fn get_execution_payload< proposer_index, latest_execution_payload_header_block_hash, builder_params, + withdrawals, ) .await }, @@ -448,13 +463,15 @@ pub async fn prepare_execution_payload( proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, -) -> Result + withdrawals: Option>, +) -> Result, BlockProductionError> where T: BeaconChainTypes, - Payload: ExecPayload + Default, + Payload: AbstractExecPayload, { let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; + let fork = spec.fork_name_at_slot::(builder_params.slot); let execution_layer = chain .execution_layer .as_ref() @@ -468,7 +485,7 @@ where if is_terminal_block_hash_set && !is_activation_epoch_reached { // Use the "empty" payload if there's a terminal block hash, but we haven't reached the // terminal block epoch yet. 
- return Ok(<_>::default()); + return BlockProposalContents::default_at_fork(fork).map_err(Into::into); } let terminal_pow_block_hash = execution_layer @@ -481,7 +498,7 @@ where } else { // If the merge transition hasn't occurred yet and the EL hasn't found the terminal // block, return an "empty" payload. - return Ok(<_>::default()); + return BlockProposalContents::default_at_fork(fork).map_err(Into::into); } } else { latest_execution_payload_header_block_hash @@ -505,21 +522,26 @@ where .await .map_err(BlockProductionError::BeaconChain)?; + let suggested_fee_recipient = execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let payload_attributes = + PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals); + // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // // This future is not executed here, it's up to the caller to await it. - let execution_payload = execution_layer + let block_contents = execution_layer .get_payload::( parent_hash, - timestamp, - random, - proposer_index, + &payload_attributes, forkchoice_update_params, builder_params, + fork, &chain.spec, ) .await .map_err(BlockProductionError::GetPayloadFailed)?; - Ok(execution_payload) + Ok(block_contents) } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 5e75c2a632..6b980eea7f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -7,11 +7,13 @@ mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; mod beacon_snapshot; +pub mod blob_verification; pub mod block_reward; mod block_times_cache; mod block_verification; pub mod builder; pub mod canonical_head; +pub mod capella_readiness; pub mod chain_config; mod early_attester_cache; mod errors; diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs index 4ef2102fd5..c66df39eed 100644 --- 
a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -8,7 +8,7 @@ use std::fmt::Write; use types::*; /// The time before the Bellatrix fork when we will start issuing warnings about preparation. -const SECONDS_IN_A_WEEK: u64 = 604800; +pub const SECONDS_IN_A_WEEK: u64 = 604800; pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; #[derive(Default, Debug, Serialize, Deserialize)] diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b52c4258fe..315f869514 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -972,6 +972,22 @@ lazy_static! { "beacon_pre_finalization_block_lookup_count", "Number of block roots subject to single block lookups" ); + + /* + * Blob sidecar Verification + */ + pub static ref BLOBS_SIDECAR_PROCESSING_REQUESTS: Result = try_create_int_counter( + "beacon_blobs_sidecar_processing_requests_total", + "Count of all blob sidecars submitted for processing" + ); + pub static ref BLOBS_SIDECAR_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "beacon_blobs_sidecar_processing_successes_total", + "Number of blob sidecars verified for gossip" + ); + pub static ref BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_blobs_sidecar_gossip_verification_seconds", + "Full runtime of blob sidecars gossip verification" + ); } // Fifth lazy-static block is used to account for macro recursion limit. 
diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 8d8272b67d..6e53373939 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,12 +1,12 @@ use derivative::Derivative; -use smallvec::SmallVec; +use smallvec::{smallvec, SmallVec}; use ssz::{Decode, Encode}; use state_processing::{SigVerifiedOp, VerifyOperation}; use std::collections::HashSet; use std::marker::PhantomData; use types::{ AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing, - SignedVoluntaryExit, Slot, + SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, }; /// Number of validator indices to store on the stack in `observed_validators`. @@ -39,7 +39,7 @@ pub enum ObservationOutcome { AlreadyKnown, } -/// Trait for exits and slashings which can be observed using `ObservedOperations`. +/// Trait for operations which can be observed using `ObservedOperations`. pub trait ObservableOperation: VerifyOperation + Sized { /// The set of validator indices involved in this operation. 
/// @@ -49,13 +49,13 @@ pub trait ObservableOperation: VerifyOperation + Sized { impl ObservableOperation for SignedVoluntaryExit { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { - std::iter::once(self.message.validator_index).collect() + smallvec![self.message.validator_index] } } impl ObservableOperation for ProposerSlashing { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { - std::iter::once(self.signed_header_1.message.proposer_index).collect() + smallvec![self.signed_header_1.message.proposer_index] } } @@ -80,6 +80,12 @@ impl ObservableOperation for AttesterSlashing { } } +impl ObservableOperation for SignedBlsToExecutionChange { + fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { + smallvec![self.message.validator_index] + } +} + impl, E: EthSpec> ObservedOperations { pub fn verify_and_observe( &mut self, diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 73906b1b58..35202a3c5d 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,6 +1,8 @@ //! Utilities for managing database schema changes. 
mod migration_schema_v12; mod migration_schema_v13; +mod migration_schema_v14; +mod migration_schema_v15; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -114,6 +116,22 @@ pub fn migrate_schema( Ok(()) } + (SchemaVersion(13), SchemaVersion(14)) => { + let ops = migration_schema_v14::upgrade_to_v14::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(14), SchemaVersion(13)) => { + let ops = migration_schema_v14::downgrade_from_v14::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(14), SchemaVersion(15)) => { + let ops = migration_schema_v15::upgrade_to_v15::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(15), SchemaVersion(14)) => { + let ops = migration_schema_v15::downgrade_from_v15::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs index bb72b28c0e..c9aa2097f8 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs @@ -168,16 +168,14 @@ pub fn downgrade_from_v12( log: Logger, ) -> Result, Error> { // Load a V12 op pool and transform it to V5. - let PersistedOperationPoolV12 { + let PersistedOperationPoolV12:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - } = if let Some(PersistedOperationPool::::V12(op_pool)) = - db.get_item(&OP_POOL_DB_KEY)? - { - op_pool + } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? 
{ + op_pool_v12 } else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs new file mode 100644 index 0000000000..be913d8cc5 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs @@ -0,0 +1,125 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, +}; +use slog::{debug, error, info, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::Duration; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use types::{EthSpec, Hash256, Slot}; + +/// The slot clock isn't usually available before the database is initialized, so we construct a +/// temporary slot clock by reading the genesis state. It should always exist if the database is +/// initialized at a prior schema version, however we still handle the lack of genesis state +/// gracefully. +fn get_slot_clock( + db: &HotColdDB, + log: &Logger, +) -> Result, Error> { + let spec = db.get_chain_spec(); + let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? { + block + } else { + error!(log, "Missing genesis block"); + return Ok(None); + }; + let genesis_state = + if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? { + state + } else { + error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); + return Ok(None); + }; + Ok(Some(T::SlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_state.genesis_time()), + Duration::from_secs(spec.seconds_per_slot), + ))) +} + +pub fn upgrade_to_v14( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V12 op pool and transform it to V14. 
+ let PersistedOperationPoolV12:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v12 + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + // initialize with empty vector + let bls_to_execution_changes = vec![]; + let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + }); + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v14( + db: Arc>, + log: Logger, +) -> Result, Error> { + // We cannot downgrade from V14 once the Capella fork has been reached because there will + // be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions + // of Lighthouse can't handle that. + if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch { + let current_epoch = get_slot_clock::(&db, &log)? + .and_then(|clock| clock.now()) + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + .ok_or(Error::SlotClockUnavailableForMigration)?; + + if current_epoch >= capella_fork_epoch { + error!( + log, + "Capella already active: v14+ is mandatory"; + "current_epoch" => current_epoch, + "capella_fork_epoch" => capella_fork_epoch, + ); + return Err(Error::UnableToDowngrade); + } + } + + // Load a V14 op pool and transform it to V12. + let PersistedOperationPoolV14:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? 
{ + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Dropping bls_to_execution_changes from pool"; + "count" => bls_to_execution_changes.len(), + ); + + let v12 = PersistedOperationPoolV12 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + }; + Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs new file mode 100644 index 0000000000..07c86bd931 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs @@ -0,0 +1,76 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV14, PersistedOperationPoolV15, +}; +use slog::{debug, info, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v15( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V14 op pool and transform it to V15. + let PersistedOperationPoolV14:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + } = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v14 + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + // Initialize with empty set + capella_bls_change_broadcast_indices: <_>::default(), + }); + Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v15( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V15 op pool and transform it to V14. 
+ let PersistedOperationPoolV15:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Forgetting address changes for Capella broadcast"; + "count" => capella_bls_change_broadcast_indices.len(), + ); + + let v14 = PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + }; + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs index 561fed1a86..2221aa1d5e 100644 --- a/beacon_node/beacon_chain/src/sync_committee_rewards.rs +++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs @@ -6,10 +6,10 @@ use slog::error; use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; use std::collections::HashMap; use store::RelativeEpoch; -use types::{BeaconBlockRef, BeaconState, ExecPayload}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState}; impl BeaconChain { - pub fn compute_sync_committee_rewards>( + pub fn compute_sync_committee_rewards>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &mut BeaconState, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 749487dc5a..f1b9bc83c5 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -13,17 +13,17 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; -use execution_layer::test_utils::DEFAULT_JWT_SECRET; use execution_layer::{ auth::JwtKey, test_utils::{ - ExecutionBlockGenerator, MockExecutionLayer, 
TestingBuilder, DEFAULT_TERMINAL_BLOCK, + ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET, + DEFAULT_TERMINAL_BLOCK, }, ExecutionLayer, }; use fork_choice::CountUnrealized; use futures::channel::mpsc::Receiver; -pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use merkle_proof::MerkleTree; use parking_lot::Mutex; @@ -149,6 +149,7 @@ pub struct Builder { eth_spec_instance: T::EthSpec, spec: Option, validator_keypairs: Option>, + withdrawal_keypairs: Vec>, chain_config: Option, store_config: Option, #[allow(clippy::type_complexity)] @@ -180,7 +181,7 @@ impl Builder> { .unwrap(), ); let mutator = move |builder: BeaconChainBuilder<_>| { - let genesis_state = interop_genesis_state::( + let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), @@ -241,7 +242,7 @@ impl Builder> { .expect("cannot build without validator keypairs"); let mutator = move |builder: BeaconChainBuilder<_>| { - let genesis_state = interop_genesis_state::( + let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), @@ -283,6 +284,7 @@ where eth_spec_instance, spec: None, validator_keypairs: None, + withdrawal_keypairs: vec![], chain_config: None, store_config: None, store: None, @@ -308,6 +310,26 @@ where self } + /// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to + /// the "determistic" values, regardless of wether or not the validator has + /// a BLS or execution address in the genesis deposits. + /// + /// This aligns with the withdrawal commitments used in the "interop" + /// genesis states. 
+ pub fn deterministic_withdrawal_keypairs(self, num_keypairs: usize) -> Self { + self.withdrawal_keypairs( + types::test_utils::generate_deterministic_keypairs(num_keypairs) + .into_iter() + .map(Option::Some) + .collect(), + ) + } + + pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self { + self.withdrawal_keypairs = withdrawal_keypairs; + self + } + pub fn default_spec(self) -> Self { self.spec_or_default(None) } @@ -385,15 +407,43 @@ where self } + pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self { + let mock = self + .mock_execution_layer + .as_mut() + .expect("must have mock execution layer to recalculate fork times"); + let spec = self + .spec + .clone() + .expect("cannot recalculate fork times without spec"); + mock.server.execution_block_generator().shanghai_time = + spec.capella_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + mock.server.execution_block_generator().eip4844_time = + spec.eip4844_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + + self + } + pub fn mock_execution_layer(mut self) -> Self { let spec = self.spec.clone().expect("cannot build without spec"); + let shanghai_time = spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let mock = MockExecutionLayer::new( self.runtime.task_executor.clone(), - spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, - spec.terminal_block_hash, - spec.terminal_block_hash_activation_epoch, + shanghai_time, + eip4844_time, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec, None, ); self.execution_layer = Some(mock.el.clone()); @@ -401,19 +451,30 @@ where self } - pub fn 
mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self { + pub fn mock_execution_layer_with_builder( + mut self, + beacon_url: SensitiveUrl, + builder_threshold: Option, + ) -> Self { // Get a random unused port let port = unused_port::unused_tcp_port().unwrap(); let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let spec = self.spec.clone().expect("cannot build without spec"); + let shanghai_time = spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); + let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let mock_el = MockExecutionLayer::new( self.runtime.task_executor.clone(), - spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, - spec.terminal_block_hash, - spec.terminal_block_hash_activation_epoch, + shanghai_time, + eip4844_time, + builder_threshold, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec.clone(), Some(builder_url.clone()), ) .move_to_terminal_block(); @@ -505,6 +566,7 @@ where spec: chain.spec.clone(), chain: Arc::new(chain), validator_keypairs, + withdrawal_keypairs: self.withdrawal_keypairs, shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, @@ -520,6 +582,12 @@ where /// Used for testing. pub struct BeaconChainHarness { pub validator_keypairs: Vec, + /// Optional BLS withdrawal keys for each validator. + /// + /// If a validator index is missing from this vec or their entry is `None` then either + /// no BLS withdrawal key was set for them (they had an address from genesis) or the test + /// initializer neglected to set this field. 
+ pub withdrawal_keypairs: Vec>, pub chain: Arc>, pub spec: ChainSpec, @@ -1431,6 +1499,44 @@ where .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } + pub fn make_bls_to_execution_change( + &self, + validator_index: u64, + address: Address, + ) -> SignedBlsToExecutionChange { + let keypair = self.get_withdrawal_keypair(validator_index); + self.make_bls_to_execution_change_with_keys( + validator_index, + address, + &keypair.pk, + &keypair.sk, + ) + } + + pub fn make_bls_to_execution_change_with_keys( + &self, + validator_index: u64, + address: Address, + pubkey: &PublicKey, + secret_key: &SecretKey, + ) -> SignedBlsToExecutionChange { + let genesis_validators_root = self.chain.genesis_validators_root; + BlsToExecutionChange { + validator_index, + from_bls_pubkey: pubkey.compress(), + to_execution_address: address, + } + .sign(secret_key, genesis_validators_root, &self.chain.spec) + } + + pub fn get_withdrawal_keypair(&self, validator_index: u64) -> &Keypair { + self.withdrawal_keypairs + .get(validator_index as usize) + .expect("BLS withdrawal key missing from harness") + .as_ref() + .expect("no withdrawal key for validator") + } + pub fn add_voluntary_exit( &self, block: &mut BeaconBlock, diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs new file mode 100644 index 0000000000..e910e8134f --- /dev/null +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -0,0 +1,167 @@ +#![cfg(not(debug_assertions))] // Tests run too slow in debug. + +use beacon_chain::test_utils::BeaconChainHarness; +use execution_layer::test_utils::Block; +use types::*; + +const VALIDATOR_COUNT: usize = 32; +type E = MainnetEthSpec; + +fn verify_execution_payload_chain(chain: &[FullPayload]) { + let mut prev_ep: Option> = None; + + for ep in chain { + assert!(!ep.is_default_with_empty_roots()); + assert!(ep.block_hash() != ExecutionBlockHash::zero()); + + // Check against previous `ExecutionPayload`. 
+ if let Some(prev_ep) = prev_ep { + assert_eq!(prev_ep.block_hash(), ep.parent_hash()); + assert_eq!(prev_ep.block_number() + 1, ep.block_number()); + assert!(ep.timestamp() > prev_ep.timestamp()); + } + prev_ep = Some(ep.clone()); + } +} + +#[tokio::test] +async fn base_altair_merge_capella() { + let altair_fork_epoch = Epoch::new(4); + let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); + let bellatrix_fork_epoch = Epoch::new(8); + let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + let capella_fork_epoch = Epoch::new(12); + let capella_fork_slot = capella_fork_epoch.start_slot(E::slots_per_epoch()); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .logger(logging::test_logger()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + /* + * Start with the base fork. + */ + assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); + + /* + * Do the Altair fork. + */ + harness.extend_to_slot(altair_fork_slot).await; + + let altair_head = &harness.chain.head_snapshot().beacon_block; + assert!(altair_head.as_altair().is_ok()); + assert_eq!(altair_head.slot(), altair_fork_slot); + + /* + * Do the merge fork, without a terminal PoW block. + */ + harness.extend_to_slot(merge_fork_slot).await; + + let merge_head = &harness.chain.head_snapshot().beacon_block; + assert!(merge_head.as_merge().is_ok()); + assert_eq!(merge_head.slot(), merge_fork_slot); + assert!( + merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Merge head is default payload" + ); + + /* + * Next merge block shouldn't include an exec payload. 
+ */ + harness.extend_slots(1).await; + + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + one_after_merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "One after merge head is default payload" + ); + assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); + + /* + * Trigger the terminal PoW block. + */ + harness + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + + // Add a slot duration to get to the next slot + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + harness.extend_slots(1).await; + + let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + two_after_merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Two after merge head is default payload" + ); + assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); + + /* + * Next merge block should include an exec payload. + */ + let mut execution_payloads = vec![]; + for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() { + harness.extend_slots(1).await; + let block = &harness.chain.head_snapshot().beacon_block; + let full_payload: FullPayload = block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(); + // pre-capella shouldn't have withdrawals + assert!(full_payload.withdrawals_root().is_err()); + execution_payloads.push(full_payload); + } + + /* + * Should enter capella fork now. 
+ */ + for _ in 0..16 { + harness.extend_slots(1).await; + let block = &harness.chain.head_snapshot().beacon_block; + let full_payload: FullPayload = block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(); + // post-capella should have withdrawals + assert!(full_payload.withdrawals_root().is_ok()); + execution_payloads.push(full_payload); + } + + verify_execution_payload_chain(execution_payloads.as_slice()); +} diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index eceb4f2e85..c81a547406 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -1,6 +1,7 @@ mod attestation_production; mod attestation_verification; mod block_verification; +mod capella; mod merge; mod op_verification; mod payload_invalidation; diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index c8c47c9904..1e0112a495 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -12,17 +12,14 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { let mut prev_ep: Option> = None; for ep in chain { - assert!(*ep != FullPayload::default()); + assert!(!ep.is_default_with_empty_roots()); assert!(ep.block_hash() != ExecutionBlockHash::zero()); // Check against previous `ExecutionPayload`. 
if let Some(prev_ep) = prev_ep { - assert_eq!(prev_ep.block_hash(), ep.execution_payload.parent_hash); - assert_eq!( - prev_ep.execution_payload.block_number + 1, - ep.execution_payload.block_number - ); - assert!(ep.execution_payload.timestamp > prev_ep.execution_payload.timestamp); + assert_eq!(prev_ep.block_hash(), ep.parent_hash()); + assert_eq!(prev_ep.block_number() + 1, ep.block_number()); + assert!(ep.timestamp() > prev_ep.timestamp()); } prev_ep = Some(ep.clone()); } @@ -89,7 +86,7 @@ async fn merge_with_terminal_block_hash_override() { if i == 0 { assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); } - execution_payloads.push(execution_payload); + execution_payloads.push(execution_payload.into()); } verify_execution_payload_chain(execution_payloads.as_slice()); @@ -141,9 +138,14 @@ async fn base_altair_merge_with_terminal_block_after_fork() { let merge_head = &harness.chain.head_snapshot().beacon_block; assert!(merge_head.as_merge().is_ok()); assert_eq!(merge_head.slot(), merge_fork_slot); - assert_eq!( - *merge_head.message().body().execution_payload().unwrap(), - FullPayload::default() + assert!( + merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Merge head is default payload" ); /* @@ -153,13 +155,14 @@ async fn base_altair_merge_with_terminal_block_after_fork() { harness.extend_slots(1).await; let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; - assert_eq!( - *one_after_merge_head + assert!( + one_after_merge_head .message() .body() .execution_payload() - .unwrap(), - FullPayload::default() + .unwrap() + .is_default_with_empty_roots(), + "One after merge head is default payload" ); assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); @@ -185,26 +188,34 @@ async fn base_altair_merge_with_terminal_block_after_fork() { harness.extend_slots(1).await; - let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; - assert_eq!( - 
*one_after_merge_head + let two_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert!( + two_after_merge_head .message() .body() .execution_payload() - .unwrap(), - FullPayload::default() + .unwrap() + .is_default_with_empty_roots(), + "Two after merge head is default payload" ); - assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2); + assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2); /* * Next merge block should include an exec payload. */ - for _ in 0..4 { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); + execution_payloads.push( + block + .message() + .body() + .execution_payload() + .unwrap() + .clone() + .into(), + ); } verify_execution_payload_chain(execution_payloads.as_slice()); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 0b9eaaee0f..54d7734471 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -13,9 +13,9 @@ use beacon_chain::{ INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ - json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, + json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, test_utils::ExecutionBlockGenerator, - ExecutionLayer, ForkChoiceState, PayloadAttributes, + ExecutionLayer, ForkchoiceState, PayloadAttributes, }; use fork_choice::{ CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, @@ -120,7 +120,7 @@ impl InvalidPayloadRig { &self.harness.chain.canonical_head } - fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { + fn previous_forkchoice_update_params(&self) -> (ForkchoiceState, PayloadAttributes) { let mock_execution_layer = 
self.harness.mock_execution_layer.as_ref().unwrap(); let json = mock_execution_layer .server @@ -129,14 +129,17 @@ impl InvalidPayloadRig { let params = json.get("params").expect("no params"); let fork_choice_state_json = params.get(0).expect("no payload param"); - let fork_choice_state: JsonForkChoiceStateV1 = + let fork_choice_state: JsonForkchoiceStateV1 = serde_json::from_value(fork_choice_state_json.clone()).unwrap(); let payload_param_json = params.get(1).expect("no payload param"); let attributes: JsonPayloadAttributesV1 = serde_json::from_value(payload_param_json.clone()).unwrap(); - (fork_choice_state.into(), attributes.into()) + ( + fork_choice_state.into(), + JsonPayloadAttributes::V1(attributes).into(), + ) } fn previous_payload_attributes(&self) -> PayloadAttributes { @@ -991,20 +994,20 @@ async fn payload_preparation() { .await .unwrap(); - let payload_attributes = PayloadAttributes { - timestamp: rig - .harness + let payload_attributes = PayloadAttributes::new( + rig.harness .chain .slot_clock .start_of(next_slot) .unwrap() .as_secs(), - prev_randao: *head + *head .beacon_state .get_randao_mix(head.beacon_state.current_epoch()) .unwrap(), - suggested_fee_recipient: fee_recipient, - }; + fee_recipient, + None, + ); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } @@ -1138,7 +1141,7 @@ async fn payload_preparation_before_transition_block() { let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); let latest_block_hash = rig.latest_execution_block_hash(); - assert_eq!(payload_attributes.suggested_fee_recipient, fee_recipient); + assert_eq!(payload_attributes.suggested_fee_recipient(), fee_recipient); assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); } @@ -1385,18 +1388,16 @@ async fn build_optimistic_chain( .body() .execution_payload() .unwrap() - .execution_payload - == <_>::default(), + .is_default_with_empty_roots(), "the block *has not* undergone the merge transition" ); assert!( - 
post_transition_block + !post_transition_block .message() .body() .execution_payload() .unwrap() - .execution_payload - != <_>::default(), + .is_default_with_empty_roots(), "the block *has* undergone the merge transition" ); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 8a6ea9cfe1..2f40443b99 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2,6 +2,7 @@ use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::builder::BeaconChainBuilder; +use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; @@ -22,6 +23,7 @@ use std::collections::HashSet; use std::convert::TryInto; use std::sync::Arc; use std::time::Duration; +use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, HotColdDB, LevelDB, StoreConfig, @@ -68,6 +70,7 @@ fn get_harness( let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() .build(); @@ -1013,8 +1016,8 @@ fn check_shuffling_compatible( // Ensure blocks from abandoned forks are pruned from the Hot DB #[tokio::test] async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1123,8 +1126,8 @@ async fn 
prunes_abandoned_fork_between_two_finalized_checkpoints() { #[tokio::test] async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1255,8 +1258,8 @@ async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { #[tokio::test] async fn pruning_does_not_touch_blocks_prior_to_finalization() { - const HONEST_VALIDATOR_COUNT: usize = 16; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1350,8 +1353,8 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { #[tokio::test] async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1495,8 +1498,8 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // This is to check if state outside of normal block processing are 
pruned correctly. #[tokio::test] async fn prunes_skipped_slots_states() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1624,8 +1627,8 @@ async fn prunes_skipped_slots_states() { // This is to check if state outside of normal block processing are pruned correctly. #[tokio::test] async fn finalizes_non_epoch_start_slot() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -2529,6 +2532,91 @@ async fn revert_minority_fork_on_resume() { assert_eq!(heads.len(), 1); } +// This test checks whether the schema downgrade from the latest version to some minimum supported +// version is correct. This is the easiest schema test to write without historic versions of +// Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually +// as old downgrades are deprecated. 
+#[tokio::test] +async fn schema_downgrade_to_min_version() { + let num_blocks_produced = E::slots_per_epoch() * 4; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let spec = &harness.chain.spec.clone(); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let min_version = if harness.spec.capella_fork_epoch.is_some() { + // Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that + // at all if Capella is enabled. + SchemaVersion(14) + } else { + SchemaVersion(11) + }; + + // Close the database to ensure everything is written to disk. + drop(store); + drop(harness); + + // Re-open the store. + let store = get_store(&db_path); + + // Downgrade. + let deposit_contract_deploy_block = 0; + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + CURRENT_SCHEMA_VERSION, + min_version, + store.logger().clone(), + spec, + ) + .expect("schema downgrade to minimum version should work"); + + // Upgrade back. + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + min_version, + CURRENT_SCHEMA_VERSION, + store.logger().clone(), + spec, + ) + .expect("schema upgrade from minimum version should work"); + + // Rescreate the harness. + let harness = BeaconChainHarness::builder(MinimalEthSpec) + .default_spec() + .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) + .logger(store.logger().clone()) + .resumed_disk_store(store.clone()) + .mock_execution_layer() + .build(); + + check_finalization(&harness, num_blocks_produced); + check_split_slot(&harness, store.clone()); + check_chain_dump(&harness, num_blocks_produced + 1); + check_iterators(&harness); + + // Check that downgrading beyond the minimum version fails (bound is *tight*). 
+ let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap()); + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + CURRENT_SCHEMA_VERSION, + min_version_sub_1, + harness.logger().clone(), + spec, + ) + .expect_err("should not downgrade below minimum version"); +} + /// Checks that two chains are the same, for the purpose of these tests. /// /// Several fields that are hard/impossible to check are ignored (e.g., the store). diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 1e51b0ffb9..239f55e7d3 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -45,6 +45,7 @@ fn get_valid_sync_committee_message( harness: &BeaconChainHarness>, slot: Slot, relative_sync_committee: RelativeSyncCommittee, + message_index: usize, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { let head_state = harness.chain.head_beacon_state_cloned(); let head_block_root = harness.chain.head_snapshot().beacon_block_root; @@ -52,7 +53,7 @@ fn get_valid_sync_committee_message( .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) .get(0) .expect("sync messages should exist") - .get(0) + .get(message_index) .expect("first sync message should exist") .clone(); @@ -494,7 +495,7 @@ async fn unaggregated_gossip_verification() { let current_slot = harness.chain.slot().expect("should get slot"); let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) = - get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current); + get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current, 0); macro_rules! assert_invalid { ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) 
=> { @@ -644,7 +645,7 @@ async fn unaggregated_gossip_verification() { // **Incorrectly** create a sync message using the current sync committee let (next_valid_sync_committee_message, _, _, next_subnet_id) = - get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current); + get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current, 1); assert_invalid!( "sync message on incorrect subnet", diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index d80db132ef..384fcbe5db 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -19,7 +19,7 @@ use types::{ }; // Should ideally be divisible by 3. -pub const VALIDATOR_COUNT: usize = 24; +pub const VALIDATOR_COUNT: usize = 48; lazy_static! { /// A cached set of keys. diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 3517d06b15..fecf6512ac 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,6 +1,6 @@ use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ - BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, + AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload, ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData, Slot, }; @@ -160,7 +160,7 @@ impl BuilderHttpClient { } /// `GET /eth/v1/builder/header` - pub async fn get_builder_header>( + pub async fn get_builder_header>( &self, slot: Slot, parent_hash: ExecutionBlockHash, diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index d01f2505cc..9a49843a9f 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,6 +6,10 @@ edition = "2021" [dev-dependencies] serde_yaml = "0.8.13" +logging = { path = "../../common/logging" } +state_processing = { path = "../../consensus/state_processing" } 
+operation_pool = { path = "../operation_pool" } +tokio = "1.14.0" [dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/client/src/address_change_broadcast.rs b/beacon_node/client/src/address_change_broadcast.rs new file mode 100644 index 0000000000..272ee908fb --- /dev/null +++ b/beacon_node/client/src/address_change_broadcast.rs @@ -0,0 +1,322 @@ +use crate::*; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use slog::{debug, info, warn, Logger}; +use slot_clock::SlotClock; +use std::cmp; +use std::collections::HashSet; +use std::mem; +use std::time::Duration; +use tokio::sync::mpsc::UnboundedSender; +use tokio::time::sleep; +use types::EthSpec; + +/// The size of each chunk of addresses changes to be broadcast at the Capella +/// fork. +const BROADCAST_CHUNK_SIZE: usize = 128; +/// The delay between broadcasting each chunk. +const BROADCAST_CHUNK_DELAY: Duration = Duration::from_millis(500); + +/// If the Capella fork has already been reached, `broadcast_address_changes` is +/// called immediately. +/// +/// If the Capella fork has not been reached, waits until the start of the fork +/// epoch and then calls `broadcast_address_changes`. +pub async fn broadcast_address_changes_at_capella( + chain: &BeaconChain, + network_send: UnboundedSender>, + log: &Logger, +) { + let spec = &chain.spec; + let slot_clock = &chain.slot_clock; + + let capella_fork_slot = if let Some(epoch) = spec.capella_fork_epoch { + epoch.start_slot(T::EthSpec::slots_per_epoch()) + } else { + // Exit now if Capella is not defined. + return; + }; + + // Wait until the Capella fork epoch. + while chain.slot().map_or(true, |slot| slot < capella_fork_slot) { + match slot_clock.duration_to_slot(capella_fork_slot) { + Some(duration) => { + // Sleep until the Capella fork. + sleep(duration).await; + break; + } + None => { + // We were unable to read the slot clock wait another slot + // and then try again. 
+                sleep(slot_clock.slot_duration()).await;
+            }
+        }
+    }
+
+    // The following function will be called in two scenarios:
+    //
+    // 1. The node has been running for some time and the Capella fork has just
+    //    been reached.
+    // 2. The node has just started and it is *after* the Capella fork.
+    broadcast_address_changes(chain, network_send, log).await
+}
+
+/// Broadcasts any address changes that are flagged for broadcasting at the
+/// Capella fork epoch.
+///
+/// Address changes are published in chunks, with a delay between each chunk.
+/// This helps reduce the load on the P2P network and also helps prevent us from
+/// clogging our `network_send` channel and being late to publish
+/// blocks, attestations, etc.
+pub async fn broadcast_address_changes<T: BeaconChainTypes>(
+    chain: &BeaconChain<T>,
+    network_send: UnboundedSender<NetworkMessage<T::EthSpec>>,
+    log: &Logger,
+) {
+    let head = chain.head_snapshot();
+    let mut changes = chain
+        .op_pool
+        .get_bls_to_execution_changes_received_pre_capella(&head.beacon_state, &chain.spec);
+
+    while !changes.is_empty() {
+        // This `split_off` approach is to allow us to have owned chunks of the
+        // `changes` vec. The `std::slice::Chunks` method uses references and
+        // the `itertools` iterator that achieves this isn't `Send` so it doesn't
+        // work well with the `sleep` at the end of the loop.
+        let tail = changes.split_off(cmp::min(BROADCAST_CHUNK_SIZE, changes.len()));
+        let chunk = mem::replace(&mut changes, tail);
+
+        let mut published_indices = HashSet::with_capacity(BROADCAST_CHUNK_SIZE);
+        let mut num_ok = 0;
+        let mut num_err = 0;
+
+        // Publish each individual address change.
+        for address_change in chunk {
+            let validator_index = address_change.message.validator_index;
+
+            let pubsub_message = PubsubMessage::BlsToExecutionChange(Box::new(address_change));
+            let message = NetworkMessage::Publish {
+                messages: vec![pubsub_message],
+            };
+            // It seems highly unlikely that this unbounded send will fail, but
+            // we handle the result nonetheless.
+ if let Err(e) = network_send.send(message) { + debug!( + log, + "Failed to publish change message"; + "error" => ?e, + "validator_index" => validator_index + ); + num_err += 1; + } else { + debug!( + log, + "Published address change message"; + "validator_index" => validator_index + ); + num_ok += 1; + published_indices.insert(validator_index); + } + } + + // Remove any published indices from the list of indices that need to be + // published. + chain + .op_pool + .register_indices_broadcasted_at_capella(&published_indices); + + info!( + log, + "Published address change messages"; + "num_published" => num_ok, + ); + + if num_err > 0 { + warn!( + log, + "Failed to publish address changes"; + "info" => "failed messages will be retried", + "num_unable_to_publish" => num_err, + ); + } + + sleep(BROADCAST_CHUNK_DELAY).await; + } + + debug!( + log, + "Address change routine complete"; + ); +} + +#[cfg(not(debug_assertions))] // Tests run too slow in debug. +#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use operation_pool::ReceivedPreCapella; + use state_processing::{SigVerifiedOp, VerifyOperation}; + use std::collections::HashSet; + use tokio::sync::mpsc; + use types::*; + + type E = MainnetEthSpec; + + pub const VALIDATOR_COUNT: usize = BROADCAST_CHUNK_SIZE * 3; + pub const EXECUTION_ADDRESS: Address = Address::repeat_byte(42); + + struct Tester { + harness: BeaconChainHarness>, + /// Changes which should be broadcast at the Capella fork. + received_pre_capella_changes: Vec>, + /// Changes which should *not* be broadcast at the Capella fork. 
+ not_received_pre_capella_changes: Vec>, + } + + impl Tester { + fn new() -> Self { + let altair_fork_epoch = Epoch::new(0); + let bellatrix_fork_epoch = Epoch::new(0); + let capella_fork_epoch = Epoch::new(2); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .logger(logging::test_logger()) + .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + Self { + harness, + received_pre_capella_changes: <_>::default(), + not_received_pre_capella_changes: <_>::default(), + } + } + + fn produce_verified_address_change( + &self, + validator_index: u64, + ) -> SigVerifiedOp { + let change = self + .harness + .make_bls_to_execution_change(validator_index, EXECUTION_ADDRESS); + let head = self.harness.chain.head_snapshot(); + + change + .validate(&head.beacon_state, &self.harness.spec) + .unwrap() + } + + fn produce_received_pre_capella_changes(mut self, indices: Vec) -> Self { + for validator_index in indices { + self.received_pre_capella_changes + .push(self.produce_verified_address_change(validator_index)); + } + self + } + + fn produce_not_received_pre_capella_changes(mut self, indices: Vec) -> Self { + for validator_index in indices { + self.not_received_pre_capella_changes + .push(self.produce_verified_address_change(validator_index)); + } + self + } + + async fn run(self) { + let harness = self.harness; + let chain = harness.chain.clone(); + + let mut broadcast_indices = HashSet::new(); + for change in self.received_pre_capella_changes { + broadcast_indices.insert(change.as_inner().message.validator_index); + chain + .op_pool + .insert_bls_to_execution_change(change, ReceivedPreCapella::Yes); + } + + let mut non_broadcast_indices = 
HashSet::new(); + for change in self.not_received_pre_capella_changes { + non_broadcast_indices.insert(change.as_inner().message.validator_index); + chain + .op_pool + .insert_bls_to_execution_change(change, ReceivedPreCapella::No); + } + + harness.set_current_slot( + chain + .spec + .capella_fork_epoch + .unwrap() + .start_slot(E::slots_per_epoch()), + ); + + let (sender, mut receiver) = mpsc::unbounded_channel(); + + broadcast_address_changes_at_capella(&chain, sender, &logging::test_logger()).await; + + let mut broadcasted_changes = vec![]; + while let Some(NetworkMessage::Publish { mut messages }) = receiver.recv().await { + match messages.pop().unwrap() { + PubsubMessage::BlsToExecutionChange(change) => broadcasted_changes.push(change), + _ => panic!("unexpected message"), + } + } + + assert_eq!( + broadcasted_changes.len(), + broadcast_indices.len(), + "all expected changes should have been broadcast" + ); + + for broadcasted in &broadcasted_changes { + assert!( + !non_broadcast_indices.contains(&broadcasted.message.validator_index), + "messages not flagged for broadcast should not have been broadcast" + ); + } + + let head = chain.head_snapshot(); + assert!( + chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella( + &head.beacon_state, + &chain.spec, + ) + .is_empty(), + "there shouldn't be any capella broadcast changes left in the op pool" + ); + } + } + + // Useful for generating even-numbered indices. Required since only even + // numbered genesis validators have BLS credentials. 
+ fn even_indices(start: u64, count: usize) -> Vec { + (start..).filter(|i| i % 2 == 0).take(count).collect() + } + + #[tokio::test] + async fn one_chunk() { + Tester::new() + .produce_received_pre_capella_changes(even_indices(0, 4)) + .produce_not_received_pre_capella_changes(even_indices(10, 4)) + .run() + .await; + } + + #[tokio::test] + async fn multiple_chunks() { + Tester::new() + .produce_received_pre_capella_changes(even_indices(0, BROADCAST_CHUNK_SIZE * 3 / 2)) + .run() + .await; + } +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 3b016ebda9..5fa2fddc3e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,3 +1,4 @@ +use crate::address_change_broadcast::broadcast_address_changes_at_capella; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; @@ -802,6 +803,25 @@ where // Spawns a routine that polls the `exchange_transition_configuration` endpoint. execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone()); } + + // Spawn a service to publish BLS to execution changes at the Capella fork. 
+ if let Some(network_senders) = self.network_senders { + let inner_chain = beacon_chain.clone(); + let broadcast_context = + runtime_context.service_context("addr_bcast".to_string()); + let log = broadcast_context.log().clone(); + broadcast_context.executor.spawn( + async move { + broadcast_address_changes_at_capella( + &inner_chain, + network_senders.network_send(), + &log, + ) + .await + }, + "addr_broadcast", + ); + } } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 24df874086..b0184dc0ff 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,5 +1,6 @@ extern crate slog; +mod address_change_broadcast; pub mod config; mod metrics; mod notifier; diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1da7a79707..fb8a9b6349 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,5 +1,6 @@ use crate::metrics; use beacon_chain::{ + capella_readiness::CapellaReadiness, merge_readiness::{MergeConfig, MergeReadiness}, BeaconChain, BeaconChainTypes, ExecutionStatus, }; @@ -313,6 +314,7 @@ pub fn spawn_notifier( eth1_logging(&beacon_chain, &log); merge_readiness_logging(current_slot, &beacon_chain, &log).await; + capella_readiness_logging(current_slot, &beacon_chain, &log).await; } }; @@ -350,12 +352,15 @@ async fn merge_readiness_logging( } if merge_completed && !has_execution_layer { - error!( - log, - "Execution endpoint required"; - "info" => "you need an execution engine to validate blocks, see: \ - https://lighthouse-book.sigmaprime.io/merge-migration.html" - ); + if !beacon_chain.is_time_to_prepare_for_capella(current_slot) { + // logging of the EE being offline is handled in `capella_readiness_logging()` + error!( + log, + "Execution endpoint required"; + "info" => "you need an execution engine to validate blocks, see: \ + 
https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + } return; } @@ -419,6 +424,61 @@ async fn merge_readiness_logging( } } +/// Provides some helpful logging to users to indicate if their node is ready for Capella +async fn capella_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let capella_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_block + .message() + .body() + .execution_payload() + .map_or(false, |payload| payload.withdrawals_root().is_ok()); + + let has_execution_layer = beacon_chain.execution_layer.is_some(); + + if capella_completed && has_execution_layer + || !beacon_chain.is_time_to_prepare_for_capella(current_slot) + { + return; + } + + if capella_completed && !has_execution_layer { + error!( + log, + "Execution endpoint required"; + "info" => "you need a Capella enabled execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + return; + } + + match beacon_chain.check_capella_readiness().await { + CapellaReadiness::Ready => { + info!(log, "Ready for Capella") + } + readiness @ CapellaReadiness::ExchangeCapabilitiesFailed { error: _ } => { + error!( + log, + "Not ready for Capella"; + "hint" => "the execution endpoint may be offline", + "info" => %readiness, + ) + } + readiness => warn!( + log, + "Not ready for Capella"; + "hint" => "try updating the execution endpoint", + "info" => %readiness, + ), + } +} + fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 7e99c43e7d..e0dd797bfa 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -21,7 +21,7 @@ hex = "0.4.2" types = { path = "../../consensus/types"} merkle_proof = { path = "../../consensus/merkle_proof"} eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" tree_hash = 
"0.4.1" parking_lot = "0.12.0" slog = "2.5.2" diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 069a6e4aad..cd680478cc 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -697,6 +697,7 @@ mod fast { let web3 = eth1.web3(); let now = get_block_number(&web3).await; + let spec = MainnetEthSpec::default_spec(); let service = Service::new( Config { endpoint: Eth1Endpoint::NoAuth( @@ -710,7 +711,7 @@ mod fast { ..Config::default() }, log, - MainnetEthSpec::default_spec(), + spec.clone(), ) .unwrap(); let client = diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 76788b102e..1b687a8b60 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -26,6 +26,7 @@ eth2_ssz = "0.4.1" eth2_ssz_types = "0.2.2" eth2 = { path = "../../common/eth2" } state_processing = { path = "../../consensus/state_processing" } +superstruct = "0.6.0" lru = "0.7.1" exit-future = "0.2.0" tree_hash = "0.4.1" @@ -40,9 +41,9 @@ lazy_static = "1.4.0" ethers-core = "1.0.2" builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } -mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" } -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "a8110af76d97bf2bf27fb987a671808fcbdf1834" } -ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } +mev-rs = { git = "https://github.com/ralexstokes/mev-rs" } +ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" } +ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" } tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" keccak-hash = "0.10.0" diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index f023c038ae..e9b7dcc17f 100644 --- 
a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -1,4 +1,5 @@ use crate::{ + json_structures::JsonWithdrawal, keccak::{keccak256, KeccakHasher}, metrics, Error, ExecutionLayer, }; @@ -6,39 +7,51 @@ use ethers_core::utils::rlp::RlpStream; use keccak_hash::KECCAK_EMPTY_LIST_RLP; use triehash::ordered_trie_root; use types::{ - map_execution_block_header_fields, Address, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, - ExecutionPayload, Hash256, Hash64, Uint256, + map_execution_block_header_fields_except_withdrawals, Address, EthSpec, ExecutionBlockHash, + ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256, }; impl ExecutionLayer { /// Verify `payload.block_hash` locally within Lighthouse. /// /// No remote calls to the execution client will be made, so this is quite a cheap check. - pub fn verify_payload_block_hash(&self, payload: &ExecutionPayload) -> Result<(), Error> { + pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a // better alternative when one appears, possibly following Reth. let rlp_transactions_root = ordered_trie_root::( - payload.transactions.iter().map(|txn_bytes| &**txn_bytes), + payload.transactions().iter().map(|txn_bytes| &**txn_bytes), ); + // Calculate withdrawals root (post-Capella). + let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() { + Some(ordered_trie_root::( + withdrawals.iter().map(|withdrawal| { + rlp_encode_withdrawal(&JsonWithdrawal::from(withdrawal.clone())) + }), + )) + } else { + None + }; + // Construct the block header. 
let exec_block_header = ExecutionBlockHeader::from_payload( payload, KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(), rlp_transactions_root, + rlp_withdrawals_root, ); // Hash the RLP encoding of the block header. let rlp_block_header = rlp_encode_block_header(&exec_block_header); let header_hash = ExecutionBlockHash::from_root(keccak256(&rlp_block_header)); - if header_hash != payload.block_hash { + if header_hash != payload.block_hash() { return Err(Error::BlockHashMismatch { computed: header_hash, - payload: payload.block_hash, + payload: payload.block_hash(), transactions_root: rlp_transactions_root, }); } @@ -47,13 +60,27 @@ impl ExecutionLayer { } } +/// RLP encode a withdrawal. +pub fn rlp_encode_withdrawal(withdrawal: &JsonWithdrawal) -> Vec { + let mut rlp_stream = RlpStream::new(); + rlp_stream.begin_list(4); + rlp_stream.append(&withdrawal.index); + rlp_stream.append(&withdrawal.validator_index); + rlp_stream.append(&withdrawal.address); + rlp_stream.append(&withdrawal.amount); + rlp_stream.out().into() +} + /// RLP encode an execution block header. 
pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec { let mut rlp_header_stream = RlpStream::new(); rlp_header_stream.begin_unbounded_list(); - map_execution_block_header_fields!(&header, |_, field| { + map_execution_block_header_fields_except_withdrawals!(&header, |_, field| { rlp_header_stream.append(field); }); + if let Some(withdrawals_root) = &header.withdrawals_root { + rlp_header_stream.append(withdrawals_root); + } rlp_header_stream.finalize_unbounded_list(); rlp_header_stream.out().into() } @@ -99,6 +126,7 @@ mod test { mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(), nonce: Hash64::zero(), base_fee_per_gas: 0x036b_u64.into(), + withdrawals_root: None, }; let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_hash = @@ -126,6 +154,7 @@ mod test { mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap(), nonce: Hash64::zero(), base_fee_per_gas: 
0x036b_u64.into(), + withdrawals_root: None, }; let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_hash = @@ -154,6 +183,7 @@ mod test { mix_hash: Hash256::from_str("bf5289894b2ceab3549f92f063febbac896b280ddb18129a57cff13113c11b13").unwrap(), nonce: Hash64::zero(), base_fee_per_gas: 0x34187b238_u64.into(), + withdrawals_root: None, }; let expected_hash = Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ba0a37736b..9918b679c3 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,14 +1,23 @@ -use crate::engines::ForkChoiceState; +use crate::engines::ForkchoiceState; +use crate::http::{ + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1, + ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_NEW_PAYLOAD_V1, 
ENGINE_NEW_PAYLOAD_V2, +}; pub use ethers_core::types::Transaction; +use ethers_core::utils::rlp::{self, Decodable, Rlp}; use http::deposit_methods::RpcError; -pub use json_structures::TransitionConfigurationV1; +pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; +use std::convert::TryFrom; use strum::IntoStaticStr; +use superstruct::superstruct; pub use types::{ - Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, - Hash256, Uint256, VariableList, + Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, + ExecutionPayloadRef, FixedVector, ForkName, Hash256, Uint256, VariableList, Withdrawal, }; +use types::{ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge}; pub mod auth; pub mod http; @@ -38,7 +47,13 @@ pub enum Error { PayloadConversionLogicFlaw, DeserializeTransaction(ssz_types::Error), DeserializeTransactions(ssz_types::Error), + DeserializeWithdrawals(ssz_types::Error), BuilderApi(builder_client::Error), + IncorrectStateVariant, + RequiredMethodUnsupported(&'static str), + UnsupportedForkVariant(String), + BadConversion(String), + RlpDecoderError(rlp::DecoderError), } impl From for Error { @@ -72,6 +87,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: rlp::DecoderError) -> Self { + Error::RlpDecoderError(e) + } +} + #[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] pub enum PayloadStatusV1Status { @@ -111,9 +132,18 @@ pub struct ExecutionBlock { pub timestamp: u64, } -/// Representation of an exection block with enough detail to reconstruct a payload. +/// Representation of an execution block with enough detail to reconstruct a payload. 
+#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + derive(Clone, Debug, PartialEq, Serialize, Deserialize,), + serde(bound = "T: EthSpec", rename_all = "camelCase"), + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] +#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] pub struct ExecutionBlockWithTransactions { pub parent_hash: ExecutionBlockHash, #[serde(alias = "miner")] @@ -135,16 +165,138 @@ pub struct ExecutionBlockWithTransactions { #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, pub base_fee_per_gas: Uint256, + #[superstruct(only(Eip4844))] + #[serde(with = "eth2_serde_utils::u256_hex_be")] + pub excess_data_gas: Uint256, #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, + #[superstruct(only(Capella, Eip4844))] + pub withdrawals: Vec, } -#[derive(Clone, Copy, Debug, PartialEq)] +impl TryFrom> for ExecutionBlockWithTransactions { + type Error = Error; + + fn try_from(payload: ExecutionPayload) -> Result { + let json_payload = match payload { + ExecutionPayload::Merge(block) => Self::Merge(ExecutionBlockWithTransactionsMerge { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>()?, + }), + ExecutionPayload::Capella(block) => { + 
Self::Capella(ExecutionBlockWithTransactionsCapella { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>()?, + withdrawals: Vec::from(block.withdrawals) + .into_iter() + .map(|withdrawal| withdrawal.into()) + .collect(), + }) + } + ExecutionPayload::Eip4844(block) => { + Self::Eip4844(ExecutionBlockWithTransactionsEip4844 { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + excess_data_gas: block.excess_data_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>()?, + withdrawals: Vec::from(block.withdrawals) + .into_iter() + .map(|withdrawal| withdrawal.into()) + .collect(), + }) + } + }; + Ok(json_payload) + } +} + +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct PayloadAttributes { + #[superstruct(getter(copy))] pub timestamp: u64, + #[superstruct(getter(copy))] pub 
prev_randao: Hash256, + #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, + #[superstruct(only(V2))] + pub withdrawals: Vec, +} + +impl PayloadAttributes { + pub fn new( + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + withdrawals: Option>, + ) -> Self { + match withdrawals { + Some(withdrawals) => PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }), + None => PayloadAttributes::V1(PayloadAttributesV1 { + timestamp, + prev_randao, + suggested_fee_recipient, + }), + } + } } #[derive(Clone, Debug, PartialEq)] @@ -166,3 +318,103 @@ pub struct ProposeBlindedBlockResponse { pub latest_valid_hash: Option, pub validation_error: Option, } + +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes(derive(Clone, Debug, PartialEq),), + map_into(ExecutionPayload), + map_ref_into(ExecutionPayloadRef), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Clone, Debug, PartialEq)] +pub struct GetPayloadResponse { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: ExecutionPayloadMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload: ExecutionPayloadCapella, + #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] + pub execution_payload: ExecutionPayloadEip4844, + pub block_value: Uint256, +} + +impl<'a, T: EthSpec> From> for ExecutionPayloadRef<'a, T> { + fn from(response: GetPayloadResponseRef<'a, T>) -> Self { + map_get_payload_response_ref_into_execution_payload_ref!(&'a _, response, |inner, cons| { + cons(&inner.execution_payload) + }) + } +} + +impl From> for ExecutionPayload { + fn from(response: GetPayloadResponse) -> Self { + map_get_payload_response_into_execution_payload!(response, |inner, 
cons| { + cons(inner.execution_payload) + }) + } +} + +impl From> for (ExecutionPayload, Uint256) { + fn from(response: GetPayloadResponse) -> Self { + match response { + GetPayloadResponse::Merge(inner) => ( + ExecutionPayload::Merge(inner.execution_payload), + inner.block_value, + ), + GetPayloadResponse::Capella(inner) => ( + ExecutionPayload::Capella(inner.execution_payload), + inner.block_value, + ), + GetPayloadResponse::Eip4844(inner) => ( + ExecutionPayload::Eip4844(inner.execution_payload), + inner.block_value, + ), + } + } +} + +impl GetPayloadResponse { + pub fn execution_payload_ref(&self) -> ExecutionPayloadRef { + self.to_ref().into() + } +} + +#[derive(Clone, Copy, Debug)] +pub struct EngineCapabilities { + pub new_payload_v1: bool, + pub new_payload_v2: bool, + pub forkchoice_updated_v1: bool, + pub forkchoice_updated_v2: bool, + pub get_payload_v1: bool, + pub get_payload_v2: bool, + pub exchange_transition_configuration_v1: bool, +} + +impl EngineCapabilities { + pub fn to_response(&self) -> Vec<&str> { + let mut response = Vec::new(); + if self.new_payload_v1 { + response.push(ENGINE_NEW_PAYLOAD_V1); + } + if self.new_payload_v2 { + response.push(ENGINE_NEW_PAYLOAD_V2); + } + if self.forkchoice_updated_v1 { + response.push(ENGINE_FORKCHOICE_UPDATED_V1); + } + if self.forkchoice_updated_v2 { + response.push(ENGINE_FORKCHOICE_UPDATED_V2); + } + if self.get_payload_v1 { + response.push(ENGINE_GET_PAYLOAD_V1); + } + if self.get_payload_v2 { + response.push(ENGINE_GET_PAYLOAD_V2); + } + if self.exchange_transition_configuration_v1 { + response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1); + } + + response + } +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 7453663012..4416d6a37e 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -7,8 +7,10 @@ use reqwest::header::CONTENT_TYPE; use 
sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; +use std::collections::HashSet; +use tokio::sync::Mutex; -use std::time::Duration; +use std::time::{Duration, Instant}; use types::EthSpec; pub use deposit_log::{DepositLog, Log}; @@ -29,22 +31,57 @@ pub const ETH_SYNCING: &str = "eth_syncing"; pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; +pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; +pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); +pub const ENGINE_GET_BLOBS_BUNDLE_V1: &str = "engine_getBlobsBundleV1"; +pub const ENGINE_GET_BLOBS_BUNDLE_TIMEOUT: Duration = Duration::from_secs(2); + pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; +pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = "engine_exchangeTransitionConfigurationV1"; pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); +pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; +pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); + /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; +/// This code is returned by all clients when a method is not supported +/// (verified geth, nethermind, erigon, besu) +pub const METHOD_NOT_FOUND_CODE: i64 = -32601; -/// Contains methods to convert arbitary bytes to an ETH2 deposit contract object. 
+pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ + ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V1, + ENGINE_GET_PAYLOAD_V2, + ENGINE_FORKCHOICE_UPDATED_V1, + ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, +]; + +/// This is necessary because a user might run a capella-enabled version of +/// lighthouse before they update to a capella-enabled execution engine. +// TODO (mark): rip this out once we are post-capella on mainnet +pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { + new_payload_v1: true, + new_payload_v2: false, + forkchoice_updated_v1: true, + forkchoice_updated_v2: false, + get_payload_v1: true, + get_payload_v2: false, + exchange_transition_configuration_v1: true, +}; + +/// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. pub mod deposit_log { use ssz::Decode; use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; @@ -519,10 +556,39 @@ pub mod deposit_methods { } } +#[derive(Clone, Debug)] +pub struct CapabilitiesCacheEntry { + engine_capabilities: EngineCapabilities, + fetch_time: Instant, +} + +impl CapabilitiesCacheEntry { + pub fn new(engine_capabilities: EngineCapabilities) -> Self { + Self { + engine_capabilities, + fetch_time: Instant::now(), + } + } + + pub fn engine_capabilities(&self) -> EngineCapabilities { + self.engine_capabilities + } + + pub fn age(&self) -> Duration { + Instant::now().duration_since(self.fetch_time) + } + + /// returns `true` if the entry's age is >= age_limit + pub fn older_than(&self, age_limit: Option) -> bool { + age_limit.map_or(false, |limit| self.age() >= limit) + } +} + pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, pub execution_timeout_multiplier: u32, + pub engine_capabilities_cache: Mutex>, auth: Option, } @@ -535,6 +601,7 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, execution_timeout_multiplier: 
execution_timeout_multiplier.unwrap_or(1), + engine_capabilities_cache: Mutex::new(None), auth: None, }) } @@ -548,6 +615,7 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), + engine_capabilities_cache: Mutex::new(None), auth: Some(auth), }) } @@ -654,21 +722,48 @@ impl HttpJsonRpc { pub async fn get_block_by_hash_with_txns( &self, block_hash: ExecutionBlockHash, + fork: ForkName, ) -> Result>, Error> { let params = json!([block_hash, true]); - self.rpc_request( - ETH_GET_BLOCK_BY_HASH, - params, - ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, - ) - .await + Ok(Some(match fork { + ForkName::Merge => ExecutionBlockWithTransactions::Merge( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), + ForkName::Capella => ExecutionBlockWithTransactions::Capella( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), + ForkName::Eip4844 => ExecutionBlockWithTransactions::Eip4844( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), + ForkName::Base | ForkName::Altair => { + return Err(Error::UnsupportedForkVariant(format!( + "called get_block_by_hash_with_txns with fork {:?}", + fork + ))) + } + })) } pub async fn new_payload_v1( &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayloadV1::from(execution_payload)]); + let params = json!([JsonExecutionPayload::from(execution_payload)]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -681,13 +776,30 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn new_payload_v2( + &self, + execution_payload: ExecutionPayload, + ) -> Result { + let params = 
json!([JsonExecutionPayload::from(execution_payload)]); + + let response: JsonPayloadStatusV1 = self + .rpc_request( + ENGINE_NEW_PAYLOAD_V2, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let response: JsonExecutionPayloadV1 = self + let payload_v1: JsonExecutionPayloadV1 = self .rpc_request( ENGINE_GET_PAYLOAD_V1, params, @@ -695,17 +807,74 @@ impl HttpJsonRpc { ) .await?; - Ok(response.into()) + Ok(GetPayloadResponse::Merge(GetPayloadResponseMerge { + execution_payload: payload_v1.into(), + // Set the V1 payload values from the EE to be zero. This simulates + // the pre-block-value functionality of always choosing the builder + // block. + block_value: Uint256::zero(), + })) + } + + pub async fn get_payload_v2( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + match fork_name { + ForkName::Merge => { + let response: JsonGetPayloadResponseV1 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V1(response).into()) + } + ForkName::Capella => { + let response: JsonGetPayloadResponseV2 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V2(response).into()) + } + ForkName::Base | ForkName::Altair | ForkName::Eip4844 => Err( + Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), + ), + } + } + + pub async fn get_blobs_bundle_v1( + &self, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + let response: 
JsonBlobBundles = self + .rpc_request( + ENGINE_GET_BLOBS_BUNDLE_V1, + params, + ENGINE_GET_BLOBS_BUNDLE_TIMEOUT, + ) + .await?; + + Ok(response) } pub async fn forkchoice_updated_v1( &self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { let params = json!([ - JsonForkChoiceStateV1::from(forkchoice_state), - payload_attributes.map(JsonPayloadAttributesV1::from) + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) ]); let response: JsonForkchoiceUpdatedV1Response = self @@ -719,6 +888,27 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn forkchoice_updated_v2( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) + ]); + + let response: JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V2, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn exchange_transition_configuration_v1( &self, transition_configuration: TransitionConfigurationV1, @@ -736,6 +926,118 @@ impl HttpJsonRpc { Ok(response) } + + pub async fn exchange_capabilities(&self) -> Result { + let params = json!([LIGHTHOUSE_CAPABILITIES]); + + let response: Result, _> = self + .rpc_request( + ENGINE_EXCHANGE_CAPABILITIES, + params, + ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await; + + match response { + // TODO (mark): rip this out once we are post capella on mainnet + Err(error) => match error { + Error::ServerMessage { code, message: _ } if code == METHOD_NOT_FOUND_CODE => { + Ok(PRE_CAPELLA_ENGINE_CAPABILITIES) + } + _ => Err(error), + }, + Ok(capabilities) => Ok(EngineCapabilities { + new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1), + 
new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), + forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), + forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), + get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), + get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), + exchange_transition_configuration_v1: capabilities + .contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1), + }), + } + } + + pub async fn clear_exchange_capabilties_cache(&self) { + *self.engine_capabilities_cache.lock().await = None; + } + + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + let mut lock = self.engine_capabilities_cache.lock().await; + + if let Some(lock) = lock.as_ref().filter(|entry| !entry.older_than(age_limit)) { + Ok(lock.engine_capabilities()) + } else { + let engine_capabilities = self.exchange_capabilities().await?; + *lock = Some(CapabilitiesCacheEntry::new(engine_capabilities)); + Ok(engine_capabilities) + } + } + + // automatically selects the latest version of + // new_payload that the execution engine supports + pub async fn new_payload( + &self, + execution_payload: ExecutionPayload, + ) -> Result { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.new_payload_v2 { + self.new_payload_v2(execution_payload).await + } else if engine_capabilities.new_payload_v1 { + 
self.new_payload_v1(execution_payload).await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayload")) + } + } + + // automatically selects the latest version of + // get_payload that the execution engine supports + pub async fn get_payload( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.get_payload_v2 { + self.get_payload_v2(fork_name, payload_id).await + } else if engine_capabilities.new_payload_v1 { + self.get_payload_v1(payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayload")) + } + } + + // automatically selects the latest version of + // forkchoice_updated that the execution engine supports + pub async fn forkchoice_updated( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.forkchoice_updated_v2 { + self.forkchoice_updated_v2(forkchoice_state, payload_attributes) + .await + } else if engine_capabilities.forkchoice_updated_v1 { + self.forkchoice_updated_v1(forkchoice_state, payload_attributes) + .await + } else { + Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) + } + } } #[cfg(test)] @@ -746,7 +1048,7 @@ mod test { use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{MainnetEthSpec, Transactions, Unsigned, VariableList}; + use types::{ExecutionPayloadMerge, MainnetEthSpec, Transactions, Unsigned, VariableList}; struct Tester { server: MockServer, @@ -852,10 +1154,10 @@ mod test { fn encode_transactions( transactions: Transactions, ) -> Result { - let ep: JsonExecutionPayloadV1 = JsonExecutionPayloadV1 { + let ep: JsonExecutionPayload = JsonExecutionPayload::V1(JsonExecutionPayloadV1 { transactions, ..<_>::default() - }; + }); let json = serde_json::to_value(&ep)?; 
Ok(json.get("transactions").unwrap().clone()) } @@ -882,8 +1184,8 @@ mod test { json.as_object_mut() .unwrap() .insert("transactions".into(), transactions); - let ep: JsonExecutionPayloadV1 = serde_json::from_value(json)?; - Ok(ep.transactions) + let ep: JsonExecutionPayload = serde_json::from_value(json)?; + Ok(ep.transactions().clone()) } fn assert_transactions_serde( @@ -1029,16 +1331,16 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::repeat_byte(0), - }), + })), ) .await; }, @@ -1064,16 +1366,16 @@ mod test { .assert_auth_failure(|client| async move { client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(1), safe_block_hash: ExecutionBlockHash::repeat_byte(1), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::repeat_byte(0), - }), + })), ) .await }) @@ -1109,22 +1411,24 @@ mod test { .assert_request_equals( |client| async move { let _ = client - .new_payload_v1::(ExecutionPayload { - parent_hash: ExecutionBlockHash::repeat_byte(0), - fee_recipient: Address::repeat_byte(1), - state_root: Hash256::repeat_byte(1), - receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), - prev_randao: Hash256::repeat_byte(1), - block_number: 0, - gas_limit: 1, - gas_used: 2, - timestamp: 42, - extra_data: vec![].into(), - base_fee_per_gas: Uint256::from(1), - block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), - }) + 
.new_payload_v1::(ExecutionPayload::Merge( + ExecutionPayloadMerge { + parent_hash: ExecutionBlockHash::repeat_byte(0), + fee_recipient: Address::repeat_byte(1), + state_root: Hash256::repeat_byte(1), + receipts_root: Hash256::repeat_byte(0), + logs_bloom: vec![1; 256].into(), + prev_randao: Hash256::repeat_byte(1), + block_number: 0, + gas_limit: 1, + gas_used: 2, + timestamp: 42, + extra_data: vec![].into(), + base_fee_per_gas: Uint256::from(1), + block_hash: ExecutionBlockHash::repeat_byte(1), + transactions: vec![].into(), + }, + )) .await; }, json!({ @@ -1154,22 +1458,24 @@ mod test { Tester::new(false) .assert_auth_failure(|client| async move { client - .new_payload_v1::(ExecutionPayload { - parent_hash: ExecutionBlockHash::repeat_byte(0), - fee_recipient: Address::repeat_byte(1), - state_root: Hash256::repeat_byte(1), - receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), - prev_randao: Hash256::repeat_byte(1), - block_number: 0, - gas_limit: 1, - gas_used: 2, - timestamp: 42, - extra_data: vec![].into(), - base_fee_per_gas: Uint256::from(1), - block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), - }) + .new_payload_v1::(ExecutionPayload::Merge( + ExecutionPayloadMerge { + parent_hash: ExecutionBlockHash::repeat_byte(0), + fee_recipient: Address::repeat_byte(1), + state_root: Hash256::repeat_byte(1), + receipts_root: Hash256::repeat_byte(0), + logs_bloom: vec![1; 256].into(), + prev_randao: Hash256::repeat_byte(1), + block_number: 0, + gas_limit: 1, + gas_used: 2, + timestamp: 42, + extra_data: vec![].into(), + base_fee_per_gas: Uint256::from(1), + block_hash: ExecutionBlockHash::repeat_byte(1), + transactions: vec![].into(), + }, + )) .await }) .await; @@ -1182,7 +1488,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0), finalized_block_hash: 
ExecutionBlockHash::repeat_byte(1), @@ -1208,7 +1514,7 @@ mod test { .assert_auth_failure(|client| async move { client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::repeat_byte(0), safe_block_hash: ExecutionBlockHash::repeat_byte(0), finalized_block_hash: ExecutionBlockHash::repeat_byte(1), @@ -1247,16 +1553,16 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), - }) + })) ) .await; }, @@ -1294,16 +1600,16 @@ mod test { |client| async move { let response = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), finalized_block_hash: ExecutionBlockHash::zero(), }, - Some(PayloadAttributes { + Some(PayloadAttributes::V1(PayloadAttributesV1 { timestamp: 5, prev_randao: Hash256::zero(), suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), - }) + })) ) .await .unwrap(); @@ -1357,12 +1663,13 @@ mod test { } })], |client| async move { - let payload = client + let payload: ExecutionPayload<_> = client .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) .await - .unwrap(); + .unwrap() + .into(); - let expected = ExecutionPayload 
{ + let expected = ExecutionPayload::Merge(ExecutionPayloadMerge { parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -1377,7 +1684,7 @@ mod test { base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), transactions: vec![].into(), - }; + }); assert_eq!(payload, expected); }, @@ -1387,7 +1694,7 @@ mod test { // engine_newPayloadV1 REQUEST validation |client| async move { let _ = client - .new_payload_v1::(ExecutionPayload { + .new_payload_v1::(ExecutionPayload::Merge(ExecutionPayloadMerge{ parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -1402,7 +1709,7 @@ mod test { base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), transactions: vec![].into(), - }) + })) .await; }, json!({ @@ -1441,7 +1748,7 @@ mod test { })], |client| async move { let response = client - .new_payload_v1::(ExecutionPayload::default()) + .new_payload_v1::(ExecutionPayload::Merge(ExecutionPayloadMerge::default())) .await .unwrap(); @@ -1460,7 +1767,7 @@ mod test { |client| async move { let _ = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: 
ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), @@ -1499,7 +1806,7 @@ mod test { |client| async move { let response = client .forkchoice_updated_v1( - ForkChoiceState { + ForkchoiceState { head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 560569c92f..a6ebc19527 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,7 +1,14 @@ use super::*; use serde::{Deserialize, Serialize}; use strum::EnumString; -use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; +use superstruct::superstruct; +use types::{ + Blob, EthSpec, ExecutionBlockHash, FixedVector, KzgCommitment, Transaction, Unsigned, + VariableList, Withdrawal, +}; +use types::{ + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, +}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -56,9 +63,18 @@ pub struct JsonPayloadIdResponse { pub payload_id: PayloadId, } -#[derive(Debug, PartialEq, Default, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase")] -pub struct JsonExecutionPayloadHeaderV1 { +#[superstruct( + variants(V1, V2, V3), + variant_attributes( + derive(Debug, PartialEq, Default, Serialize, Deserialize,), + 
serde(bound = "T: EthSpec", rename_all = "camelCase"), + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase", untagged)] +pub struct JsonExecutionPayload { pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, @@ -78,210 +94,342 @@ pub struct JsonExecutionPayloadHeaderV1 { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, - pub block_hash: ExecutionBlockHash, - pub transactions_root: Hash256, -} - -impl From> for ExecutionPayloadHeader { - fn from(e: JsonExecutionPayloadHeaderV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonExecutionPayloadHeaderV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions_root, - } = e; - - Self { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions_root, - } - } -} - -#[derive(Debug, PartialEq, Default, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase")] -pub struct JsonExecutionPayloadV1 { - pub parent_hash: ExecutionBlockHash, - pub fee_recipient: Address, - pub state_root: Hash256, - pub receipts_root: Hash256, - #[serde(with = "serde_logs_bloom")] - pub logs_bloom: FixedVector, - pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] - pub gas_used: u64, - #[serde(with = 
"eth2_serde_utils::u64_hex_be")] - pub timestamp: u64, - #[serde(with = "ssz_types::serde_utils::hex_var_list")] - pub extra_data: VariableList, + #[superstruct(only(V3))] #[serde(with = "eth2_serde_utils::u256_hex_be")] - pub base_fee_per_gas: Uint256, + pub excess_data_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, + #[superstruct(only(V2, V3))] + pub withdrawals: VariableList, } -impl From> for JsonExecutionPayloadV1 { - fn from(e: ExecutionPayload) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let ExecutionPayload { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = e; - - Self { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, +impl From> for JsonExecutionPayloadV1 { + fn from(payload: ExecutionPayloadMerge) -> Self { + JsonExecutionPayloadV1 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + } + } +} +impl From> for JsonExecutionPayloadV2 { + fn from(payload: ExecutionPayloadCapella) -> Self { + JsonExecutionPayloadV2 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: 
payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + } + } +} +impl From> for JsonExecutionPayloadV3 { + fn from(payload: ExecutionPayloadEip4844) -> Self { + JsonExecutionPayloadV3 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + excess_data_gas: payload.excess_data_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), } } } -impl From> for ExecutionPayload { - fn from(e: JsonExecutionPayloadV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let JsonExecutionPayloadV1 { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, - } = e; +impl From> for JsonExecutionPayload { + fn from(execution_payload: ExecutionPayload) -> Self { + match execution_payload { + ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()), + ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()), + ExecutionPayload::Eip4844(payload) => JsonExecutionPayload::V3(payload.into()), + } + } +} - Self { - parent_hash, - fee_recipient, - state_root, - receipts_root, - logs_bloom, - prev_randao, - block_number, - gas_limit, - gas_used, - timestamp, - extra_data, - base_fee_per_gas, - block_hash, - transactions, +impl From> for ExecutionPayloadMerge { + fn from(payload: JsonExecutionPayloadV1) -> Self { + ExecutionPayloadMerge { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + } + } +} +impl From> for ExecutionPayloadCapella { + fn from(payload: JsonExecutionPayloadV2) -> Self { + ExecutionPayloadCapella { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + 
base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + } + } +} +impl From> for ExecutionPayloadEip4844 { + fn from(payload: JsonExecutionPayloadV3) -> Self { + ExecutionPayloadEip4844 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + excess_data_gas: payload.excess_data_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + } + } +} + +impl From> for ExecutionPayload { + fn from(json_execution_payload: JsonExecutionPayload) -> Self { + match json_execution_payload { + JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()), + JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()), + JsonExecutionPayload::V3(payload) => ExecutionPayload::Eip4844(payload.into()), + } + } +} + +#[superstruct( + variants(V1, V2, V3), + variant_attributes( + derive(Debug, PartialEq, Serialize, Deserialize), + serde(bound = "T: EthSpec", rename_all = "camelCase") + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub struct JsonGetPayloadResponse { + #[superstruct(only(V1), partial_getter(rename = "execution_payload_v1"))] + pub execution_payload: JsonExecutionPayloadV1, + #[superstruct(only(V2), 
partial_getter(rename = "execution_payload_v2"))] + pub execution_payload: JsonExecutionPayloadV2, + #[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))] + pub execution_payload: JsonExecutionPayloadV3, + #[serde(with = "eth2_serde_utils::u256_hex_be")] + pub block_value: Uint256, +} + +impl From> for GetPayloadResponse { + fn from(json_get_payload_response: JsonGetPayloadResponse) -> Self { + match json_get_payload_response { + JsonGetPayloadResponse::V1(response) => { + GetPayloadResponse::Merge(GetPayloadResponseMerge { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + JsonGetPayloadResponse::V2(response) => { + GetPayloadResponse::Capella(GetPayloadResponseCapella { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + JsonGetPayloadResponse::V3(response) => { + GetPayloadResponse::Eip4844(GetPayloadResponseEip4844 { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } } } } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct JsonPayloadAttributesV1 { +pub struct JsonWithdrawal { + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub index: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub validator_index: u64, + pub address: Address, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub amount: u64, +} + +impl From for JsonWithdrawal { + fn from(withdrawal: Withdrawal) -> Self { + Self { + index: withdrawal.index, + validator_index: withdrawal.validator_index, + address: withdrawal.address, + amount: withdrawal.amount, + } + } +} + +impl From for Withdrawal { + fn from(jw: JsonWithdrawal) -> Self { + Self { + index: jw.index, + validator_index: jw.validator_index, + address: jw.address, + amount: jw.amount, + } + } +} + +#[superstruct( + variants(V1, V2), + variant_attributes( + derive(Debug, Clone, PartialEq, Serialize, 
Deserialize), + serde(rename_all = "camelCase") + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub struct JsonPayloadAttributes { #[serde(with = "eth2_serde_utils::u64_hex_be")] pub timestamp: u64, pub prev_randao: Hash256, pub suggested_fee_recipient: Address, + #[superstruct(only(V2))] + pub withdrawals: Vec, } -impl From for JsonPayloadAttributesV1 { - fn from(p: PayloadAttributes) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient, - } = p; - - Self { - timestamp, - prev_randao, - suggested_fee_recipient, +impl From for JsonPayloadAttributes { + fn from(payload_atributes: PayloadAttributes) -> Self { + match payload_atributes { + PayloadAttributes::V1(pa) => Self::V1(JsonPayloadAttributesV1 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: pa.suggested_fee_recipient, + }), + PayloadAttributes::V2(pa) => Self::V2(JsonPayloadAttributesV2 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: pa.suggested_fee_recipient, + withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), + }), } } } -impl From for PayloadAttributes { - fn from(j: JsonPayloadAttributesV1) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let JsonPayloadAttributesV1 { - timestamp, - prev_randao, - suggested_fee_recipient, - } = j; - - Self { - timestamp, - prev_randao, - suggested_fee_recipient, +impl From for PayloadAttributes { + fn from(json_payload_attributes: JsonPayloadAttributes) -> Self { + match json_payload_attributes { + JsonPayloadAttributes::V1(jpa) => Self::V1(PayloadAttributesV1 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + }), + JsonPayloadAttributes::V2(jpa) => Self::V2(PayloadAttributesV2 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), + }), } } } +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase")] +pub struct JsonBlobBundles { + pub block_hash: ExecutionBlockHash, + pub kzgs: Vec, + pub blobs: Vec>, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct JsonForkChoiceStateV1 { +pub struct JsonForkchoiceStateV1 { pub head_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash, } -impl From for JsonForkChoiceStateV1 { - fn from(f: ForkChoiceState) -> Self { +impl From for JsonForkchoiceStateV1 { + fn from(f: ForkchoiceState) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let ForkChoiceState { + let ForkchoiceState { head_block_hash, safe_block_hash, finalized_block_hash, @@ -295,10 +443,10 @@ impl From for JsonForkChoiceStateV1 { } } -impl From for ForkChoiceState { - fn from(j: JsonForkChoiceStateV1) -> Self { +impl From for ForkchoiceState { + fn from(j: JsonForkchoiceStateV1) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let JsonForkChoiceStateV1 { + let JsonForkchoiceStateV1 { head_block_hash, safe_block_hash, finalized_block_hash, diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index eaaa271c51..ce413cb113 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,22 +1,25 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. use crate::engine_api::{ - Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, + EngineCapabilities, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, + PayloadId, }; use crate::HttpJsonRpc; use lru::LruCache; use slog::{debug, error, info, warn, Logger}; use std::future::Future; use std::sync::Arc; +use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; -use types::{Address, ExecutionBlockHash, Hash256}; +use types::ExecutionBlockHash; /// The number of payload IDs that will be stored for each `Engine`. /// -/// Since the size of each value is small (~100 bytes) a large number is used for safety. +/// Since the size of each value is small (~800 bytes) a large number is used for safety. const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; +const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes /// Stores the remembered state of a engine. #[derive(Copy, Clone, PartialEq, Debug, Eq, Default)] @@ -28,6 +31,14 @@ enum EngineStateInternal { AuthFailed, } +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)] +enum CapabilitiesCacheAction { + #[default] + None, + Update, + Clear, +} + /// A subset of the engine state to inform other services if the engine is online or offline. 
#[derive(Debug, Clone, PartialEq, Eq, Copy)] pub enum EngineState { @@ -88,7 +99,7 @@ impl State { } #[derive(Copy, Clone, PartialEq, Debug)] -pub struct ForkChoiceState { +pub struct ForkchoiceState { pub head_block_hash: ExecutionBlockHash, pub safe_block_hash: ExecutionBlockHash, pub finalized_block_hash: ExecutionBlockHash, @@ -97,9 +108,7 @@ pub struct ForkChoiceState { #[derive(Hash, PartialEq, std::cmp::Eq)] struct PayloadIdCacheKey { pub head_block_hash: ExecutionBlockHash, - pub timestamp: u64, - pub prev_randao: Hash256, - pub suggested_fee_recipient: Address, + pub payload_attributes: PayloadAttributes, } #[derive(Debug)] @@ -115,7 +124,7 @@ pub struct Engine { pub api: HttpJsonRpc, payload_id_cache: Mutex>, state: RwLock, - latest_forkchoice_state: RwLock>, + latest_forkchoice_state: RwLock>, executor: TaskExecutor, log: Logger, } @@ -142,37 +151,30 @@ impl Engine { pub async fn get_payload_id( &self, - head_block_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + head_block_hash: &ExecutionBlockHash, + payload_attributes: &PayloadAttributes, ) -> Option { self.payload_id_cache .lock() .await - .get(&PayloadIdCacheKey { - head_block_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - }) + .get(&PayloadIdCacheKey::new(head_block_hash, payload_attributes)) .cloned() } pub async fn notify_forkchoice_updated( &self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, log: &Logger, ) -> Result { let response = self .api - .forkchoice_updated_v1(forkchoice_state, payload_attributes) + .forkchoice_updated(forkchoice_state, payload_attributes.clone()) .await?; if let Some(payload_id) = response.payload_id { - if let Some(key) = - payload_attributes.map(|pa| PayloadIdCacheKey::new(&forkchoice_state, &pa)) + if let Some(key) = payload_attributes + .map(|pa| PayloadIdCacheKey::new(&forkchoice_state.head_block_hash, &pa)) { 
self.payload_id_cache.lock().await.put(key, payload_id); } else { @@ -187,11 +189,11 @@ impl Engine { Ok(response) } - async fn get_latest_forkchoice_state(&self) -> Option { + async fn get_latest_forkchoice_state(&self) -> Option { *self.latest_forkchoice_state.read().await } - pub async fn set_latest_forkchoice_state(&self, state: ForkChoiceState) { + pub async fn set_latest_forkchoice_state(&self, state: ForkchoiceState) { *self.latest_forkchoice_state.write().await = Some(state); } @@ -216,7 +218,7 @@ impl Engine { // For simplicity, payload attributes are never included in this call. It may be // reasonable to include them in the future. - if let Err(e) = self.api.forkchoice_updated_v1(forkchoice_state, None).await { + if let Err(e) = self.api.forkchoice_updated(forkchoice_state, None).await { debug!( self.log, "Failed to issue latest head to engine"; @@ -239,7 +241,7 @@ impl Engine { /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. 
pub async fn upcheck(&self) { - let state: EngineStateInternal = match self.api.upcheck().await { + let (state, cache_action) = match self.api.upcheck().await { Ok(()) => { let mut state = self.state.write().await; if **state != EngineStateInternal::Synced { @@ -257,12 +259,12 @@ impl Engine { ); } state.update(EngineStateInternal::Synced); - **state + (**state, CapabilitiesCacheAction::Update) } Err(EngineApiError::IsSyncing) => { let mut state = self.state.write().await; state.update(EngineStateInternal::Syncing); - **state + (**state, CapabilitiesCacheAction::Update) } Err(EngineApiError::Auth(err)) => { error!( @@ -273,7 +275,7 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::AuthFailed); - **state + (**state, CapabilitiesCacheAction::Clear) } Err(e) => { error!( @@ -284,10 +286,30 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::Offline); - **state + // need to clear the engine capabilities cache if we detect the + // execution engine is offline as it is likely the engine is being + // updated to a newer version with new capabilities + (**state, CapabilitiesCacheAction::Clear) } }; + // do this after dropping state lock guard to avoid holding two locks at once + match cache_action { + CapabilitiesCacheAction::None => {} + CapabilitiesCacheAction::Update => { + if let Err(e) = self + .get_engine_capabilities(Some(CACHED_ENGINE_CAPABILITIES_AGE_LIMIT)) + .await + { + warn!(self.log, + "Error during exchange capabilities"; + "error" => ?e, + ) + } + } + CapabilitiesCacheAction::Clear => self.api.clear_exchange_capabilties_cache().await, + } + debug!( self.log, "Execution engine upcheck complete"; @@ -295,6 +317,22 @@ impl Engine { ); } + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. 
If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + self.api.get_engine_capabilities(age_limit).await + } + /// Run `func` on the node regardless of the node's current state. /// /// ## Note @@ -303,7 +341,7 @@ impl Engine { /// deadlock. pub async fn request<'a, F, G, H>(self: &'a Arc, func: F) -> Result where - F: Fn(&'a Engine) -> G, + F: FnOnce(&'a Engine) -> G, G: Future>, { match func(self).await { @@ -348,12 +386,10 @@ impl Engine { } impl PayloadIdCacheKey { - fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { + fn new(head_block_hash: &ExecutionBlockHash, attributes: &PayloadAttributes) -> Self { Self { - head_block_hash: state.head_block_hash, - timestamp: attributes.timestamp, - prev_randao: attributes.prev_randao, - suggested_fee_recipient: attributes.suggested_fee_recipient, + head_block_hash: *head_block_hash, + payload_attributes: attributes.clone(), } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 5b0fecbf20..af5e491556 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -7,12 +7,13 @@ use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; +pub use engine_api::EngineCapabilities; use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; -pub use engines::{EngineState, 
ForkChoiceState}; -use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; +pub use engines::{EngineState, ForkchoiceState}; +use eth2::types::builder_bid::SignedBuilderBid; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -35,9 +36,13 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; +use tree_hash::TreeHash; +use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment, Withdrawals}; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, - ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, + BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ExecutionPayload, + ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, + ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, + Slot, Uint256, }; mod block_hash; @@ -98,6 +103,13 @@ pub enum Error { transactions_root: Hash256, }, InvalidJWTSecret(String), + BeaconStateError(BeaconStateError), +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconStateError(e) + } } impl From for Error { @@ -106,6 +118,108 @@ impl From for Error { } } +pub enum BlockProposalContents> { + Payload { + payload: Payload, + block_value: Uint256, + }, + PayloadAndBlobs { + payload: Payload, + block_value: Uint256, + kzg_commitments: Vec, + blobs: Vec>, + }, +} + +impl> BlockProposalContents { + pub fn payload(&self) -> &Payload { + match self { + Self::Payload { + payload, + block_value: _, + } => payload, + Self::PayloadAndBlobs { + payload, + block_value: _, + kzg_commitments: _, + blobs: _, + } => payload, + } + } + pub fn to_payload(self) -> Payload { + match self { + Self::Payload { + payload, + block_value: _, + } => payload, + Self::PayloadAndBlobs { + payload, + block_value: _, + kzg_commitments: _, + blobs: _, + } => 
payload, + } + } + pub fn kzg_commitments(&self) -> Option<&[KzgCommitment]> { + match self { + Self::Payload { + payload: _, + block_value: _, + } => None, + Self::PayloadAndBlobs { + payload: _, + block_value: _, + kzg_commitments, + blobs: _, + } => Some(kzg_commitments), + } + } + pub fn blobs(&self) -> Option<&[Blob]> { + match self { + Self::Payload { + payload: _, + block_value: _, + } => None, + Self::PayloadAndBlobs { + payload: _, + block_value: _, + kzg_commitments: _, + blobs, + } => Some(blobs), + } + } + pub fn block_value(&self) -> &Uint256 { + match self { + Self::Payload { + payload: _, + block_value, + } => block_value, + Self::PayloadAndBlobs { + payload: _, + block_value, + kzg_commitments: _, + blobs: _, + } => block_value, + } + } + pub fn default_at_fork(fork_name: ForkName) -> Result { + Ok(match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + BlockProposalContents::Payload { + payload: Payload::default_at_fork(fork_name)?, + block_value: Uint256::zero(), + } + } + ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs { + payload: Payload::default_at_fork(fork_name)?, + block_value: Uint256::zero(), + blobs: vec![], + kzg_commitments: vec![], + }, + }) + } +} + #[derive(Clone, PartialEq)] pub struct ProposerPreparationDataEntry { update_epoch: Epoch, @@ -290,12 +404,12 @@ impl ExecutionLayer { &self.inner.builder } - /// Cache a full payload, keyed on the `tree_hash_root` of its `transactions` field. - fn cache_payload(&self, payload: &ExecutionPayload) -> Option> { - self.inner.payload_cache.put(payload.clone()) + /// Cache a full payload, keyed on the `tree_hash_root` of the payload + fn cache_payload(&self, payload: ExecutionPayloadRef) -> Option> { + self.inner.payload_cache.put(payload.clone_from_ref()) } - /// Attempt to retrieve a full payload from the payload cache by the `transactions_root`. 
+ /// Attempt to retrieve a full payload from the payload cache by the payload root pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { self.inner.payload_cache.pop(root) } @@ -566,19 +680,15 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. - #[allow(clippy::too_many_arguments)] - pub async fn get_payload>( + pub async fn get_payload>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - proposer_index: u64, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, + current_fork: ForkName, spec: &ChainSpec, - ) -> Result { - let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; - + ) -> Result, Error> { let payload_result = match Payload::block_type() { BlockType::Blinded => { let _timer = metrics::start_timer_vec( @@ -587,11 +697,10 @@ impl ExecutionLayer { ); self.get_blinded_payload( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, builder_params, + current_fork, spec, ) .await @@ -603,10 +712,9 @@ impl ExecutionLayer { ); self.get_full_payload( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, ) .await .map(ProvenancedPayload::Local) @@ -615,7 +723,7 @@ impl ExecutionLayer { // Track some metrics and return the result. 
match payload_result { - Ok(ProvenancedPayload::Local(payload)) => { + Ok(ProvenancedPayload::Local(block_proposal_contents)) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, &[metrics::SUCCESS], @@ -624,9 +732,9 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, &[metrics::LOCAL], ); - Ok(payload) + Ok(block_proposal_contents) } - Ok(ProvenancedPayload::Builder(payload)) => { + Ok(ProvenancedPayload::Builder(block_proposal_contents)) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, &[metrics::SUCCESS], @@ -635,7 +743,7 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, &[metrics::BUILDER], ); - Ok(payload) + Ok(block_proposal_contents) } Err(e) => { metrics::inc_counter_vec( @@ -647,17 +755,15 @@ impl ExecutionLayer { } } - #[allow(clippy::too_many_arguments)] - async fn get_blinded_payload>( + async fn get_blinded_payload>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, builder_params: BuilderParams, + current_fork: ForkName, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result>, Error> { if let Some(builder) = self.builder() { let slot = builder_params.slot; let pubkey = builder_params.pubkey; @@ -682,10 +788,9 @@ impl ExecutionLayer { timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { self.get_full_payload_caching::( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, ) .await }) @@ -701,7 +806,7 @@ impl ExecutionLayer { }, "relay_response_ms" => relay_duration.as_millis(), "local_fee_recipient" => match &local_result { - Ok(header) => format!("{:?}", header.fee_recipient()), + Ok(proposal_contents) => format!("{:?}", proposal_contents.payload().fee_recipient()), Err(_) => "request failed".to_string() }, 
"local_response_ms" => local_duration.as_millis(), @@ -715,7 +820,7 @@ impl ExecutionLayer { "Builder error when requesting payload"; "info" => "falling back to local execution client", "relay_error" => ?e, - "local_block_hash" => ?local.block_hash(), + "local_block_hash" => ?local.payload().block_hash(), "parent_hash" => ?parent_hash, ); Ok(ProvenancedPayload::Local(local)) @@ -725,7 +830,7 @@ impl ExecutionLayer { self.log(), "Builder did not return a payload"; "info" => "falling back to local execution client", - "local_block_hash" => ?local.block_hash(), + "local_block_hash" => ?local.payload().block_hash(), "parent_hash" => ?parent_hash, ); Ok(ProvenancedPayload::Local(local)) @@ -737,22 +842,37 @@ impl ExecutionLayer { self.log(), "Received local and builder payloads"; "relay_block_hash" => ?header.block_hash(), - "local_block_hash" => ?local.block_hash(), + "local_block_hash" => ?local.payload().block_hash(), "parent_hash" => ?parent_hash, ); + let relay_value = relay.data.message.value; + let local_value = *local.block_value(); + if local_value >= relay_value { + info!( + self.log(), + "Local block is more profitable than relay block"; + "local_block_value" => %local_value, + "relay_value" => %relay_value + ); + return Ok(ProvenancedPayload::Local(local)); + } + match verify_builder_bid( &relay, parent_hash, - prev_randao, - timestamp, - Some(local.block_number()), + payload_attributes, + Some(local.payload().block_number()), self.inner.builder_profit_threshold, + current_fork, spec, ) { - Ok(()) => { - Ok(ProvenancedPayload::Builder(relay.data.message.header)) - } + Ok(()) => Ok(ProvenancedPayload::Builder( + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + }, + )), Err(reason) if !reason.payload_invalid() => { info!( self.log(), @@ -795,20 +915,26 @@ impl ExecutionLayer { match verify_builder_bid( &relay, parent_hash, - prev_randao, - timestamp, + payload_attributes, None, 
self.inner.builder_profit_threshold, + current_fork, spec, ) { - Ok(()) => { - Ok(ProvenancedPayload::Builder(relay.data.message.header)) - } + Ok(()) => Ok(ProvenancedPayload::Builder( + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + }, + )), // If the payload is valid then use it. The local EE failed // to produce a payload so we have no alternative. - Err(e) if !e.payload_invalid() => { - Ok(ProvenancedPayload::Builder(relay.data.message.header)) - } + Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + }, + )), Err(reason) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, @@ -871,76 +997,62 @@ impl ExecutionLayer { } self.get_full_payload_caching( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, ) .await .map(ProvenancedPayload::Local) } /// Get a full payload without caching its result in the execution layer's payload cache. - async fn get_full_payload>( + async fn get_full_payload>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, - ) -> Result { + current_fork: ForkName, + ) -> Result, Error> { self.get_full_payload_with( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, noop, ) .await } /// Get a full payload and cache its result in the execution layer's payload cache. 
- async fn get_full_payload_caching>( + async fn get_full_payload_caching>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, - ) -> Result { + current_fork: ForkName, + ) -> Result, Error> { self.get_full_payload_with( parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, + payload_attributes, forkchoice_update_params, + current_fork, Self::cache_payload, ) .await } - async fn get_full_payload_with>( + async fn get_full_payload_with>( &self, parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, + payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, - f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, - ) -> Result { - debug!( - self.log(), - "Issuing engine_getPayload"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, - ); + current_fork: ForkName, + f: fn(&ExecutionLayer, ExecutionPayloadRef) -> Option>, + ) -> Result, Error> { self.engine() - .request(|engine| async move { + .request(move |engine| async move { let payload_id = if let Some(id) = engine - .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) + .get_payload_id(&parent_hash, payload_attributes) .await { // The payload id has been cached for this engine. 
@@ -956,7 +1068,7 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, &[metrics::MISS], ); - let fork_choice_state = ForkChoiceState { + let fork_choice_state = ForkchoiceState { head_block_hash: parent_hash, safe_block_hash: forkchoice_update_params .justified_hash @@ -965,16 +1077,11 @@ impl ExecutionLayer { .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), }; - let payload_attributes = PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient, - }; let response = engine .notify_forkchoice_updated( fork_choice_state, - Some(payload_attributes), + Some(payload_attributes.clone()), self.log(), ) .await?; @@ -994,33 +1101,73 @@ impl ExecutionLayer { } }; - engine - .api - .get_payload_v1::(payload_id) - .await - .map(|full_payload| { - if full_payload.fee_recipient != suggested_fee_recipient { - error!( - self.log(), - "Inconsistent fee recipient"; - "msg" => "The fee recipient returned from the Execution Engine differs \ - from the suggested_fee_recipient set on the beacon node. This could \ - indicate that fees are being diverted to another address. 
Please \ - ensure that the value of suggested_fee_recipient is set correctly and \ - that the Execution Engine is trusted.", - "fee_recipient" => ?full_payload.fee_recipient, - "suggested_fee_recipient" => ?suggested_fee_recipient, - ); + let blob_fut = async { + match current_fork { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + None } - if f(self, &full_payload).is_some() { - warn!( + ForkName::Eip4844 => { + debug!( self.log(), - "Duplicate payload cached, this might indicate redundant proposal \ + "Issuing engine_getBlobsBundle"; + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + "prev_randao" => ?payload_attributes.prev_randao(), + "timestamp" => payload_attributes.timestamp(), + "parent_hash" => ?parent_hash, + ); + Some(engine.api.get_blobs_bundle_v1::(payload_id).await) + } + } + }; + let payload_fut = async { + debug!( + self.log(), + "Issuing engine_getPayload"; + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + "prev_randao" => ?payload_attributes.prev_randao(), + "timestamp" => payload_attributes.timestamp(), + "parent_hash" => ?parent_hash, + ); + engine.api.get_payload::(current_fork, payload_id).await + }; + let (blob, payload_response) = tokio::join!(blob_fut, payload_fut); + let (execution_payload, block_value) = payload_response.map(|payload_response| { + if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { + error!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. 
Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + ); + } + if f(self, payload_response.execution_payload_ref()).is_some() { + warn!( + self.log(), + "Duplicate payload cached, this might indicate redundant proposal \ attempts." - ); - } - full_payload.into() + ); + } + payload_response.into() + })?; + if let Some(blob) = blob.transpose()? { + // FIXME(sean) cache blobs + Ok(BlockProposalContents::PayloadAndBlobs { + payload: execution_payload.into(), + block_value, + blobs: blob.blobs, + kzg_commitments: blob.kzgs, }) + } else { + Ok(BlockProposalContents::Payload { + payload: execution_payload.into(), + block_value, + }) + } }) .await .map_err(Box::new) @@ -1052,14 +1199,14 @@ impl ExecutionLayer { trace!( self.log(), "Issuing engine_newPayload"; - "parent_hash" => ?execution_payload.parent_hash, - "block_hash" => ?execution_payload.block_hash, - "block_number" => execution_payload.block_number, + "parent_hash" => ?execution_payload.parent_hash(), + "block_hash" => ?execution_payload.block_hash(), + "block_number" => execution_payload.block_number(), ); let result = self .engine() - .request(|engine| engine.api.new_payload_v1(execution_payload.clone())) + .request(|engine| engine.api.new_payload(execution_payload.clone())) .await; if let Ok(status) = &result { @@ -1069,7 +1216,7 @@ impl ExecutionLayer { ); } - process_payload_status(execution_payload.block_hash, result, self.log()) + process_payload_status(execution_payload.block_hash(), result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) } @@ -1172,9 +1319,9 @@ impl ExecutionLayer { let payload_attributes = self.payload_attributes(next_slot, head_block_root).await; // Compute the "lookahead", the time between when the payload will be produced and now. 
- if let Some(payload_attributes) = payload_attributes { + if let Some(ref payload_attributes) = payload_attributes { if let Ok(now) = SystemTime::now().duration_since(UNIX_EPOCH) { - let timestamp = Duration::from_secs(payload_attributes.timestamp); + let timestamp = Duration::from_secs(payload_attributes.timestamp()); if let Some(lookahead) = timestamp.checked_sub(now) { metrics::observe_duration( &metrics::EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD, @@ -1191,7 +1338,7 @@ impl ExecutionLayer { } } - let forkchoice_state = ForkChoiceState { + let forkchoice_state = ForkchoiceState { head_block_hash, safe_block_hash: justified_block_hash, finalized_block_hash, @@ -1273,6 +1420,26 @@ impl ExecutionLayer { } } + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + self.engine() + .request(|engine| engine.get_engine_capabilities(age_limit)) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + /// Used during block production to determine if the merge has been triggered. 
/// /// ## Specification @@ -1476,10 +1643,11 @@ impl ExecutionLayer { pub async fn get_payload_by_block_hash( &self, hash: ExecutionBlockHash, + fork: ForkName, ) -> Result>, Error> { self.engine() .request(|engine| async move { - self.get_payload_by_block_hash_from_engine(engine, hash) + self.get_payload_by_block_hash_from_engine(engine, hash, fork) .await }) .await @@ -1491,14 +1659,26 @@ impl ExecutionLayer { &self, engine: &Engine, hash: ExecutionBlockHash, + fork: ForkName, ) -> Result>, ApiError> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); if hash == ExecutionBlockHash::zero() { - return Ok(Some(ExecutionPayload::default())); + return match fork { + ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())), + ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), + ForkName::Eip4844 => Ok(Some(ExecutionPayloadEip4844::default().into())), + ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant( + format!("called get_payload_by_block_hash_from_engine with {}", fork), + )), + }; } - let block = if let Some(block) = engine.api.get_block_by_hash_with_txns::(hash).await? { + let block = if let Some(block) = engine + .api + .get_block_by_hash_with_txns::(hash, fork) + .await? 
+ { block } else { return Ok(None); @@ -1506,30 +1686,91 @@ impl ExecutionLayer { let transactions = VariableList::new( block - .transactions - .into_iter() + .transactions() + .iter() .map(|transaction| VariableList::new(transaction.rlp().to_vec())) .collect::>() .map_err(ApiError::DeserializeTransaction)?, ) .map_err(ApiError::DeserializeTransactions)?; - Ok(Some(ExecutionPayload { - parent_hash: block.parent_hash, - fee_recipient: block.fee_recipient, - state_root: block.state_root, - receipts_root: block.receipts_root, - logs_bloom: block.logs_bloom, - prev_randao: block.prev_randao, - block_number: block.block_number, - gas_limit: block.gas_limit, - gas_used: block.gas_used, - timestamp: block.timestamp, - extra_data: block.extra_data, - base_fee_per_gas: block.base_fee_per_gas, - block_hash: block.block_hash, - transactions, - })) + let payload = match block { + ExecutionBlockWithTransactions::Merge(merge_block) => { + ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: merge_block.parent_hash, + fee_recipient: merge_block.fee_recipient, + state_root: merge_block.state_root, + receipts_root: merge_block.receipts_root, + logs_bloom: merge_block.logs_bloom, + prev_randao: merge_block.prev_randao, + block_number: merge_block.block_number, + gas_limit: merge_block.gas_limit, + gas_used: merge_block.gas_used, + timestamp: merge_block.timestamp, + extra_data: merge_block.extra_data, + base_fee_per_gas: merge_block.base_fee_per_gas, + block_hash: merge_block.block_hash, + transactions, + }) + } + ExecutionBlockWithTransactions::Capella(capella_block) => { + let withdrawals = VariableList::new( + capella_block + .withdrawals + .into_iter() + .map(Into::into) + .collect(), + ) + .map_err(ApiError::DeserializeWithdrawals)?; + ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: capella_block.parent_hash, + fee_recipient: capella_block.fee_recipient, + state_root: capella_block.state_root, + receipts_root: capella_block.receipts_root, + 
logs_bloom: capella_block.logs_bloom, + prev_randao: capella_block.prev_randao, + block_number: capella_block.block_number, + gas_limit: capella_block.gas_limit, + gas_used: capella_block.gas_used, + timestamp: capella_block.timestamp, + extra_data: capella_block.extra_data, + base_fee_per_gas: capella_block.base_fee_per_gas, + block_hash: capella_block.block_hash, + transactions, + withdrawals, + }) + } + ExecutionBlockWithTransactions::Eip4844(eip4844_block) => { + let withdrawals = VariableList::new( + eip4844_block + .withdrawals + .into_iter() + .map(Into::into) + .collect(), + ) + .map_err(ApiError::DeserializeWithdrawals)?; + ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { + parent_hash: eip4844_block.parent_hash, + fee_recipient: eip4844_block.fee_recipient, + state_root: eip4844_block.state_root, + receipts_root: eip4844_block.receipts_root, + logs_bloom: eip4844_block.logs_bloom, + prev_randao: eip4844_block.prev_randao, + block_number: eip4844_block.block_number, + gas_limit: eip4844_block.gas_limit, + gas_used: eip4844_block.gas_used, + timestamp: eip4844_block.timestamp, + extra_data: eip4844_block.extra_data, + base_fee_per_gas: eip4844_block.base_fee_per_gas, + excess_data_gas: eip4844_block.excess_data_gas, + block_hash: eip4844_block.block_hash, + transactions, + withdrawals, + }) + } + }; + + Ok(Some(payload)) } pub async fn propose_blinded_beacon_block( @@ -1565,9 +1806,9 @@ impl ExecutionLayer { "Builder successfully revealed payload"; "relay_response_ms" => duration.as_millis(), "block_root" => ?block_root, - "fee_recipient" => ?payload.fee_recipient, - "block_hash" => ?payload.block_hash, - "parent_hash" => ?payload.parent_hash + "fee_recipient" => ?payload.fee_recipient(), + "block_hash" => ?payload.block_hash(), + "parent_hash" => ?payload.parent_hash() ) } Err(e) => { @@ -1629,6 +1870,10 @@ enum InvalidBuilderPayload { signature: Signature, pubkey: PublicKeyBytes, }, + WithdrawalsRoot { + payload: Option, + expected: Option, + }, } impl 
InvalidBuilderPayload { @@ -1643,6 +1888,7 @@ impl InvalidBuilderPayload { InvalidBuilderPayload::BlockNumber { .. } => true, InvalidBuilderPayload::Fork { .. } => true, InvalidBuilderPayload::Signature { .. } => true, + InvalidBuilderPayload::WithdrawalsRoot { .. } => true, } } } @@ -1678,18 +1924,31 @@ impl fmt::Display for InvalidBuilderPayload { "invalid payload signature {} for pubkey {}", signature, pubkey ), + InvalidBuilderPayload::WithdrawalsRoot { payload, expected } => { + let opt_string = |opt_hash: &Option| { + opt_hash + .map(|hash| hash.to_string()) + .unwrap_or_else(|| "None".to_string()) + }; + write!( + f, + "payload withdrawals root was {} not {}", + opt_string(payload), + opt_string(expected) + ) + } } } } /// Perform some cursory, non-exhaustive validation of the bid returned from the builder. -fn verify_builder_bid>( +fn verify_builder_bid>( bid: &ForkVersionedResponse>, parent_hash: ExecutionBlockHash, - prev_randao: Hash256, - timestamp: u64, + payload_attributes: &PayloadAttributes, block_number: Option, profit_threshold: Uint256, + current_fork: ForkName, spec: &ChainSpec, ) -> Result<(), Box> { let is_signature_valid = bid.data.verify_signature(spec); @@ -1706,6 +1965,13 @@ fn verify_builder_bid>( ); } + let expected_withdrawals_root = payload_attributes + .withdrawals() + .ok() + .cloned() + .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); + let payload_withdrawals_root = header.withdrawals_root().ok(); + if payload_value < profit_threshold { Err(Box::new(InvalidBuilderPayload::LowValue { profit_threshold, @@ -1716,35 +1982,36 @@ fn verify_builder_bid>( payload: header.parent_hash(), expected: parent_hash, })) - } else if header.prev_randao() != prev_randao { + } else if header.prev_randao() != payload_attributes.prev_randao() { Err(Box::new(InvalidBuilderPayload::PrevRandao { payload: header.prev_randao(), - expected: prev_randao, + expected: payload_attributes.prev_randao(), })) - } else if header.timestamp() != 
timestamp { + } else if header.timestamp() != payload_attributes.timestamp() { Err(Box::new(InvalidBuilderPayload::Timestamp { payload: header.timestamp(), - expected: timestamp, + expected: payload_attributes.timestamp(), })) } else if block_number.map_or(false, |n| n != header.block_number()) { Err(Box::new(InvalidBuilderPayload::BlockNumber { payload: header.block_number(), expected: block_number, })) - } else if !matches!(bid.version, Some(ForkName::Merge)) { - // Once fork information is added to the payload, we will need to - // check that the local and relay payloads match. At this point, if - // we are requesting a payload at all, we have to assume this is - // the Bellatrix fork. + } else if bid.version != Some(current_fork) { Err(Box::new(InvalidBuilderPayload::Fork { payload: bid.version, - expected: ForkName::Merge, + expected: current_fork, })) } else if !is_signature_valid { Err(Box::new(InvalidBuilderPayload::Signature { signature: bid.data.signature.clone(), pubkey: bid.data.message.pubkey, })) + } else if payload_withdrawals_root != expected_withdrawals_root { + Err(Box::new(InvalidBuilderPayload::WithdrawalsRoot { + payload: payload_withdrawals_root, + expected: expected_withdrawals_root, + })) } else { Ok(()) } @@ -1906,7 +2173,10 @@ mod test { } } -fn noop(_: &ExecutionLayer, _: &ExecutionPayload) -> Option> { +fn noop( + _: &ExecutionLayer, + _: ExecutionPayloadRef, +) -> Option> { None } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 22dcb40070..63893375db 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,4 @@ -use crate::engines::ForkChoiceState; +use crate::engines::ForkchoiceState; use crate::{ engine_api::{ json_structures::{ @@ -12,7 +12,10 @@ use serde::{Deserialize, Serialize}; use 
std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use types::{EthSpec, ExecutionBlockHash, ExecutionPayload, Hash256, Uint256}; +use types::{ + EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, + ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, Hash256, Uint256, +}; const GAS_LIMIT: u64 = 16384; const GAS_USED: u64 = GAS_LIMIT - 1; @@ -28,21 +31,21 @@ impl Block { pub fn block_number(&self) -> u64 { match self { Block::PoW(block) => block.block_number, - Block::PoS(payload) => payload.block_number, + Block::PoS(payload) => payload.block_number(), } } pub fn parent_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.parent_hash, - Block::PoS(payload) => payload.parent_hash, + Block::PoS(payload) => payload.parent_hash(), } } pub fn block_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.block_hash, - Block::PoS(payload) => payload.block_hash, + Block::PoS(payload) => payload.block_hash(), } } @@ -63,33 +66,18 @@ impl Block { timestamp: block.timestamp, }, Block::PoS(payload) => ExecutionBlock { - block_hash: payload.block_hash, - block_number: payload.block_number, - parent_hash: payload.parent_hash, + block_hash: payload.block_hash(), + block_number: payload.block_number(), + parent_hash: payload.parent_hash(), total_difficulty, - timestamp: payload.timestamp, + timestamp: payload.timestamp(), }, } } pub fn as_execution_block_with_tx(&self) -> Option> { match self { - Block::PoS(payload) => Some(ExecutionBlockWithTransactions { - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom.clone(), - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - extra_data: payload.extra_data.clone(), - base_fee_per_gas: 
payload.base_fee_per_gas, - block_hash: payload.block_hash, - transactions: vec![], - }), + Block::PoS(payload) => Some(payload.clone().try_into().unwrap()), Block::PoW(_) => None, } } @@ -126,6 +114,11 @@ pub struct ExecutionBlockGenerator { pub pending_payloads: HashMap>, pub next_payload_id: u64, pub payload_ids: HashMap>, + /* + * Post-merge fork triggers + */ + pub shanghai_time: Option, // withdrawals + pub eip4844_time: Option, // 4844 } impl ExecutionBlockGenerator { @@ -133,6 +126,8 @@ impl ExecutionBlockGenerator { terminal_total_difficulty: Uint256, terminal_block_number: u64, terminal_block_hash: ExecutionBlockHash, + shanghai_time: Option, + eip4844_time: Option, ) -> Self { let mut gen = Self { head_block: <_>::default(), @@ -145,6 +140,8 @@ impl ExecutionBlockGenerator { pending_payloads: <_>::default(), next_payload_id: 0, payload_ids: <_>::default(), + shanghai_time, + eip4844_time, }; gen.insert_pow_block(0).unwrap(); @@ -176,6 +173,16 @@ impl ExecutionBlockGenerator { } } + pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName { + match self.eip4844_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Eip4844, + _ => match self.shanghai_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Capella, + _ => ForkName::Merge, + }, + } + } + pub fn execution_block_by_number(&self, number: u64) -> Option { self.block_by_number(number) .map(|block| block.as_execution_block(self.terminal_total_difficulty)) @@ -357,7 +364,9 @@ impl ExecutionBlockGenerator { // Update the block hash after modifying the block match &mut block { Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), - Block::PoS(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), + Block::PoS(b) => { + *b.block_hash_mut() = ExecutionBlockHash::from_root(b.tree_hash_root()) + } } // Update head. 
@@ -378,7 +387,7 @@ impl ExecutionBlockGenerator { } pub fn new_payload(&mut self, payload: ExecutionPayload) -> PayloadStatusV1 { - let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash) { + let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash()) { parent } else { return PayloadStatusV1 { @@ -388,7 +397,7 @@ impl ExecutionBlockGenerator { }; }; - if payload.block_number != parent.block_number() + 1 { + if payload.block_number() != parent.block_number() + 1 { return PayloadStatusV1 { status: PayloadStatusV1Status::Invalid, latest_valid_hash: Some(parent.block_hash()), @@ -396,8 +405,8 @@ impl ExecutionBlockGenerator { }; } - let valid_hash = payload.block_hash; - self.pending_payloads.insert(payload.block_hash, payload); + let valid_hash = payload.block_hash(); + self.pending_payloads.insert(payload.block_hash(), payload); PayloadStatusV1 { status: PayloadStatusV1Status::Valid, @@ -406,9 +415,11 @@ impl ExecutionBlockGenerator { } } - pub fn forkchoice_updated_v1( + // This function expects payload_attributes to already be validated with respect to + // the current fork [obtained by self.get_fork_at_timestamp(payload_attributes.timestamp)] + pub fn forkchoice_updated( &mut self, - forkchoice_state: ForkChoiceState, + forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { if let Some(payload) = self @@ -462,24 +473,87 @@ impl ExecutionBlockGenerator { let id = payload_id_from_u64(self.next_payload_id); self.next_payload_id += 1; - let mut execution_payload = ExecutionPayload { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: attributes.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: attributes.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: attributes.timestamp, - extra_data: "block gen was 
here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), + let mut execution_payload = match &attributes { + PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + PayloadAttributes::V2(pa) => { + match self.get_fork_at_timestamp(pa.timestamp) { + ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + ForkName::Capella => { + ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), 
+ base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.clone().into(), + }) + } + ForkName::Eip4844 => { + ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + // FIXME(4844): maybe this should be set to something? + excess_data_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.clone().into(), + }) + } + _ => unreachable!(), + } + } }; - execution_payload.block_hash = + *execution_payload.block_hash_mut() = ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); self.payload_ids.insert(id, execution_payload); @@ -566,6 +640,8 @@ mod test { TERMINAL_DIFFICULTY.into(), TERMINAL_BLOCK, ExecutionBlockHash::zero(), + None, + None, ); for i in 0..=TERMINAL_BLOCK { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 97c5235755..138c8f6bcb 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,25 +1,33 @@ use super::Context; use crate::engine_api::{http::*, *}; use crate::json_structures::*; +use crate::test_utils::DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; use std::sync::Arc; -use types::EthSpec; +use types::{EthSpec, ForkName}; + +pub const GENERIC_ERROR_CODE: i64 = -1234; +pub const BAD_PARAMS_ERROR_CODE: i64 = -32602; +pub const 
UNKNOWN_PAYLOAD_ERROR_CODE: i64 = -38001; +pub const FORK_REQUEST_MISMATCH_ERROR_CODE: i64 = -32000; pub async fn handle_rpc( body: JsonValue, ctx: Arc>, -) -> Result { +) -> Result { *ctx.previous_request.lock() = Some(body.clone()); let method = body .get("method") .and_then(JsonValue::as_str) - .ok_or_else(|| "missing/invalid method field".to_string())?; + .ok_or_else(|| "missing/invalid method field".to_string()) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; let params = body .get("params") - .ok_or_else(|| "missing/invalid params field".to_string())?; + .ok_or_else(|| "missing/invalid params field".to_string()) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; match method { ETH_SYNCING => Ok(JsonValue::Bool(false)), @@ -27,7 +35,8 @@ pub async fn handle_rpc( let tag = params .get(0) .and_then(JsonValue::as_str) - .ok_or_else(|| "missing/invalid params[0] value".to_string())?; + .ok_or_else(|| "missing/invalid params[0] value".to_string()) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; match tag { "latest" => Ok(serde_json::to_value( @@ -36,7 +45,10 @@ pub async fn handle_rpc( .latest_execution_block(), ) .unwrap()), - other => Err(format!("The tag {} is not supported", other)), + other => Err(( + format!("The tag {} is not supported", other), + BAD_PARAMS_ERROR_CODE, + )), } } ETH_GET_BLOCK_BY_HASH => { @@ -47,7 +59,8 @@ pub async fn handle_rpc( .and_then(|s| { s.parse() .map_err(|e| format!("unable to parse hash: {:?}", e)) - })?; + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; // If we have a static response set, just return that. 
if let Some(response) = *ctx.static_get_block_by_hash_response.lock() { @@ -57,7 +70,8 @@ pub async fn handle_rpc( let full_tx = params .get(1) .and_then(JsonValue::as_bool) - .ok_or_else(|| "missing/invalid params[1] value".to_string())?; + .ok_or_else(|| "missing/invalid params[1] value".to_string()) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; if full_tx { Ok(serde_json::to_value( ctx.execution_block_generator @@ -74,18 +88,70 @@ pub async fn handle_rpc( .unwrap()) } } - ENGINE_NEW_PAYLOAD_V1 => { - let request: JsonExecutionPayloadV1 = get_param(params, 0)?; + ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => { + let request = match method { + ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1( + get_param::>(params, 0) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, + ), + ENGINE_NEW_PAYLOAD_V2 => get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V2(jep)) + .or_else(|_| { + get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V1(jep)) + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, + // TODO(4844) add that here.. + _ => unreachable!(), + }; + + let fork = ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*request.timestamp()); + // validate method called correctly according to shanghai fork time + match fork { + ForkName::Merge => { + if matches!(request, JsonExecutionPayload::V2(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV2` before Capella fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } + ForkName::Capella => { + if method == ENGINE_NEW_PAYLOAD_V1 { + return Err(( + format!("{} called after Capella fork!", method), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V1(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV1` after Capella fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } + // TODO(4844) add 4844 error checking here + _ => unreachable!(), + }; // Canned responses set by block hash take priority. 
- if let Some(status) = ctx.get_new_payload_status(&request.block_hash) { + if let Some(status) = ctx.get_new_payload_status(request.block_hash()) { return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); } let (static_response, should_import) = if let Some(mut response) = ctx.static_new_payload_response.lock().clone() { if response.status.status == PayloadStatusV1Status::Valid { - response.status.latest_valid_hash = Some(request.block_hash) + response.status.latest_valid_hash = Some(*request.block_hash()) } (Some(response.status), response.should_import) @@ -107,21 +173,141 @@ pub async fn handle_rpc( Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } - ENGINE_GET_PAYLOAD_V1 => { - let request: JsonPayloadIdRequest = get_param(params, 0)?; + ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => { + let request: JsonPayloadIdRequest = + get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); let response = ctx .execution_block_generator .write() .get_payload(&id) - .ok_or_else(|| format!("no payload for id {:?}", id))?; + .ok_or_else(|| { + ( + format!("no payload for id {:?}", id), + UNKNOWN_PAYLOAD_ERROR_CODE, + ) + })?; - Ok(serde_json::to_value(JsonExecutionPayloadV1::from(response)).unwrap()) + // validate method called correctly according to shanghai fork time + if ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(response.timestamp()) + == ForkName::Capella + && method == ENGINE_GET_PAYLOAD_V1 + { + return Err(( + format!("{} called after Capella fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + // TODO(4844) add 4844 error checking here + + match method { + ENGINE_GET_PAYLOAD_V1 => { + Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) + } + ENGINE_GET_PAYLOAD_V2 => Ok(match JsonExecutionPayload::from(response) { + JsonExecutionPayload::V1(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV1 { + execution_payload, + 
block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + }) + .unwrap() + } + JsonExecutionPayload::V2(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV2 { + execution_payload, + block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + }) + .unwrap() + } + _ => unreachable!(), + }), + _ => unreachable!(), + } } - ENGINE_FORKCHOICE_UPDATED_V1 => { - let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; - let payload_attributes: Option = get_param(params, 1)?; + ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => { + let forkchoice_state: JsonForkchoiceStateV1 = + get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; + let payload_attributes = match method { + ENGINE_FORKCHOICE_UPDATED_V1 => { + let jpa1: Option = + get_param(params, 1).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; + jpa1.map(JsonPayloadAttributes::V1) + } + ENGINE_FORKCHOICE_UPDATED_V2 => { + // we can't use `deny_unknown_fields` without breaking compatibility with some + // clients that haven't updated to the latest engine_api spec. So instead we'll + // need to deserialize based on timestamp + get_param::>(params, 1) + .and_then(|pa| { + pa.and_then(|pa| { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + ForkName::Merge => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V1)) + .transpose() + } + ForkName::Capella => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V2)) + .transpose() + } + _ => unreachable!(), + } + }) + .transpose() + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? 
+ } + _ => unreachable!(), + }; + + // validate method called correctly according to shanghai fork time + if let Some(pa) = payload_attributes.as_ref() { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + ForkName::Merge => { + if matches!(pa, JsonPayloadAttributes::V2(_)) { + return Err(( + format!( + "{} called with `JsonPayloadAttributesV2` before Capella fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } + ForkName::Capella => { + if method == ENGINE_FORKCHOICE_UPDATED_V1 { + return Err(( + format!("{} called after Capella fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + if matches!(pa, JsonPayloadAttributes::V1(_)) { + return Err(( + format!( + "{} called with `JsonPayloadAttributesV1` after Capella fork!", + method + ), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + } + // TODO(4844) add 4844 error checking here + _ => unreachable!(), + }; + } if let Some(hook_response) = ctx .hook @@ -145,10 +331,11 @@ pub async fn handle_rpc( let mut response = ctx .execution_block_generator .write() - .forkchoice_updated_v1( + .forkchoice_updated( forkchoice_state.into(), payload_attributes.map(|json| json.into()), - )?; + ) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() { if status.status == PayloadStatusV1Status::Valid { @@ -169,9 +356,13 @@ pub async fn handle_rpc( }; Ok(serde_json::to_value(transition_config).unwrap()) } - other => Err(format!( - "The method {} does not exist/is not available", - other + ENGINE_EXCHANGE_CAPABILITIES => { + let engine_capabilities = ctx.engine_capabilities.read(); + Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) + } + other => Err(( + format!("The method {} does not exist/is not available", other), + METHOD_NOT_FOUND_CODE, )), } } diff --git a/beacon_node/execution_layer/src/test_utils/hook.rs b/beacon_node/execution_layer/src/test_utils/hook.rs index 
a3748103e3..4653811ac9 100644 --- a/beacon_node/execution_layer/src/test_utils/hook.rs +++ b/beacon_node/execution_layer/src/test_utils/hook.rs @@ -1,8 +1,8 @@ use crate::json_structures::*; type ForkChoiceUpdatedHook = dyn Fn( - JsonForkChoiceStateV1, - Option, + JsonForkchoiceStateV1, + Option, ) -> Option + Send + Sync; @@ -15,8 +15,8 @@ pub struct Hook { impl Hook { pub fn on_forkchoice_updated( &self, - state: JsonForkChoiceStateV1, - payload_attributes: Option, + state: JsonForkchoiceStateV1, + payload_attributes: Option, ) -> Option { (self.forkchoice_updated.as_ref()?)(state, payload_attributes) } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index b8f74c1c93..1997265013 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,17 +1,21 @@ -use crate::test_utils::DEFAULT_JWT_SECRET; +use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes}; use async_trait::async_trait; use eth2::types::{BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; -use ethereum_consensus::crypto::{SecretKey, Signature}; -use ethereum_consensus::primitives::BlsPublicKey; pub use ethereum_consensus::state_transition::Context; +use ethereum_consensus::{ + crypto::{SecretKey, Signature}, + primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256}, + state_transition::Error, +}; use fork_choice::ForkchoiceUpdateParameters; -use mev_build_rs::{ +use mev_rs::{ + bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix}, + capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, BlindedBlockProviderServer, BuilderBid, ExecutionPayload as 
ServerPayload, - ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid, - SignedValidatorRegistration, + SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -26,7 +30,8 @@ use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; use types::{ - Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, Hash256, Slot, Uint256, + Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName, Hash256, Slot, + Uint256, }; #[derive(Clone)] @@ -38,25 +43,129 @@ pub enum Operation { PrevRandao(Hash256), BlockNumber(usize), Timestamp(usize), + WithdrawalsRoot(Hash256), } impl Operation { - fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + fn apply(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { match self { Operation::FeeRecipient(fee_recipient) => { - bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? + *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? 
} - Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, - Operation::Value(value) => bid.value = to_ssz_rs(&value)?, - Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, - Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, - Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, - Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64, + Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64, + Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?, + Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?, + Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?, + Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64, + Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64, + Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?, } Ok(()) } } +// contains functions we need for BuilderBids.. 
not sure what to call this +pub trait BidStuff { + fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress; + fn gas_limit_mut(&mut self) -> &mut u64; + fn value_mut(&mut self) -> &mut U256; + fn parent_hash_mut(&mut self) -> &mut Hash32; + fn prev_randao_mut(&mut self) -> &mut Hash32; + fn block_number_mut(&mut self) -> &mut u64; + fn timestamp_mut(&mut self) -> &mut u64; + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>; + + fn sign_builder_message( + &mut self, + signing_key: &SecretKey, + context: &Context, + ) -> Result; + + fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid; +} + +impl BidStuff for BuilderBid { + fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress { + match self { + Self::Bellatrix(bid) => &mut bid.header.fee_recipient, + Self::Capella(bid) => &mut bid.header.fee_recipient, + } + } + + fn gas_limit_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.gas_limit, + Self::Capella(bid) => &mut bid.header.gas_limit, + } + } + + fn value_mut(&mut self) -> &mut U256 { + match self { + Self::Bellatrix(bid) => &mut bid.value, + Self::Capella(bid) => &mut bid.value, + } + } + + fn parent_hash_mut(&mut self) -> &mut Hash32 { + match self { + Self::Bellatrix(bid) => &mut bid.header.parent_hash, + Self::Capella(bid) => &mut bid.header.parent_hash, + } + } + + fn prev_randao_mut(&mut self) -> &mut Hash32 { + match self { + Self::Bellatrix(bid) => &mut bid.header.prev_randao, + Self::Capella(bid) => &mut bid.header.prev_randao, + } + } + + fn block_number_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.block_number, + Self::Capella(bid) => &mut bid.header.block_number, + } + } + + fn timestamp_mut(&mut self) -> &mut u64 { + match self { + Self::Bellatrix(bid) => &mut bid.header.timestamp, + Self::Capella(bid) => &mut bid.header.timestamp, + } + } + + fn withdrawals_root_mut(&mut self) -> Result<&mut Root, 
BlindedBlockProviderError> { + match self { + Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom( + "withdrawals_root called on bellatrix bid".to_string(), + )), + Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), + } + } + + fn sign_builder_message( + &mut self, + signing_key: &SecretKey, + context: &Context, + ) -> Result { + match self { + Self::Bellatrix(message) => sign_builder_message(message, signing_key, context), + Self::Capella(message) => sign_builder_message(message, signing_key, context), + } + } + + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { + match self { + Self::Bellatrix(message) => { + SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature }) + } + Self::Capella(message) => { + SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature }) + } + } + } +} + pub struct TestingBuilder { server: BlindedBlockProviderServer>, pub builder: MockBuilder, @@ -111,7 +220,10 @@ impl TestingBuilder { } pub async fn run(&self) { - self.server.run().await + let server = self.server.serve(); + if let Err(err) = server.await { + println!("error while listening for incoming: {err}") + } } } @@ -162,7 +274,7 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + fn apply_operations(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { op.apply(bid)?; @@ -172,7 +284,7 @@ impl MockBuilder { } #[async_trait] -impl mev_build_rs::BlindedBlockProvider for MockBuilder { +impl mev_rs::BlindedBlockProvider for MockBuilder { async fn register_validators( &self, registrations: &mut [SignedValidatorRegistration], @@ -200,6 +312,7 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { bid_request: &BidRequest, ) -> Result { let slot = Slot::new(bid_request.slot); + let fork = 
self.spec.fork_name_at_slot::(slot); let signed_cached_data = self .val_registration_cache .read() @@ -215,9 +328,13 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing head block"))?; - let block = head.data.message_merge().map_err(convert_err)?; + let block = head.data.message(); let head_block_root = block.tree_hash_root(); - let head_execution_hash = block.body.execution_payload.execution_payload.block_hash; + let head_execution_hash = block + .body() + .execution_payload() + .map_err(convert_err)? + .block_hash(); if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { return Err(BlindedBlockProviderError::Custom(format!( "head mismatch: {} {}", @@ -232,12 +349,11 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing finalized block"))? .data - .message_merge() + .message() + .body() + .execution_payload() .map_err(convert_err)? - .body - .execution_payload - .execution_payload - .block_hash; + .block_hash(); let justified_execution_hash = self .beacon_client @@ -246,12 +362,11 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .map_err(convert_err)? .ok_or_else(|| convert_err("missing finalized block"))? .data - .message_merge() + .message() + .body() + .execution_payload() .map_err(convert_err)? 
- .body - .execution_payload - .execution_payload - .block_hash; + .block_hash(); let val_index = self .beacon_client @@ -287,14 +402,22 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .get_randao_mix(head_state.current_epoch()) .map_err(convert_err)?; - let payload_attributes = PayloadAttributes { - timestamp, - prev_randao: *prev_randao, - suggested_fee_recipient: fee_recipient, + let payload_attributes = match fork { + ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None), + // the withdrawals root is filled in by operations + ForkName::Capella | ForkName::Eip4844 => { + PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) + } + ForkName::Base | ForkName::Altair => { + return Err(BlindedBlockProviderError::Custom(format!( + "Unsupported fork: {}", + fork + ))); + } }; self.el - .insert_proposer(slot, head_block_root, val_index, payload_attributes) + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) .await; let forkchoice_update_params = ForkchoiceUpdateParameters { @@ -308,54 +431,64 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { .el .get_full_payload_caching::>( head_execution_hash, - timestamp, - *prev_randao, - fee_recipient, + &payload_attributes, forkchoice_update_params, + fork, ) .await .map_err(convert_err)? 
+ .to_payload() .to_execution_payload_header(); let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - let mut header: ServerPayloadHeader = - serde_json::from_str(json_payload.as_str()).map_err(convert_err)?; - - header.gas_limit = cached_data.gas_limit; - - let mut message = BuilderBid { - header, - value: ssz_rs::U256::default(), - public_key: self.builder_sk.public_key(), + let mut message = match fork { + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, + public_key: self.builder_sk.public_key(), + }), + ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix { + header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, + public_key: self.builder_sk.public_key(), + }), + ForkName::Base | ForkName::Altair | ForkName::Eip4844 => { + return Err(BlindedBlockProviderError::Custom(format!( + "Unsupported fork: {}", + fork + ))) + } }; + *message.gas_limit_mut() = cached_data.gas_limit; self.apply_operations(&mut message)?; - let mut signature = - sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?; + message.sign_builder_message(&self.builder_sk, self.context.as_ref())?; if *self.invalidate_signatures.read() { signature = Signature::default(); } - let signed_bid = SignedBuilderBid { message, signature }; - Ok(signed_bid) + Ok(message.to_signed_bid(signature)) } async fn open_bid( &self, signed_block: &mut SignedBlindedBeaconBlock, ) -> Result { + let node = match signed_block { + SignedBlindedBeaconBlock::Bellatrix(block) => { + block.message.body.execution_payload_header.hash_tree_root() + } + SignedBlindedBeaconBlock::Capella(block) => { + block.message.body.execution_payload_header.hash_tree_root() + } + } + .map_err(convert_err)?; + let payload = self .el - 
.get_payload_by_root(&from_ssz_rs( - &signed_block - .message - .body - .execution_payload_header - .hash_tree_root() - .map_err(convert_err)?, - )?) + .get_payload_by_root(&from_ssz_rs(&node)?) .ok_or_else(|| convert_err("missing payload for tx root"))?; let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index e9d4b2121b..1a5d1fd198 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -9,7 +9,7 @@ use sensitive_url::SensitiveUrl; use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; -use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; +use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, MainnetEthSpec}; pub struct MockExecutionLayer { pub server: MockServer, @@ -20,40 +20,44 @@ pub struct MockExecutionLayer { impl MockExecutionLayer { pub fn default_params(executor: TaskExecutor) -> Self { + let mut spec = MainnetEthSpec::default_spec(); + spec.terminal_total_difficulty = DEFAULT_TERMINAL_DIFFICULTY.into(); + spec.terminal_block_hash = ExecutionBlockHash::zero(); + spec.terminal_block_hash_activation_epoch = Epoch::new(0); Self::new( executor, - DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, - ExecutionBlockHash::zero(), - Epoch::new(0), + None, + None, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec, None, ) } + #[allow(clippy::too_many_arguments)] pub fn new( executor: TaskExecutor, - terminal_total_difficulty: Uint256, terminal_block: u64, - terminal_block_hash: ExecutionBlockHash, - terminal_block_hash_activation_epoch: Epoch, + shanghai_time: Option, + eip4844_time: Option, + builder_threshold: Option, jwt_key: Option, + spec: ChainSpec, builder_url: Option, ) -> Self { let 
handle = executor.handle().unwrap(); - let mut spec = T::default_spec(); - spec.terminal_total_difficulty = terminal_total_difficulty; - spec.terminal_block_hash = terminal_block_hash; - spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch; - let jwt_key = jwt_key.unwrap_or_else(JwtKey::random); let server = MockServer::new( &handle, jwt_key, - terminal_total_difficulty, + spec.terminal_total_difficulty, terminal_block, - terminal_block_hash, + spec.terminal_block_hash, + shanghai_time, + eip4844_time, ); let url = SensitiveUrl::parse(&server.url()).unwrap(); @@ -67,7 +71,7 @@ impl MockExecutionLayer { builder_url, secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), - builder_profit_threshold: DEFAULT_BUILDER_THRESHOLD_WEI, + builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), ..Default::default() }; let el = @@ -98,21 +102,19 @@ impl MockExecutionLayer { justified_hash: None, finalized_hash: None, }; + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + // FIXME: think about how to handle different forks / withdrawals here.. + None, + ); // Insert a proposer to ensure the fork choice updated command works. 
let slot = Slot::new(0); let validator_index = 0; self.el - .insert_proposer( - slot, - head_block_root, - validator_index, - PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient: Address::repeat_byte(42), - }, - ) + .insert_proposer(slot, head_block_root, validator_index, payload_attributes) .await; self.el @@ -132,25 +134,30 @@ impl MockExecutionLayer { slot, chain_health: ChainHealth::Healthy, }; - let payload = self + let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); + let payload: ExecutionPayload = self .el .get_payload::>( parent_hash, - timestamp, - prev_randao, - validator_index, + &payload_attributes, forkchoice_update_params, builder_params, + // FIXME: do we need to consider other forks somehow? What about withdrawals? + ForkName::Merge, &self.spec, ) .await .unwrap() - .execution_payload; - let block_hash = payload.block_hash; - assert_eq!(payload.parent_hash, parent_hash); - assert_eq!(payload.block_number, block_number); - assert_eq!(payload.timestamp, timestamp); - assert_eq!(payload.prev_randao, prev_randao); + .to_payload() + .into(); + + let block_hash = payload.block_hash(); + assert_eq!(payload.parent_hash(), parent_hash); + assert_eq!(payload.block_number(), block_number); + assert_eq!(payload.timestamp(), timestamp); + assert_eq!(payload.prev_randao(), prev_randao); // Ensure the payload cache is empty. 
assert!(self @@ -162,25 +169,29 @@ impl MockExecutionLayer { slot, chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let payload_header = self .el .get_payload::>( parent_hash, - timestamp, - prev_randao, - validator_index, + &payload_attributes, forkchoice_update_params, builder_params, + // FIXME: do we need to consider other forks somehow? What about withdrawals? + ForkName::Merge, &self.spec, ) .await .unwrap() - .execution_payload_header; - assert_eq!(payload_header.block_hash, block_hash); - assert_eq!(payload_header.parent_hash, parent_hash); - assert_eq!(payload_header.block_number, block_number); - assert_eq!(payload_header.timestamp, timestamp); - assert_eq!(payload_header.prev_randao, prev_randao); + .to_payload(); + + assert_eq!(payload_header.block_hash(), block_hash); + assert_eq!(payload_header.parent_hash(), parent_hash); + assert_eq!(payload_header.block_number(), block_number); + assert_eq!(payload_header.timestamp(), timestamp); + assert_eq!(payload_header.prev_randao(), prev_randao); // Ensure the payload cache has the correct payload. 
assert_eq!( diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index f18ecbe622..077d29575e 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,6 +22,7 @@ use tokio::{runtime, sync::oneshot}; use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; +use crate::EngineCapabilities; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; pub use hook::Hook; pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; @@ -31,6 +32,17 @@ pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; +pub const DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI: u128 = 10_000_000_000_000_000; +pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000; +pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { + new_payload_v1: true, + new_payload_v2: true, + forkchoice_updated_v1: true, + forkchoice_updated_v2: true, + get_payload_v1: true, + get_payload_v2: true, + exchange_transition_configuration_v1: true, +}; mod execution_block_generator; mod handle_rpc; @@ -45,6 +57,8 @@ pub struct MockExecutionConfig { pub terminal_difficulty: Uint256, pub terminal_block: u64, pub terminal_block_hash: ExecutionBlockHash, + pub shanghai_time: Option, + pub eip4844_time: Option, } impl Default for MockExecutionConfig { @@ -55,6 +69,8 @@ impl Default for MockExecutionConfig { terminal_block: DEFAULT_TERMINAL_BLOCK, terminal_block_hash: ExecutionBlockHash::zero(), server_config: Config::default(), + shanghai_time: None, + eip4844_time: None, } } } @@ -74,6 +90,8 @@ impl MockServer { DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, 
ExecutionBlockHash::zero(), + None, // FIXME(capella): should this be the default? + None, // FIXME(eip4844): should this be the default? ) } @@ -84,11 +102,18 @@ impl MockServer { terminal_block, terminal_block_hash, server_config, + shanghai_time, + eip4844_time, } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); - let execution_block_generator = - ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash); + let execution_block_generator = ExecutionBlockGenerator::new( + terminal_difficulty, + terminal_block, + terminal_block_hash, + shanghai_time, + eip4844_time, + ); let ctx: Arc> = Arc::new(Context { config: server_config, @@ -104,6 +129,7 @@ impl MockServer { hook: <_>::default(), new_payload_statuses: <_>::default(), fcu_payload_statuses: <_>::default(), + engine_capabilities: Arc::new(RwLock::new(DEFAULT_ENGINE_CAPABILITIES)), _phantom: PhantomData, }); @@ -134,12 +160,18 @@ impl MockServer { } } + pub fn set_engine_capabilities(&self, engine_capabilities: EngineCapabilities) { + *self.ctx.engine_capabilities.write() = engine_capabilities; + } + pub fn new( handle: &runtime::Handle, jwt_key: JwtKey, terminal_difficulty: Uint256, terminal_block: u64, terminal_block_hash: ExecutionBlockHash, + shanghai_time: Option, + eip4844_time: Option, ) -> Self { Self::new_with_config( handle, @@ -149,6 +181,8 @@ impl MockServer { terminal_difficulty, terminal_block, terminal_block_hash, + shanghai_time, + eip4844_time, }, ) } @@ -452,6 +486,7 @@ pub struct Context { pub new_payload_statuses: Arc>>, pub fcu_payload_statuses: Arc>>, + pub engine_capabilities: Arc>, pub _phantom: PhantomData, } @@ -603,11 +638,11 @@ pub fn serve( "jsonrpc": JSONRPC_VERSION, "result": result }), - Err(message) => json!({ + Err((message, code)) => json!({ "id": id, "jsonrpc": JSONRPC_VERSION, "error": { - "code": -1234, // Junk error code. 
+ "code": code, "message": message } }), diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index d8c25baec8..122ca8eda6 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -10,6 +10,20 @@ use types::{ pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; +pub fn bls_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { + let mut credentials = hash(&pubkey.as_ssz_bytes()); + credentials[0] = spec.bls_withdrawal_prefix_byte; + Hash256::from_slice(&credentials) +} + +fn eth1_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { + let fake_execution_address = &hash(&pubkey.as_ssz_bytes())[0..20]; + let mut credentials = [0u8; 32]; + credentials[0] = spec.eth1_address_withdrawal_prefix_byte; + credentials[12..].copy_from_slice(fake_execution_address); + Hash256::from_slice(&credentials) +} + /// Builds a genesis state as defined by the Eth2 interop procedure (see below). /// /// Reference: @@ -21,20 +35,75 @@ pub fn interop_genesis_state( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { + let withdrawal_credentials = keypairs + .iter() + .map(|keypair| bls_withdrawal_credentials(&keypair.pk, spec)) + .collect::>(); + interop_genesis_state_with_withdrawal_credentials::( + keypairs, + &withdrawal_credentials, + genesis_time, + eth1_block_hash, + execution_payload_header, + spec, + ) +} + +// returns an interop genesis state except every other +// validator has eth1 withdrawal credentials +pub fn interop_genesis_state_with_eth1( + keypairs: &[Keypair], + genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, + spec: &ChainSpec, +) -> Result, String> { + let withdrawal_credentials = keypairs + .iter() + .enumerate() + .map(|(index, keypair)| { + if index % 2 == 0 { + bls_withdrawal_credentials(&keypair.pk, spec) + } else { + eth1_withdrawal_credentials(&keypair.pk, spec) + } + }) + .collect::>(); + 
interop_genesis_state_with_withdrawal_credentials::( + keypairs, + &withdrawal_credentials, + genesis_time, + eth1_block_hash, + execution_payload_header, + spec, + ) +} + +pub fn interop_genesis_state_with_withdrawal_credentials( + keypairs: &[Keypair], + withdrawal_credentials: &[Hash256], + genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, + spec: &ChainSpec, +) -> Result, String> { + if keypairs.len() != withdrawal_credentials.len() { + return Err(format!( + "wrong number of withdrawal credentials, expected: {}, got: {}", + keypairs.len(), + withdrawal_credentials.len() + )); + } + let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; - let withdrawal_credentials = |pubkey: &PublicKey| { - let mut credentials = hash(&pubkey.as_ssz_bytes()); - credentials[0] = spec.bls_withdrawal_prefix_byte; - Hash256::from_slice(&credentials) - }; - let datas = keypairs .into_par_iter() - .map(|keypair| { + .zip(withdrawal_credentials.into_par_iter()) + .map(|(keypair, &withdrawal_credentials)| { let mut data = DepositData { - withdrawal_credentials: withdrawal_credentials(&keypair.pk), + withdrawal_credentials, pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty().into(), @@ -133,4 +202,83 @@ mod test { "validator count should be correct" ); } + + #[test] + fn interop_state_with_eth1() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state_with_eth1::( + &keypairs, + genesis_time, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + spec, + ) + .expect("should build state"); + + assert_eq!( + state.eth1_data().block_hash, + Hash256::from_slice(&[0x42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time(), + genesis_time, + "genesis time should be as specified" + ); + + for b in state.balances() { + 
assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for (index, v) in state.validators().iter().enumerate() { + let creds = v.withdrawal_credentials.as_bytes(); + if index % 2 == 0 { + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ); + } else { + assert_eq!( + creds[0], spec.eth1_address_withdrawal_prefix_byte, + "first byte of withdrawal creds should be eth1 prefix" + ); + assert_eq!( + creds[1..12], + [0u8; 11], + "bytes [1:12] of withdrawal creds must be zero" + ); + assert_eq!( + &creds[12..], + &hash(&v.pubkey.as_ssz_bytes())[0..20], + "rest of withdrawal creds should be first 20 bytes of pubkey hash" + ) + } + } + + assert_eq!( + state.balances().len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators().len(), + validator_count, + "validator count should be correct" + ); + } } diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 1233d99fd3..3fb053bf88 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -5,5 +5,8 @@ mod interop; pub use eth1::Config as Eth1Config; pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; -pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +pub use interop::{ + bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1, + interop_genesis_state_with_withdrawal_credentials, DEFAULT_ETH1_BLOCK_HASH, +}; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index d7a3a680bd..0aa626be0c 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -37,6 +37,7 @@ sysinfo = "0.26.5" 
system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } eth2_serde_utils = "0.1.1" +operation_pool = { path = "../operation_pool" } [dev-dependencies] store = { path = "../store" } @@ -46,6 +47,7 @@ logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } unused_port = {path = "../../common/unused_port"} +genesis = { path = "../genesis" } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 05886a4d02..828be8e576 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -4,7 +4,7 @@ use lru::LruCache; use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; use std::sync::Arc; -use types::BlindedBeaconBlock; +use types::beacon_block::BlindedBeaconBlock; use warp_utils::reject::{ beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, }; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 009775701a..a0975a5c73 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -36,6 +36,7 @@ use eth2::types::{ use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; @@ -56,9 +57,9 @@ use types::{ Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, - 
SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, - SyncContributionData, + SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use version::{ add_consensus_version_header, execution_optimistic_fork_versioned_response, @@ -1122,7 +1123,9 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - publish_blocks::publish_block(None, block, chain, &network_tx, log) + // need to have cached the blob sidecar somewhere in the beacon chain + // to publish + publish_blocks::publish_block(None, block, None, chain, &network_tx, log) .await .map(|()| warp::reply()) }, @@ -1654,6 +1657,109 @@ pub fn serve( }, ); + // GET beacon/pool/bls_to_execution_changes + let get_beacon_pool_bls_to_execution_changes = beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); + Ok(api_types::GenericResponse::from(address_changes)) + }) + }); + + // POST beacon/pool/bls_to_execution_changes + let post_beacon_pool_bls_to_execution_changes = beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + address_changes: Vec, + network_tx: UnboundedSender>, + log: Logger| { + blocking_json_task(move || { + let mut failures = vec![]; + + for (index, address_change) in address_changes.into_iter().enumerate() { + let validator_index = address_change.message.validator_index; + + match chain.verify_bls_to_execution_change_for_http_api(address_change) { + Ok(ObservationOutcome::New(verified_address_change)) => { + let validator_index = + 
verified_address_change.as_inner().message.validator_index; + let address = verified_address_change + .as_inner() + .message + .to_execution_address; + + // New to P2P *and* op pool, gossip immediately if post-Capella. + let received_pre_capella = if chain.current_slot_is_post_capella().unwrap_or(false) { + ReceivedPreCapella::No + } else { + ReceivedPreCapella::Yes + }; + if matches!(received_pre_capella, ReceivedPreCapella::No) { + publish_pubsub_message( + &network_tx, + PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; + } + + // Import to op pool (may return `false` if there's a race). + let imported = + chain.import_bls_to_execution_change(verified_address_change, received_pre_capella); + + info!( + log, + "Processed BLS to execution change"; + "validator_index" => validator_index, + "address" => ?address, + "published" => matches!(received_pre_capella, ReceivedPreCapella::No), + "imported" => imported, + ); + } + Ok(ObservationOutcome::AlreadyKnown) => { + debug!( + log, + "BLS to execution change already known"; + "validator_index" => validator_index, + ); + } + Err(e) => { + warn!( + log, + "Invalid BLS to execution change"; + "validator_index" => validator_index, + "reason" => ?e, + "source" => "HTTP", + ); + failures.push(api_types::Failure::new( + index, + format!("invalid: {e:?}"), + )); + } + } + } + + if failures.is_empty() { + Ok(()) + } else { + Err(warp_utils::reject::indexed_bad_request( + "some BLS to execution changes failed to verify".into(), + failures, + )) + } + }) + }, + ); + // GET beacon/deposit_snapshot let get_beacon_deposit_snapshot = eth_v1 .and(warp::path("beacon")) @@ -3470,6 +3576,7 @@ pub fn serve( .or(get_beacon_pool_attester_slashings.boxed()) .or(get_beacon_pool_proposer_slashings.boxed()) .or(get_beacon_pool_voluntary_exits.boxed()) + .or(get_beacon_pool_bls_to_execution_changes.boxed()) .or(get_beacon_deposit_snapshot.boxed()) .or(get_beacon_rewards_blocks.boxed()) 
.or(get_config_fork_schedule.boxed()) @@ -3523,6 +3630,7 @@ pub fn serve( .or(post_beacon_pool_proposer_slashings.boxed()) .or(post_beacon_pool_voluntary_exits.boxed()) .or(post_beacon_pool_sync_committees.boxed()) + .or(post_beacon_pool_bls_to_execution_changes.boxed()) .or(post_beacon_rewards_attestations.boxed()) .or(post_beacon_rewards_sync_committee.boxed()) .or(post_validator_duties_attester.boxed()) diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 1c3ab1f680..6851913733 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -41,4 +41,16 @@ lazy_static::lazy_static! { "http_api_block_published_very_late_total", "The count of times a block was published beyond the attestation deadline" ); + pub static ref HTTP_API_BLOB_BROADCAST_DELAY_TIMES: Result = try_create_histogram( + "http_api_blob_broadcast_delay_times", + "Time between start of the slot and when the blob was broadcast" + ); + pub static ref HTTP_API_BLOB_PUBLISHED_LATE_TOTAL: Result = try_create_int_counter( + "http_api_blob_published_late_total", + "The count of times a blob was published beyond more than half way to the attestation deadline" + ); + pub static ref HTTP_API_BLOB_PUBLISHED_VERY_LATE_TOTAL: Result = try_create_int_counter( + "http_api_blob_published_very_late_total", + "The count of times a blob was published beyond the attestation deadline" + ); } diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 5d27f117b0..83ab8ceee6 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -3,7 +3,7 @@ use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer, }; -use lighthouse_network::PubsubMessage; +use lighthouse_network::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar}; use 
network::NetworkMessage; use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; @@ -11,8 +11,8 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - SignedBeaconBlock, + AbstractExecPayload, BlindedPayload, BlobsSidecar, EthSpec, ExecPayload, ExecutionBlockHash, + FullPayload, Hash256, SignedBeaconBlock, }; use warp::Rejection; @@ -20,6 +20,7 @@ use warp::Rejection; pub async fn publish_block( block_root: Option, block: Arc>, + blobs_sidecar: Option>>, chain: Arc>, network_tx: &UnboundedSender>, log: Logger, @@ -28,7 +29,24 @@ pub async fn publish_block( // Send the block, regardless of whether or not it is valid. The API // specification is very clear that this is the desired behaviour. - crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + + let message = match &*block { + SignedBeaconBlock::Eip4844(block) => { + if let Some(sidecar) = blobs_sidecar { + PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new( + SignedBeaconBlockAndBlobsSidecar { + beacon_block: block.clone(), + blobs_sidecar: (*sidecar).clone(), + }, + )) + } else { + //TODO(pawan): return an empty sidecar instead + return Err(warp_utils::reject::broadcast_without_import(String::new())); + } + } + _ => PubsubMessage::BeaconBlock(block.clone()), + }; + crate::publish_pubsub_message(network_tx, message)?; // Determine the delay after the start of the slot, register it with metrics. let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); @@ -142,6 +160,7 @@ pub async fn publish_blinded_block( publish_block::( Some(block_root), Arc::new(full_block), + None, chain, network_tx, log, @@ -165,12 +184,22 @@ async fn reconstruct_block( // If the execution block hash is zero, use an empty payload. 
let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { - ExecutionPayload::default() + FullPayload::default_at_fork( + chain + .spec + .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), + ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Default payload construction error: {e:?}" + )) + })? + .into() // If we already have an execution payload with this transactions root cached, use it. } else if let Some(cached_payload) = el.get_payload_by_root(&payload_header.tree_hash_root()) { - info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash); + info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash()); cached_payload // Otherwise, this means we are attempting a blind block proposal. } else { @@ -183,7 +212,7 @@ async fn reconstruct_block( e )) })?; - info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); + info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash()); full_payload }; diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 87ba3a4663..30f475e689 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,9 +1,9 @@ -use crate::api_types::{ - EndpointVersion, ExecutionOptimisticForkVersionedResponse, ForkVersionedResponse, -}; +use crate::api_types::EndpointVersion; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; -use types::{ForkName, InconsistentFork}; +use types::{ + ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork, +}; use warp::reply::{self, Reply, WithHeader}; pub const V1: EndpointVersion = EndpointVersion(1); diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 7c228d9803..ee02735797 100644 --- 
a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -1,5 +1,7 @@ use beacon_chain::{ - test_utils::{BeaconChainHarness, BoxedMutator, EphemeralHarnessType}, + test_utils::{ + BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType, + }, BeaconChain, BeaconChainTypes, }; use directory::DEFAULT_ROOT_DIR; @@ -55,25 +57,39 @@ pub struct ApiServer> { pub external_peer_id: PeerId, } +type Initializer = Box< + dyn FnOnce(HarnessBuilder>) -> HarnessBuilder>, +>; type Mutator = BoxedMutator, MemoryStore>; impl InteractiveTester { pub async fn new(spec: Option, validator_count: usize) -> Self { - Self::new_with_mutator(spec, validator_count, None).await + Self::new_with_initializer_and_mutator(spec, validator_count, None, None).await } - pub async fn new_with_mutator( + pub async fn new_with_initializer_and_mutator( spec: Option, validator_count: usize, + initializer: Option>, mutator: Option>, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec) - .deterministic_keypairs(validator_count) .logger(test_logger()) - .mock_execution_layer() - .fresh_ephemeral_store(); + .mock_execution_layer(); + harness_builder = if let Some(initializer) = initializer { + // Apply custom initialization provided by the caller. + initializer(harness_builder) + } else { + // Apply default initial configuration. + harness_builder + .deterministic_keypairs(validator_count) + .fresh_ephemeral_store() + }; + + // Add a mutator for the beacon chain builder which will be called in + // `HarnessBuilder::build`. if let Some(mutator) = mutator { harness_builder = harness_builder.initial_mutator(mutator); } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 942a1167c2..6144123565 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,8 +1,16 @@ //! Tests for API behaviour across fork boundaries. 
use crate::common::*; -use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig}; -use eth2::types::{StateId, SyncSubcommittee}; -use types::{ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; +use beacon_chain::{ + test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, + StateSkipConfig, +}; +use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; +use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use std::collections::HashSet; +use types::{ + test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, + Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, +}; type E = MinimalEthSpec; @@ -12,6 +20,14 @@ fn altair_spec(altair_fork_epoch: Epoch) -> ChainSpec { spec } +fn capella_spec(capella_fork_epoch: Epoch) -> ChainSpec { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(capella_fork_epoch); + spec +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn sync_committee_duties_across_fork() { let validator_count = E::sync_committee_size(); @@ -307,3 +323,219 @@ async fn sync_committee_indices_across_fork() { ); } } + +/// Assert that an HTTP API error has the given status code and indexed errors for the given indices. +fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec) { + let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { + code, + failures, + .. 
+ }) = error else { + panic!("wrong error, expected ServerIndexedMessage, got: {error:?}") + }; + assert_eq!(code, status_code); + assert_eq!(failures.len(), indices.len()); + for (index, failure) in indices.into_iter().zip(failures) { + assert_eq!(failure.index, index as u64); + } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn bls_to_execution_changes_update_all_around_capella_fork() { + let validator_count = 128; + let fork_epoch = Epoch::new(2); + let spec = capella_spec(fork_epoch); + let max_bls_to_execution_changes = E::max_bls_to_execution_changes(); + + // Use a genesis state with entirely BLS withdrawal credentials. + // Offset keypairs by `validator_count` to create keys distinct from the signing keys. + let validator_keypairs = generate_deterministic_keypairs(validator_count); + let withdrawal_keypairs = (0..validator_count) + .map(|i| Some(generate_deterministic_keypair(i + validator_count))) + .collect::>(); + let withdrawal_credentials = withdrawal_keypairs + .iter() + .map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec)) + .collect::>(); + let genesis_state = interop_genesis_state_with_withdrawal_credentials( + &validator_keypairs, + &withdrawal_credentials, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + &spec, + ) + .unwrap(); + + let tester = InteractiveTester::::new_with_initializer_and_mutator( + Some(spec.clone()), + validator_count, + Some(Box::new(|harness_builder| { + harness_builder + .keypairs(validator_keypairs) + .withdrawal_keypairs(withdrawal_keypairs) + .genesis_state_ephemeral_store(genesis_state) + })), + None, + ) + .await; + let harness = &tester.harness; + let client = &tester.client; + + let all_validators = harness.get_all_validators(); + let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::>(); + + // Create a bunch of valid address changes. 
+ let valid_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + harness.make_bls_to_execution_change( + validator_index, + Address::from_low_u64_be(validator_index), + ) + }) + .collect::>(); + + // Address changes which conflict with `valid_address_changes` on the address chosen. + let conflicting_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + harness.make_bls_to_execution_change( + validator_index, + Address::from_low_u64_be(validator_index + 1), + ) + }) + .collect::>(); + + // Address changes signed with the wrong key. + let wrong_key_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + // Use the correct pubkey. + let pubkey = &harness.get_withdrawal_keypair(validator_index).pk; + // And the wrong secret key. + let secret_key = &harness + .get_withdrawal_keypair((validator_index + 1) % validator_count as u64) + .sk; + harness.make_bls_to_execution_change_with_keys( + validator_index, + Address::from_low_u64_be(validator_index), + pubkey, + secret_key, + ) + }) + .collect::>(); + + // Submit some changes before Capella. Just enough to fill two blocks. + let num_pre_capella = validator_count / 4; + let blocks_filled_pre_capella = 2; + assert_eq!( + num_pre_capella, + blocks_filled_pre_capella * max_bls_to_execution_changes + ); + + client + .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella]) + .await + .unwrap(); + + let expected_received_pre_capella_messages = valid_address_changes[..num_pre_capella].to_vec(); + + // Conflicting changes for the same validators should all fail. + let error = client + .post_beacon_pool_bls_to_execution_changes(&conflicting_address_changes[..num_pre_capella]) + .await + .unwrap_err(); + assert_server_indexed_error(error, 400, (0..num_pre_capella).collect()); + + // Re-submitting the same changes should be accepted. 
+ client + .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella]) + .await + .unwrap(); + + // Invalid changes signed with the wrong keys should all be rejected without affecting the seen + // indices filters (apply ALL of them). + let error = client + .post_beacon_pool_bls_to_execution_changes(&wrong_key_address_changes) + .await + .unwrap_err(); + assert_server_indexed_error(error, 400, all_validators.clone()); + + // Advance to right before Capella. + let capella_slot = fork_epoch.start_slot(E::slots_per_epoch()); + harness.extend_to_slot(capella_slot - 1).await; + assert_eq!(harness.head_slot(), capella_slot - 1); + + assert_eq!( + harness + .chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella( + &harness.chain.head_snapshot().beacon_state, + &spec, + ) + .into_iter() + .collect::>(), + HashSet::from_iter(expected_received_pre_capella_messages.into_iter()), + "all pre-capella messages should be queued for capella broadcast" + ); + + // Add Capella blocks which should be full of BLS to execution changes. + for i in 0..validator_count / max_bls_to_execution_changes { + let head_block_root = harness.extend_slots(1).await; + let head_block = harness + .chain + .get_block(&head_block_root) + .await + .unwrap() + .unwrap(); + + let bls_to_execution_changes = head_block + .message() + .body() + .bls_to_execution_changes() + .unwrap(); + + // Block should be full. + assert_eq!( + bls_to_execution_changes.len(), + max_bls_to_execution_changes, + "block not full on iteration {i}" + ); + + // Included changes should be the ones from `valid_address_changes` in any order. + for address_change in bls_to_execution_changes.iter() { + assert!(valid_address_changes.contains(address_change)); + } + + // After the initial 2 blocks, add the rest of the changes using a large + // request containing all the valid, all the conflicting and all the invalid. 
+ // Despite the invalid and duplicate messages, the new ones should still get picked up by + // the pool. + if i == blocks_filled_pre_capella - 1 { + let all_address_changes: Vec<_> = [ + valid_address_changes.clone(), + conflicting_address_changes.clone(), + wrong_key_address_changes.clone(), + ] + .concat(); + + let error = client + .post_beacon_pool_bls_to_execution_changes(&all_address_changes) + .await + .unwrap_err(); + assert_server_indexed_error( + error, + 400, + (validator_count..3 * validator_count).collect(), + ); + } + } + + // Eventually all validators should have eth1 withdrawal credentials. + let head_state = harness.get_current_state(); + for validator in head_state.validators() { + assert!(validator.has_eth1_withdrawal_credential(&spec)); + } +} diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 17a3624afe..7096fac425 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -5,7 +5,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, }; use eth2::types::DepositContractData; -use execution_layer::{ForkChoiceState, PayloadAttributes}; +use execution_layer::{ForkchoiceState, PayloadAttributes}; use parking_lot::Mutex; use slot_clock::SlotClock; use state_processing::state_advance::complete_state_advance; @@ -55,7 +55,7 @@ struct ForkChoiceUpdates { #[derive(Debug, Clone)] struct ForkChoiceUpdateMetadata { received_at: Duration, - state: ForkChoiceState, + state: ForkchoiceState, payload_attributes: Option, } @@ -86,7 +86,7 @@ impl ForkChoiceUpdates { .payload_attributes .as_ref() .map_or(false, |payload_attributes| { - payload_attributes.timestamp == proposal_timestamp + payload_attributes.timestamp() == proposal_timestamp }) }) .cloned() @@ -278,9 +278,10 @@ pub async fn proposer_boost_re_org_test( let num_empty_votes = Some(attesters_per_slot * percent_empty_votes / 100); let num_head_votes = 
Some(attesters_per_slot * percent_head_votes / 100); - let tester = InteractiveTester::::new_with_mutator( + let tester = InteractiveTester::::new_with_initializer_and_mutator( Some(spec), validator_count, + None, Some(Box::new(move |builder| { builder .proposer_re_org_threshold(Some(ReOrgThreshold(re_org_threshold))) @@ -342,7 +343,7 @@ pub async fn proposer_boost_re_org_test( .lock() .set_forkchoice_updated_hook(Box::new(move |state, payload_attributes| { let received_at = chain_inner.slot_clock.now_duration().unwrap(); - let state = ForkChoiceState::from(state); + let state = ForkchoiceState::from(state); let payload_attributes = payload_attributes.map(Into::into); let update = ForkChoiceUpdateMetadata { received_at, @@ -521,16 +522,20 @@ pub async fn proposer_boost_re_org_test( if !misprediction { assert_eq!( - lookahead, payload_lookahead, + lookahead, + payload_lookahead, "lookahead={lookahead:?}, timestamp={}, prev_randao={:?}", - payload_attribs.timestamp, payload_attribs.prev_randao, + payload_attribs.timestamp(), + payload_attribs.prev_randao(), ); } else { // On a misprediction we issue the first fcU 500ms before creating a block! assert_eq!( - lookahead, fork_choice_lookahead, + lookahead, + fork_choice_lookahead, "timestamp={}, prev_randao={:?}", - payload_attribs.timestamp, payload_attribs.prev_randao, + payload_attribs.timestamp(), + payload_attribs.prev_randao(), ); } } @@ -540,7 +545,7 @@ pub async fn proposer_boost_re_org_test( pub async fn fork_choice_before_proposal() { // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. 
- let validator_count = 32; + let validator_count = 64; let all_validators = (0..validator_count).collect::>(); let num_initial: u64 = 31; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 2e795e522d..6424d73eb5 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -11,9 +11,11 @@ use eth2::{ types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; -use execution_layer::test_utils::Operation; use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; +use execution_layer::test_utils::{ + Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, +}; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use http_api::{BlockId, StateId}; @@ -22,6 +24,7 @@ use network::NetworkReceivers; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; +use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; @@ -72,38 +75,53 @@ struct ApiTester { mock_builder: Option>>, } +struct ApiTesterConfig { + spec: ChainSpec, + builder_threshold: Option, +} + +impl Default for ApiTesterConfig { + fn default() -> Self { + let mut spec = E::default_spec(); + spec.shard_committee_period = 2; + Self { + spec, + builder_threshold: None, + } + } +} + impl ApiTester { pub async fn new() -> Self { // This allows for testing voluntary exits without building out a massive chain. 
- let mut spec = E::default_spec(); - spec.shard_committee_period = 2; - Self::new_from_spec(spec).await + Self::new_from_config(ApiTesterConfig::default()).await } pub async fn new_with_hard_forks(altair: bool, bellatrix: bool) -> Self { - let mut spec = E::default_spec(); - spec.shard_committee_period = 2; + let mut config = ApiTesterConfig::default(); // Set whether the chain has undergone each hard fork. if altair { - spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); } if bellatrix { - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); } - Self::new_from_spec(spec).await + Self::new_from_config(config).await } - pub async fn new_from_spec(spec: ChainSpec) -> Self { + pub async fn new_from_config(config: ApiTesterConfig) -> Self { // Get a random unused port + let spec = config.spec; let port = unused_port::unused_tcp_port().unwrap(); let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) + .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer_with_builder(beacon_url.clone()) + .mock_execution_layer_with_builder(beacon_url.clone(), config.builder_threshold) .build(), ); @@ -358,6 +376,28 @@ impl ApiTester { tester } + pub async fn new_mev_tester_no_builder_threshold() -> Self { + let mut config = ApiTesterConfig { + builder_threshold: Some(0), + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + let tester = Self::new_from_config(config) + .await + .test_post_validator_register_validator() + .await; + tester + .mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, + ))); + tester + } + fn 
skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -1372,9 +1412,9 @@ impl ApiTester { pub async fn test_get_config_spec(self) -> Self { let result = self .client - .get_config_spec::() + .get_config_spec::() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec, None); @@ -2122,7 +2162,7 @@ impl ApiTester { self } - pub async fn test_blinded_block_production>(&self) { + pub async fn test_blinded_block_production>(&self) { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2182,7 +2222,7 @@ impl ApiTester { } } - pub async fn test_blinded_block_production_no_verify_randao>( + pub async fn test_blinded_block_production_no_verify_randao>( self, ) -> Self { for _ in 0..E::slots_per_epoch() { @@ -2206,7 +2246,9 @@ impl ApiTester { self } - pub async fn test_blinded_block_production_verify_randao_invalid>( + pub async fn test_blinded_block_production_verify_randao_invalid< + Payload: AbstractExecPayload, + >( self, ) -> Self { let fork = self.chain.canonical_head.cached_head().head_fork(); @@ -2664,7 +2706,7 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2673,14 +2715,11 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); - assert_eq!( - payload.execution_payload_header.fee_recipient, - expected_fee_recipient - ); - assert_eq!(payload.execution_payload_header.gas_limit, 11_111_111); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 11_111_111); // If this cache is empty, it indicates fallback was not 
used, so the payload came from the // mock builder. @@ -2707,7 +2746,7 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2716,14 +2755,11 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); - assert_eq!( - payload.execution_payload_header.fee_recipient, - expected_fee_recipient - ); - assert_eq!(payload.execution_payload_header.gas_limit, 30_000_000); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 30_000_000); // This cache should not be populated because fallback should not have been used. assert!(self @@ -2753,7 +2789,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2762,12 +2798,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.fee_recipient, - test_fee_recipient - ); + assert_eq!(payload.fee_recipient(), test_fee_recipient); // This cache should not be populated because fallback should not have been used. 
assert!(self @@ -2801,11 +2834,11 @@ impl ApiTester { .beacon_state .latest_execution_payload_header() .unwrap() - .block_hash; + .block_hash(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2814,12 +2847,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.parent_hash, - expected_parent_hash - ); + assert_eq!(payload.parent_hash(), expected_parent_hash); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -2856,7 +2886,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2865,12 +2895,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.prev_randao, - expected_prev_randao - ); + assert_eq!(payload.prev_randao(), expected_prev_randao); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -2901,12 +2928,12 @@ impl ApiTester { .beacon_state .latest_execution_payload_header() .unwrap() - .block_number + .block_number() + 1; let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2915,12 +2942,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert_eq!( - payload.execution_payload_header.block_number, - expected_block_number - ); + assert_eq!(payload.block_number(), expected_block_number); // If this cache is populated, it indicates fallback to the local EE was correctly used. 
assert!(self @@ -2951,11 +2975,11 @@ impl ApiTester { .beacon_state .latest_execution_payload_header() .unwrap() - .timestamp; + .timestamp(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -2964,9 +2988,9 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); - assert!(payload.execution_payload_header.timestamp > min_expected_timestamp); + assert!(payload.timestamp() > min_expected_timestamp); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -2991,7 +3015,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3000,7 +3024,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3028,7 +3052,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3037,7 +3061,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. 
assert!(self @@ -3071,7 +3095,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3080,7 +3104,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // This cache should not be populated because fallback should not have been used. assert!(self @@ -3100,7 +3124,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3109,7 +3133,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3149,7 +3173,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3158,7 +3182,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3188,7 +3212,7 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) .await @@ -3197,7 +3221,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // This cache should not be populated because fallback should not have been used. 
assert!(self @@ -3231,7 +3255,7 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3240,13 +3264,10 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); - assert_eq!( - payload.execution_payload_header.fee_recipient, - expected_fee_recipient - ); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3275,7 +3296,7 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload = self + let payload: BlindedPayload = self .client .get_validator_blinded_blocks::>(slot, &randao_reveal, None) .await @@ -3284,7 +3305,7 @@ impl ApiTester { .body() .execution_payload() .unwrap() - .clone(); + .into(); // If this cache is populated, it indicates fallback to the local EE was correctly used. assert!(self @@ -3297,6 +3318,209 @@ impl ApiTester { self } + pub async fn test_builder_payload_chosen_when_more_profitable(self) -> Self { + // Mutate value. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The builder's payload should've been chosen, so this cache should not be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_local_payload_chosen_when_equally_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen, so this cache should be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_local_payload_chosen_when_more_profitable(self) -> Self { + // Mutate value. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI - 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen, so this cache should be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_works_post_capella(self) -> Self { + // Ensure builder payload is chosen + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let propose_state = self + .harness + .chain + .state_at_slot(slot, StateSkipConfig::WithoutStateRoots) + .unwrap(); + let withdrawals = get_expected_withdrawals(&propose_state, &self.chain.spec).unwrap(); + let withdrawals_root = withdrawals.tree_hash_root(); + // Set withdrawals root for builder + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::WithdrawalsRoot(withdrawals_root)); + + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The builder's payload should've been chosen, so this cache should not be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub 
async fn test_lighthouse_rejects_invalid_withdrawals_root(self) -> Self { + // Ensure builder payload *would be* chosen + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + // Set withdrawals root to something invalid + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::WithdrawalsRoot(Hash256::repeat_byte(0x42))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen because the builder's was invalid + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -3766,9 +3990,9 @@ async fn get_events() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_events_altair() { - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - ApiTester::new_from_spec(spec) + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) .await .test_get_events_altair() .await; @@ -4281,6 +4505,38 @@ async fn builder_inadequate_builder_threshold() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_payload_chosen_by_profit() { + ApiTester::new_mev_tester_no_builder_threshold() + .await + .test_builder_payload_chosen_when_more_profitable() + .await + .test_local_payload_chosen_when_equally_profitable() + .await + 
.test_local_payload_chosen_when_more_profitable() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_works_post_capella() { + let mut config = ApiTesterConfig { + builder_threshold: Some(0), + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + .test_post_validator_register_validator() + .await + .test_builder_works_post_capella() + .await + .test_lighthouse_rejects_invalid_withdrawals_root() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 9b00c39d2b..f956d35d04 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -13,6 +13,8 @@ serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" +tree_hash = "0.4.1" +tree_hash_derive = "0.4.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } tokio = { version = "1.14.0", features = ["time", "macros"] } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 009aab8e3c..e403843323 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -306,8 +306,8 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos let topic_bytes = message.topic.as_str().as_bytes(); match fork_context.current_fork() { // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub - // the derivation of the message-id remains the same in the merge - 
ForkName::Altair | ForkName::Merge => { + // the derivation of the message-id remains the same in the merge and for eip 4844. + ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Eip4844 => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index be4da809cb..d7733f7cd3 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -15,6 +15,7 @@ pub mod peer_manager; pub mod rpc; pub mod types; +pub use crate::types::SignedBeaconBlockAndBlobsSidecar; pub use config::gossip_max_size; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 03f6a746ac..d9ea0dea03 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -534,6 +534,7 @@ impl PeerManager { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => PeerAction::LowToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, @@ -550,6 +551,7 @@ impl PeerManager { Protocol::Ping => PeerAction::Fatal, Protocol::BlocksByRange => return, Protocol::BlocksByRoot => return, + Protocol::BlobsByRange => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::MetaData => PeerAction::LowToleranceError, @@ -566,6 +568,7 @@ impl PeerManager { Protocol::Ping => PeerAction::LowToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, 
Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::Goodbye => return, Protocol::MetaData => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 53f85d9a7b..164a7c025d 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -193,14 +193,20 @@ mod tests { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); + let capella_fork_epoch = Epoch::new(3); + let eip4844_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); + chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Eip4844 => eip4844_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index eccbf0dd62..fe4c05fde7 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -15,9 +15,11 @@ use std::io::{Read, Write}; use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; +use types::light_client_bootstrap::LightClientBootstrap; use types::{ - light_client_bootstrap::LightClientBootstrap, EthSpec, ForkContext, ForkName, 
Hash256, - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, + BlobsSidecar, EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, + SignedBeaconBlockEip4844, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -70,6 +72,7 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => @@ -229,6 +232,7 @@ impl Encoder> for SSZSnappyOutboundCodec< OutboundRequest::Goodbye(req) => req.as_ssz_bytes(), OutboundRequest::BlocksByRange(req) => req.as_ssz_bytes(), OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), + OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(), @@ -409,6 +413,14 @@ fn context_bytes( return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! + SignedBeaconBlock::Eip4844 { .. } => { + // Eip4844 context being `None` implies that "merge never happened". + fork_context.to_context_bytes(ForkName::Eip4844) + } + SignedBeaconBlock::Capella { .. } => { + // Capella context being `None` implies that "merge never happened". + fork_context.to_context_bytes(ForkName::Capella) + } SignedBeaconBlock::Merge { .. } => { // Merge context being `None` implies that "merge never happened". 
fork_context.to_context_bytes(ForkName::Merge) @@ -471,6 +483,9 @@ fn handle_v1_request( Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, }))), + Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange( + BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -507,6 +522,9 @@ fn handle_v2_request( Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, }))), + Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange( + BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. Protocol::MetaData => { @@ -544,6 +562,7 @@ fn handle_v1_response( Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), + Protocol::BlobsByRange => Err(RPCError::InvalidData("blobs by range via v1".to_string())), Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -595,6 +614,16 @@ fn handle_v2_response( decoded_buffer, )?), )))), + ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( + decoded_buffer, + )?), + )))), + ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes( + decoded_buffer, + )?), + )))), }, Protocol::BlocksByRoot => match fork_name { ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( @@ -610,6 +639,25 @@ fn handle_v2_response( decoded_buffer, )?), )))), + ForkName::Capella => 
Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes( + decoded_buffer, + )?), + )))), + ForkName::Eip4844 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844::from_ssz_bytes( + decoded_buffer, + )?), + )))), + }, + Protocol::BlobsByRange => match fork_name { + ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRange(Arc::new( + BlobsSidecar::from_ssz_bytes(decoded_buffer)?, + )))), + _ => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid forkname for blobsbyrange".to_string(), + )), }, _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, @@ -645,8 +693,8 @@ mod tests { }; use std::sync::Arc; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, ForkContext, - FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, + ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, }; use snap::write::FrameEncoder; @@ -659,14 +707,20 @@ mod tests { let mut chain_spec = Spec::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); + let capella_fork_epoch = Epoch::new(3); + let eip4844_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); + chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Eip4844 => eip4844_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, 
Hash256::zero(), &chain_spec) } @@ -870,6 +924,9 @@ mod tests { OutboundRequest::BlocksByRoot(bbroot) => { assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) } + OutboundRequest::BlobsByRange(blbrange) => { + assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange)) + } OutboundRequest::Ping(ping) => { assert_eq!(decoded, InboundRequest::Ping(ping)) } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index bea0929fb0..e89d458503 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -67,6 +67,7 @@ pub struct OutboundRateLimiterConfig { pub(super) goodbye_quota: Quota, pub(super) blocks_by_range_quota: Quota, pub(super) blocks_by_root_quota: Quota, + pub(super) blobs_by_range_quota: Quota, } impl OutboundRateLimiterConfig { @@ -77,6 +78,8 @@ impl OutboundRateLimiterConfig { pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = + Quota::n_every(methods::MAX_REQUEST_BLOBS_SIDECARS, 10); } impl Default for OutboundRateLimiterConfig { @@ -88,6 +91,7 @@ impl Default for OutboundRateLimiterConfig { goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA, } } } @@ -111,6 +115,7 @@ impl Debug for OutboundRateLimiterConfig { .field("goodbye", fmt_q!(&self.goodbye_quota)) .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) + .field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota)) .finish() } } @@ -129,6 +134,7 @@ impl FromStr for OutboundRateLimiterConfig { let mut goodbye_quota = None; let mut blocks_by_range_quota = None; let mut 
blocks_by_root_quota = None; + let mut blobs_by_range_quota = None; for proto_def in s.split(';') { let ProtocolQuota { protocol, quota } = proto_def.parse()?; let quota = Some(quota); @@ -139,6 +145,7 @@ impl FromStr for OutboundRateLimiterConfig { Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), + Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota), Protocol::LightClientBootstrap => return Err("Lighthouse does not send LightClientBootstrap requests. Quota should not be set."), } } @@ -151,6 +158,8 @@ impl FromStr for OutboundRateLimiterConfig { .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), blocks_by_root_quota: blocks_by_root_quota .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + blobs_by_range_quota: blobs_by_range_quota + .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA), }) } } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 5da595c3db..d66d587a07 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -13,7 +13,8 @@ use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::{ - light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + blobs_sidecar::BlobsSidecar, light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, + Hash256, SignedBeaconBlock, Slot, }; /// Maximum number of blocks in a single request. @@ -24,6 +25,9 @@ pub const MAX_REQUEST_BLOCKS: u64 = 1024; pub type MaxErrorLen = U256; pub const MAX_ERROR_LEN: u64 = 256; +pub type MaxRequestBlobsSidecars = U1024; +pub const MAX_REQUEST_BLOBS_SIDECARS: u64 = 1024; + /// Wrapper over SSZ List to represent error message in rpc responses. 
#[derive(Debug, Clone)] pub struct ErrorType(pub VariableList); @@ -206,6 +210,16 @@ pub struct BlocksByRangeRequest { pub count: u64, } +/// Request a number of beacon blobs from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct BlobsByRangeRequest { + /// The starting slot to request blobs. + pub start_slot: u64, + + /// The number of blobs from the start slot. + pub count: u64, +} + /// Request a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct OldBlocksByRangeRequest { @@ -245,6 +259,9 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Arc>), + /// A response to a get BLOBS_BY_RANGE request + BlobsByRange(Arc>), + /// A response to a get LIGHTCLIENT_BOOTSTRAP request. LightClientBootstrap(LightClientBootstrap), @@ -263,6 +280,9 @@ pub enum ResponseTermination { /// Blocks by root stream termination. BlocksByRoot, + + /// Blobs by range stream termination. + BlobsByRange, } /// The structured response containing a result/code indicating success or failure @@ -330,6 +350,7 @@ impl RPCCodedResponse { RPCResponse::Status(_) => false, RPCResponse::BlocksByRange(_) => true, RPCResponse::BlocksByRoot(_) => true, + RPCResponse::BlobsByRange(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, RPCResponse::LightClientBootstrap(_) => false, @@ -365,6 +386,7 @@ impl RPCResponse { RPCResponse::Status(_) => Protocol::Status, RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, + RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -401,6 +423,9 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } + RPCResponse::BlobsByRange(blob) 
=> { + write!(f, "BlobsByRange: Blob slot: {}", blob.beacon_block_slot) + } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), RPCResponse::LightClientBootstrap(bootstrap) => { @@ -452,6 +477,12 @@ impl std::fmt::Display for OldBlocksByRangeRequest { } } +impl std::fmt::Display for BlobsByRangeRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Start Slot: {}, Count: {}", self.start_slot, self.count) + } +} + impl slog::KV for StatusMessage { fn serialize( &self, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 31569b820b..a455cef1a1 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -24,6 +24,7 @@ pub(crate) use handler::HandlerErr; pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; pub(crate) use protocol::{InboundRequest, RPCProtocol}; +use crate::rpc::methods::MAX_REQUEST_BLOBS_SIDECARS; pub use handler::SubstreamId; pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, @@ -144,6 +145,11 @@ impl RPC { Duration::from_secs(10), ) .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) + .n_every( + Protocol::BlobsByRange, + MAX_REQUEST_BLOBS_SIDECARS, + Duration::from_secs(10), + ) .build() .expect("Configuration parameters are valid"); @@ -339,6 +345,7 @@ where match end { ResponseTermination::BlocksByRange => Protocol::BlocksByRange, ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, + ResponseTermination::BlobsByRange => Protocol::BlobsByRange, }, ), }, diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 774303800e..250df1fa6b 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -38,6 +38,7 @@ pub enum OutboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + BlobsByRange(BlobsByRangeRequest), LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(PhantomData), @@ -76,6 +77,11 @@ impl OutboundRequest { ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ], + OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new( + Protocol::BlobsByRange, + Version::V1, + Encoding::SSZSnappy, + )], OutboundRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, @@ -100,6 +106,7 @@ impl OutboundRequest { OutboundRequest::Goodbye(_) => 0, OutboundRequest::BlocksByRange(req) => req.count, OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, + OutboundRequest::BlobsByRange(req) => req.count, OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, OutboundRequest::LightClientBootstrap(_) => 1, @@ -113,6 +120,7 @@ impl OutboundRequest { OutboundRequest::Goodbye(_) => Protocol::Goodbye, OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, + OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange, OutboundRequest::Ping(_) => Protocol::Ping, OutboundRequest::MetaData(_) => Protocol::MetaData, OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -127,6 +135,7 @@ impl OutboundRequest { // variants that have `multiple_responses()` can have values. 
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, OutboundRequest::LightClientBootstrap(_) => unreachable!(), OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), @@ -183,6 +192,7 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), OutboundRequest::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index e5d784d800..b6651021d8 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -20,9 +20,11 @@ use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; +use types::BlobsSidecar; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EthSpec, ForkContext, - ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, Blob, + EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, + SignedBeaconBlock, }; lazy_static! { @@ -61,6 +63,13 @@ lazy_static! 
{ .as_ssz_bytes() .len(); + pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD: usize = SignedBeaconBlock::::from_block( + BeaconBlock::Capella(BeaconBlockCapella::full(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len(); + /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network @@ -68,9 +77,19 @@ lazy_static! { pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX - + types::ExecutionPayload::::max_execution_payload_size() // adding max size of execution payload (~16gb) + + types::ExecutionPayload::::max_execution_payload_merge_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field + pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + + types::ExecutionPayload::::max_execution_payload_capella_size() // adding max size of execution payload (~16gb) + + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field + + pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + + types::ExecutionPayload::::max_execution_payload_eip4844_size() // adding max size of execution payload (~16gb) + + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` + + (::ssz_fixed_len() * ::max_blobs_per_block()) + + ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field. + pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -96,12 +115,21 @@ lazy_static! 
{ .as_ssz_bytes() .len(); + pub static ref BLOBS_SIDECAR_MIN: usize = BlobsSidecar::::empty() + .as_ssz_bytes() + .len(); + + pub static ref BLOBS_SIDECAR_MAX: usize = *BLOBS_SIDECAR_MIN // Max size of variable length `blobs` field + + (MainnetEthSpec::max_blobs_per_block() * as Encode>::ssz_fixed_len()); } /// The maximum bytes that can be sent across the RPC pre-merge. pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The maximum bytes that can be sent across the RPC post-merge. pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M +pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M + // FIXME(sean) should this be increased to account for blobs? +pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). @@ -113,8 +141,10 @@ const REQUEST_TIMEOUT: u64 = 15; /// Returns the maximum bytes that can be sent across the RPC. 
pub fn max_rpc_size(fork_context: &ForkContext) -> usize { match fork_context.current_fork() { - ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, ForkName::Altair | ForkName::Base => MAX_RPC_SIZE, + ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, + ForkName::Capella => MAX_RPC_SIZE_POST_CAPELLA, + ForkName::Eip4844 => MAX_RPC_SIZE_POST_EIP4844, } } @@ -135,6 +165,14 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks *SIGNED_BEACON_BLOCK_MERGE_MAX, // Merge block is larger than base and altair blocks ), + ForkName::Capella => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks + *SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks + ), + ForkName::Eip4844 => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks + *SIGNED_BEACON_BLOCK_EIP4844_MAX, // EIP 4844 block is larger than all prior fork blocks + ), } } @@ -152,6 +190,8 @@ pub enum Protocol { /// The `BlocksByRoot` protocol name. #[strum(serialize = "beacon_blocks_by_root")] BlocksByRoot, + /// The `BlobsByRange` protocol name. + BlobsByRange, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. 
@@ -287,6 +327,10 @@ impl ProtocolId { Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) } + Protocol::BlobsByRange => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -309,7 +353,7 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), - + Protocol::BlobsByRange => RpcLimits::new(*BLOBS_SIDECAR_MIN, *BLOBS_SIDECAR_MAX), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -427,6 +471,7 @@ pub enum InboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + BlobsByRange(BlobsByRangeRequest), LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(PhantomData), @@ -443,6 +488,7 @@ impl InboundRequest { InboundRequest::Goodbye(_) => 0, InboundRequest::BlocksByRange(req) => req.count, InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64, + InboundRequest::BlobsByRange(req) => req.count, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, InboundRequest::LightClientBootstrap(_) => 1, @@ -456,6 +502,7 @@ impl InboundRequest { InboundRequest::Goodbye(_) => Protocol::Goodbye, InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange, InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, + InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange, InboundRequest::Ping(_) => Protocol::Ping, InboundRequest::MetaData(_) => Protocol::MetaData, InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -470,6 +517,7 @@ impl InboundRequest { // variants that have `multiple_responses()` can have values. 
InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, InboundRequest::Status(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), @@ -576,6 +624,7 @@ impl std::fmt::Display for InboundRequest { InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), InboundRequest::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index a1f7b89a2f..163d9a84ea 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -93,6 +93,8 @@ pub struct RPCRateLimiter { bbrange_rl: Limiter, /// BlocksByRoot rate limiter. bbroots_rl: Limiter, + /// BlobsByRange rate limiter. + blbrange_rl: Limiter, /// LightClientBootstrap rate limiter. lcbootstrap_rl: Limiter, } @@ -121,6 +123,8 @@ pub struct RPCRateLimiterBuilder { bbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, + /// Quota for the BlobsByRange protocol. + blbrange_quota: Option, /// Quota for the LightClientBootstrap protocol. 
lcbootstrap_quota: Option, } @@ -136,6 +140,7 @@ impl RPCRateLimiterBuilder { Protocol::Goodbye => self.goodbye_quota = q, Protocol::BlocksByRange => self.bbrange_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q, + Protocol::BlobsByRange => self.blbrange_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, } self @@ -180,6 +185,10 @@ impl RPCRateLimiterBuilder { .lcbootstrap_quota .ok_or("LightClientBootstrap quota not specified")?; + let blbrange_quota = self + .blbrange_quota + .ok_or("BlobsByRange quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -187,6 +196,7 @@ impl RPCRateLimiterBuilder { let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; + let blbrange_rl = Limiter::from_quota(blbrange_quota)?; let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quote)?; // check for peers to prune every 30 seconds, starting in 30 seconds @@ -201,6 +211,7 @@ impl RPCRateLimiterBuilder { goodbye_rl, bbroots_rl, bbrange_rl, + blbrange_rl, lcbootstrap_rl, init_time: Instant::now(), }) @@ -254,6 +265,7 @@ impl RPCRateLimiter { Protocol::Goodbye => &mut self.goodbye_rl, Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, + Protocol::BlobsByRange => &mut self.blbrange_rl, Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl, }; check(limiter) @@ -267,6 +279,7 @@ impl RPCRateLimiter { self.goodbye_rl.prune(time_since_start); self.bbrange_rl.prune(time_since_start); self.bbroots_rl.prune(time_since_start); + self.blbrange_rl.prune(time_since_start); } } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 451c6206f3..61e9b46a90 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -60,6 +60,7 @@ impl SelfRateLimiter { goodbye_quota, blocks_by_range_quota, blocks_by_root_quota, + blobs_by_range_quota, } = config; let limiter = RateLimiter::builder() @@ -69,6 +70,7 @@ impl SelfRateLimiter { .set_quota(Protocol::Goodbye, goodbye_quota) .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) + .set_quota(Protocol::BlobsByRange, blobs_by_range_quota) // Manually set the LightClientBootstrap quota, since we use the same rate limiter for // inbound and outbound requests, and the LightClientBootstrap is an only inbound // protocol. diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 849a86f51b..5152c187e3 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,8 +1,10 @@ use std::sync::Arc; use libp2p::core::connection::ConnectionId; -use types::{light_client_bootstrap::LightClientBootstrap, EthSpec, SignedBeaconBlock}; +use types::light_client_bootstrap::LightClientBootstrap; +use types::{BlobsSidecar, EthSpec, SignedBeaconBlock}; +use crate::rpc::methods::BlobsByRangeRequest; use crate::rpc::{ methods::{ BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, @@ -32,6 +34,8 @@ pub enum Request { Status(StatusMessage), /// A blocks by range request. BlocksByRange(BlocksByRangeRequest), + /// A blobs by range request. + BlobsByRange(BlobsByRangeRequest), /// A request blocks root request. 
BlocksByRoot(BlocksByRootRequest), // light client bootstrap request @@ -49,6 +53,7 @@ impl std::convert::From for OutboundRequest { step: 1, }) } + Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b), Request::Status(s) => OutboundRequest::Status(s), } @@ -67,6 +72,8 @@ pub enum Response { Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. BlocksByRange(Option>>), + /// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch. + BlobsByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option>>), /// A response to a LightClientUpdate request. @@ -84,6 +91,10 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, + Response::BlobsByRange(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), + }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), Response::LightClientBootstrap(b) => { RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index c784191cd3..d3971a7d74 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -20,6 +20,8 @@ pub struct GossipCache { topic_msgs: HashMap, Key>>, /// Timeout for blocks. beacon_block: Option, + /// Timeout for blobs. + beacon_block_and_blobs_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. 
@@ -34,6 +36,8 @@ pub struct GossipCache { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. sync_committee_message: Option, + /// Timeout for signed BLS to execution changes. + bls_to_execution_change: Option, /// Timeout for light client finality updates. light_client_finality_update: Option, /// Timeout for light client optimistic updates. @@ -45,6 +49,8 @@ pub struct GossipCacheBuilder { default_timeout: Option, /// Timeout for blocks. beacon_block: Option, + /// Timeout for blob sidecars. + beacon_block_and_blobs_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. @@ -59,6 +65,8 @@ pub struct GossipCacheBuilder { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. sync_committee_message: Option, + /// Timeout for signed BLS to execution changes. + bls_to_execution_change: Option, /// Timeout for light client finality updates. light_client_finality_update: Option, /// Timeout for light client optimistic updates. @@ -121,6 +129,12 @@ impl GossipCacheBuilder { self } + /// Timeout for BLS to execution change messages. + pub fn bls_to_execution_change_timeout(mut self, timeout: Duration) -> Self { + self.bls_to_execution_change = Some(timeout); + self + } + /// Timeout for light client finality update messages. 
pub fn light_client_finality_update_timeout(mut self, timeout: Duration) -> Self { self.light_client_finality_update = Some(timeout); @@ -137,6 +151,7 @@ impl GossipCacheBuilder { let GossipCacheBuilder { default_timeout, beacon_block, + beacon_block_and_blobs_sidecar, aggregates, attestation, voluntary_exit, @@ -144,6 +159,7 @@ impl GossipCacheBuilder { attester_slashing, signed_contribution_and_proof, sync_committee_message, + bls_to_execution_change, light_client_finality_update, light_client_optimistic_update, } = self; @@ -151,6 +167,7 @@ impl GossipCacheBuilder { expirations: DelayQueue::default(), topic_msgs: HashMap::default(), beacon_block: beacon_block.or(default_timeout), + beacon_block_and_blobs_sidecar: beacon_block_and_blobs_sidecar.or(default_timeout), aggregates: aggregates.or(default_timeout), attestation: attestation.or(default_timeout), voluntary_exit: voluntary_exit.or(default_timeout), @@ -158,6 +175,7 @@ impl GossipCacheBuilder { attester_slashing: attester_slashing.or(default_timeout), signed_contribution_and_proof: signed_contribution_and_proof.or(default_timeout), sync_committee_message: sync_committee_message.or(default_timeout), + bls_to_execution_change: bls_to_execution_change.or(default_timeout), light_client_finality_update: light_client_finality_update.or(default_timeout), light_client_optimistic_update: light_client_optimistic_update.or(default_timeout), } @@ -175,6 +193,7 @@ impl GossipCache { pub fn insert(&mut self, topic: GossipTopic, data: Vec) { let expire_timeout = match topic.kind() { GossipKind::BeaconBlock => self.beacon_block, + GossipKind::BeaconBlocksAndBlobsSidecar => self.beacon_block_and_blobs_sidecar, GossipKind::BeaconAggregateAndProof => self.aggregates, GossipKind::Attestation(_) => self.attestation, GossipKind::VoluntaryExit => self.voluntary_exit, @@ -182,6 +201,7 @@ impl GossipCache { GossipKind::AttesterSlashing => self.attester_slashing, GossipKind::SignedContributionAndProof => 
self.signed_contribution_and_proof, GossipKind::SyncCommitteeMessage(_) => self.sync_committee_message, + GossipKind::BlsToExecutionChange => self.bls_to_execution_change, GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, }; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 832f025c43..151eef4e80 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,3 +1,5 @@ +use self::behaviour::Behaviour; +use self::gossip_cache::GossipCache; use crate::config::{gossipsub_config, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, @@ -7,15 +9,16 @@ use crate::peer_manager::{ ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; +use crate::rpc::*; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; +use crate::EnrExt; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use crate::{rpc::*, EnrExt}; use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; @@ -31,20 +34,18 @@ use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; use libp2p::PeerId; use slog::{crit, debug, info, o, trace, warn}; - -use std::marker::PhantomData; use std::path::PathBuf; use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; +use std::{ + marker::PhantomData, + sync::Arc, + 
task::{Context, Poll}, +}; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; -use self::behaviour::Behaviour; -use self::gossip_cache::GossipCache; - pub mod api_types; mod behaviour; mod gossip_cache; @@ -197,6 +198,7 @@ impl Network { .attester_slashing_timeout(half_epoch * 2) // .signed_contribution_and_proof_timeout(timeout) // Do not retry // .sync_committee_message_timeout(timeout) // Do not retry + .bls_to_execution_change_timeout(half_epoch * 2) .build() }; @@ -996,6 +998,9 @@ impl Network { Request::BlocksByRoot { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } + Request::BlobsByRange { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]) + } } NetworkEvent::RequestReceived { peer_id, @@ -1259,6 +1264,14 @@ impl Network { ); Some(event) } + InboundRequest::BlobsByRange(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::BlobsByRange(req), + ); + Some(event) + } InboundRequest::LightClientBootstrap(req) => { let event = self.build_request( peer_request_id, @@ -1291,6 +1304,9 @@ impl Network { RPCResponse::BlocksByRange(resp) => { self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } + RPCResponse::BlobsByRange(resp) => { + self.build_response(id, peer_id, Response::BlobsByRange(Some(resp))) + } RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } @@ -1304,6 +1320,7 @@ impl Network { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), + ResponseTermination::BlobsByRange => Response::BlobsByRange(None), }; self.build_response(id, peer_id, response) } diff --git 
a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index addaaf5b5e..625df65ee9 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -252,6 +252,7 @@ pub(crate) fn create_whitelist_filter( add(ProposerSlashing); add(AttesterSlashing); add(SignedContributionAndProof); + add(BlsToExecutionChange); add(LightClientFinalityUpdate); add(LightClientOptimisticUpdate); for id in 0..attestation_subnet_count { diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 2a5ca6c806..e8894cb711 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -13,7 +13,7 @@ pub type EnrSyncCommitteeBitfield = BitVector<::SyncCommitteeSu pub type Enr = discv5::enr::Enr; pub use globals::NetworkGlobals; -pub use pubsub::{PubsubMessage, SnappyTransform}; +pub use pubsub::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar, SnappyTransform}; pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; pub use topics::{ diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index b036e558c9..08efeb7c69 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -3,22 +3,39 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::TopicHash; use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage}; +use serde_derive::{Deserialize, Serialize}; use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; +use tree_hash_derive::TreeHash; use types::{ - Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, - 
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, + SignedBeaconBlockCapella, SignedBeaconBlockEip4844, SignedBeaconBlockMerge, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; +/// TODO(pawan): move this to consensus/types? strictly not a consensus type +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq)] +#[serde(bound = "T: EthSpec")] +pub struct SignedBeaconBlockAndBlobsSidecar { + // TODO(pawan): switch to a SignedBeaconBlock and use ssz offsets for decoding to make this + // future proof? + pub beacon_block: SignedBeaconBlockEip4844, + pub blobs_sidecar: BlobsSidecar, +} + #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. BeaconBlock(Arc>), + /// Gossipsub message providing notification of a new SignedBeaconBlock coupled with a blobs sidecar. + BeaconBlockAndBlobsSidecars(Arc>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. @@ -33,6 +50,8 @@ pub enum PubsubMessage { SignedContributionAndProof(Box>), /// Gossipsub message providing notification of unaggregated sync committee signatures with its subnet id. SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>), + /// Gossipsub message for BLS to execution change messages. 
+ BlsToExecutionChange(Box), /// Gossipsub message providing notification of a light client finality update. LightClientFinalityUpdate(Box>), /// Gossipsub message providing notification of a light client optimistic update. @@ -110,6 +129,9 @@ impl PubsubMessage { pub fn kind(&self) -> GossipKind { match self { PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock, + PubsubMessage::BeaconBlockAndBlobsSidecars(_) => { + GossipKind::BeaconBlocksAndBlobsSidecar + } PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) @@ -119,6 +141,7 @@ impl PubsubMessage { PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof, PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0), + PubsubMessage::BlsToExecutionChange(_) => GossipKind::BlsToExecutionChange, PubsubMessage::LightClientFinalityUpdate(_) => GossipKind::LightClientFinalityUpdate, PubsubMessage::LightClientOptimisticUpdate(_) => { GossipKind::LightClientOptimisticUpdate @@ -175,6 +198,16 @@ impl PubsubMessage { SignedBeaconBlockMerge::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), + Some(ForkName::Eip4844) => { + return Err( + "beacon_block topic is not used from eip4844 fork onwards" + .to_string(), + ) + } + Some(ForkName::Capella) => SignedBeaconBlock::::Capella( + SignedBeaconBlockCapella::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", @@ -184,6 +217,28 @@ impl PubsubMessage { }; Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } + GossipKind::BeaconBlocksAndBlobsSidecar => { + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(ForkName::Eip4844) => { + let block_and_blobs_sidecar = + 
SignedBeaconBlockAndBlobsSidecar::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new( + block_and_blobs_sidecar, + ))) + } + Some( + ForkName::Base + | ForkName::Altair + | ForkName::Merge + | ForkName::Capella, + ) + | None => Err(format!( + "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )), + } + } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?; @@ -214,6 +269,14 @@ impl PubsubMessage { sync_committee, )))) } + GossipKind::BlsToExecutionChange => { + let bls_to_execution_change = + SignedBlsToExecutionChange::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::BlsToExecutionChange(Box::new( + bls_to_execution_change, + ))) + } GossipKind::LightClientFinalityUpdate => { let light_client_finality_update = LightClientFinalityUpdate::from_ssz_bytes(data) @@ -244,6 +307,7 @@ impl PubsubMessage { // messages for us. 
match &self { PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), + PubsubMessage::BeaconBlockAndBlobsSidecars(data) => data.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), @@ -251,6 +315,7 @@ impl PubsubMessage { PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(), PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(), PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(), + PubsubMessage::BlsToExecutionChange(data) => data.as_ssz_bytes(), PubsubMessage::LightClientFinalityUpdate(data) => data.as_ssz_bytes(), PubsubMessage::LightClientOptimisticUpdate(data) => data.as_ssz_bytes(), } @@ -266,6 +331,12 @@ impl std::fmt::Display for PubsubMessage { block.slot(), block.message().proposer_index() ), + PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blob) => write!( + f, + "Beacon block and Blobs Sidecar: slot: {}, blobs: {}", + block_and_blob.beacon_block.message.slot, + block_and_blob.blobs_sidecar.blobs.len(), + ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}", @@ -287,6 +358,13 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::SyncCommitteeMessage(data) => { write!(f, "Sync committee message: subnet_id: {}", *data.0) } + PubsubMessage::BlsToExecutionChange(data) => { + write!( + f, + "Signed BLS to execution change: validator_index: {}, address: {:?}", + data.message.validator_index, data.message.to_execution_address + ) + } PubsubMessage::LightClientFinalityUpdate(_data) => { write!(f, "Light CLient Finality Update") } diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index e7e3cf4abb..b83b03d6b2 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ 
b/beacon_node/lighthouse_network/src/types/topics.rs @@ -11,6 +11,7 @@ use crate::Subnet; pub const TOPIC_PREFIX: &str = "eth2"; pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; +pub const BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC: &str = "beacon_block_and_blobs_sidecar"; pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; @@ -18,16 +19,18 @@ pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof"; pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; +pub const BLS_TO_EXECUTION_CHANGE_TOPIC: &str = "bls_to_execution_change"; pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; -pub const CORE_TOPICS: [GossipKind; 6] = [ +pub const CORE_TOPICS: [GossipKind; 7] = [ GossipKind::BeaconBlock, GossipKind::BeaconAggregateAndProof, GossipKind::VoluntaryExit, GossipKind::ProposerSlashing, GossipKind::AttesterSlashing, GossipKind::SignedContributionAndProof, + GossipKind::BlsToExecutionChange, ]; pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ @@ -54,6 +57,8 @@ pub struct GossipTopic { pub enum GossipKind { /// Topic for publishing beacon blocks. BeaconBlock, + /// Topic for publishing beacon block coupled with blob sidecars. + BeaconBlocksAndBlobsSidecar, /// Topic for publishing aggregate attestations and proofs. BeaconAggregateAndProof, /// Topic for publishing raw attestations on a particular subnet. @@ -70,6 +75,8 @@ pub enum GossipKind { /// Topic for publishing unaggregated sync committee signatures on a particular subnet. 
#[strum(serialize = "sync_committee")] SyncCommitteeMessage(SyncSubnetId), + /// Topic for validator messages which change their withdrawal address. + BlsToExecutionChange, /// Topic for publishing finality updates for light clients. LightClientFinalityUpdate, /// Topic for publishing optimistic updates for light clients. @@ -143,10 +150,12 @@ impl GossipTopic { let kind = match topic_parts[3] { BEACON_BLOCK_TOPIC => GossipKind::BeaconBlock, BEACON_AGGREGATE_AND_PROOF_TOPIC => GossipKind::BeaconAggregateAndProof, + BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC => GossipKind::BeaconBlocksAndBlobsSidecar, SIGNED_CONTRIBUTION_AND_PROOF_TOPIC => GossipKind::SignedContributionAndProof, VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, + BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange, LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, topic => match committee_topic_index(topic) { @@ -198,6 +207,7 @@ impl std::fmt::Display for GossipTopic { let kind = match self.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), + GossipKind::BeaconBlocksAndBlobsSidecar => BEACON_BLOCK_AND_BLOBS_SIDECAR_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), @@ -207,6 +217,7 @@ impl std::fmt::Display for GossipTopic { GossipKind::SyncCommitteeMessage(index) => { format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) } + GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), }; @@ -281,6 +292,7 @@ mod tests { VoluntaryExit, 
ProposerSlashing, AttesterSlashing, + BeaconBlocksAndBlobsSidecar, ] .iter() { diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index b67b412cfc..8cc46940b9 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -25,14 +25,20 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { let mut chain_spec = E::default_spec(); let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); + let capella_fork_epoch = Epoch::new(3); + let eip4844_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + chain_spec.capella_fork_epoch = Some(capella_fork_epoch); + chain_spec.eip4844_fork_epoch = Some(eip4844_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Eip4844 => eip4844_fork_epoch.start_slot(E::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 9183453492..ebdbb67421 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -9,8 +9,8 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, EthSpec, ForkContext, - ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, EthSpec, + ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, 
}; mod common; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 1b036b32c6..95d8a294c1 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -45,6 +45,7 @@ tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" delay_map = "0.1.1" ethereum-types = { version = "0.14.1", optional = true } +operation_pool = { path = "../operation_pool" } execution_layer = { path = "../execution_layer" } [features] diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 8118443a65..018e6f7e34 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -45,7 +45,9 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExe use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; +use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::rpc::LightClientBootstrapRequest; +use lighthouse_network::SignedBeaconBlockAndBlobsSidecar; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -63,8 +65,8 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, - SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, @@ -114,6 +116,10 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 
1_024; /// before we start dropping them. const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that +/// will be stored before we start dropping them. +const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but /// within acceptable clock disparity) that will be queued before we start dropping them. const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024; @@ -166,10 +172,18 @@ const MAX_STATUS_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; +const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that /// will be stored before we start dropping them. const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; +/// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them. +/// +/// This value is set high to accommodate the large spike that is expected immediately after Capella +/// is activated. +const MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN: usize = 16_384; + /// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that /// will be stored before we start dropping them. 
const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; @@ -202,6 +216,7 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch"; pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate"; pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch"; pub const GOSSIP_BLOCK: &str = "gossip_block"; +pub const GOSSIP_BLOCK_AND_BLOBS_SIDECAR: &str = "gossip_block_and_blobs_sidecar"; pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; @@ -215,10 +230,12 @@ pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; +pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update"; +pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change"; /// A simple first-in-first-out queue with a maximum length. struct FifoQueue { @@ -422,6 +439,26 @@ impl WorkEvent { } } + /// Create a new `Work` event for some blobs sidecar. + pub fn gossip_block_and_blobs_sidecar( + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + block_and_blobs: Arc>, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipBlockAndBlobsSidecar { + message_id, + peer_id, + peer_client, + block_and_blobs, + seen_timestamp, + }, + } + } + /// Create a new `Work` event for some sync committee signature. 
pub fn gossip_sync_signature( message_id: MessageId, @@ -544,6 +581,22 @@ impl WorkEvent { } } + /// Create a new `Work` event for some BLS to execution change. + pub fn gossip_bls_to_execution_change( + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::GossipBlsToExecutionChange { + message_id, + peer_id, + bls_to_execution_change, + }, + } + } + /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn rpc_beacon_block( @@ -615,6 +668,21 @@ impl WorkEvent { } } + pub fn blobs_by_range_request( + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRangeRequest, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::BlobsByRangeRequest { + peer_id, + request_id, + request, + }, + } + } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. pub fn lightclient_bootstrap_request( peer_id: PeerId, @@ -770,6 +838,13 @@ pub enum Work { block: Arc>, seen_timestamp: Duration, }, + GossipBlockAndBlobsSidecar { + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + block_and_blobs: Arc>, + seen_timestamp: Duration, + }, DelayedImportBlock { peer_id: PeerId, block: Box>, @@ -840,6 +915,16 @@ pub enum Work { request_id: PeerRequestId, request: BlocksByRootRequest, }, + BlobsByRangeRequest { + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRangeRequest, + }, + GossipBlsToExecutionChange { + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + }, LightClientBootstrapRequest { peer_id: PeerId, request_id: PeerRequestId, @@ -856,6 +941,7 @@ impl Work { Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, Work::GossipBlock { .. } => GOSSIP_BLOCK, + Work::GossipBlockAndBlobsSidecar { .. } => GOSSIP_BLOCK_AND_BLOBS_SIDECAR, Work::DelayedImportBlock { .. 
} => DELAYED_IMPORT_BLOCK, Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING, @@ -869,9 +955,11 @@ impl Work { Work::Status { .. } => STATUS_PROCESSING, Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST, + Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST, Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, + Work::GossipBlsToExecutionChange { .. } => GOSSIP_BLS_TO_EXECUTION_CHANGE, Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, } } @@ -1015,11 +1103,18 @@ impl BeaconProcessor { let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); + let mut gossip_block_and_blobs_sidecar_queue = + FifoQueue::new(MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); + let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN); + + let mut gossip_bls_to_execution_change_queue = + FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); + let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). @@ -1122,6 +1217,8 @@ impl BeaconProcessor { // required to verify some attestations. 
} else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, toolbox); + } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() { + self.spawn_worker(item, toolbox); // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us // more information with less signature verification time. @@ -1252,9 +1349,12 @@ impl BeaconProcessor { self.spawn_worker(item, toolbox); } else if let Some(item) = gossip_proposer_slashing_queue.pop() { self.spawn_worker(item, toolbox); - // Check exits last since our validators don't get rewards from them. + // Check exits and address changes late since our validators don't get + // rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { self.spawn_worker(item, toolbox); + } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { + self.spawn_worker(item, toolbox); // Handle backfill sync chain segments. } else if let Some(item) = backfill_chain_segment.pop() { self.spawn_worker(item, toolbox); @@ -1328,6 +1428,9 @@ impl BeaconProcessor { Work::GossipBlock { .. } => { gossip_block_queue.push(work, work_id, &self.log) } + Work::GossipBlockAndBlobsSidecar { .. } => { + gossip_block_and_blobs_sidecar_queue.push(work, work_id, &self.log) + } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id, &self.log) } @@ -1367,6 +1470,9 @@ impl BeaconProcessor { Work::BlocksByRootsRequest { .. } => { bbroots_queue.push(work, work_id, &self.log) } + Work::BlobsByRangeRequest { .. } => { + blbrange_queue.push(work, work_id, &self.log) + } Work::LightClientBootstrapRequest { .. } => { lcbootstrap_queue.push(work, work_id, &self.log) } @@ -1376,6 +1482,9 @@ impl BeaconProcessor { Work::UnknownBlockAggregate { .. } => { unknown_block_aggregate_queue.push(work) } + Work::GossipBlsToExecutionChange { .. 
} => { + gossip_bls_to_execution_change_queue.push(work, work_id, &self.log) + } Work::UnknownLightClientOptimisticUpdate { .. } => { unknown_light_client_update_queue.push(work, work_id, &self.log) } @@ -1431,6 +1540,10 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL, gossip_attester_slashing_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL, + gossip_bls_to_execution_change_queue.len() as i64, + ); if aggregate_queue.is_full() && aggregate_debounce.elapsed() { error!( @@ -1592,6 +1705,12 @@ impl BeaconProcessor { ) .await }), + /* + * Verification for blobs sidecars received on gossip. + */ + Work::GossipBlockAndBlobsSidecar { .. } => { + warn!(self.log, "Unexpected block and blobs on gossip") + } /* * Import for blocks that we received earlier than their intended slot. */ @@ -1669,6 +1788,20 @@ impl BeaconProcessor { seen_timestamp, ) }), + /* + * BLS to execution change verification. + */ + Work::GossipBlsToExecutionChange { + message_id, + peer_id, + bls_to_execution_change, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_bls_to_execution_change( + message_id, + peer_id, + *bls_to_execution_change, + ) + }), /* * Light client finality update verification. */ @@ -1779,6 +1912,9 @@ impl BeaconProcessor { request, ) }), + Work::BlobsByRangeRequest { .. } => { + warn!(self.log.clone(), "Unexpected BlobsByRange Request") + } /* * Processing of lightclient bootstrap requests from other peers. 
*/ diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 3601ccb195..f2b1b3a26b 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -12,6 +12,7 @@ use beacon_chain::{ GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; +use operation_pool::ReceivedPreCapella; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; @@ -22,8 +23,8 @@ use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, - SyncSubnetId, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; use super::{ @@ -676,6 +677,7 @@ impl Worker { .await { let block_root = gossip_verified_block.block_root; + if let Some(handle) = duplicate_cache.check_and_insert(block_root) { self.process_gossip_verified_block( peer_id, @@ -1190,6 +1192,83 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); } + pub fn process_gossip_bls_to_execution_change( + self, + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: SignedBlsToExecutionChange, + ) { + let validator_index = bls_to_execution_change.message.validator_index; + let address = bls_to_execution_change.message.to_execution_address; + + let change = match self + .chain + .verify_bls_to_execution_change_for_gossip(bls_to_execution_change) + { + Ok(ObservationOutcome::New(change)) => change, + Ok(ObservationOutcome::AlreadyKnown) => { + 
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + debug!( + self.log, + "Dropping BLS to execution change"; + "validator_index" => validator_index, + "peer" => %peer_id + ); + return; + } + Err(e) => { + debug!( + self.log, + "Dropping invalid BLS to execution change"; + "validator_index" => validator_index, + "peer" => %peer_id, + "error" => ?e + ); + // We ignore pre-capella messages without penalizing peers. + if matches!(e, BeaconChainError::BlsToExecutionPriorToCapella) { + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } else { + // We penalize the peer slightly to prevent overuse of invalids. + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_bls_to_execution_change", + ); + } + return; + } + }; + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + // Address change messages from gossip are only processed *after* the + // Capella fork epoch. + let received_pre_capella = ReceivedPreCapella::No; + + self.chain + .import_bls_to_execution_change(change, received_pre_capella); + + debug!( + self.log, + "Successfully imported BLS to execution change"; + "validator_index" => validator_index, + "address" => ?address, + ); + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL); + } + /// Process the sync committee signature received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 8dc76877a1..ee029e1351 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -145,6 +145,19 @@ lazy_static! 
{ "beacon_processor_attester_slashing_imported_total", "Total number of attester slashings imported to the op pool." ); + // Gossip BLS to execution changes. + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_bls_to_execution_change_queue_total", + "Count of address changes from gossip waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_bls_to_execution_change_verified_total", + "Total number of address changes verified for propagation." + ); + pub static ref BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_bls_to_execution_change_imported_total", + "Total number of address changes imported to the op pool." + ); // Rpc blocks. pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_rpc_block_queue_total", @@ -154,6 +167,15 @@ lazy_static! { "beacon_processor_rpc_block_imported_total", "Total number of gossip blocks imported to fork choice, etc." ); + // Rpc blobs. + pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_rpc_blob_queue_total", + "Count of blobs from the rpc waiting to be verified." + ); + pub static ref BEACON_PROCESSOR_RPC_BLOB_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_rpc_blob_imported_total", + "Total number of gossip blobs imported." + ); // Chain segments. 
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_chain_segment_queue_total", diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index ce98337cfe..ceb5abc568 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -168,6 +168,9 @@ impl Router { Request::BlocksByRoot(request) => self .processor .on_blocks_by_root_request(peer_id, id, request), + Request::BlobsByRange(request) => self + .processor + .on_blobs_by_range_request(peer_id, id, request), Request::LightClientBootstrap(request) => self .processor .on_lightclient_bootstrap(peer_id, id, request), @@ -195,6 +198,10 @@ impl Router { self.processor .on_blocks_by_root_response(peer_id, request_id, beacon_block); } + Response::BlobsByRange(beacon_blob) => { + self.processor + .on_blobs_by_range_response(peer_id, request_id, beacon_blob); + } Response::LightClientBootstrap(_) => unreachable!(), } } @@ -233,6 +240,14 @@ impl Router { block, ); } + PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blobs) => { + self.processor.on_block_and_blobs_sidecar_gossip( + id, + peer_id, + self.network_globals.client(&peer_id), + block_and_blobs, + ); + } PubsubMessage::VoluntaryExit(exit) => { debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); self.processor.on_voluntary_exit_gossip(id, peer_id, exit); @@ -280,6 +295,18 @@ impl Router { sync_committtee_msg.0, ); } + PubsubMessage::BlsToExecutionChange(bls_to_execution_change) => { + trace!( + self.log, + "Received BLS to execution change"; + "peer_id" => %peer_id + ); + self.processor.on_bls_to_execution_change_gossip( + id, + peer_id, + bls_to_execution_change, + ); + } PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { trace!( self.log, diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 999ba29e90..cd42c57fca 100644 --- 
a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -6,7 +6,8 @@ use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use lighthouse_network::rpc::*; +use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::{rpc::*, SignedBeaconBlockAndBlobsSidecar}; use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, }; @@ -17,9 +18,10 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::SyncCommitteeMessage; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, LightClientFinalityUpdate, LightClientOptimisticUpdate, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, - SignedVoluntaryExit, SubnetId, SyncSubnetId, + Attestation, AttesterSlashing, BlobsSidecar, EthSpec, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncSubnetId, }; /// Processes validated messages from the network. It relays necessary data to the syncing thread @@ -161,6 +163,17 @@ impl Processor { )) } + pub fn on_blobs_by_range_request( + &mut self, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRangeRequest, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::blobs_by_range_request( + peer_id, request_id, request, + )) + } + /// Handle a `LightClientBootstrap` request from the peer. 
pub fn on_lightclient_bootstrap( &mut self, @@ -217,6 +230,33 @@ impl Processor { }); } + pub fn on_blobs_by_range_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + blob_wrapper: Option>>, + ) { + trace!( + self.log, + "Received BlobsByRange Response"; + "peer" => %peer_id, + ); + + if let RequestId::Sync(id) = request_id { + self.send_to_sync(SyncMessage::RpcBlob { + peer_id, + request_id: id, + blob_sidecar: blob_wrapper, + seen_timestamp: timestamp_now(), + }); + } else { + debug!( + self.log, + "All blobs by range responses should belong to sync" + ); + } + } + /// Handle a `BlocksByRoot` response from the peer. pub fn on_blocks_by_root_response( &mut self, @@ -268,6 +308,22 @@ impl Processor { )) } + pub fn on_block_and_blobs_sidecar_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + block_and_blobs: Arc>, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_block_and_blobs_sidecar( + message_id, + peer_id, + peer_client, + block_and_blobs, + timestamp_now(), + )) + } + pub fn on_unaggregated_attestation_gossip( &mut self, message_id: MessageId, @@ -369,6 +425,19 @@ impl Processor { )) } + pub fn on_bls_to_execution_change_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + bls_to_execution_change: Box, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_bls_to_execution_change( + message_id, + peer_id, + bls_to_execution_change, + )) + } + pub fn on_light_client_finality_update_gossip( &mut self, message_id: MessageId, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 230c883a93..0548b0906b 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -47,13 +47,13 @@ use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; -use 
slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use std::boxed::Box; use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a @@ -93,6 +93,14 @@ pub enum SyncMessage { seen_timestamp: Duration, }, + /// A blob has been received from RPC. + RpcBlob { + peer_id: PeerId, + request_id: RequestId, + blob_sidecar: Option>>, + seen_timestamp: Duration, + }, + /// A block with an unknown parent has been received. UnknownBlock(PeerId, Arc>, Hash256), @@ -584,6 +592,9 @@ impl SyncManager { .block_lookups .parent_chain_processed(chain_hash, result, &mut self.network), }, + SyncMessage::RpcBlob { .. 
} => { + warn!(self.log, "Unexpected blob message received"); + } } } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 1d67ecdccc..cc4eacde89 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -13,12 +13,13 @@ parking_lot = "0.12.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" rayon = "1.5.0" serde = "1.0.116" serde_derive = "1.0.116" store = { path = "../store" } bitvec = "1" +rand = "0.8.5" [dev-dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs new file mode 100644 index 0000000000..c73666e145 --- /dev/null +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -0,0 +1,147 @@ +use state_processing::SigVerifiedOp; +use std::collections::{hash_map::Entry, HashMap, HashSet}; +use std::sync::Arc; +use types::{ + AbstractExecPayload, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock, + SignedBlsToExecutionChange, +}; + +/// Indicates if a `BlsToExecutionChange` was received before or after the +/// Capella fork. This is used to know which messages we should broadcast at the +/// Capella fork epoch. +#[derive(Copy, Clone)] +pub enum ReceivedPreCapella { + Yes, + No, +} + +/// Pool of BLS to execution changes that maintains a LIFO queue and an index by validator. +/// +/// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork, +/// and is less-relevant after that. +#[derive(Debug, Default)] +pub struct BlsToExecutionChanges { + /// Map from validator index to BLS to execution change. + by_validator_index: HashMap>>, + /// Last-in-first-out (LIFO) queue of verified messages. 
+ queue: Vec>>, + /// Contains a set of validator indices which need to have their changes + /// broadcast at the Capella epoch. + received_pre_capella_indices: HashSet, +} + +impl BlsToExecutionChanges { + pub fn existing_change_equals( + &self, + address_change: &SignedBlsToExecutionChange, + ) -> Option { + self.by_validator_index + .get(&address_change.message.validator_index) + .map(|existing| existing.as_inner() == address_change) + } + + pub fn insert( + &mut self, + verified_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, + ) -> bool { + let validator_index = verified_change.as_inner().message.validator_index; + // Wrap in an `Arc` once on insert. + let verified_change = Arc::new(verified_change); + match self.by_validator_index.entry(validator_index) { + Entry::Vacant(entry) => { + self.queue.push(verified_change.clone()); + entry.insert(verified_change); + if matches!(received_pre_capella, ReceivedPreCapella::Yes) { + self.received_pre_capella_indices.insert(validator_index); + } + true + } + Entry::Occupied(_) => false, + } + } + + /// FIFO ordering, used for persistence to disk. + pub fn iter_fifo( + &self, + ) -> impl Iterator>> { + self.queue.iter() + } + + /// LIFO ordering, used for block packing. + pub fn iter_lifo( + &self, + ) -> impl Iterator>> { + self.queue.iter().rev() + } + + /// Returns only those which are flagged for broadcasting at the Capella + /// fork. Uses FIFO ordering, although we expect this list to be shuffled by + /// the caller. + pub fn iter_received_pre_capella( + &self, + ) -> impl Iterator>> { + self.queue.iter().filter(|address_change| { + self.received_pre_capella_indices + .contains(&address_change.as_inner().message.validator_index) + }) + } + + /// Returns the set of indices which should have their address changes + /// broadcast at the Capella fork. 
+ pub fn iter_pre_capella_indices(&self) -> impl Iterator { + self.received_pre_capella_indices.iter() + } + + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. + /// + /// The block check is necessary to avoid pruning too eagerly and losing the ability to include + /// address changes during re-orgs. This isn't *perfect* so some address changes could + /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished + /// due to the gossip duplicate rules. + pub fn prune>( + &mut self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + spec: &ChainSpec, + ) { + let mut validator_indices_pruned = vec![]; + + self.queue.retain(|address_change| { + let validator_index = address_change.as_inner().message.validator_index; + head_state + .validators() + .get(validator_index as usize) + .map_or(true, |validator| { + let prune = validator.has_eth1_withdrawal_credential(spec) + && head_block + .message() + .body() + .bls_to_execution_changes() + .map_or(true, |recent_changes| { + !recent_changes + .iter() + .any(|c| c.message.validator_index == validator_index) + }); + if prune { + validator_indices_pruned.push(validator_index); + } + !prune + }) + }); + + for validator_index in validator_indices_pruned { + self.by_validator_index.remove(&validator_index); + } + } + + /// Removes `broadcasted` validators from the set of validators that should + /// have their BLS changes broadcast at the Capella fork boundary. 
+ pub fn register_indices_broadcasted_at_capella(&mut self, broadcasted: &HashSet) { + self.received_pre_capella_indices = self + .received_pre_capella_indices + .difference(broadcasted) + .copied() + .collect(); + } +} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index bb370ed5b2..c5be4f0a61 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -2,25 +2,31 @@ mod attestation; mod attestation_id; mod attestation_storage; mod attester_slashing; +mod bls_to_execution_changes; mod max_cover; mod metrics; mod persistence; mod reward_cache; mod sync_aggregate_id; +pub use crate::bls_to_execution_changes::ReceivedPreCapella; pub use attestation::{earliest_attestation_validators, AttMaxCover}; pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ - PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, + PersistedOperationPoolV15, PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; use crate::attestation_storage::{AttestationMap, CheckpointKey}; +use crate::bls_to_execution_changes::BlsToExecutionChanges; use crate::sync_aggregate_id::SyncAggregateId; use attester_slashing::AttesterSlashingMaxCover; use max_cover::maximum_cover; use parking_lot::{RwLock, RwLockWriteGuard}; +use rand::seq::SliceRandom; +use rand::thread_rng; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_exit, VerifySignatures, @@ -30,8 +36,9 @@ use std::collections::{hash_map::Entry, HashMap, HashSet}; use std::marker::PhantomData; use std::ptr; use types::{ - sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttestationData, - AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, 
ProposerSlashing, + sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, AbstractExecPayload, + Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, + Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, }; @@ -49,6 +56,8 @@ pub struct OperationPool { proposer_slashings: RwLock>>, /// Map from exiting validator to their exit data. voluntary_exits: RwLock>>, + /// Map from credential changing validator to their position in the queue. + bls_to_execution_changes: RwLock>, /// Reward cache for accelerating attestation packing. reward_cache: RwLock, _phantom: PhantomData, @@ -429,7 +438,7 @@ impl OperationPool { pub fn prune_proposer_slashings(&self, head_state: &BeaconState) { prune_validator_hash_map( &mut self.proposer_slashings.write(), - |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -504,18 +513,121 @@ impl OperationPool { // // We choose simplicity over the gain of pruning more exits since they are small and // should not be seen frequently. - |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } + /// Check if an address change equal to `address_change` is already in the pool. + /// + /// Return `None` if no address change for the validator index exists in the pool. + pub fn bls_to_execution_change_in_pool_equals( + &self, + address_change: &SignedBlsToExecutionChange, + ) -> Option { + self.bls_to_execution_changes + .read() + .existing_change_equals(address_change) + } + + /// Insert a BLS to execution change into the pool, *only if* no prior change is known. + /// + /// Return `true` if the change was inserted. 
+ pub fn insert_bls_to_execution_change( + &self, + verified_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, + ) -> bool { + self.bls_to_execution_changes + .write() + .insert(verified_change, received_pre_capella) + } + + /// Get a list of execution changes for inclusion in a block. + /// + /// They're in random `HashMap` order, which isn't exactly fair, but isn't unfair either. + pub fn get_bls_to_execution_changes( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + filter_limit_operations( + self.bls_to_execution_changes.read().iter_lifo(), + |address_change| { + address_change.signature_is_still_valid(&state.fork()) + && state + .get_validator(address_change.as_inner().message.validator_index as usize) + .map_or(false, |validator| { + !validator.has_eth1_withdrawal_credential(spec) + }) + }, + |address_change| address_change.as_inner().clone(), + T::MaxBlsToExecutionChanges::to_usize(), + ) + } + + /// Get a list of execution changes to be broadcast at the Capella fork. + /// + /// The list that is returned will be shuffled to help provide a fair + /// broadcast of messages. + pub fn get_bls_to_execution_changes_received_pre_capella( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + let mut changes = filter_limit_operations( + self.bls_to_execution_changes + .read() + .iter_received_pre_capella(), + |address_change| { + address_change.signature_is_still_valid(&state.fork()) + && state + .get_validator(address_change.as_inner().message.validator_index as usize) + .map_or(false, |validator| { + !validator.has_eth1_withdrawal_credential(spec) + }) + }, + |address_change| address_change.as_inner().clone(), + usize::max_value(), + ); + changes.shuffle(&mut thread_rng()); + changes + } + + /// Removes `broadcasted` validators from the set of validators that should + /// have their BLS changes broadcast at the Capella fork boundary. 
+ pub fn register_indices_broadcasted_at_capella(&self, broadcasted: &HashSet) { + self.bls_to_execution_changes + .write() + .register_indices_broadcasted_at_capella(broadcasted); + } + + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. + pub fn prune_bls_to_execution_changes>( + &self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + spec: &ChainSpec, + ) { + self.bls_to_execution_changes + .write() + .prune(head_block, head_state, spec) + } + /// Prune all types of transactions given the latest head state and head fork. - pub fn prune_all(&self, head_state: &BeaconState, current_epoch: Epoch) { + pub fn prune_all>( + &self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + current_epoch: Epoch, + spec: &ChainSpec, + ) { self.prune_attestations(current_epoch); self.prune_sync_contributions(head_state.slot()); self.prune_proposer_slashings(head_state); self.prune_attester_slashings(head_state); self.prune_voluntary_exits(head_state); + self.prune_bls_to_execution_changes(head_block, head_state, spec); } /// Total number of voluntary exits in the pool. @@ -581,6 +693,17 @@ impl OperationPool { .map(|(_, exit)| exit.as_inner().clone()) .collect() } + + /// Returns all known `SignedBlsToExecutionChange` objects. + /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_bls_to_execution_changes(&self) -> Vec { + self.bls_to_execution_changes + .read() + .iter_fifo() + .map(|address_change| address_change.as_inner().clone()) + .collect() + } } /// Filter up to a maximum number of operations out of an iterator. 
@@ -614,7 +737,7 @@ fn prune_validator_hash_map( prune_if: F, head_state: &BeaconState, ) where - F: Fn(&Validator) -> bool, + F: Fn(u64, &Validator) -> bool, T: VerifyOperation, { map.retain(|&validator_index, op| { @@ -622,7 +745,7 @@ fn prune_validator_hash_map( && head_state .validators() .get(validator_index as usize) - .map_or(true, |validator| !prune_if(validator)) + .map_or(true, |validator| !prune_if(validator_index, validator)) }); } @@ -1665,7 +1788,7 @@ mod release_tests { fn cross_fork_harness() -> (BeaconChainHarness>, ChainSpec) { - let mut spec = test_spec::(); + let mut spec = E::default_spec(); // Give some room to sign surround slashings. spec.altair_fork_epoch = Some(Epoch::new(3)); diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index ed15369df7..35d2b4ce7e 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,5 +1,6 @@ use crate::attestation_id::AttestationId; use crate::attestation_storage::AttestationMap; +use crate::bls_to_execution_changes::{BlsToExecutionChanges, ReceivedPreCapella}; use crate::sync_aggregate_id::SyncAggregateId; use crate::OpPoolError; use crate::OperationPool; @@ -8,6 +9,8 @@ use parking_lot::RwLock; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::SigVerifiedOp; +use std::collections::HashSet; +use std::mem; use store::{DBColumn, Error as StoreError, StoreItem}; use types::*; @@ -18,7 +21,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { #[superstruct(only(V5))] pub attestations_v5: Vec<(AttestationId, Vec>)>, /// Attestations and their attesting indices. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14, V15))] pub attestations: Vec<(Attestation, Vec)>, /// Mapping from sync contribution ID to sync contributions and aggregate. 
pub sync_contributions: PersistedSyncContributions, @@ -40,20 +43,27 @@ pub struct PersistedOperationPool { #[superstruct(only(V5))] pub attester_slashings_v5: Vec<(AttesterSlashing, ForkVersion)>, /// Attester slashings. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14, V15))] pub attester_slashings: Vec, T>>, /// [DEPRECATED] Proposer slashings. #[superstruct(only(V5))] pub proposer_slashings_v5: Vec, /// Proposer slashings with fork information. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14, V15))] pub proposer_slashings: Vec>, /// [DEPRECATED] Voluntary exits. #[superstruct(only(V5))] pub voluntary_exits_v5: Vec, /// Voluntary exits with fork information. - #[superstruct(only(V12))] + #[superstruct(only(V12, V14, V15))] pub voluntary_exits: Vec>, + /// BLS to Execution Changes + #[superstruct(only(V14, V15))] + pub bls_to_execution_changes: Vec>, + /// Validator indices with BLS to Execution Changes to be broadcast at the + /// Capella fork. + #[superstruct(only(V15))] + pub capella_bls_change_broadcast_indices: Vec, } impl PersistedOperationPool { @@ -99,17 +109,33 @@ impl PersistedOperationPool { .map(|(_, exit)| exit.clone()) .collect(); - PersistedOperationPool::V12(PersistedOperationPoolV12 { + let bls_to_execution_changes = operation_pool + .bls_to_execution_changes + .read() + .iter_fifo() + .map(|bls_to_execution_change| (**bls_to_execution_change).clone()) + .collect(); + + let capella_bls_change_broadcast_indices = operation_pool + .bls_to_execution_changes + .read() + .iter_pre_capella_indices() + .copied() + .collect(); + + PersistedOperationPool::V15(PersistedOperationPoolV15 { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, }) } /// Reconstruct an `OperationPool`. 
- pub fn into_operation_pool(self) -> Result, OpPoolError> { + pub fn into_operation_pool(mut self) -> Result, OpPoolError> { let attester_slashings = RwLock::new(self.attester_slashings()?.iter().cloned().collect()); let proposer_slashings = RwLock::new( self.proposer_slashings()? @@ -127,21 +153,46 @@ impl PersistedOperationPool { ); let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect()); let attestations = match self { - PersistedOperationPool::V5(_) => return Err(OpPoolError::IncorrectOpPoolVariant), - PersistedOperationPool::V12(pool) => { + PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { + return Err(OpPoolError::IncorrectOpPoolVariant) + } + PersistedOperationPool::V14(_) | PersistedOperationPool::V15(_) => { let mut map = AttestationMap::default(); - for (att, attesting_indices) in pool.attestations { + for (att, attesting_indices) in self.attestations()?.clone() { map.insert(att, attesting_indices); } RwLock::new(map) } }; + let mut bls_to_execution_changes = BlsToExecutionChanges::default(); + if let Ok(persisted_changes) = self.bls_to_execution_changes_mut() { + let persisted_changes = mem::take(persisted_changes); + + let broadcast_indices = + if let Ok(indices) = self.capella_bls_change_broadcast_indices_mut() { + mem::take(indices).into_iter().collect() + } else { + HashSet::new() + }; + + for bls_to_execution_change in persisted_changes { + let received_pre_capella = if broadcast_indices + .contains(&bls_to_execution_change.as_inner().message.validator_index) + { + ReceivedPreCapella::Yes + } else { + ReceivedPreCapella::No + }; + bls_to_execution_changes.insert(bls_to_execution_change, received_pre_capella); + } + } let op_pool = OperationPool { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, + bls_to_execution_changes: RwLock::new(bls_to_execution_changes), reward_cache: Default::default(), _phantom: Default::default(), }; @@ -163,6 +214,48 @@ impl 
StoreItem for PersistedOperationPoolV5 { } } +impl StoreItem for PersistedOperationPoolV12 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV12::from_ssz_bytes(bytes).map_err(Into::into) + } +} + +impl StoreItem for PersistedOperationPoolV14 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV14::from_ssz_bytes(bytes).map_err(Into::into) + } +} + +impl StoreItem for PersistedOperationPoolV15 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV15::from_ssz_bytes(bytes).map_err(Into::into) + } +} + /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { @@ -175,8 +268,8 @@ impl StoreItem for PersistedOperationPool { fn from_store_bytes(bytes: &[u8]) -> Result { // Default deserialization to the latest variant. 
- PersistedOperationPoolV12::from_ssz_bytes(bytes) - .map(Self::V12) + PersistedOperationPoolV15::from_ssz_bytes(bytes) + .map(Self::V15) .map_err(Into::into) } } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 20ae37b3b1..7ec2af9f9d 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -14,7 +14,7 @@ leveldb = { version = "0.8.6", default-features = false } parking_lot = "0.12.0" itertools = "0.10.0" eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } slog = "2.5.2" @@ -25,4 +25,4 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lru = "0.7.1" sloggers = { version = "2.1.1", features = ["json"] } directory = { path = "../../common/directory" } -strum = { version = "0.24.0", features = ["derive"] } +strum = { version = "0.24.0", features = ["derive"] } \ No newline at end of file diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 8c64d4bcc0..73edfbb074 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -18,6 +18,7 @@ use self::UpdatePattern::*; use crate::*; use ssz::{Decode, Encode}; use typenum::Unsigned; +use types::historical_summary::HistoricalSummary; /// Description of how a `BeaconState` field is updated during state processing. /// @@ -26,7 +27,18 @@ use typenum::Unsigned; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UpdatePattern { /// The value is updated once per `n` slots. - OncePerNSlots { n: u64 }, + OncePerNSlots { + n: u64, + /// The slot at which the field begins to accumulate values. + /// + /// The field should not be read or written until `activation_slot` is reached, and the + /// activation slot should act as an offset when converting slots to vector indices. 
+ activation_slot: Option, + /// The slot at which the field ceases to accumulate values. + /// + /// If this is `None` then the field is continually updated. + deactivation_slot: Option, + }, /// The value is updated once per epoch, for the epoch `current_epoch - lag`. OncePerEpoch { lag: u64 }, } @@ -98,12 +110,30 @@ pub trait Field: Copy { fn start_and_end_vindex(current_slot: Slot, spec: &ChainSpec) -> (usize, usize) { // We take advantage of saturating subtraction on slots and epochs match Self::update_pattern(spec) { - OncePerNSlots { n } => { + OncePerNSlots { + n, + activation_slot, + deactivation_slot, + } => { // Per-slot changes exclude the index for the current slot, because // it won't be set until the slot completes (think of `state_roots`, `block_roots`). // This also works for the `historical_roots` because at the `n`th slot, the 0th // entry of the list is created, and before that the list is empty. - let end_vindex = current_slot / n; + // + // To account for the switch from historical roots to historical summaries at + // Capella we also modify the current slot by the activation and deactivation slots. + // The activation slot acts as an offset (subtraction) while the deactivation slot + // acts as a clamp (min). + let slot_with_clamp = deactivation_slot.map_or(current_slot, |deactivation_slot| { + std::cmp::min(current_slot, deactivation_slot) + }); + let slot_with_clamp_and_offset = if let Some(activation_slot) = activation_slot { + slot_with_clamp - activation_slot + } else { + // Return (0, 0) to indicate that the field should not be read/written. 
+ return (0, 0); + }; + let end_vindex = slot_with_clamp_and_offset / n; let start_vindex = end_vindex - Self::Length::to_u64(); (start_vindex.as_usize(), end_vindex.as_usize()) } @@ -295,7 +325,11 @@ field!( Hash256, T::SlotsPerHistoricalRoot, DBColumn::BeaconBlockRoots, - |_| OncePerNSlots { n: 1 }, + |_| OncePerNSlots { + n: 1, + activation_slot: Some(Slot::new(0)), + deactivation_slot: None + }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index) ); @@ -305,7 +339,11 @@ field!( Hash256, T::SlotsPerHistoricalRoot, DBColumn::BeaconStateRoots, - |_| OncePerNSlots { n: 1 }, + |_| OncePerNSlots { + n: 1, + activation_slot: Some(Slot::new(0)), + deactivation_slot: None, + }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index) ); @@ -315,8 +353,12 @@ field!( Hash256, T::HistoricalRootsLimit, DBColumn::BeaconHistoricalRoots, - |_| OncePerNSlots { - n: T::SlotsPerHistoricalRoot::to_u64() + |spec: &ChainSpec| OncePerNSlots { + n: T::SlotsPerHistoricalRoot::to_u64(), + activation_slot: Some(Slot::new(0)), + deactivation_slot: spec + .capella_fork_epoch + .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), }, |state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index) ); @@ -331,6 +373,27 @@ field!( |state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index) ); +field!( + HistoricalSummaries, + VariableLengthField, + HistoricalSummary, + T::HistoricalRootsLimit, + DBColumn::BeaconHistoricalSummaries, + |spec: &ChainSpec| OncePerNSlots { + n: T::SlotsPerHistoricalRoot::to_u64(), + activation_slot: spec + .capella_fork_epoch + .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), + deactivation_slot: None, + }, + |state: &BeaconState<_>, index, _| safe_modulo_index( + state + .historical_summaries() + .map_err(|_| ChunkError::InvalidFork)?, + index + ) +); + pub fn store_updated_vector, E: EthSpec, S: KeyValueStore>( field: F, store: &S, @@ -679,6 
+742,7 @@ pub enum ChunkError { end_vindex: usize, length: usize, }, + InvalidFork, } #[cfg(test)] diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 027b8152ee..53d99f75eb 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -7,6 +7,7 @@ use types::{EthSpec, MinimalEthSpec}; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; +pub const DEFAULT_BLOB_CACHE_SIZE: usize = 5; /// Database configuration parameters. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -17,6 +18,8 @@ pub struct StoreConfig { pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: usize, + /// Maximum number of blobs to store in the in-memory blob cache. + pub blob_cache_size: usize, /// Whether to compact the database on initialization. pub compact_on_init: bool, /// Whether to compact the database during database pruning. 
@@ -43,6 +46,7 @@ impl Default for StoreConfig { slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, slots_per_restore_point_set_explicitly: false, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, + blob_cache_size: DEFAULT_BLOB_CACHE_SIZE, compact_on_init: false, compact_on_prune: true, prune_payloads: true, diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 30ee66074f..fcc40706b3 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -3,7 +3,7 @@ use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, Hash256, Slot}; +use types::{BeaconStateError, Hash256, InconsistentFork, Slot}; pub type Result = std::result::Result; @@ -42,9 +42,9 @@ pub enum Error { }, BlockReplayError(BlockReplayError), AddPayloadLogicError, - ResyncRequiredForExecutionPayloadSeparation, SlotClockUnavailableForMigration, - V9MigrationFailure(Hash256), + UnableToDowngrade, + InconsistentFork(InconsistentFork), } pub trait HandleUnavailable { @@ -103,6 +103,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: InconsistentFork) -> Error { + Error::InconsistentFork(e) + } +} + #[derive(Debug)] pub struct DBError { pub message: String, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6028d0ddcf..965bbb3bd4 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1,5 +1,5 @@ use crate::chunked_vector::{ - store_updated_vector, BlockRoots, HistoricalRoots, RandaoMixes, StateRoots, + store_updated_vector, BlockRoots, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRoots, }; use crate::config::{ OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT, @@ -60,6 +60,8 @@ pub struct HotColdDB, Cold: ItemStore> { /// /// The hot database also contains all blocks. 
pub hot_db: Hot, + /// LRU cache of deserialized blobs. Updated whenever a blob is loaded. + blob_cache: Mutex>>, /// LRU cache of deserialized blocks. Updated whenever a block is loaded. block_cache: Mutex>>, /// Chain spec. @@ -129,6 +131,7 @@ impl HotColdDB, MemoryStore> { cold_db: MemoryStore::open(), hot_db: MemoryStore::open(), block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)), config, spec, log, @@ -162,6 +165,7 @@ impl HotColdDB, LevelDB> { cold_db: LevelDB::open(cold_path)?, hot_db: LevelDB::open(hot_path)?, block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)), config, spec, log, @@ -354,7 +358,8 @@ impl, Cold: ItemStore> HotColdDB } else if !self.config.prune_payloads { // If payload pruning is disabled there's a chance we may have the payload of // this finalized block. Attempt to load it but don't error in case it's missing. - if let Some(payload) = self.get_execution_payload(block_root)? { + let fork_name = blinded_block.fork_name(&self.spec)?; + if let Some(payload) = self.get_execution_payload(block_root, fork_name)? { DatabaseBlock::Full( blinded_block .try_into_full_block(Some(payload)) @@ -393,8 +398,9 @@ impl, Cold: ItemStore> HotColdDB blinded_block: SignedBeaconBlock>, ) -> Result, Error> { if blinded_block.message().execution_payload().is_ok() { + let fork_name = blinded_block.fork_name(&self.spec)?; let execution_payload = self - .get_execution_payload(block_root)? + .get_execution_payload(block_root, fork_name)? .ok_or(HotColdDBError::MissingExecutionPayload(*block_root))?; blinded_block.try_into_full_block(Some(execution_payload)) } else { @@ -413,7 +419,7 @@ impl, Cold: ItemStore> HotColdDB } /// Fetch a block from the store, ignoring which fork variant it *should* be for. 
- pub fn get_block_any_variant>( + pub fn get_block_any_variant>( &self, block_root: &Hash256, ) -> Result>, Error> { @@ -424,7 +430,7 @@ impl, Cold: ItemStore> HotColdDB /// /// This is useful for e.g. ignoring the slot-indicated fork to forcefully load a block as if it /// were for a different fork. - pub fn get_block_with>( + pub fn get_block_with>( &self, block_root: &Hash256, decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, @@ -437,9 +443,26 @@ impl, Cold: ItemStore> HotColdDB } /// Load the execution payload for a block from disk. + /// This method deserializes with the proper fork. pub fn get_execution_payload( &self, block_root: &Hash256, + fork_name: ForkName, + ) -> Result>, Error> { + let column = ExecutionPayload::::db_column().into(); + let key = block_root.as_bytes(); + + match self.hot_db.get_bytes(column, key)? { + Some(bytes) => Ok(Some(ExecutionPayload::from_ssz_bytes(&bytes, fork_name)?)), + None => Ok(None), + } + } + + /// Load the execution payload for a block from disk. + /// DANGEROUS: this method just guesses the fork. + pub fn get_execution_payload_dangerous_fork_agnostic( + &self, + block_root: &Hash256, ) -> Result>, Error> { self.get_item(block_root) } @@ -465,6 +488,41 @@ impl, Cold: ItemStore> HotColdDB .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes()) } + pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobsSidecar) -> Result<(), Error> { + self.hot_db.put_bytes( + DBColumn::BeaconBlob.into(), + block_root.as_bytes(), + &blobs.as_ssz_bytes(), + )?; + self.blob_cache.lock().push(*block_root, blobs); + Ok(()) + } + + pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { + if let Some(blobs) = self.blob_cache.lock().get(block_root) { + Ok(Some(blobs.clone())) + } else if let Some(bytes) = self + .hot_db + .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? 
+ { + let ret = BlobsSidecar::from_ssz_bytes(&bytes)?; + self.blob_cache.lock().put(*block_root, ret.clone()); + Ok(Some(ret)) + } else { + Ok(None) + } + } + + pub fn blobs_as_kv_store_ops( + &self, + key: &Hash256, + blobs: &BlobsSidecar, + ops: &mut Vec, + ) { + let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_bytes()); + ops.push(KeyValueStoreOp::PutKeyValue(db_key, blobs.as_ssz_bytes())); + } + pub fn put_state_summary( &self, state_root: &Hash256, @@ -692,6 +750,10 @@ impl, Cold: ItemStore> HotColdDB self.store_hot_state(&state_root, state, &mut key_value_batch)?; } + StoreOp::PutBlobs(block_root, blobs) => { + self.blobs_as_kv_store_ops(&block_root, &blobs, &mut key_value_batch); + } + StoreOp::PutStateSummary(state_root, summary) => { key_value_batch.push(summary.as_kv_store_op(state_root)); } @@ -740,6 +802,7 @@ impl, Cold: ItemStore> HotColdDB // Update the block cache whilst holding a lock, to ensure that the cache updates atomically // with the database. let mut guard = self.block_cache.lock(); + let mut guard_blob = self.blob_cache.lock(); for op in &batch { match op { @@ -747,6 +810,10 @@ impl, Cold: ItemStore> HotColdDB guard.put(*block_root, (**block).clone()); } + StoreOp::PutBlobs(block_root, blobs) => { + guard_blob.put(*block_root, (**blobs).clone()); + } + StoreOp::PutState(_, _) => (), StoreOp::PutStateSummary(_, _) => (), @@ -887,6 +954,7 @@ impl, Cold: ItemStore> HotColdDB store_updated_vector(StateRoots, db, state, &self.spec, ops)?; store_updated_vector(HistoricalRoots, db, state, &self.spec, ops)?; store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?; + store_updated_vector(HistoricalSummaries, db, state, &self.spec, ops)?; // 3. Store restore point. 
let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point; @@ -941,6 +1009,7 @@ impl, Cold: ItemStore> HotColdDB partial_state.load_state_roots(&self.cold_db, &self.spec)?; partial_state.load_historical_roots(&self.cold_db, &self.spec)?; partial_state.load_randao_mixes(&self.cold_db, &self.spec)?; + partial_state.load_historical_summaries(&self.cold_db, &self.spec)?; partial_state.try_into() } @@ -1107,6 +1176,11 @@ impl, Cold: ItemStore> HotColdDB &self.spec } + /// Get a reference to the `Logger` used by the database. + pub fn logger(&self) -> &Logger { + &self.log + } + /// Fetch a copy of the current split slot from memory. pub fn get_split_slot(&self) -> Slot { self.split.read_recursive().slot diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index ddb9a44628..ad68d1fba0 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -1,7 +1,35 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; -use types::{EthSpec, ExecutionPayload}; +use types::{ + EthSpec, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, + ExecutionPayloadMerge, +}; +macro_rules! impl_store_item { + ($ty_name:ident) => { + impl StoreItem for $ty_name { + fn db_column() -> DBColumn { + DBColumn::ExecPayload + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } + } + }; +} +impl_store_item!(ExecutionPayloadMerge); +impl_store_item!(ExecutionPayloadCapella); +impl_store_item!(ExecutionPayloadEip4844); + +/// This fork-agnostic implementation should be only used for writing. +/// +/// It is very inefficient at reading, and decoding the desired fork-specific variant is recommended +/// instead. 
impl StoreItem for ExecutionPayload { fn db_column() -> DBColumn { DBColumn::ExecPayload @@ -12,6 +40,13 @@ impl StoreItem for ExecutionPayload { } fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) + ExecutionPayloadEip4844::from_ssz_bytes(bytes) + .map(Self::Eip4844) + .or_else(|_| { + ExecutionPayloadCapella::from_ssz_bytes(bytes) + .map(Self::Capella) + .or_else(|_| ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge)) + }) + .map_err(Into::into) } } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 9d15dd4043..47ddd8fc7e 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -155,6 +155,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati pub enum StoreOp<'a, E: EthSpec> { PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), + PutBlobs(Hash256, Arc>), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), DeleteStateTemporaryFlag(Hash256), @@ -172,6 +173,8 @@ pub enum DBColumn { BeaconMeta, #[strum(serialize = "blk")] BeaconBlock, + #[strum(serialize = "blb")] + BeaconBlob, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). #[strum(serialize = "ste")] BeaconState, @@ -212,6 +215,8 @@ pub enum DBColumn { /// For Optimistically Imported Merge Transition Blocks #[strum(serialize = "otb")] OptimisticTransitionBlock, + #[strum(serialize = "bhs")] + BeaconHistoricalSummaries, } /// A block from the database, which might have an execution payload or not. 
diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 5cb3f12200..729b36ff2e 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(15); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 010796afd5..55697bd316 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -1,12 +1,13 @@ use crate::chunked_vector::{ - load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes, - StateRoots, + load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, + HistoricalSummaries, RandaoMixes, StateRoots, }; use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp}; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::convert::TryInto; use std::sync::Arc; +use types::historical_summary::HistoricalSummary; use types::superstruct; use types::*; @@ -14,7 +15,7 @@ use types::*; /// /// Utilises lazy-loading from separate storage for its vector fields. 
#[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -66,9 +67,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub current_epoch_participation: VariableList, // Finality @@ -78,23 +79,46 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge))] - pub latest_execution_payload_header: ExecutionPayloadHeader, + #[superstruct( + only(Merge), + partial_getter(rename = "latest_execution_payload_header_merge") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct( + only(Capella), + partial_getter(rename = "latest_execution_payload_header_capella") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct( + only(Eip4844), + partial_getter(rename = "latest_execution_payload_header_eip4844") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, + + // Capella + #[superstruct(only(Capella, Eip4844))] + pub next_withdrawal_index: u64, + #[superstruct(only(Capella, Eip4844))] + pub next_withdrawal_validator_index: u64, + + #[ssz(skip_serializing, skip_deserializing)] + 
#[superstruct(only(Capella, Eip4844))] + pub historical_summaries: Option>, } /// Implement the conversion function from BeaconState -> PartialBeaconState. macro_rules! impl_from_state_forgetful { - ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_fields_opt:ident),*]) => { PartialBeaconState::$variant_name($struct_name { // Versioning genesis_time: $s.genesis_time, @@ -135,6 +159,11 @@ macro_rules! impl_from_state_forgetful { // Variant-specific fields $( $extra_fields: $s.$extra_fields.clone() + ),*, + + // Variant-specific optional + $( + $extra_fields_opt: None ),* }) } @@ -149,7 +178,8 @@ impl PartialBeaconState { outer, Base, PartialBeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations] + [previous_epoch_attestations, current_epoch_attestations], + [] ), BeaconState::Altair(s) => impl_from_state_forgetful!( s, @@ -162,7 +192,8 @@ impl PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores - ] + ], + [] ), BeaconState::Merge(s) => impl_from_state_forgetful!( s, @@ -176,7 +207,42 @@ impl PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header - ] + ], + [] + ), + BeaconState::Capella(s) => impl_from_state_forgetful!( + s, + outer, + Capella, + PartialBeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index + ], + [historical_summaries] + ), + BeaconState::Eip4844(s) => impl_from_state_forgetful!( + s, + outer, + Eip4844, + PartialBeaconStateEip4844, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + 
next_withdrawal_index, + next_withdrawal_validator_index + ], + [historical_summaries] ), } } @@ -252,6 +318,23 @@ impl PartialBeaconState { Ok(()) } + pub fn load_historical_summaries>( + &mut self, + store: &S, + spec: &ChainSpec, + ) -> Result<(), Error> { + let slot = self.slot(); + if let Ok(historical_summaries) = self.historical_summaries_mut() { + if historical_summaries.is_none() { + *historical_summaries = + Some(load_variable_list_from_db::( + store, slot, spec, + )?); + } + } + Ok(()) + } + pub fn load_randao_mixes>( &mut self, store: &S, @@ -275,7 +358,7 @@ impl PartialBeaconState { /// Implement the conversion from PartialBeaconState -> BeaconState. macro_rules! impl_try_into_beacon_state { - ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_opt_fields:ident),*]) => { BeaconState::$variant_name($struct_name { // Versioning genesis_time: $inner.genesis_time, @@ -320,6 +403,11 @@ macro_rules! impl_try_into_beacon_state { // Variant-specific fields $( $extra_fields: $inner.$extra_fields + ),*, + + // Variant-specific optional fields + $( + $extra_opt_fields: unpack_field($inner.$extra_opt_fields)? 
),* }) } @@ -338,7 +426,8 @@ impl TryInto> for PartialBeaconState { inner, Base, BeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations] + [previous_epoch_attestations, current_epoch_attestations], + [] ), PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!( inner, @@ -350,7 +439,8 @@ impl TryInto> for PartialBeaconState { current_sync_committee, next_sync_committee, inactivity_scores - ] + ], + [] ), PartialBeaconState::Merge(inner) => impl_try_into_beacon_state!( inner, @@ -363,7 +453,40 @@ impl TryInto> for PartialBeaconState { next_sync_committee, inactivity_scores, latest_execution_payload_header - ] + ], + [] + ), + PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( + inner, + Capella, + BeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index + ], + [historical_summaries] + ), + PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!( + inner, + Eip4844, + BeaconStateEip4844, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index + ], + [historical_summaries] ), }; Ok(state) diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 05cb0b69cf..2848180970 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -141,7 +141,7 @@ curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indic "attestation_head_hit_percentage": 100, "attestation_target_hits": 5, "attestation_target_misses": 5, - "attestation_target_hit_percentage": 50 + "attestation_target_hit_percentage": 50 } } } diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 0982e10ab9..7219a0f6b6 
100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -26,10 +26,16 @@ validator client or the slasher**. | v3.1.0 | Sep 2022 | v12 | yes | | v3.2.0 | Oct 2022 | v12 | yes | | v3.3.0 | Nov 2022 | v13 | yes | +| v3.4.0 | Jan 2023 | v13 | yes | +| v3.5.0 | Feb 2023 | v15 | yes before Capella | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). +> **Note**: Support for old schemas is gradually removed from newer versions of Lighthouse. We +usually do this after a major version has been out for a while and everyone has upgraded. In this +case the above table will continue to record the deprecated schema changes for reference. + ## How to apply a database downgrade To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters. diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 294f8ec8a3..eca086d838 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -21,7 +21,7 @@ bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } sensitive_url = { path = "../../common/sensitive_url" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 653c6c0bcc..03f96f34e2 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -14,9 +14,8 @@ pub mod lighthouse_vc; pub mod mixin; pub mod types; -use self::mixin::{RequestAccept, ResponseForkName, ResponseOptional}; +use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; -use ::types::map_fork_name_with; use futures::Stream; use futures_util::StreamExt; use lighthouse_network::PeerId; @@ -611,7 +610,7 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. 
- pub async fn post_beacon_blocks>( + pub async fn post_beacon_blocks>( &self, block: &SignedBeaconBlock, ) -> Result<(), Error> { @@ -631,7 +630,7 @@ impl BeaconNodeHttpClient { /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blinded_blocks>( + pub async fn post_beacon_blinded_blocks>( &self, block: &SignedBeaconBlock, ) -> Result<(), Error> { @@ -683,35 +682,7 @@ impl BeaconNodeHttpClient { None => return Ok(None), }; - // If present, use the fork provided in the headers to decode the block. Gracefully handle - // missing and malformed fork names by falling back to regular deserialisation. - let (block, version, execution_optimistic) = match response.fork_name_from_header() { - Ok(Some(fork_name)) => { - let (data, (version, execution_optimistic)) = - map_fork_name_with!(fork_name, SignedBeaconBlock, { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, (version, execution_optimistic)) - }); - (data, version, execution_optimistic) - } - Ok(None) | Err(_) => { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, version, execution_optimistic) - } - }; - Ok(Some(ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data: block, - })) + Ok(Some(response.json().await?)) } /// `GET v1/beacon/blinded_blocks/{block_id}` @@ -728,35 +699,7 @@ impl BeaconNodeHttpClient { None => return Ok(None), }; - // If present, use the fork provided in the headers to decode the block. Gracefully handle - // missing and malformed fork names by falling back to regular deserialisation. 
- let (block, version, execution_optimistic) = match response.fork_name_from_header() { - Ok(Some(fork_name)) => { - let (data, (version, execution_optimistic)) = - map_fork_name_with!(fork_name, SignedBlindedBeaconBlock, { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, (version, execution_optimistic)) - }); - (data, version, execution_optimistic) - } - Ok(None) | Err(_) => { - let ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data, - } = response.json().await?; - (data, version, execution_optimistic) - } - }; - Ok(Some(ExecutionOptimisticForkVersionedResponse { - version, - execution_optimistic, - data: block, - })) + Ok(Some(response.json().await?)) } /// `GET v1/beacon/blocks` (LEGACY) @@ -1012,6 +955,24 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/pool/bls_to_execution_changes` + pub async fn post_beacon_pool_bls_to_execution_changes( + &self, + address_changes: &[SignedBlsToExecutionChange], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("pool") + .push("bls_to_execution_changes"); + + self.post(path, &address_changes).await?; + + Ok(()) + } + /// `GET beacon/deposit_snapshot` pub async fn get_deposit_snapshot(&self) -> Result, Error> { use ssz::Decode; @@ -1392,7 +1353,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks>( + pub async fn get_validator_blocks>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1403,7 +1364,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks_modular>( + pub async fn get_validator_blocks_modular>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1434,8 +1395,34 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET v1/validator/blocks_and_blobs/{slot}` + pub async fn get_validator_blocks_and_blobs>( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("blocks_and_blobs") + .push(&slot.to_string()); + + path.query_pairs_mut() + .append_pair("randao_reveal", &randao_reveal.to_string()); + + if let Some(graffiti) = graffiti { + path.query_pairs_mut() + .append_pair("graffiti", &graffiti.to_string()); + } + + self.get(path).await + } + /// `GET v2/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks>( + pub async fn get_validator_blinded_blocks>( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1451,7 +1438,10 @@ impl BeaconNodeHttpClient { } /// `GET v1/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks_modular>( + pub async fn get_validator_blinded_blocks_modular< + T: EthSpec, + Payload: AbstractExecPayload, + >( &self, slot: Slot, randao_reveal: &SignatureBytes, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 53cca49120..1a0b46e38d 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -236,21 +236,6 @@ impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { } } -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct ExecutionOptimisticForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub execution_optimistic: Option, - pub data: T, -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct ForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub data: T, -} - #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] pub struct RootData { pub root: Hash256, @@ -1129,6 +1114,38 @@ pub struct LivenessResponseData { pub is_live: bool, } +#[derive(PartialEq, Debug, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] +pub struct BlocksAndBlobs> { + pub block: BeaconBlock, + pub blobs: Vec>, + pub kzg_aggregate_proof: KzgProof, +} + +impl> ForkVersionDeserialize + for BlocksAndBlobs +{ + fn 
deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + #[serde(bound = "T: EthSpec")] + struct Helper { + block: serde_json::Value, + blobs: Vec>, + kzg_aggregate_proof: KzgProof, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?, + blobs: helper.blobs, + kzg_aggregate_proof: helper.kzg_aggregate_proof, + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index d55ef3f3b5..6aa2c9590a 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -36,6 +36,12 @@ ALTAIR_FORK_EPOCH: 512 # Merge BELLATRIX_FORK_VERSION: 0x02000064 BELLATRIX_FORK_EPOCH: 385536 +# Capella +CAPELLA_FORK_VERSION: 0x03000064 +CAPELLA_FORK_EPOCH: 18446744073709551615 +# Eip4844 +EIP4844_FORK_VERSION: 0x04000064 +EIP4844_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000064 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 6e87a708f8..83e6de7906 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -36,6 +36,12 @@ ALTAIR_FORK_EPOCH: 74240 # Merge BELLATRIX_FORK_VERSION: 0x02000000 BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC +# Capella +CAPELLA_FORK_VERSION: 0x03000000 +CAPELLA_FORK_EPOCH: 18446744073709551615 +# Eip4844 +EIP4844_FORK_VERSION: 0x04000000 +EIP4844_FORK_EPOCH: 18446744073709551615 # Sharding 
SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 4c3e4bb6ec..4ba006ec94 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -29,8 +29,12 @@ TERMINAL_BLOCK_HASH: 0x000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 # Capella -CAPELLA_FORK_VERSION: 0x03001020 -CAPELLA_FORK_EPOCH: 18446744073709551615 +CAPELLA_FORK_VERSION: 0x90000072 +CAPELLA_FORK_EPOCH: 56832 + +# Eip4844 +EIP4844_FORK_VERSION: 0x03001020 +EIP4844_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x04001020 diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index cd03b37a44..0e0ef0707e 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" ethereum-types = "0.14.1" eth2_ssz_types = "0.2.2" eth2_hashing = "0.3.0" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" eth2_ssz = "0.4.1" tree_hash = "0.4.1" smallvec = "1.6.1" diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 52a738351e..f0381e5ad9 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -11,7 +11,7 @@ types = { path = "../types" } state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index afae7f058b..590e151a85 100644 --- a/consensus/fork_choice/src/fork_choice.rs 
+++ b/consensus/fork_choice/src/fork_choice.rs @@ -13,9 +13,10 @@ use std::collections::BTreeSet; use std::marker::PhantomData; use std::time::Duration; use types::{ - consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, AttesterSlashing, BeaconBlockRef, - BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, - ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, + consts::merge::INTERVALS_PER_SLOT, AbstractExecPayload, AttestationShufflingId, + AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, + EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, + SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -412,18 +413,18 @@ where AttestationShufflingId::new(anchor_block_root, anchor_state, RelativeEpoch::Next) .map_err(Error::BeaconStateError)?; - // Default any non-merge execution block hashes to 0x000..000. - let execution_status = anchor_block.message_merge().map_or_else( - |()| ExecutionStatus::irrelevant(), - |message| { - let execution_payload = &message.body.execution_payload; - if execution_payload == &<_>::default() { + let execution_status = anchor_block.message().execution_payload().map_or_else( + // If the block doesn't have an execution payload then it can't have + // execution enabled. + |_| ExecutionStatus::irrelevant(), + |execution_payload| { + if execution_payload.is_default_with_empty_roots() { // A default payload does not have execution enabled. ExecutionStatus::irrelevant() } else { // Assume that this payload is valid, since the anchor should be a trusted block and // state. - ExecutionStatus::Valid(message.body.execution_payload.block_hash()) + ExecutionStatus::Valid(execution_payload.block_hash()) } }, ); @@ -744,7 +745,7 @@ where /// The supplied block **must** pass the `state_transition` function as it will not be run /// here. 
#[allow(clippy::too_many_arguments)] - pub fn on_block>( + pub fn on_block>( &mut self, system_time_current_slot: Slot, block: BeaconBlockRef, @@ -856,7 +857,12 @@ where (parent_justified, parent_finalized) } else { let justification_and_finalization_state = match block { - BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { + // TODO(eip4844): Ensure that the final specification + // does not substantially modify per epoch processing. + BeaconBlockRef::Eip4844(_) + | BeaconBlockRef::Capella(_) + | BeaconBlockRef::Merge(_) + | BeaconBlockRef::Altair(_) => { let participation_cache = per_epoch_processing::altair::ParticipationCache::new(state, spec) .map_err(Error::ParticipationCacheBuild)?; diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 60c58859ed..9500b1c7da 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,7 +1,7 @@ use proto_array::JustifiedBalances; use std::collections::BTreeSet; use std::fmt::Debug; -use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// @@ -34,7 +34,7 @@ pub trait ForkChoiceStore: Sized { /// Called whenever `ForkChoice::on_block` has verified a block, but not yet added it to fork /// choice. Allows the implementer to performing caching or other housekeeping duties. 
- fn on_verified_block>( + fn on_verified_block>( &mut self, block: BeaconBlockRef, block_root: Hash256, diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index dfab6fda56..205ef8f521 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -11,7 +11,7 @@ path = "src/bin.rs" [dependencies] types = { path = "../types" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml index 1eef2a5557..d39ad10875 100644 --- a/consensus/ssz/Cargo.toml +++ b/consensus/ssz/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" name = "ssz" [dev-dependencies] -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" [dependencies] ethereum-types = "0.14.1" diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index 76d85f775d..3d36fb4379 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -388,6 +388,7 @@ macro_rules! impl_decodable_for_u8_array { impl_decodable_for_u8_array!(4); impl_decodable_for_u8_array!(32); +impl_decodable_for_u8_array!(48); macro_rules! impl_for_vec { ($type: ty, $max_len: expr) => { diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index 833480e1b6..8c609d9397 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -511,6 +511,7 @@ macro_rules! 
impl_encodable_for_u8_array { impl_encodable_for_u8_array!(4); impl_encodable_for_u8_array!(32); +impl_encodable_for_u8_array!(48); #[cfg(test)] mod tests { diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs index b4b91da4b5..f52d2c5cdf 100644 --- a/consensus/ssz/tests/tests.rs +++ b/consensus/ssz/tests/tests.rs @@ -388,145 +388,3 @@ mod round_trip { round_trip(data); } } - -mod derive_macro { - use ssz::{Decode, Encode}; - use ssz_derive::{Decode, Encode}; - use std::fmt::Debug; - - fn assert_encode(item: &T, bytes: &[u8]) { - assert_eq!(item.as_ssz_bytes(), bytes); - } - - fn assert_encode_decode(item: &T, bytes: &[u8]) { - assert_encode(item, bytes); - assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item); - } - - #[derive(PartialEq, Debug, Encode, Decode)] - #[ssz(enum_behaviour = "union")] - enum TwoFixedUnion { - U8(u8), - U16(u16), - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct TwoFixedUnionStruct { - a: TwoFixedUnion, - } - - #[test] - fn two_fixed_union() { - let eight = TwoFixedUnion::U8(1); - let sixteen = TwoFixedUnion::U16(1); - - assert_encode_decode(&eight, &[0, 1]); - assert_encode_decode(&sixteen, &[1, 1, 0]); - - assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]); - assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]); - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct VariableA { - a: u8, - b: Vec, - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct VariableB { - a: Vec, - b: u8, - } - - #[derive(PartialEq, Debug, Encode)] - #[ssz(enum_behaviour = "transparent")] - enum TwoVariableTrans { - A(VariableA), - B(VariableB), - } - - #[derive(PartialEq, Debug, Encode)] - struct TwoVariableTransStruct { - a: TwoVariableTrans, - } - - #[derive(PartialEq, Debug, Encode, Decode)] - #[ssz(enum_behaviour = "union")] - enum TwoVariableUnion { - A(VariableA), - B(VariableB), - } - - #[derive(PartialEq, Debug, Encode, Decode)] - struct 
TwoVariableUnionStruct { - a: TwoVariableUnion, - } - - #[test] - fn two_variable_trans() { - let trans_a = TwoVariableTrans::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let trans_b = TwoVariableTrans::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]); - assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]); - - assert_encode( - &TwoVariableTransStruct { a: trans_a }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode( - &TwoVariableTransStruct { a: trans_b }, - &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2], - ); - } - - #[test] - fn two_variable_union() { - let union_a = TwoVariableUnion::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let union_b = TwoVariableUnion::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]); - assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]); - - assert_encode_decode( - &TwoVariableUnionStruct { a: union_a }, - &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode_decode( - &TwoVariableUnionStruct { a: union_b }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 3, 1, 2], - ); - } - - #[derive(PartialEq, Debug, Encode, Decode)] - #[ssz(enum_behaviour = "union")] - enum TwoVecUnion { - A(Vec), - B(Vec), - } - - #[test] - fn two_vec_union() { - assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]); - assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]); - - assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]); - assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]); - - assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 1]); - assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]); - } -} diff --git a/consensus/ssz_derive/Cargo.toml b/consensus/ssz_derive/Cargo.toml index cac617d391..d3b2865a61 100644 --- a/consensus/ssz_derive/Cargo.toml +++ b/consensus/ssz_derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_ssz_derive" -version = "0.3.0" +version = "0.3.1" authors = ["Paul Hauner "] edition = "2021" description = "Procedural 
derive macros to accompany the eth2_ssz crate." @@ -15,3 +15,6 @@ syn = "1.0.42" proc-macro2 = "1.0.23" quote = "1.0.7" darling = "0.13.0" + +[dev-dependencies] +eth2_ssz = "0.4.1" diff --git a/consensus/ssz_derive/src/lib.rs b/consensus/ssz_derive/src/lib.rs index a5a5a0dddf..40d63fd02f 100644 --- a/consensus/ssz_derive/src/lib.rs +++ b/consensus/ssz_derive/src/lib.rs @@ -1,7 +1,147 @@ #![recursion_limit = "256"] //! Provides procedural derive macros for the `Encode` and `Decode` traits of the `eth2_ssz` crate. //! -//! Supports field attributes, see each derive macro for more information. +//! ## Attributes +//! +//! The following struct/enum attributes are available: +//! +//! - `#[ssz(enum_behaviour = "union")]`: encodes and decodes an `enum` with a one-byte variant selector. +//! - `#[ssz(enum_behaviour = "transparent")]`: allows encoding an `enum` by serializing only the +//! value whilst ignoring outermost the `enum`. +//! - `#[ssz(struct_behaviour = "container")]`: encodes and decodes the `struct` as an SSZ +//! "container". +//! - `#[ssz(struct_behaviour = "transparent")]`: encodes and decodes a `struct` with exactly one +//! non-skipped field as if the outermost `struct` does not exist. +//! +//! The following field attributes are available: +//! +//! - `#[ssz(with = "module")]`: uses the methods in `module` to implement `ssz::Encode` and +//! `ssz::Decode`. This is useful when it's not possible to create an `impl` for that type +//! (e.g. the type is defined in another crate). +//! - `#[ssz(skip_serializing)]`: this field will not be included in the serialized SSZ vector. +//! - `#[ssz(skip_deserializing)]`: this field will not be expected in the serialized +//! SSZ vector and it will be initialized from a `Default` implementation. +//! +//! ## Examples +//! +//! ### Structs +//! +//! ```rust +//! use ssz::{Encode, Decode}; +//! use ssz_derive::{Encode, Decode}; +//! +//! /// Represented as an SSZ "list" wrapped in an SSZ "container". +//! 
#[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(struct_behaviour = "container")] // "container" is the default behaviour +//! struct TypicalStruct { +//! foo: Vec +//! } +//! +//! assert_eq!( +//! TypicalStruct { foo: vec![42] }.as_ssz_bytes(), +//! vec![4, 0, 0, 0, 42] +//! ); +//! +//! assert_eq!( +//! TypicalStruct::from_ssz_bytes(&[4, 0, 0, 0, 42]).unwrap(), +//! TypicalStruct { foo: vec![42] }, +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". +//! #[derive(Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct WrapperStruct { +//! foo: Vec +//! } +//! +//! assert_eq!( +//! WrapperStruct { foo: vec![42] }.as_ssz_bytes(), +//! vec![42] +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct WrapperStructSkippedField { +//! foo: Vec, +//! #[ssz(skip_serializing, skip_deserializing)] +//! bar: u8, +//! } +//! +//! assert_eq!( +//! WrapperStructSkippedField { foo: vec![42], bar: 99 }.as_ssz_bytes(), +//! vec![42] +//! ); +//! assert_eq!( +//! WrapperStructSkippedField::from_ssz_bytes(&[42]).unwrap(), +//! WrapperStructSkippedField { foo: vec![42], bar: 0 } +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". +//! #[derive(Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct NewType(Vec); +//! +//! assert_eq!( +//! NewType(vec![42]).as_ssz_bytes(), +//! vec![42] +//! ); +//! +//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(struct_behaviour = "transparent")] +//! struct NewTypeSkippedField(Vec, #[ssz(skip_serializing, skip_deserializing)] u8); +//! +//! assert_eq!( +//! NewTypeSkippedField(vec![42], 99).as_ssz_bytes(), +//! vec![42] +//! ); +//! assert_eq!( +//! 
NewTypeSkippedField::from_ssz_bytes(&[42]).unwrap(), +//! NewTypeSkippedField(vec![42], 0) +//! ); +//! ``` +//! +//! ### Enums +//! +//! ```rust +//! use ssz::{Encode, Decode}; +//! use ssz_derive::{Encode, Decode}; +//! +//! /// Represented as an SSZ "union". +//! #[derive(Debug, PartialEq, Encode, Decode)] +//! #[ssz(enum_behaviour = "union")] +//! enum UnionEnum { +//! Foo(u8), +//! Bar(Vec), +//! } +//! +//! assert_eq!( +//! UnionEnum::Foo(42).as_ssz_bytes(), +//! vec![0, 42] +//! ); +//! assert_eq!( +//! UnionEnum::from_ssz_bytes(&[1, 42, 42]).unwrap(), +//! UnionEnum::Bar(vec![42, 42]), +//! ); +//! +//! /// Represented as only the value in the enum variant. +//! #[derive(Debug, PartialEq, Encode)] +//! #[ssz(enum_behaviour = "transparent")] +//! enum TransparentEnum { +//! Foo(u8), +//! Bar(Vec), +//! } +//! +//! assert_eq!( +//! TransparentEnum::Foo(42).as_ssz_bytes(), +//! vec![42] +//! ); +//! assert_eq!( +//! TransparentEnum::Bar(vec![42, 42]).as_ssz_bytes(), +//! vec![42, 42] +//! ); +//! ``` use darling::{FromDeriveInput, FromMeta}; use proc_macro::TokenStream; @@ -13,11 +153,18 @@ use syn::{parse_macro_input, DataEnum, DataStruct, DeriveInput, Ident}; /// extensions). const MAX_UNION_SELECTOR: u8 = 127; +const ENUM_TRANSPARENT: &str = "transparent"; +const ENUM_UNION: &str = "union"; +const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute with \ + a \"transparent\" or \"union\" value, e.g., #[ssz(enum_behaviour = \"transparent\")]"; + #[derive(Debug, FromDeriveInput)] #[darling(attributes(ssz))] struct StructOpts { #[darling(default)] enum_behaviour: Option, + #[darling(default)] + struct_behaviour: Option, } /// Field-level configuration. 
@@ -31,40 +178,87 @@ struct FieldOpts { skip_deserializing: bool, } -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const ENUM_VARIANTS: &[&str] = &[ENUM_TRANSPARENT, ENUM_UNION]; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute, \ - e.g., #[ssz(enum_behaviour = \"transparent\")]"; - -enum EnumBehaviour { - Transparent, - Union, +enum Procedure<'a> { + Struct { + data: &'a syn::DataStruct, + behaviour: StructBehaviour, + }, + Enum { + data: &'a syn::DataEnum, + behaviour: EnumBehaviour, + }, } -impl EnumBehaviour { - pub fn new(s: Option) -> Option { - s.map(|s| match s.as_ref() { - ENUM_TRANSPARENT => EnumBehaviour::Transparent, - ENUM_UNION => EnumBehaviour::Union, - other => panic!( - "{} is an invalid enum_behaviour, use either {:?}", - other, ENUM_VARIANTS - ), - }) +enum StructBehaviour { + Container, + Transparent, +} + +enum EnumBehaviour { + Union, + Transparent, +} + +impl<'a> Procedure<'a> { + fn read(item: &'a DeriveInput) -> Self { + let opts = StructOpts::from_derive_input(item).unwrap(); + + match &item.data { + syn::Data::Struct(data) => { + if opts.enum_behaviour.is_some() { + panic!("cannot use \"enum_behaviour\" for a struct"); + } + + match opts.struct_behaviour.as_deref() { + Some("container") | None => Procedure::Struct { + data, + behaviour: StructBehaviour::Container, + }, + Some("transparent") => Procedure::Struct { + data, + behaviour: StructBehaviour::Transparent, + }, + Some(other) => panic!( + "{} is not a valid struct behaviour, use \"container\" or \"transparent\"", + other + ), + } + } + syn::Data::Enum(data) => { + if opts.struct_behaviour.is_some() { + panic!("cannot use \"struct_behaviour\" for an enum"); + } + + match opts.enum_behaviour.as_deref() { + Some("union") => Procedure::Enum { + data, + behaviour: EnumBehaviour::Union, + }, + Some("transparent") => Procedure::Enum { + data, + behaviour: EnumBehaviour::Transparent, + }, + Some(other) => panic!( + "{} 
is not a valid enum behaviour, use \"union\" or \"transparent\"", + other + ), + None => panic!("{}", NO_ENUM_BEHAVIOUR_ERROR), + } + } + _ => panic!("ssz_derive only supports structs and enums"), + } + } } -fn parse_ssz_fields(struct_data: &syn::DataStruct) -> Vec<(&syn::Type, &syn::Ident, FieldOpts)> { +fn parse_ssz_fields( + struct_data: &syn::DataStruct, +) -> Vec<(&syn::Type, Option<&syn::Ident>, FieldOpts)> { struct_data .fields .iter() .map(|field| { let ty = &field.ty; - let ident = match &field.ident { - Some(ref ident) => ident, - _ => panic!("ssz_derive only supports named struct fields."), - }; + let ident = field.ident.as_ref(); let field_opts_candidates = field .attrs @@ -93,21 +287,17 @@ fn parse_ssz_fields(struct_data: &syn::DataStruct) -> Vec<(&syn::Type, &syn::Ide #[proc_macro_derive(Encode, attributes(ssz))] pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); + let procedure = Procedure::read(&item); - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - ssz_encode_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { - EnumBehaviour::Transparent => ssz_encode_derive_enum_transparent(&item, s), - EnumBehaviour::Union => ssz_encode_derive_enum_union(&item, s), + match procedure { + Procedure::Struct { data, behaviour } => match behaviour { + StructBehaviour::Transparent => ssz_encode_derive_struct_transparent(&item, data), + StructBehaviour::Container => ssz_encode_derive_struct(&item, data), + }, + Procedure::Enum { data, behaviour } => match behaviour { + EnumBehaviour::Transparent => ssz_encode_derive_enum_transparent(&item, data), + EnumBehaviour::Union => ssz_encode_derive_enum_union(&item, data), }, - _ => panic!("ssz_derive only supports 
structs and enums"), } } @@ -132,6 +322,13 @@ fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct continue; } + let ident = match ident { + Some(ref ident) => ident, + _ => panic!( + "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." + ), + }; + if let Some(module) = field_opts.with { let module = quote! { #module::encode }; field_is_ssz_fixed_len.push(quote! { #module::is_ssz_fixed_len() }); @@ -219,6 +416,82 @@ fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct output.into() } +/// Derive `ssz::Encode` "transparently" for a struct which has exactly one non-skipped field. +/// +/// The single field is encoded directly, making the outermost `struct` transparent. +/// +/// ## Field attributes +/// +/// - `#[ssz(skip_serializing)]`: the field will not be serialized. +fn ssz_encode_derive_struct_transparent( + derive_input: &DeriveInput, + struct_data: &DataStruct, +) -> TokenStream { + let name = &derive_input.ident; + let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); + let ssz_fields = parse_ssz_fields(struct_data); + let num_fields = ssz_fields + .iter() + .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) + .count(); + + if num_fields != 1 { + panic!( + "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", + num_fields + ); + } + + let (ty, ident, _field_opts) = ssz_fields + .iter() + .find(|(_, _, field_opts)| !field_opts.skip_deserializing) + .expect("\"transparent\" struct must have at least one non-skipped field"); + + let output = if let Some(field_name) = ident { + quote! 
{ + impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + <#ty as ssz::Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <#ty as ssz::Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.#field_name.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.#field_name.ssz_append(buf) + } + } + } + } else { + quote! { + impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + <#ty as ssz::Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <#ty as ssz::Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } + } + } + }; + + output.into() +} + /// Derive `ssz::Encode` for an enum in the "transparent" method. /// /// The "transparent" method is distinct from the "union" method specified in the SSZ specification. @@ -367,24 +640,20 @@ fn ssz_encode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum #[proc_macro_derive(Decode, attributes(ssz))] pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); + let procedure = Procedure::read(&item); - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - ssz_decode_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { + match procedure { + Procedure::Struct { data, behaviour } => match behaviour { + StructBehaviour::Transparent => ssz_decode_derive_struct_transparent(&item, data), + StructBehaviour::Container => ssz_decode_derive_struct(&item, data), + }, + Procedure::Enum { data, behaviour } => match behaviour { + EnumBehaviour::Union => 
ssz_decode_derive_enum_union(&item, data), EnumBehaviour::Transparent => panic!( "Decode cannot be derived for enum_behaviour \"{}\", only \"{}\" is valid.", ENUM_TRANSPARENT, ENUM_UNION ), - EnumBehaviour::Union => ssz_decode_derive_enum_union(&item, s), }, - _ => panic!("ssz_derive only supports structs and enums"), } } @@ -409,6 +678,13 @@ fn ssz_decode_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> Tok let mut fixed_lens = vec![]; for (ty, ident, field_opts) in parse_ssz_fields(struct_data) { + let ident = match ident { + Some(ref ident) => ident, + _ => panic!( + "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." + ), + }; + field_names.push(quote! { #ident }); @@ -545,6 +821,90 @@ fn ssz_decode_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> Tok output.into() } +/// Implements `ssz::Decode` "transparently" for a `struct` with exactly one non-skipped field. +/// +/// The bytes will be decoded as if they are the inner field, without the outermost struct. The +/// outermost struct will then be applied artificially. +/// +/// ## Field attributes +/// +/// - `#[ssz(skip_deserializing)]`: during de-serialization the field will be instantiated from a +/// `Default` implementation. The decoder will assume that the field was not serialized at all +/// (e.g., if it has been serialized, an error will be raised instead of `Default` overriding it). 
+fn ssz_decode_derive_struct_transparent( + item: &DeriveInput, + struct_data: &DataStruct, +) -> TokenStream { + let name = &item.ident; + let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); + let ssz_fields = parse_ssz_fields(struct_data); + let num_fields = ssz_fields + .iter() + .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) + .count(); + + if num_fields != 1 { + panic!( + "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", + num_fields + ); + } + + let mut fields = vec![]; + let mut wrapped_type = None; + + for (i, (ty, ident, field_opts)) in ssz_fields.into_iter().enumerate() { + if let Some(name) = ident { + if field_opts.skip_deserializing { + fields.push(quote! { + #name: <_>::default(), + }); + } else { + fields.push(quote! { + #name: <_>::from_ssz_bytes(bytes)?, + }); + wrapped_type = Some(ty); + } + } else { + let index = syn::Index::from(i); + if field_opts.skip_deserializing { + fields.push(quote! { + #index:<_>::default(), + }); + } else { + fields.push(quote! { + #index:<_>::from_ssz_bytes(bytes)?, + }); + wrapped_type = Some(ty); + } + } + } + + let ty = wrapped_type.unwrap(); + + let output = quote! { + impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + <#ty as ssz::Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <#ty as ssz::Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { + Ok(Self { + #( + #fields + )* + + }) + } + } + }; + output.into() +} + /// Derive `ssz::Decode` for an `enum` following the "union" SSZ spec. 
fn ssz_decode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { let name = &derive_input.ident; diff --git a/consensus/ssz_derive/tests/tests.rs b/consensus/ssz_derive/tests/tests.rs new file mode 100644 index 0000000000..2eeb3a48db --- /dev/null +++ b/consensus/ssz_derive/tests/tests.rs @@ -0,0 +1,215 @@ +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::fmt::Debug; +use std::marker::PhantomData; + +fn assert_encode(item: &T, bytes: &[u8]) { + assert_eq!(item.as_ssz_bytes(), bytes); +} + +fn assert_encode_decode(item: &T, bytes: &[u8]) { + assert_encode(item, bytes); + assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +enum TwoFixedUnion { + U8(u8), + U16(u16), +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct TwoFixedUnionStruct { + a: TwoFixedUnion, +} + +#[test] +fn two_fixed_union() { + let eight = TwoFixedUnion::U8(1); + let sixteen = TwoFixedUnion::U16(1); + + assert_encode_decode(&eight, &[0, 1]); + assert_encode_decode(&sixteen, &[1, 1, 0]); + + assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]); + assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct VariableA { + a: u8, + b: Vec, +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct VariableB { + a: Vec, + b: u8, +} + +#[derive(PartialEq, Debug, Encode)] +#[ssz(enum_behaviour = "transparent")] +enum TwoVariableTrans { + A(VariableA), + B(VariableB), +} + +#[derive(PartialEq, Debug, Encode)] +struct TwoVariableTransStruct { + a: TwoVariableTrans, +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +enum TwoVariableUnion { + A(VariableA), + B(VariableB), +} + +#[derive(PartialEq, Debug, Encode, Decode)] +struct TwoVariableUnionStruct { + a: TwoVariableUnion, +} + +#[test] +fn two_variable_trans() { + let 
trans_a = TwoVariableTrans::A(VariableA { + a: 1, + b: vec![2, 3], + }); + let trans_b = TwoVariableTrans::B(VariableB { + a: vec![1, 2], + b: 3, + }); + + assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]); + assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]); + + assert_encode( + &TwoVariableTransStruct { a: trans_a }, + &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], + ); + assert_encode( + &TwoVariableTransStruct { a: trans_b }, + &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2], + ); +} + +#[test] +fn two_variable_union() { + let union_a = TwoVariableUnion::A(VariableA { + a: 1, + b: vec![2, 3], + }); + let union_b = TwoVariableUnion::B(VariableB { + a: vec![1, 2], + b: 3, + }); + + assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]); + assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]); + + assert_encode_decode( + &TwoVariableUnionStruct { a: union_a }, + &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], + ); + assert_encode_decode( + &TwoVariableUnionStruct { a: union_b }, + &[4, 0, 0, 0, 1, 5, 0, 0, 0, 3, 1, 2], + ); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(enum_behaviour = "union")] +enum TwoVecUnion { + A(Vec), + B(Vec), +} + +#[test] +fn two_vec_union() { + assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]); + assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]); + + assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]); + assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]); + + assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 1]); + assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStruct { + inner: Vec, +} + +impl TransparentStruct { + fn new(inner: u8) -> Self { + Self { inner: vec![inner] } + } +} + +#[test] +fn transparent_struct() { + assert_encode_decode(&TransparentStruct::new(42), &vec![42_u8].as_ssz_bytes()); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct 
TransparentStructSkippedField { + inner: Vec, + #[ssz(skip_serializing, skip_deserializing)] + skipped: PhantomData, +} + +impl TransparentStructSkippedField { + fn new(inner: u8) -> Self { + Self { + inner: vec![inner], + skipped: PhantomData, + } + } +} + +#[test] +fn transparent_struct_skipped_field() { + assert_encode_decode( + &TransparentStructSkippedField::new(42), + &vec![42_u8].as_ssz_bytes(), + ); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStructNewType(Vec); + +#[test] +fn transparent_struct_newtype() { + assert_encode_decode( + &TransparentStructNewType(vec![42_u8]), + &vec![42_u8].as_ssz_bytes(), + ); +} + +#[derive(PartialEq, Debug, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +struct TransparentStructNewTypeSkippedField( + Vec, + #[ssz(skip_serializing, skip_deserializing)] PhantomData, +); + +impl TransparentStructNewTypeSkippedField { + fn new(inner: Vec) -> Self { + Self(inner, PhantomData) + } +} + +#[test] +fn transparent_struct_newtype_skipped_field() { + assert_encode_decode( + &TransparentStructNewTypeSkippedField::new(vec![42_u8]), + &vec![42_u8].as_ssz_bytes(), + ); +} diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index 0539cc7d2c..b7bde22578 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -660,7 +660,7 @@ impl arbitrary::Arbitrary<'_> for Bitfield> { let size = N::to_usize(); let mut vec = smallvec![0u8; size]; u.fill_buffer(&mut vec)?; - Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) + Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } @@ -672,7 +672,7 @@ impl arbitrary::Arbitrary<'_> for Bitfield> { let size = std::cmp::min(rand, max_size); let mut vec = smallvec![0u8; size]; u.fill_buffer(&mut vec)?; - Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) 
+ Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index 1ad82a3841..9625f27f3a 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -291,7 +291,7 @@ impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrar for _ in 0..size { vec.push(::arbitrary(u)?); } - Ok(Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) + Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index a342b361ed..3361f75090 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -176,6 +176,15 @@ impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList { } } +impl IntoIterator for VariableList { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + impl tree_hash::TreeHash for VariableList where T: tree_hash::TreeHash, @@ -273,7 +282,7 @@ impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrar for _ in 0..size { vec.push(::arbitrary(u)?); } - Ok(Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?) 
+ Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) } } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 46ac2bae57..ccb41830be 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -14,7 +14,7 @@ bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" itertools = "0.10.0" eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" eth2_ssz_types = "0.2.2" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index ac2dba875e..77cd1a3265 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -50,7 +50,10 @@ pub fn slash_validator( validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) | BeaconState::Merge(_) => whistleblower_reward + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => whistleblower_reward .safe_mul(PROPOSER_WEIGHT)? 
.safe_div(WEIGHT_DENOMINATOR)?, }; diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index 0bd5f61aff..ccf8cefb69 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -4,8 +4,8 @@ use std::collections::{hash_map::Entry, HashMap}; use std::marker::PhantomData; use tree_hash::TreeHash; use types::{ - Attestation, AttestationData, BeaconState, BeaconStateError, BitList, ChainSpec, Epoch, - EthSpec, ExecPayload, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, + AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList, + ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -98,7 +98,7 @@ impl ConsensusContext { self } - pub fn get_current_block_root>( + pub fn get_current_block_root>( &mut self, block: &SignedBeaconBlock, ) -> Result { diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index fb2c9bfa7d..3f9328f4d5 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -2,7 +2,9 @@ use super::per_block_processing::{ errors::BlockProcessingError, process_operations::process_deposit, }; use crate::common::DepositDataTree; -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use crate::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844, +}; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; @@ -61,15 +63,51 @@ pub fn initialize_beacon_state_from_eth1( .bellatrix_fork_epoch .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) { + // this will set state.latest_execution_payload_header = ExecutionPayloadHeaderMerge::default() upgrade_to_bellatrix(&mut state, spec)?; // Remove intermediate Altair fork from `state.fork`. 
state.fork_mut().previous_version = spec.bellatrix_fork_version; // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing - *state.latest_execution_payload_header_mut()? = - execution_payload_header.unwrap_or_default(); + // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing + if let Some(ExecutionPayloadHeader::Merge(ref header)) = execution_payload_header { + *state.latest_execution_payload_header_merge_mut()? = header.clone(); + } + } + + // Upgrade to capella if configured from genesis + if spec + .capella_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_capella(&mut state, spec)?; + + // Remove intermediate Bellatrix fork from `state.fork`. + state.fork_mut().previous_version = spec.capella_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#testing + if let Some(ExecutionPayloadHeader::Capella(ref header)) = execution_payload_header { + *state.latest_execution_payload_header_capella_mut()? = header.clone(); + } + } + + // Upgrade to eip4844 if configured from genesis + if spec + .eip4844_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_eip4844(&mut state, spec)?; + + // Remove intermediate Capella fork from `state.fork`. + state.fork_mut().previous_version = spec.eip4844_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/dev/specs/eip4844/beacon-chain.md#testing + if let Some(ExecutionPayloadHeader::Eip4844(header)) = execution_payload_header { + *state.latest_execution_payload_header_eip4844_mut()? 
= header; + } } // Now that we have our validators, initialize the caches (including the committees) diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 7d0cb01aeb..4f686200b0 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -13,11 +13,13 @@ pub use self::verify_attester_slashing::{ pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use altair::sync_committee::process_sync_aggregate; pub use block_signature_verifier::{BlockSignatureVerifier, ParallelSignatureSets}; +pub use eip4844::eip4844::process_blob_kzg_commitments; pub use is_valid_indexed_attestation::is_valid_indexed_attestation; pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; +pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; @@ -25,6 +27,7 @@ pub use verify_exit::verify_exit; pub mod altair; pub mod block_signature_verifier; +pub mod eip4844; pub mod errors; mod is_valid_indexed_attestation; pub mod process_operations; @@ -32,10 +35,13 @@ pub mod signature_sets; pub mod tests; mod verify_attestation; mod verify_attester_slashing; +mod verify_bls_to_execution_change; mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; +use crate::common::decrease_balance; + #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -88,7 +94,7 @@ pub enum VerifyBlockRoot { /// re-calculating the root when it is already known. Note `block_root` should be equal to the /// tree hash root of the block, NOT the signing root of the block. This function takes /// care of mixing in the domain. 
-pub fn per_block_processing>( +pub fn per_block_processing>( state: &mut BeaconState, signed_block: &SignedBeaconBlock, block_signature_strategy: BlockSignatureStrategy, @@ -156,7 +162,8 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - process_execution_payload(state, payload, spec)?; + process_withdrawals::(state, payload, spec)?; + process_execution_payload::(state, payload, spec)?; } process_randao(state, block, verify_randao, ctxt, spec)?; @@ -173,6 +180,12 @@ pub fn per_block_processing>( )?; } + // Eip4844 specifications are not yet released so additional care is taken + // to ensure the code does not run in production. + if matches!(block, BeaconBlockRef::Eip4844(_)) { + process_blob_kzg_commitments(block.body())?; + } + Ok(()) } @@ -235,7 +248,7 @@ pub fn process_block_header( /// Verifies the signature of a block. /// /// Spec v0.12.1 -pub fn verify_block_signature>( +pub fn verify_block_signature>( state: &BeaconState, block: &SignedBeaconBlock, ctxt: &mut ConsensusContext, @@ -261,7 +274,7 @@ pub fn verify_block_signature>( /// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// `state.latest_randao_mixes`. 
-pub fn process_randao>( +pub fn process_randao>( state: &mut BeaconState, block: BeaconBlockRef<'_, T, Payload>, verify_signatures: VerifySignatures, @@ -334,17 +347,17 @@ pub fn get_new_eth1_data( /// Contains a partial set of checks from the `process_execution_payload` function: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload -pub fn partially_verify_execution_payload>( +pub fn partially_verify_execution_payload>( state: &BeaconState, block_slot: Slot, - payload: &Payload, + payload: Payload::Ref<'_>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { if is_merge_transition_complete(state) { block_verify!( - payload.parent_hash() == state.latest_execution_payload_header()?.block_hash, + payload.parent_hash() == state.latest_execution_payload_header()?.block_hash(), BlockProcessingError::ExecutionHashChainIncontiguous { - expected: state.latest_execution_payload_header()?.block_hash, + expected: state.latest_execution_payload_header()?.block_hash(), found: payload.parent_hash(), } ); @@ -376,14 +389,33 @@ pub fn partially_verify_execution_payload>( /// Partially equivalent to the `process_execution_payload` function: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload -pub fn process_execution_payload>( +pub fn process_execution_payload>( state: &mut BeaconState, - payload: &Payload, + payload: Payload::Ref<'_>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - partially_verify_execution_payload(state, state.slot(), payload, spec)?; + partially_verify_execution_payload::(state, state.slot(), payload, spec)?; - *state.latest_execution_payload_header_mut()? = payload.to_execution_payload_header(); + match state.latest_execution_payload_header_mut()? 
{ + ExecutionPayloadHeaderRefMut::Merge(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Merge(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } + ExecutionPayloadHeaderRefMut::Capella(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Capella(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } + ExecutionPayloadHeaderRefMut::Eip4844(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Eip4844(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } + } Ok(()) } @@ -392,30 +424,37 @@ pub fn process_execution_payload>( /// the merge has happened or if we're on the transition block. Thus we don't want to propagate /// errors from the `BeaconState` being an earlier variant than `BeaconStateMerge` as we'd have to /// repeaetedly write code to treat these errors as false. -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_merge_transition_complete +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete(state: &BeaconState) -> bool { + // We must check defaultness against the payload header with 0x0 roots, as that's what's meant + // by `ExecutionPayloadHeader()` in the spec. 
state .latest_execution_payload_header() - .map(|header| *header != >::default()) + .map(|header| !header.is_default_with_zero_roots()) .unwrap_or(false) } -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_merge_transition_block -pub fn is_merge_transition_block>( +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_block +pub fn is_merge_transition_block>( state: &BeaconState, body: BeaconBlockBodyRef, ) -> bool { + // For execution payloads in blocks (which may be headers) we must check defaultness against + // the payload with `transactions_root` equal to the tree hash of the empty list. body.execution_payload() - .map(|payload| !is_merge_transition_complete(state) && *payload != Payload::default()) + .map(|payload| { + !is_merge_transition_complete(state) && !payload.is_default_with_empty_roots() + }) .unwrap_or(false) } -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_execution_enabled -pub fn is_execution_enabled>( +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_execution_enabled +pub fn is_execution_enabled>( state: &BeaconState, body: BeaconBlockBodyRef, ) -> bool { is_merge_transition_block(state, body) || is_merge_transition_complete(state) } -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#compute_timestamp_at_slot + +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot pub fn compute_timestamp_at_slot( state: &BeaconState, block_slot: Slot, @@ -426,3 +465,115 @@ pub fn compute_timestamp_at_slot( .safe_mul(spec.seconds_per_slot) .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } + +/// Compute the next batch of withdrawals which should be included in a block. 
+/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals +pub fn get_expected_withdrawals( + state: &BeaconState, + spec: &ChainSpec, +) -> Result, BlockProcessingError> { + let epoch = state.current_epoch(); + let mut withdrawal_index = state.next_withdrawal_index()?; + let mut validator_index = state.next_withdrawal_validator_index()?; + let mut withdrawals = vec![]; + + let bound = std::cmp::min( + state.validators().len() as u64, + spec.max_validators_per_withdrawals_sweep, + ); + for _ in 0..bound { + let validator = state.get_validator(validator_index as usize)?; + let balance = *state.balances().get(validator_index as usize).ok_or( + BeaconStateError::BalancesOutOfBounds(validator_index as usize), + )?; + if validator.is_fully_withdrawable_at(balance, epoch, spec) { + withdrawals.push(Withdrawal { + index: withdrawal_index, + validator_index, + address: validator + .get_eth1_withdrawal_address(spec) + .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, + amount: balance, + }); + withdrawal_index.safe_add_assign(1)?; + } else if validator.is_partially_withdrawable_validator(balance, spec) { + withdrawals.push(Withdrawal { + index: withdrawal_index, + validator_index, + address: validator + .get_eth1_withdrawal_address(spec) + .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, + amount: balance.safe_sub(spec.max_effective_balance)?, + }); + withdrawal_index.safe_add_assign(1)?; + } + if withdrawals.len() == T::max_withdrawals_per_payload() { + break; + } + validator_index = validator_index + .safe_add(1)? + .safe_rem(state.validators().len() as u64)?; + } + + Ok(withdrawals.into()) +} + +/// Apply withdrawals to the state. 
+pub fn process_withdrawals>( + state: &mut BeaconState, + payload: Payload::Ref<'_>, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + match state { + BeaconState::Merge(_) => Ok(()), + BeaconState::Capella(_) | BeaconState::Eip4844(_) => { + let expected_withdrawals = get_expected_withdrawals(state, spec)?; + let expected_root = expected_withdrawals.tree_hash_root(); + let withdrawals_root = payload.withdrawals_root()?; + + if expected_root != withdrawals_root { + return Err(BlockProcessingError::WithdrawalsRootMismatch { + expected: expected_root, + found: withdrawals_root, + }); + } + + for withdrawal in expected_withdrawals.iter() { + decrease_balance( + state, + withdrawal.validator_index as usize, + withdrawal.amount, + )?; + } + + // Update the next withdrawal index if this block contained withdrawals + if let Some(latest_withdrawal) = expected_withdrawals.last() { + *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; + + // Update the next validator index to start the next withdrawal sweep + if expected_withdrawals.len() == T::max_withdrawals_per_payload() { + // Next sweep starts after the latest withdrawal's validator index + let next_validator_index = latest_withdrawal + .validator_index + .safe_add(1)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? = next_validator_index; + } + } + + // Advance sweep by the max length of the sweep if there was not a full set of withdrawals + if expected_withdrawals.len() != T::max_withdrawals_per_payload() { + let next_validator_index = state + .next_withdrawal_validator_index()? + .safe_add(spec.max_validators_per_withdrawals_sweep)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? 
= next_validator_index; + } + + Ok(()) + } + // these shouldn't even be encountered but they're here for completeness + BeaconState::Base(_) | BeaconState::Altair(_) => Ok(()), + } +} diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 5e52ff8cb8..709302eec1 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -7,7 +7,8 @@ use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, + AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, + SignedBeaconBlock, }; pub type Result = std::result::Result; @@ -124,7 +125,7 @@ where /// contains invalid signatures on deposits._ /// /// See `Self::verify` for more detail. - pub fn verify_entire_block>( + pub fn verify_entire_block>( state: &'a BeaconState, get_pubkey: F, decompressor: D, @@ -138,7 +139,7 @@ where } /// Includes all signatures on the block (except the deposit signatures) for verification. - pub fn include_all_signatures>( + pub fn include_all_signatures>( &mut self, block: &'a SignedBeaconBlock, ctxt: &mut ConsensusContext, @@ -155,7 +156,7 @@ where /// Includes all signatures on the block (except the deposit signatures and the proposal /// signature) for verification. - pub fn include_all_signatures_except_proposal>( + pub fn include_all_signatures_except_proposal>( &mut self, block: &'a SignedBeaconBlock, ctxt: &mut ConsensusContext, @@ -169,12 +170,13 @@ where // Deposits are not included because they can legally have invalid signatures. 
self.include_exits(block)?; self.include_sync_aggregate(block)?; + self.include_bls_to_execution_changes(block)?; Ok(()) } /// Includes the block signature for `self.block` for verification. - pub fn include_block_proposal>( + pub fn include_block_proposal>( &mut self, block: &'a SignedBeaconBlock, block_root: Option, @@ -193,7 +195,7 @@ where } /// Includes the randao signature for `self.block` for verification. - pub fn include_randao_reveal>( + pub fn include_randao_reveal>( &mut self, block: &'a SignedBeaconBlock, verified_proposer_index: Option, @@ -210,7 +212,7 @@ where } /// Includes all signatures in `self.block.body.proposer_slashings` for verification. - pub fn include_proposer_slashings>( + pub fn include_proposer_slashings>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -239,7 +241,7 @@ where } /// Includes all signatures in `self.block.body.attester_slashings` for verification. - pub fn include_attester_slashings>( + pub fn include_attester_slashings>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -268,7 +270,7 @@ where } /// Includes all signatures in `self.block.body.attestations` for verification. - pub fn include_attestations>( + pub fn include_attestations>( &mut self, block: &'a SignedBeaconBlock, ctxt: &mut ConsensusContext, @@ -298,7 +300,7 @@ where } /// Includes all signatures in `self.block.body.voluntary_exits` for verification. - pub fn include_exits>( + pub fn include_exits>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -322,7 +324,7 @@ where } /// Include the signature of the block's sync aggregate (if it exists) for verification. - pub fn include_sync_aggregate>( + pub fn include_sync_aggregate>( &mut self, block: &'a SignedBeaconBlock, ) -> Result<()> { @@ -341,6 +343,24 @@ where Ok(()) } + /// Include the signature of the block's BLS to execution changes for verification. 
+ pub fn include_bls_to_execution_changes>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { + // To improve performance we might want to decompress the withdrawal pubkeys in parallel. + if let Ok(bls_to_execution_changes) = block.message().body().bls_to_execution_changes() { + for bls_to_execution_change in bls_to_execution_changes { + self.sets.push(bls_execution_change_signature_set( + self.state, + bls_to_execution_change, + self.spec, + )?); + } + } + Ok(()) + } + /// Verify all the signatures that have been included in `self`, returning `true` if and only if /// all the signatures are valid. /// diff --git a/consensus/state_processing/src/per_block_processing/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844.rs new file mode 100644 index 0000000000..23ab3c5c07 --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/eip4844.rs @@ -0,0 +1,2 @@ +#[allow(clippy::module_inception)] +pub mod eip4844; diff --git a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs new file mode 100644 index 0000000000..7826057a43 --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs @@ -0,0 +1,122 @@ +use crate::BlockProcessingError; +use eth2_hashing::hash_fixed; +use itertools::{EitherOrBoth, Itertools}; +use safe_arith::SafeArith; +use ssz::Decode; +use ssz_types::VariableList; +use types::consts::eip4844::{BLOB_TX_TYPE, VERSIONED_HASH_VERSION_KZG}; +use types::{ + AbstractExecPayload, BeaconBlockBodyRef, EthSpec, ExecPayload, KzgCommitment, Transaction, + Transactions, VersionedHash, +}; + +pub fn process_blob_kzg_commitments>( + block_body: BeaconBlockBodyRef, +) -> Result<(), BlockProcessingError> { + if let (Ok(payload), Ok(kzg_commitments)) = ( + block_body.execution_payload(), + block_body.blob_kzg_commitments(), + ) { + if let Some(transactions) = payload.transactions() { + if 
!verify_kzg_commitments_against_transactions::(transactions, kzg_commitments)? { + return Err(BlockProcessingError::BlobVersionHashMismatch); + } + } + } + + Ok(()) +} + +pub fn verify_kzg_commitments_against_transactions( + transactions: &Transactions, + kzg_commitments: &VariableList, +) -> Result { + let nested_iter = transactions + .into_iter() + .filter(|tx| { + tx.first() + .map(|tx_type| *tx_type == BLOB_TX_TYPE) + .unwrap_or(false) + }) + .map(|tx| tx_peek_blob_versioned_hashes::(tx)); + + itertools::process_results(nested_iter, |iter| { + let zipped_iter = iter + .flatten() + // Need to use `itertools::zip_longest` here because just zipping hides if one iter is shorter + // and `itertools::zip_eq` panics. + .zip_longest(kzg_commitments.into_iter()) + .enumerate() + .map(|(index, next)| match next { + EitherOrBoth::Both(hash, commitment) => Ok((hash?, commitment)), + // The number of versioned hashes from the blob transactions exceeds the number of + // commitments in the block. + EitherOrBoth::Left(_) => Err(BlockProcessingError::BlobNumCommitmentsMismatch { + commitments_processed_in_block: index, + commitments_processed_in_transactions: index.safe_add(1)?, + }), + // The number of commitments in the block exceeds the number of versioned hashes + // in the blob transactions. + EitherOrBoth::Right(_) => Err(BlockProcessingError::BlobNumCommitmentsMismatch { + commitments_processed_in_block: index.safe_add(1)?, + commitments_processed_in_transactions: index, + }), + }); + + itertools::process_results(zipped_iter, |mut iter| { + iter.all(|(tx_versioned_hash, commitment)| { + tx_versioned_hash == kzg_commitment_to_versioned_hash(commitment) + }) + }) + })? +} + +/// Only transactions of type `BLOB_TX_TYPE` should be passed into this function. 
+fn tx_peek_blob_versioned_hashes( + opaque_tx: &Transaction, +) -> Result< + impl IntoIterator> + '_, + BlockProcessingError, +> { + let tx_len = opaque_tx.len(); + let message_offset = 1.safe_add(u32::from_ssz_bytes(opaque_tx.get(1..5).ok_or( + BlockProcessingError::BlobVersionHashIndexOutOfBounds { + length: tx_len, + index: 5, + }, + )?)?)?; + + let message_offset_usize = message_offset as usize; + + // field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 + 32 = 188 + let blob_versioned_hashes_offset = message_offset.safe_add(u32::from_ssz_bytes( + opaque_tx + .get(message_offset_usize.safe_add(188)?..message_offset_usize.safe_add(192)?) + .ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds { + length: tx_len, + index: message_offset_usize.safe_add(192)?, + })?, + )?)?; + + let num_hashes = tx_len + .safe_sub(blob_versioned_hashes_offset as usize)? + .safe_div(32)?; + + Ok((0..num_hashes).into_iter().map(move |i| { + let next_version_hash_index = + (blob_versioned_hashes_offset as usize).safe_add(i.safe_mul(32)?)?; + let bytes = opaque_tx + .get(next_version_hash_index..next_version_hash_index.safe_add(32)?) 
+ .ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds { + length: tx_len, + index: (next_version_hash_index).safe_add(32)?, + })?; + Ok(VersionedHash::from_slice(bytes)) + })) +} + +fn kzg_commitment_to_versioned_hash(kzg_commitment: &KzgCommitment) -> VersionedHash { + let mut hashed_commitment = hash_fixed(&kzg_commitment.0); + hashed_commitment[0] = VERSIONED_HASH_VERSION_KZG; + VersionedHash::from(hashed_commitment) +} diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 71bd55f883..5c34afd593 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -2,6 +2,7 @@ use super::signature_sets::Error as SignatureSetError; use crate::ContextError; use merkle_proof::MerkleTreeError; use safe_arith::ArithError; +use ssz::DecodeError; use types::*; /// The error returned from the `per_block_processing` function. Indicates that a block is either @@ -48,12 +49,17 @@ pub enum BlockProcessingError { index: usize, reason: ExitInvalid, }, + BlsExecutionChangeInvalid { + index: usize, + reason: BlsExecutionChangeInvalid, + }, SyncAggregateInvalid { reason: SyncAggregateInvalid, }, BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), + SszDecodeError(DecodeError), MerkleTreeError(MerkleTreeError), ArithError(ArithError), InconsistentBlockFork(InconsistentFork), @@ -72,6 +78,23 @@ pub enum BlockProcessingError { }, ExecutionInvalid, ConsensusContext(ContextError), + WithdrawalsRootMismatch { + expected: Hash256, + found: Hash256, + }, + BlobVersionHashMismatch, + /// The number of commitments in blob transactions in the payload does not match the number + /// of commitments in the block. 
+ BlobNumCommitmentsMismatch { + commitments_processed_in_block: usize, + /// This number depic + commitments_processed_in_transactions: usize, + }, + BlobVersionHashIndexOutOfBounds { + index: usize, + length: usize, + }, + WithdrawalCredentialsInvalid, } impl From for BlockProcessingError { @@ -92,6 +115,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(error: DecodeError) -> Self { + BlockProcessingError::SszDecodeError(error) + } +} + impl From for BlockProcessingError { fn from(e: ArithError) -> Self { BlockProcessingError::ArithError(e) @@ -160,7 +189,8 @@ impl_into_block_processing_error_with_index!( IndexedAttestationInvalid, AttestationInvalid, DepositInvalid, - ExitInvalid + ExitInvalid, + BlsExecutionChangeInvalid ); pub type HeaderValidationError = BlockOperationError; @@ -170,6 +200,7 @@ pub type AttestationValidationError = BlockOperationError; pub type SyncCommitteeMessageValidationError = BlockOperationError; pub type DepositValidationError = BlockOperationError; pub type ExitValidationError = BlockOperationError; +pub type BlsExecutionChangeValidationError = BlockOperationError; #[derive(Debug, PartialEq, Clone)] pub enum BlockOperationError { @@ -274,7 +305,7 @@ pub enum AttesterSlashingInvalid { /// Describes why an object is invalid. #[derive(Debug, PartialEq, Clone)] pub enum AttestationInvalid { - /// Commmittee index exceeds number of committees in that slot. + /// Committee index exceeds number of committees in that slot. BadCommitteeIndex, /// Attestation included before the inclusion delay. IncludedTooEarly { @@ -385,6 +416,18 @@ pub enum ExitInvalid { SignatureSetError(SignatureSetError), } +#[derive(Debug, PartialEq, Clone)] +pub enum BlsExecutionChangeInvalid { + /// The specified validator is not in the state's validator registry. + ValidatorUnknown(u64), + /// Validator does not have BLS Withdrawal credentials before this change. 
+ NonBlsWithdrawalCredentials, + /// Provided BLS pubkey does not match withdrawal credentials. + WithdrawalCredentialsMismatch, + /// The signature is invalid. + BadSignature, +} + #[derive(Debug, PartialEq, Clone)] pub enum SyncAggregateInvalid { /// One or more of the aggregate public keys is invalid. diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9aa1e6d376..8a6163f29b 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -9,7 +9,7 @@ use crate::VerifySignatures; use safe_arith::SafeArith; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; -pub fn process_operations>( +pub fn process_operations>( state: &mut BeaconState, block_body: BeaconBlockBodyRef, verify_signatures: VerifySignatures, @@ -33,6 +33,11 @@ pub fn process_operations>( process_attestations(state, block_body, verify_signatures, ctxt, spec)?; process_deposits(state, block_body.deposits(), spec)?; process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?; + + if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() { + process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; + } + Ok(()) } @@ -232,7 +237,7 @@ pub fn process_attester_slashings( } /// Wrapper function to handle calling the correct version of `process_attestations` based on /// the fork. 
-pub fn process_attestations>( +pub fn process_attestations>( state: &mut BeaconState, block_body: BeaconBlockBodyRef, verify_signatures: VerifySignatures, @@ -249,7 +254,10 @@ pub fn process_attestations>( spec, )?; } - BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) => { + BeaconBlockBodyRef::Altair(_) + | BeaconBlockBodyRef::Merge(_) + | BeaconBlockBodyRef::Capella(_) + | BeaconBlockBodyRef::Eip4844(_) => { altair::process_attestations( state, block_body.attestations(), @@ -282,6 +290,31 @@ pub fn process_exits( Ok(()) } +/// Validates each `bls_to_execution_change` and updates the state +/// +/// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns +/// an `Err` describing the invalid object or cause of failure. +pub fn process_bls_to_execution_changes( + state: &mut BeaconState, + bls_to_execution_changes: &[SignedBlsToExecutionChange], + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + for (i, signed_address_change) in bls_to_execution_changes.iter().enumerate() { + verify_bls_to_execution_change(state, signed_address_change, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; + + state + .get_validator_mut(signed_address_change.message.validator_index as usize)? + .change_withdrawal_credentials( + &signed_address_change.message.to_execution_address, + spec, + ); + } + + Ok(()) +} + /// Validates each `Deposit` and updates the state, short-circuiting on an invalid object. 
/// /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 90bbdd56fe..c05d3f057d 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -7,12 +7,12 @@ use ssz::DecodeError; use std::borrow::Cow; use tree_hash::TreeHash; use types::{ - AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, - DepositData, Domain, Epoch, EthSpec, ExecPayload, Fork, Hash256, InconsistentFork, - IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, + AbstractExecPayload, AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, + BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, + InconsistentFork, IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, - SyncAggregatorSelectionData, Unsigned, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, + SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, }; pub type Result = std::result::Result; @@ -71,7 +71,7 @@ where } /// A signature set that is valid if a block was signed by the expected block producer. -pub fn block_proposal_signature_set<'a, T, F, Payload: ExecPayload>( +pub fn block_proposal_signature_set<'a, T, F, Payload: AbstractExecPayload>( state: &'a BeaconState, get_pubkey: F, signed_block: &'a SignedBeaconBlock, @@ -113,7 +113,7 @@ where /// Unlike `block_proposal_signature_set` this does **not** check that the proposer index is /// correct according to the shuffling. 
It should only be used if no suitable `BeaconState` is /// available. -pub fn block_proposal_signature_set_from_parts<'a, T, F, Payload: ExecPayload>( +pub fn block_proposal_signature_set_from_parts<'a, T, F, Payload: AbstractExecPayload>( signed_block: &'a SignedBeaconBlock, block_root: Option, proposer_index: u64, @@ -156,8 +156,34 @@ where )) } +pub fn bls_execution_change_signature_set<'a, T: EthSpec>( + state: &'a BeaconState, + signed_address_change: &'a SignedBlsToExecutionChange, + spec: &'a ChainSpec, +) -> Result> { + let domain = spec.compute_domain( + Domain::BlsToExecutionChange, + spec.genesis_fork_version, + state.genesis_validators_root(), + ); + let message = signed_address_change.message.signing_root(domain); + let signing_key = Cow::Owned( + signed_address_change + .message + .from_bls_pubkey + .decompress() + .map_err(|_| Error::PublicKeyDecompressionFailed)?, + ); + + Ok(SignatureSet::single_pubkey( + &signed_address_change.signature, + signing_key, + message, + )) +} + /// A signature set that is valid if the block proposers randao reveal signature is correct. 
-pub fn randao_signature_set<'a, T, F, Payload: ExecPayload>( +pub fn randao_signature_set<'a, T, F, Payload: AbstractExecPayload>( state: &'a BeaconState, get_pubkey: F, block: BeaconBlockRef<'a, T, Payload>, diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs new file mode 100644 index 0000000000..bb26799250 --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -0,0 +1,56 @@ +use super::errors::{BlockOperationError, BlsExecutionChangeInvalid as Invalid}; +use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; +use crate::VerifySignatures; +use eth2_hashing::hash; +use types::*; + +type Result = std::result::Result>; + +fn error(reason: Invalid) -> BlockOperationError { + BlockOperationError::invalid(reason) +} + +/// Indicates if a `BlsToExecutionChange` is valid to be included in a block, +/// where the block is being applied to the given `state`. +/// +/// Returns `Ok(())` if the `SignedBlsToExecutionChange` is valid, otherwise indicates the reason for invalidity. +pub fn verify_bls_to_execution_change( + state: &BeaconState, + signed_address_change: &SignedBlsToExecutionChange, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<()> { + let address_change = &signed_address_change.message; + + let validator = state + .validators() + .get(address_change.validator_index as usize) + .ok_or_else(|| error(Invalid::ValidatorUnknown(address_change.validator_index)))?; + + verify!( + validator + .withdrawal_credentials + .as_bytes() + .first() + .map(|byte| *byte == spec.bls_withdrawal_prefix_byte) + .unwrap_or(false), + Invalid::NonBlsWithdrawalCredentials + ); + + // Re-hashing the pubkey isn't necessary during block replay, so we may want to skip that in + // future. 
+ let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized()); + verify!( + validator.withdrawal_credentials.as_bytes().get(1..) == pubkey_hash.get(1..), + Invalid::WithdrawalCredentialsMismatch + ); + + if verify_signatures.is_true() { + verify!( + bls_execution_change_signature_set(state, signed_address_change, spec)?.verify(), + Invalid::BadSignature + ); + } + + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index cb90c67b56..996e39c27f 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -3,14 +3,16 @@ pub use epoch_processing_summary::EpochProcessingSummary; use errors::EpochProcessingError as Error; pub use justification_and_finalization_state::JustificationAndFinalizationState; -pub use registry_updates::process_registry_updates; use safe_arith::SafeArith; -pub use slashings::process_slashings; use types::{BeaconState, ChainSpec, EthSpec}; + +pub use registry_updates::process_registry_updates; +pub use slashings::process_slashings; pub use weigh_justification_and_finalization::weigh_justification_and_finalization; pub mod altair; pub mod base; +pub mod capella; pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; @@ -38,6 +40,7 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), + BeaconState::Capella(_) | BeaconState::Eip4844(_) => capella::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs new file mode 100644 index 0000000000..aaf301f29e --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -0,0 +1,78 @@ +use 
super::altair::inactivity_updates::process_inactivity_updates; +use super::altair::justification_and_finalization::process_justification_and_finalization; +use super::altair::participation_cache::ParticipationCache; +use super::altair::participation_flag_updates::process_participation_flag_updates; +use super::altair::rewards_and_penalties::process_rewards_and_penalties; +use super::altair::sync_committee_updates::process_sync_committee_updates; +use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; +use crate::per_epoch_processing::{ + effective_balance_updates::process_effective_balance_updates, + resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, +}; +use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; + +pub use historical_summaries_update::process_historical_summaries_update; + +mod historical_summaries_update; + +pub fn process_epoch( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + // Ensure the committee caches are built. + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.build_committee_cache(RelativeEpoch::Next, spec)?; + + // Pre-compute participating indices and total balances. + let participation_cache = ParticipationCache::new(state, spec)?; + let sync_committee = state.current_sync_committee()?.clone(); + + // Justification and finalization. + let justification_and_finalization_state = + process_justification_and_finalization(state, &participation_cache)?; + justification_and_finalization_state.apply_changes_to_state(state); + + process_inactivity_updates(state, &participation_cache, spec)?; + + // Rewards and Penalties. + process_rewards_and_penalties(state, &participation_cache, spec)?; + + // Registry Updates. + process_registry_updates(state, spec)?; + + // Slashings. 
+ process_slashings( + state, + participation_cache.current_epoch_total_active_balance(), + spec, + )?; + + // Reset eth1 data votes. + process_eth1_data_reset(state)?; + + // Update effective balances with hysteresis (lag). + process_effective_balance_updates(state, spec)?; + + // Reset slashings + process_slashings_reset(state)?; + + // Set randao mix + process_randao_mixes_reset(state)?; + + // Set historical summaries accumulator + process_historical_summaries_update(state)?; + + // Rotate current/previous epoch participation + process_participation_flag_updates(state)?; + + process_sync_committee_updates(state, spec)?; + + // Rotate the epoch caches to suit the epoch transition. + state.advance_caches(spec)?; + + Ok(EpochProcessingSummary::Altair { + participation_cache, + sync_committee, + }) +} diff --git a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs new file mode 100644 index 0000000000..9a87ceb605 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs @@ -0,0 +1,23 @@ +use crate::EpochProcessingError; +use safe_arith::SafeArith; +use types::historical_summary::HistoricalSummary; +use types::{BeaconState, EthSpec}; + +pub fn process_historical_summaries_update( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + // Set historical block root accumulator. + let next_epoch = state.next_epoch()?; + if next_epoch + .as_u64() + .safe_rem((T::slots_per_historical_root() as u64).safe_div(T::slots_per_epoch())?)? + == 0 + { + let summary = HistoricalSummary::new(state); + return state + .historical_summaries_mut()? 
+ .push(summary) + .map_err(Into::into); + } + Ok(()) +} diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 9018db65bc..8d2600bb41 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,4 +1,6 @@ -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use crate::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844, +}; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -55,6 +57,14 @@ pub fn per_slot_processing( if spec.bellatrix_fork_epoch == Some(state.current_epoch()) { upgrade_to_bellatrix(state, spec)?; } + // Capella. + if spec.capella_fork_epoch == Some(state.current_epoch()) { + upgrade_to_capella(state, spec)?; + } + // Eip4844 + if spec.eip4844_fork_epoch == Some(state.current_epoch()) { + upgrade_to_eip4844(state, spec)?; + } } Ok(summary) diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index fdf13c8281..01b6571056 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -1,5 +1,9 @@ pub mod altair; +pub mod capella; +pub mod eip4844; pub mod merge; pub use altair::upgrade_to_altair; +pub use capella::upgrade_to_capella; +pub use eip4844::upgrade_to_eip4844; pub use merge::upgrade_to_bellatrix; diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs new file mode 100644 index 0000000000..3b933fac37 --- /dev/null +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -0,0 +1,74 @@ +use ssz_types::VariableList; +use std::mem; +use types::{BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; + +/// Transform a `Merge` state into an `Capella` state. 
+pub fn upgrade_to_capella( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_merge_mut()?; + + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. + // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. + let post = BeaconState::Capella(BeaconStateCapella { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: pre.fork.current_version, + current_version: spec.capella_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // `Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // 
Execution + latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(), + // Capella + next_withdrawal_index: 0, + next_withdrawal_validator_index: 0, + historical_summaries: VariableList::default(), + // Caches + total_active_balance: pre.total_active_balance, + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + tree_hash_cache: mem::take(&mut pre.tree_hash_cache), + }); + + *pre_state = post; + + Ok(()) +} diff --git a/consensus/state_processing/src/upgrade/eip4844.rs b/consensus/state_processing/src/upgrade/eip4844.rs new file mode 100644 index 0000000000..4f6ff9d194 --- /dev/null +++ b/consensus/state_processing/src/upgrade/eip4844.rs @@ -0,0 +1,75 @@ +use std::mem; +use types::{BeaconState, BeaconStateEip4844, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; + +/// Transform a `Capella` state into an `Eip4844` state. +pub fn upgrade_to_eip4844( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_capella_mut()?; + + let previous_fork_version = pre.fork.current_version; + + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. + // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. 
+ let post = BeaconState::Eip4844(BeaconStateEip4844 { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: previous_fork_version, + current_version: spec.eip4844_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // `Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // Execution + latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(), + // Capella + next_withdrawal_index: pre.next_withdrawal_index, + next_withdrawal_validator_index: pre.next_withdrawal_validator_index, + historical_summaries: pre.historical_summaries.clone(), + // Caches + total_active_balance: pre.total_active_balance, + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: 
mem::take(&mut pre.exit_cache), + tree_hash_cache: mem::take(&mut pre.tree_hash_cache), + }); + + *pre_state = post; + + Ok(()) +} diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index 2e4ed441a4..c172466248 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -1,7 +1,7 @@ use std::mem; use types::{ BeaconState, BeaconStateError as Error, BeaconStateMerge, ChainSpec, EthSpec, - ExecutionPayloadHeader, Fork, + ExecutionPayloadHeaderMerge, Fork, }; /// Transform a `Altair` state into an `Merge` state. @@ -57,7 +57,7 @@ pub fn upgrade_to_bellatrix( current_sync_committee: pre.current_sync_committee.clone(), next_sync_committee: pre.next_sync_committee.clone(), // Execution - latest_execution_payload_header: >::default(), + latest_execution_payload_header: >::default(), // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 80dee28f62..50ac2ff3de 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -1,8 +1,10 @@ use crate::per_block_processing::{ errors::{ - AttesterSlashingValidationError, ExitValidationError, ProposerSlashingValidationError, + AttesterSlashingValidationError, BlsExecutionChangeValidationError, ExitValidationError, + ProposerSlashingValidationError, }, - verify_attester_slashing, verify_exit, verify_proposer_slashing, + verify_attester_slashing, verify_bls_to_execution_change, verify_exit, + verify_proposer_slashing, }; use crate::VerifySignatures; use derivative::Derivative; @@ -12,7 +14,7 @@ use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use types::{ AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing, - 
SignedVoluntaryExit, + SignedBlsToExecutionChange, SignedVoluntaryExit, }; const MAX_FORKS_VERIFIED_AGAINST: usize = 2; @@ -87,6 +89,7 @@ where } pub fn signature_is_still_valid(&self, current_fork: &Fork) -> bool { + // The .all() will return true if the iterator is empty. self.as_inner() .verification_epochs() .into_iter() @@ -118,6 +121,8 @@ pub trait VerifyOperation: Encode + Decode + Sized { /// Return the epochs at which parts of this message were verified. /// /// These need to map 1-to-1 to the `SigVerifiedOp::verified_against` for this type. + /// + /// If the message is valid across all forks it should return an empty smallvec. fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; } @@ -182,3 +187,21 @@ impl VerifyOperation for ProposerSlashing { .epoch(E::slots_per_epoch())] } } + +impl VerifyOperation for SignedBlsToExecutionChange { + type Error = BlsExecutionChangeValidationError; + + fn validate( + self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result, Self::Error> { + verify_bls_to_execution_change(state, &self, VerifySignatures::True, spec)?; + Ok(SigVerifiedOp::new(self, state)) + } + + #[allow(clippy::integer_arithmetic)] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + smallvec![] + } +} diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml index 731e2f177e..b2630d4bf6 100644 --- a/consensus/tree_hash/Cargo.toml +++ b/consensus/tree_hash/Cargo.toml @@ -12,7 +12,7 @@ tree_hash_derive = "0.4.0" types = { path = "../types" } beacon_chain = { path = "../../beacon_node/beacon_chain" } eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" [dependencies] ethereum-types = "0.14.1" diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs index cf05d2a3d5..899356f833 100644 --- a/consensus/tree_hash/src/impls.rs +++ b/consensus/tree_hash/src/impls.rs @@ -82,6 +82,26 @@ macro_rules! 
impl_for_lt_32byte_u8_array { impl_for_lt_32byte_u8_array!(4); impl_for_lt_32byte_u8_array!(32); +impl TreeHash for [u8; 48] { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Vector + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_root(&self) -> Hash256 { + let values_per_chunk = BYTES_PER_CHUNK; + let minimum_chunk_count = (48 + values_per_chunk - 1) / values_per_chunk; + merkle_root(self, minimum_chunk_count) + } +} + impl TreeHash for U128 { fn tree_hash_type() -> TreeHashType { TreeHashType::Basic diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 7fd730a514..276077c7f1 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -9,12 +9,13 @@ name = "benches" harness = false [dependencies] +serde-big-array = {version = "0.3.2", features = ["const-generics"]} merkle_proof = { path = "../../consensus/merkle_proof" } -bls = { path = "../../crypto/bls" } +bls = { path = "../../crypto/bls", features = ["arbitrary"] } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } -ethereum-types = "0.14.1" +ethereum-types = { version = "0.14.1", features = ["arbitrary"] } eth2_hashing = "0.3.0" hex = "0.4.2" int_to_bytes = { path = "../int_to_bytes" } @@ -25,12 +26,12 @@ safe_arith = { path = "../safe_arith" } serde = {version = "1.0.116" , features = ["rc"] } serde_derive = "1.0.116" slog = "2.5.2" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" -eth2_ssz_types = "0.2.2" -swap_or_not_shuffle = { path = "../swap_or_not_shuffle" } +eth2_ssz = { version = "0.4.1", features = ["arbitrary"] } +eth2_ssz_derive = "0.3.1" +eth2_ssz_types = { version = "0.2.2", features = ["arbitrary"] } +swap_or_not_shuffle = { 
path = "../swap_or_not_shuffle", features = ["arbitrary"] } test_random_derive = { path = "../../common/test_random_derive" } -tree_hash = "0.4.1" +tree_hash = { version = "0.4.1", features = ["arbitrary"] } tree_hash_derive = "0.4.0" rand_xorshift = "0.3.0" cached_tree_hash = { path = "../cached_tree_hash" } @@ -38,13 +39,15 @@ serde_yaml = "0.8.13" tempfile = "3.1.0" derivative = "2.1.1" rusqlite = { version = "0.28.0", features = ["bundled"], optional = true } -arbitrary = { version = "1.0", features = ["derive"], optional = true } +# The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by +# `AbstractExecPayload` +arbitrary = { version = "1.0", features = ["derive"] } eth2_serde_utils = "0.1.1" regex = "1.5.5" lazy_static = "1.4.0" parking_lot = "0.12.0" itertools = "0.10.0" -superstruct = "0.5.0" +superstruct = "0.6.0" metastruct = "0.1.0" serde_json = "1.0.74" smallvec = "1.8.0" @@ -63,12 +66,6 @@ default = ["sqlite", "legacy-arith"] # Allow saturating arithmetic on slots and epochs. Enabled by default, but deprecated. legacy-arith = [] sqlite = ["rusqlite"] -arbitrary-fuzz = [ - "arbitrary", - "ethereum-types/arbitrary", - "bls/arbitrary", - "eth2_ssz/arbitrary", - "eth2_ssz_types/arbitrary", - "swap_or_not_shuffle/arbitrary", - "tree_hash/arbitrary", -] +# The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. +# For simplicity `Arbitrary` is now derived regardless of the feature's presence. 
+arbitrary-fuzz = [] diff --git a/consensus/types/presets/gnosis/capella.yaml b/consensus/types/presets/gnosis/capella.yaml new file mode 100644 index 0000000000..913c2956ba --- /dev/null +++ b/consensus/types/presets/gnosis/capella.yaml @@ -0,0 +1,17 @@ +# Mainnet preset - Capella + +# Misc +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + +# Execution +# --------------------------------------------------------------- +# 2**4 (= 16) withdrawals +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/mainnet/capella.yaml b/consensus/types/presets/mainnet/capella.yaml new file mode 100644 index 0000000000..913c2956ba --- /dev/null +++ b/consensus/types/presets/mainnet/capella.yaml @@ -0,0 +1,17 @@ +# Mainnet preset - Capella + +# Misc +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + +# Execution +# --------------------------------------------------------------- +# 2**4 (= 16) withdrawals +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +# Withdrawals processing +# --------------------------------------------------------------- +# 2**14 (= 16384) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 diff --git a/consensus/types/presets/minimal/capella.yaml b/consensus/types/presets/minimal/capella.yaml new file mode 100644 index 0000000000..d27253de87 --- /dev/null +++ b/consensus/types/presets/minimal/capella.yaml @@ -0,0 +1,17 @@ +# Minimal preset - Capella + +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_BLS_TO_EXECUTION_CHANGES: 16 + + +# Execution +# --------------------------------------------------------------- +# [customized] 2**2 (= 
4) +MAX_WITHDRAWALS_PER_PAYLOAD: 4 + +# Withdrawals processing +# --------------------------------------------------------------- +# [customized] 2**4 (= 16) validators +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16 diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 19c8f8a0a8..39a0a28c0c 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -11,9 +11,20 @@ use tree_hash_derive::TreeHash; /// A Validators aggregate attestation and selection proof. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct AggregateAndProof { /// The index of the validator that created the attestation. #[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 12586e28d5..5c333e0d45 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -23,12 +23,21 @@ pub enum Error { /// Details an attestation that can be slashable. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, )] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct Attestation { pub aggregation_bits: BitList, pub data: AttestationData, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 8792a3c56d..c6a661c85d 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -10,8 +10,8 @@ use tree_hash_derive::TreeHash; /// The data upon which an attestation is based. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, Clone, PartialEq, diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index ecfa613ed4..87a9c932a4 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -1,8 +1,7 @@ use crate::*; use serde_derive::{Deserialize, Serialize}; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] +#[derive(arbitrary::Arbitrary, Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { /// The slot during which the attester must attest. pub slot: Slot, diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index b239f62e46..c563495074 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -9,12 +9,21 @@ use tree_hash_derive::TreeHash; /// Two conflicting attestations. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, )] #[derivative(PartialEq, Eq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct AttesterSlashing { pub attestation_1: IndexedAttestation, pub attestation_2: IndexedAttestation, diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 0ec1f9a374..0f26cd0e5e 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,6 +1,6 @@ use crate::beacon_block_body::{ - BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyEip4844, BeaconBlockBodyMerge, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; use crate::test_utils::TestRandom; use crate::*; @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. 
#[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes( derive( Debug, @@ -29,10 +29,14 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary ), - derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: ExecPayload")), - serde(bound = "T: EthSpec, Payload: ExecPayload", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), + derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: AbstractExecPayload")), + serde( + bound = "T: EthSpec, Payload: AbstractExecPayload", + deny_unknown_fields + ), + arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload"), ), ref_attributes( derive(Debug, PartialEq, TreeHash), @@ -41,14 +45,16 @@ use tree_hash_derive::TreeHash; map_ref_into(BeaconBlockBodyRef, BeaconBlock), map_ref_mut_into(BeaconBlockBodyRefMut) )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] -#[serde(bound = "T: EthSpec, Payload: ExecPayload")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] +#[arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct BeaconBlock = FullPayload> { +pub struct BeaconBlock = FullPayload> { #[superstruct(getter(copy))] pub slot: Slot, #[superstruct(getter(copy))] @@ -64,23 +70,34 @@ pub struct BeaconBlock = FullPayload> { pub body: BeaconBlockBodyAltair, #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] pub body: BeaconBlockBodyMerge, + #[superstruct(only(Capella), partial_getter(rename = "body_capella"))] + pub body: BeaconBlockBodyCapella, + #[superstruct(only(Eip4844), 
partial_getter(rename = "body_eip4844"))] + pub body: BeaconBlockBodyEip4844, } pub type BlindedBeaconBlock = BeaconBlock>; -impl> SignedRoot for BeaconBlock {} -impl<'a, T: EthSpec, Payload: ExecPayload> SignedRoot for BeaconBlockRef<'a, T, Payload> {} +impl> SignedRoot for BeaconBlock {} +impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignedRoot + for BeaconBlockRef<'a, T, Payload> +{ +} -impl> BeaconBlock { +/// Empty block trait for each block variant to implement. +pub trait EmptyBlock { + /// Returns an empty block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self; +} + +impl> BeaconBlock { /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { - if spec.bellatrix_fork_epoch == Some(T::genesis_epoch()) { - Self::Merge(BeaconBlockMerge::empty(spec)) - } else if spec.altair_fork_epoch == Some(T::genesis_epoch()) { - Self::Altair(BeaconBlockAltair::empty(spec)) - } else { - Self::Base(BeaconBlockBase::empty(spec)) - } + map_fork_name!( + spec.fork_name_at_epoch(T::genesis_epoch()), + Self, + EmptyBlock::empty(spec) + ) } /// Custom SSZ decoder that takes a `ChainSpec` as context. @@ -109,13 +126,12 @@ impl> BeaconBlock { /// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based /// on the fork slot. 
pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { - BeaconBlockMerge::from_ssz_bytes(bytes) - .map(BeaconBlock::Merge) - .or_else(|_| { - BeaconBlockAltair::from_ssz_bytes(bytes) - .map(BeaconBlock::Altair) - .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) - }) + BeaconBlockEip4844::from_ssz_bytes(bytes) + .map(BeaconBlock::Eip4844) + .or_else(|_| BeaconBlockCapella::from_ssz_bytes(bytes).map(BeaconBlock::Capella)) + .or_else(|_| BeaconBlockMerge::from_ssz_bytes(bytes).map(BeaconBlock::Merge)) + .or_else(|_| BeaconBlockAltair::from_ssz_bytes(bytes).map(BeaconBlock::Altair)) + .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) } /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. @@ -178,7 +194,7 @@ impl> BeaconBlock { } } -impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, T, Payload> { /// Returns the name of the fork pertaining to `self`. /// /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork @@ -189,6 +205,8 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { BeaconBlockRef::Base { .. } => ForkName::Base, BeaconBlockRef::Altair { .. } => ForkName::Altair, BeaconBlockRef::Merge { .. } => ForkName::Merge, + BeaconBlockRef::Capella { .. } => ForkName::Capella, + BeaconBlockRef::Eip4844 { .. } => ForkName::Eip4844, }; if fork_at_slot == object_fork { @@ -242,12 +260,12 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. 
- pub fn execution_payload(&self) -> Result<&Payload, Error> { + pub fn execution_payload(&self) -> Result, Error> { self.body().execution_payload() } } -impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRefMut<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockRefMut<'a, T, Payload> { /// Convert a mutable reference to a beacon block to a mutable ref to its body. pub fn body_mut(self) -> BeaconBlockBodyRefMut<'a, T, Payload> { map_beacon_block_ref_mut_into_beacon_block_body_ref_mut!(&'a _, self, |block, cons| cons( @@ -256,9 +274,8 @@ impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRefMut<'a, T, Payload> } } -impl> BeaconBlockBase { - /// Returns an empty block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { +impl> EmptyBlock for BeaconBlockBase { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockBase { slot: spec.genesis_slot, proposer_index: 0, @@ -281,7 +298,9 @@ impl> BeaconBlockBase { }, } } +} +impl> BeaconBlockBase { /// Return a block where the block has maximum size. pub fn full(spec: &ChainSpec) -> Self { let header = BeaconBlockHeader { @@ -377,9 +396,9 @@ impl> BeaconBlockBase { } } -impl> BeaconBlockAltair { +impl> EmptyBlock for BeaconBlockAltair { /// Returns an empty Altair block to be used during genesis. - pub fn empty(spec: &ChainSpec) -> Self { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockAltair { slot: spec.genesis_slot, proposer_index: 0, @@ -403,7 +422,9 @@ impl> BeaconBlockAltair { }, } } +} +impl> BeaconBlockAltair { /// Return an Altair block where the block has maximum size. pub fn full(spec: &ChainSpec) -> Self { let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); @@ -436,9 +457,9 @@ impl> BeaconBlockAltair { } } -impl> BeaconBlockMerge { +impl> EmptyBlock for BeaconBlockMerge { /// Returns an empty Merge block to be used during genesis. 
- pub fn empty(spec: &ChainSpec) -> Self { + fn empty(spec: &ChainSpec) -> Self { BeaconBlockMerge { slot: spec.genesis_slot, proposer_index: 0, @@ -458,7 +479,112 @@ impl> BeaconBlockMerge { deposits: VariableList::empty(), voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), - execution_payload: Payload::default(), + execution_payload: Payload::Merge::default(), + }, + } + } +} + +impl> BeaconBlockCapella { + /// Return a Capella block where the block has maximum size. + pub fn full(spec: &ChainSpec) -> Self { + let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); + let bls_to_execution_changes = vec![ + SignedBlsToExecutionChange { + message: BlsToExecutionChange { + validator_index: 0, + from_bls_pubkey: PublicKeyBytes::empty(), + to_execution_address: Address::zero(), + }, + signature: Signature::empty() + }; + T::max_bls_to_execution_changes() + ] + .into(); + let sync_aggregate = SyncAggregate { + sync_committee_signature: AggregateSignature::empty(), + sync_committee_bits: BitVector::default(), + }; + BeaconBlockCapella { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + proposer_slashings: base_block.body.proposer_slashings, + attester_slashings: base_block.body.attester_slashings, + attestations: base_block.body.attestations, + deposits: base_block.body.deposits, + voluntary_exits: base_block.body.voluntary_exits, + bls_to_execution_changes, + sync_aggregate, + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + execution_payload: Payload::Capella::default(), + }, + } + } +} + +impl> EmptyBlock for BeaconBlockCapella { + /// Returns an empty Capella block to be used during genesis. 
+ fn empty(spec: &ChainSpec) -> Self { + BeaconBlockCapella { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Capella::default(), + bls_to_execution_changes: VariableList::empty(), + }, + } + } +} + +impl> EmptyBlock for BeaconBlockEip4844 { + /// Returns an empty Eip4844 block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self { + BeaconBlockEip4844 { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyEip4844 { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Eip4844::default(), + bls_to_execution_changes: VariableList::empty(), + blob_kzg_commitments: VariableList::empty(), }, } } @@ -533,7 +659,7 @@ macro_rules! impl_from { parent_root, state_root, body, - }, payload) + }, payload.map(Into::into)) } } } @@ -542,6 +668,8 @@ macro_rules! 
impl_from { impl_from!(BeaconBlockBase, >, >, |body: BeaconBlockBodyBase<_, _>| body.into()); impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); +impl_from!(BeaconBlockCapella, >, >, |body: BeaconBlockBodyCapella<_, _>| body.into()); +impl_from!(BeaconBlockEip4844, >, >, |body: BeaconBlockBodyEip4844<_, _>| body.into()); // We can clone blocks with payloads to blocks without payloads, without cloning the payload. macro_rules! impl_clone_as_blinded { @@ -572,6 +700,8 @@ macro_rules! impl_clone_as_blinded { impl_clone_as_blinded!(BeaconBlockBase, >, >); impl_clone_as_blinded!(BeaconBlockAltair, >, >); impl_clone_as_blinded!(BeaconBlockMerge, >, >); +impl_clone_as_blinded!(BeaconBlockCapella, >, >); +impl_clone_as_blinded!(BeaconBlockEip4844, >, >); // A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the // execution payload. @@ -601,6 +731,24 @@ impl From>> } } +impl> ForkVersionDeserialize + for BeaconBlock +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "BeaconBlock failed to deserialize: {:?}", + e + )))? 
+ )) + } +} + #[cfg(test)] mod tests { use super::*; @@ -650,19 +798,65 @@ mod tests { }); } + #[test] + fn roundtrip_capella_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Capella.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockCapella { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyCapella::random_for_test(rng), + }; + let block = BeaconBlock::Capella(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + + #[test] + fn roundtrip_4844_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Eip4844.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockEip4844 { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyEip4844::random_for_test(rng), + }; + let block = BeaconBlock::Eip4844(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; - let spec = E::default_spec(); + let mut spec = E::default_spec(); let rng = &mut XorShiftRng::from_seed([42; 16]); - let fork_epoch = spec.altair_fork_epoch.unwrap(); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); - let base_epoch = fork_epoch.saturating_sub(1_u64); + let base_epoch = altair_fork_epoch.saturating_sub(1_u64); let base_slot = base_epoch.end_slot(E::slots_per_epoch()); - let altair_epoch = fork_epoch; + let altair_epoch = altair_fork_epoch; let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); + let capella_epoch = altair_fork_epoch + 1; + let 
capella_slot = capella_epoch.start_slot(E::slots_per_epoch()); + let eip4844_epoch = capella_epoch + 1; + let eip4844_slot = eip4844_epoch.start_slot(E::slots_per_epoch()); + + spec.altair_fork_epoch = Some(altair_epoch); + spec.capella_fork_epoch = Some(capella_epoch); + spec.eip4844_fork_epoch = Some(eip4844_epoch); // BeaconBlockBase { @@ -707,5 +901,49 @@ mod tests { BeaconBlock::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec) .expect_err("bad altair block cannot be decoded"); } + + // BeaconBlockCapella + { + let good_block = BeaconBlock::Capella(BeaconBlockCapella { + slot: capella_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have an Capella block with a epoch lower than the fork epoch. + let bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = altair_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good capella block can be decoded"), + good_block + ); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad capella block cannot be decoded"); + } + + // BeaconBlockEip4844 + { + let good_block = BeaconBlock::Eip4844(BeaconBlockEip4844 { + slot: eip4844_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have an Capella block with a epoch lower than the fork epoch. 
+ let bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = capella_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good eip4844 block can be decoded"), + good_block + ); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad eip4844 block cannot be decoded"); + } } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 381a9bd43e..07c8f898b3 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,3 +1,4 @@ +use crate::kzg_commitment::KzgCommitment; use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; @@ -13,7 +14,7 @@ use tree_hash_derive::TreeHash; /// /// This *superstruct* abstracts over the hard-fork. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes( derive( Debug, @@ -25,20 +26,24 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, Derivative, + arbitrary::Arbitrary ), - derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: ExecPayload")), - serde(bound = "T: EthSpec, Payload: ExecPayload", deny_unknown_fields), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: AbstractExecPayload")), + serde( + bound = "T: EthSpec, Payload: AbstractExecPayload", + deny_unknown_fields + ), + arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload"), ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative, arbitrary::Arbitrary)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] -#[serde(bound = "T: EthSpec, Payload: ExecPayload")] -#[cfg_attr(feature = 
"arbitrary-fuzz", derive(arbitrary::Arbitrary))] -pub struct BeaconBlockBody = FullPayload> { +#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] +#[arbitrary(bound = "T: EthSpec, Payload: AbstractExecPayload")] +pub struct BeaconBlockBody = FullPayload> { pub randao_reveal: Signature, pub eth1_data: Eth1Data, pub graffiti: Graffiti, @@ -47,21 +52,50 @@ pub struct BeaconBlockBody = FullPayload> pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded // payloads. - #[superstruct(only(Merge))] + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] #[serde(flatten)] - pub execution_payload: Payload, + pub execution_payload: Payload::Merge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + #[serde(flatten)] + pub execution_payload: Payload::Capella, + #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] + #[serde(flatten)] + pub execution_payload: Payload::Eip4844, + #[superstruct(only(Capella, Eip4844))] + pub bls_to_execution_changes: + VariableList, + #[superstruct(only(Eip4844))] + pub blob_kzg_commitments: VariableList, #[superstruct(only(Base, Altair))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[serde(skip)] + #[arbitrary(default)] pub _phantom: PhantomData, } +impl> BeaconBlockBody { + pub fn execution_payload(&self) -> Result, Error> { + self.to_ref().execution_payload() + } +} + +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Payload> { + pub fn execution_payload(&self) -> Result, Error> { + match self { + Self::Base(_) | Self::Altair(_) 
=> Err(Error::IncorrectStateVariant), + Self::Merge(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Eip4844(body) => Ok(Payload::Ref::from(&body.execution_payload)), + } + } +} + impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { /// Get the fork_name of this object pub fn fork_name(self) -> ForkName { @@ -69,6 +103,8 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { BeaconBlockBodyRef::Base { .. } => ForkName::Base, BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, + BeaconBlockBodyRef::Capella { .. } => ForkName::Capella, + BeaconBlockBodyRef::Eip4844 { .. } => ForkName::Eip4844, } } } @@ -214,7 +250,7 @@ impl From>> impl From>> for ( BeaconBlockBodyMerge>, - Option>, + Option>, ) { fn from(body: BeaconBlockBodyMerge>) -> Self { @@ -228,7 +264,7 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadMerge { execution_payload }, } = body; ( @@ -242,7 +278,7 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayload { + execution_payload: BlindedPayloadMerge { execution_payload_header: From::from(&execution_payload), }, }, @@ -251,6 +287,92 @@ impl From>> } } +impl From>> + for ( + BeaconBlockBodyCapella>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyCapella>) -> Self { + let BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadCapella { execution_payload }, + bls_to_execution_changes, + } = body; + + ( + BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadCapella { + 
execution_payload_header: From::from(&execution_payload), + }, + bls_to_execution_changes, + }, + Some(execution_payload), + ) + } +} + +impl From>> + for ( + BeaconBlockBodyEip4844>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyEip4844>) -> Self { + let BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadEip4844 { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + } = body; + + ( + BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadEip4844 { + execution_payload_header: From::from(&execution_payload), + }, + bls_to_execution_changes, + blob_kzg_commitments, + }, + Some(execution_payload), + ) + } +} + // We can clone a full block into a blinded block, without cloning the payload. 
impl BeaconBlockBodyBase> { pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase> { @@ -278,7 +400,7 @@ impl BeaconBlockBodyMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadMerge { execution_payload }, } = self; BeaconBlockBodyMerge { @@ -291,13 +413,83 @@ impl BeaconBlockBodyMerge> { deposits: deposits.clone(), voluntary_exits: voluntary_exits.clone(), sync_aggregate: sync_aggregate.clone(), - execution_payload: BlindedPayload { - execution_payload_header: From::from(execution_payload), + execution_payload: BlindedPayloadMerge { + execution_payload_header: execution_payload.into(), }, } } } +impl BeaconBlockBodyCapella> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyCapella> { + let BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadCapella { execution_payload }, + bls_to_execution_changes, + } = self; + + BeaconBlockBodyCapella { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayloadCapella { + execution_payload_header: execution_payload.into(), + }, + bls_to_execution_changes: bls_to_execution_changes.clone(), + } + } +} + +impl BeaconBlockBodyEip4844> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyEip4844> { + let BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadEip4844 { execution_payload }, + bls_to_execution_changes, + 
blob_kzg_commitments, + } = self; + + BeaconBlockBodyEip4844 { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayloadEip4844 { + execution_payload_header: execution_payload.into(), + }, + bls_to_execution_changes: bls_to_execution_changes.clone(), + blob_kzg_commitments: blob_kzg_commitments.clone(), + } + } +} + impl From>> for ( BeaconBlockBody>, @@ -307,7 +499,7 @@ impl From>> fn from(body: BeaconBlockBody>) -> Self { map_beacon_block_body!(body, |inner, cons| { let (block, payload) = inner.into(); - (cons(block), payload) + (cons(block), payload.map(Into::into)) }) } } diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index cca8fef841..c6d6678f31 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -10,9 +10,19 @@ use tree_hash_derive::TreeHash; /// A header of a `BeaconBlock`. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct BeaconBlockHeader { pub slot: Slot, diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/beacon_committee.rs index 6483c009af..ad293c3a3b 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/beacon_committee.rs @@ -17,8 +17,7 @@ impl<'a> BeaconCommittee<'a> { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Clone, Debug, PartialEq)] +#[derive(arbitrary::Arbitrary, Default, Clone, Debug, PartialEq)] pub struct OwnedBeaconCommittee { pub slot: Slot, pub index: CommitteeIndex, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 12d44741f9..c98df48d14 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -14,6 +14,7 @@ use ssz::{ssz_encode, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; use std::convert::TryInto; +use std::hash::Hash; use std::{fmt, mem, sync::Arc}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; @@ -25,6 +26,7 @@ pub use self::committee_cache::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, CommitteeCache, }; +use crate::historical_summary::HistoricalSummary; pub use clone_config::CloneConfig; pub use eth_spec::*; pub use iter::BlockRootsIter; @@ -120,6 +122,7 @@ pub enum Error { ArithError(ArithError), MissingBeaconBlock(SignedBeaconBlockHash), MissingBeaconState(BeaconStateHash), + PayloadConversionLogicFlaw, SyncCommitteeNotKnown { current_epoch: Epoch, epoch: Epoch, @@ -144,8 
+147,7 @@ impl AllowNextEpoch { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Eq, Hash, Clone, Copy)] +#[derive(PartialEq, Eq, Hash, Clone, Copy, arbitrary::Arbitrary)] pub struct BeaconStateHash(Hash256); impl fmt::Debug for BeaconStateHash { @@ -174,7 +176,7 @@ impl From for Hash256 { /// The state of the `BeaconChain` at some slot. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes( derive( Derivative, @@ -187,18 +189,19 @@ impl From for Hash256 { TreeHash, TestRandom, CompareFields, + arbitrary::Arbitrary ), serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec"), derivative(Clone), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash)] +#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary)] #[serde(untagged)] #[serde(bound = "T: EthSpec")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[arbitrary(bound = "T: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] pub struct BeaconState @@ -222,6 +225,7 @@ where pub block_roots: FixedVector, #[compare_fields(as_slice)] pub state_roots: FixedVector, + // Frozen in Capella, replaced by historical_summaries pub historical_roots: VariableList, // Ethereum 1.0 chain data @@ -252,9 +256,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub 
current_epoch_participation: VariableList, // Finality @@ -269,18 +273,42 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge))] + #[superstruct(only(Altair, Merge, Capella, Eip4844))] pub next_sync_committee: Arc>, // Execution - #[superstruct(only(Merge))] - pub latest_execution_payload_header: ExecutionPayloadHeader, + #[superstruct( + only(Merge), + partial_getter(rename = "latest_execution_payload_header_merge") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct( + only(Capella), + partial_getter(rename = "latest_execution_payload_header_capella") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct( + only(Eip4844), + partial_getter(rename = "latest_execution_payload_header_eip4844") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844, + + // Capella + #[superstruct(only(Capella, Eip4844), partial_getter(copy))] + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub next_withdrawal_index: u64, + #[superstruct(only(Capella, Eip4844), partial_getter(copy))] + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub next_withdrawal_validator_index: u64, + // Deep history valid from Capella onwards. + #[superstruct(only(Capella, Eip4844))] + pub historical_summaries: VariableList, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] @@ -391,6 +419,8 @@ impl BeaconState { BeaconState::Base { .. } => ForkName::Base, BeaconState::Altair { .. } => ForkName::Altair, BeaconState::Merge { .. } => ForkName::Merge, + BeaconState::Capella { .. } => ForkName::Capella, + BeaconState::Eip4844 { .. 
} => ForkName::Eip4844, }; if fork_at_slot == object_fork { @@ -680,6 +710,39 @@ impl BeaconState { .ok_or(Error::ShuffleIndexOutOfBounds(index)) } + /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. + pub fn latest_execution_payload_header(&self) -> Result, Error> { + match self { + BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Merge(state) => Ok(ExecutionPayloadHeaderRef::Merge( + &state.latest_execution_payload_header, + )), + BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRef::Capella( + &state.latest_execution_payload_header, + )), + BeaconState::Eip4844(state) => Ok(ExecutionPayloadHeaderRef::Eip4844( + &state.latest_execution_payload_header, + )), + } + } + + pub fn latest_execution_payload_header_mut( + &mut self, + ) -> Result, Error> { + match self { + BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Merge(state) => Ok(ExecutionPayloadHeaderRefMut::Merge( + &mut state.latest_execution_payload_header, + )), + BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRefMut::Capella( + &mut state.latest_execution_payload_header, + )), + BeaconState::Eip4844(state) => Ok(ExecutionPayloadHeaderRefMut::Eip4844( + &mut state.latest_execution_payload_header, + )), + } + } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. 
/// /// Spec v0.12.1 @@ -1104,6 +1167,8 @@ impl BeaconState { BeaconState::Base(state) => (&mut state.validators, &mut state.balances), BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), + BeaconState::Capella(state) => (&mut state.validators, &mut state.balances), + BeaconState::Eip4844(state) => (&mut state.validators, &mut state.balances), } } @@ -1300,12 +1365,16 @@ impl BeaconState { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Eip4844(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == self.previous_epoch() { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Eip4844(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -1610,6 +1679,8 @@ impl BeaconState { BeaconState::Base(inner) => BeaconState::Base(inner.clone()), BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), + BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()), + BeaconState::Eip4844(inner) => BeaconState::Eip4844(inner.clone()), }; if config.committee_caches { *res.committee_caches_mut() = self.committee_caches().clone(); @@ -1777,7 +1848,25 @@ impl CompareFields for BeaconState { (BeaconState::Base(x), BeaconState::Base(y)) => x.compare_fields(y), (BeaconState::Altair(x), BeaconState::Altair(y)) => 
x.compare_fields(y), (BeaconState::Merge(x), BeaconState::Merge(y)) => x.compare_fields(y), + (BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y), + (BeaconState::Eip4844(x), BeaconState::Eip4844(y)) => x.compare_fields(y), _ => panic!("compare_fields: mismatched state variants",), } } } + +impl ForkVersionDeserialize for BeaconState { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "BeaconState failed to deserialize: {:?}", + e + )))? + )) + } +} diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 03adaf3d44..8afef1183b 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -336,7 +336,6 @@ pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> V active } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for CommitteeCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index 1c199c0475..b657d62ae6 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -61,7 +61,6 @@ impl ExitCache { } } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for ExitCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/beacon_state/pubkey_cache.rs index d81801e77d..590ea30f99 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/beacon_state/pubkey_cache.rs @@ -42,7 +42,6 @@ impl 
PubkeyCache { } } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for PubkeyCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index abca10e372..d63eaafc4b 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -2,7 +2,7 @@ use crate::test_utils::*; use crate::test_utils::{SeedableRng, XorShiftRng}; use beacon_chain::test_utils::{ - interop_genesis_state, test_spec, BeaconChainHarness, EphemeralHarnessType, + interop_genesis_state_with_eth1, test_spec, BeaconChainHarness, EphemeralHarnessType, DEFAULT_ETH1_BLOCK_HASH, }; use beacon_chain::types::{ @@ -551,7 +551,7 @@ fn tree_hash_cache_linear_history_long_skip() { let spec = &test_spec::(); // This state has a cache that advances normally each slot. - let mut state: BeaconState = interop_genesis_state( + let mut state: BeaconState = interop_genesis_state_with_eth1( &keypairs, 0, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 2fc56bdc01..efc6573d2b 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -3,6 +3,7 @@ #![allow(clippy::indexing_slicing)] use super::Error; +use crate::historical_summary::HistoricalSummaryCache; use crate::{BeaconState, EthSpec, Hash256, ParticipationList, Slot, Unsigned, Validator}; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; use rayon::prelude::*; @@ -142,6 +143,7 @@ pub struct BeaconTreeHashCacheInner { block_roots: TreeHashCache, state_roots: TreeHashCache, historical_roots: TreeHashCache, + historical_summaries: OptionalTreeHashCache, balances: TreeHashCache, randao_mixes: TreeHashCache, slashings: TreeHashCache, @@ -164,6 +166,14 @@ impl 
BeaconTreeHashCacheInner { let historical_roots = state .historical_roots() .new_tree_hash_cache(&mut fixed_arena); + let historical_summaries = OptionalTreeHashCache::new( + state + .historical_summaries() + .ok() + .map(HistoricalSummaryCache::new) + .as_ref(), + ); + let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena); let validators = ValidatorsListTreeHashCache::new::(state.validators()); @@ -200,6 +210,7 @@ impl BeaconTreeHashCacheInner { block_roots, state_roots, historical_roots, + historical_summaries, balances, randao_mixes, slashings, @@ -249,6 +260,7 @@ impl BeaconTreeHashCacheInner { .slashings() .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, ]; + // Participation if let BeaconState::Base(state) = state { leaves.push(state.previous_epoch_attestations.tree_hash_root()); @@ -291,6 +303,24 @@ impl BeaconTreeHashCacheInner { if let Ok(payload_header) = state.latest_execution_payload_header() { leaves.push(payload_header.tree_hash_root()); } + + // Withdrawal indices (Capella and later). + if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { + leaves.push(next_withdrawal_index.tree_hash_root()); + } + if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { + leaves.push(next_withdrawal_validator_index.tree_hash_root()); + } + + // Historical roots/summaries (Capella and later). 
+ if let Ok(historical_summaries) = state.historical_summaries() { + leaves.push( + self.historical_summaries.recalculate_tree_hash_root( + &HistoricalSummaryCache::new(historical_summaries), + )?, + ); + } + Ok(leaves) } @@ -570,7 +600,6 @@ impl OptionalTreeHashCacheInner { } } -#[cfg(feature = "arbitrary-fuzz")] impl arbitrary::Arbitrary<'_> for BeaconTreeHashCache { fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { Ok(Self::default()) diff --git a/consensus/types/src/blobs_sidecar.rs b/consensus/types/src/blobs_sidecar.rs new file mode 100644 index 0000000000..227be3e2f8 --- /dev/null +++ b/consensus/types/src/blobs_sidecar.rs @@ -0,0 +1,43 @@ +use crate::kzg_proof::KzgProof; +use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot}; +use serde_derive::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use tree_hash_derive::TreeHash; + +#[derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + PartialEq, + Default, + arbitrary::Arbitrary, +)] +#[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] +pub struct BlobsSidecar { + pub beacon_block_root: Hash256, + pub beacon_block_slot: Slot, + pub blobs: VariableList, T::MaxBlobsPerBlock>, + pub kzg_aggregate_proof: KzgProof, +} + +impl SignedRoot for BlobsSidecar {} + +impl BlobsSidecar { + pub fn empty() -> Self { + Self::default() + } + #[allow(clippy::integer_arithmetic)] + pub fn max_size() -> usize { + // Fixed part + Self::empty().as_ssz_bytes().len() + // Max size of variable length `blobs` field + + (T::max_blobs_per_block() * as Encode>::ssz_fixed_len()) + } +} diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs new file mode 100644 index 0000000000..b279515bd1 --- /dev/null +++ b/consensus/types/src/bls_to_execution_change.rs @@ -0,0 +1,57 @@ +use crate::test_utils::TestRandom; +use crate::*; +use bls::PublicKeyBytes; +use 
serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct BlsToExecutionChange { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + pub from_bls_pubkey: PublicKeyBytes, + pub to_execution_address: Address, +} + +impl SignedRoot for BlsToExecutionChange {} + +impl BlsToExecutionChange { + pub fn sign( + self, + secret_key: &SecretKey, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> SignedBlsToExecutionChange { + let domain = spec.compute_domain( + Domain::BlsToExecutionChange, + spec.genesis_fork_version, + genesis_validators_root, + ); + let message = self.signing_root(domain); + SignedBlsToExecutionChange { + message: self, + signature: secret_key.sign(message), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(BlsToExecutionChange); +} diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 047bceae7e..e922e81c70 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,4 +1,7 @@ -use crate::{ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, Uint256}; +use crate::{ + AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, ForkName, + ForkVersionDeserialize, SignedRoot, Uint256, +}; use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; @@ -10,7 +13,7 @@ use tree_hash_derive::TreeHash; #[serde_as] #[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload")] -pub struct BuilderBid> { +pub struct BuilderBid> { #[serde_as(as = "BlindedPayloadAsHeader")] pub header: Payload, #[serde(with = 
"eth2_serde_utils::quoted_u256")] @@ -21,16 +24,70 @@ pub struct BuilderBid> { _phantom_data: PhantomData, } -impl> SignedRoot for BuilderBid {} +impl> SignedRoot for BuilderBid {} /// Validator registration, for use in interacting with servers implementing the builder API. #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload")] -pub struct SignedBuilderBid> { +pub struct SignedBuilderBid> { pub message: BuilderBid, pub signature: Signature, } +impl> ForkVersionDeserialize + for BuilderBid +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |_| { + serde::de::Error::custom( + "BuilderBid failed to deserialize: unable to convert payload header to payload", + ) + }; + + #[derive(Deserialize)] + struct Helper { + header: serde_json::Value, + #[serde(with = "eth2_serde_utils::quoted_u256")] + value: Uint256, + pubkey: PublicKeyBytes, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + let payload_header = + ExecutionPayloadHeader::deserialize_by_fork::<'de, D>(helper.header, fork_name)?; + + Ok(Self { + header: Payload::try_from(payload_header).map_err(convert_err)?, + value: helper.value, + pubkey: helper.pubkey, + _phantom_data: Default::default(), + }) + } +} + +impl> ForkVersionDeserialize + for SignedBuilderBid +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + struct Helper { + pub message: serde_json::Value, + pub signature: Signature, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + message: BuilderBid::deserialize_by_fork::<'de, D>(helper.message, fork_name)?, + signature: helper.signature, + }) + } +} + struct BlindedPayloadAsHeader(PhantomData); impl> SerializeAs for 
BlindedPayloadAsHeader { @@ -42,7 +99,7 @@ impl> SerializeAs for BlindedPayloa } } -impl<'de, E: EthSpec, Payload: ExecPayload> DeserializeAs<'de, Payload> +impl<'de, E: EthSpec, Payload: AbstractExecPayload> DeserializeAs<'de, Payload> for BlindedPayloadAsHeader { fn deserialize_as(deserializer: D) -> Result @@ -55,7 +112,7 @@ impl<'de, E: EthSpec, Payload: ExecPayload> DeserializeAs<'de, Payload> } } -impl> SignedBuilderBid { +impl> SignedBuilderBid { pub fn verify_signature(&self, spec: &ChainSpec) -> bool { self.message .pubkey diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index c8333868cd..1f947c9e7b 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -11,8 +11,10 @@ use tree_hash::TreeHash; /// Each of the BLS signature domains. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Domain { + BlsToExecutionChange, BeaconProposer, BeaconAttester, + BlobsSideCar, Randao, Deposit, VoluntaryExit, @@ -27,8 +29,7 @@ pub enum Domain { /// Lighthouse's internal configuration struct. /// /// Contains a mixture of "preset" and "config" values w.r.t to the EF definitions. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Debug, Clone)] +#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] pub struct ChainSpec { /* * Config name @@ -71,6 +72,7 @@ pub struct ChainSpec { */ pub genesis_fork_version: [u8; 4], pub bls_withdrawal_prefix_byte: u8, + pub eth1_address_withdrawal_prefix_byte: u8, /* * Time parameters @@ -98,6 +100,7 @@ pub struct ChainSpec { */ pub(crate) domain_beacon_proposer: u32, pub(crate) domain_beacon_attester: u32, + pub(crate) domain_blobs_sidecar: u32, pub(crate) domain_randao: u32, pub(crate) domain_deposit: u32, pub(crate) domain_voluntary_exit: u32, @@ -150,6 +153,20 @@ pub struct ChainSpec { pub terminal_block_hash_activation_epoch: Epoch, pub safe_slots_to_import_optimistically: u64, + /* + * Capella hard fork params + */ + pub capella_fork_version: [u8; 4], + /// The Capella fork epoch is optional, with `None` representing "Capella never happens". + pub capella_fork_epoch: Option, + pub max_validators_per_withdrawals_sweep: u64, + + /* + * Eip4844 hard fork params + */ + pub eip4844_fork_version: [u8; 4], + pub eip4844_fork_epoch: Option, + /* * Networking */ @@ -169,6 +186,11 @@ pub struct ChainSpec { * Application params */ pub(crate) domain_application_mask: u32, + + /* + * Capella params + */ + pub(crate) domain_bls_to_execution_change: u32, } impl ChainSpec { @@ -233,11 +255,17 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. 
pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, - _ => match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, + match self.eip4844_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Eip4844, + _ => match self.capella_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, + _ => match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, + }, }, } } @@ -248,6 +276,8 @@ impl ChainSpec { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, ForkName::Merge => self.bellatrix_fork_version, + ForkName::Capella => self.capella_fork_version, + ForkName::Eip4844 => self.eip4844_fork_version, } } @@ -257,6 +287,8 @@ impl ChainSpec { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, ForkName::Merge => self.bellatrix_fork_epoch, + ForkName::Capella => self.capella_fork_epoch, + ForkName::Eip4844 => self.eip4844_fork_epoch, } } @@ -266,6 +298,8 @@ impl ChainSpec { BeaconState::Base(_) => self.inactivity_penalty_quotient, BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, + BeaconState::Capella(_) => self.inactivity_penalty_quotient_bellatrix, + BeaconState::Eip4844(_) => self.inactivity_penalty_quotient_bellatrix, } } @@ -278,6 +312,8 @@ impl ChainSpec { BeaconState::Base(_) => self.proportional_slashing_multiplier, BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, + BeaconState::Capella(_) => self.proportional_slashing_multiplier_bellatrix, + 
BeaconState::Eip4844(_) => self.proportional_slashing_multiplier_bellatrix, } } @@ -290,6 +326,8 @@ impl ChainSpec { BeaconState::Base(_) => self.min_slashing_penalty_quotient, BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, + BeaconState::Capella(_) => self.min_slashing_penalty_quotient_bellatrix, + BeaconState::Eip4844(_) => self.min_slashing_penalty_quotient_bellatrix, } } @@ -328,6 +366,7 @@ impl ChainSpec { match domain { Domain::BeaconProposer => self.domain_beacon_proposer, Domain::BeaconAttester => self.domain_beacon_attester, + Domain::BlobsSideCar => self.domain_blobs_sidecar, Domain::Randao => self.domain_randao, Domain::Deposit => self.domain_deposit, Domain::VoluntaryExit => self.domain_voluntary_exit, @@ -337,6 +376,7 @@ impl ChainSpec { Domain::ContributionAndProof => self.domain_contribution_and_proof, Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), + Domain::BlsToExecutionChange => self.domain_bls_to_execution_change, } } @@ -499,7 +539,8 @@ impl ChainSpec { * Initial Values */ genesis_fork_version: [0; 4], - bls_withdrawal_prefix_byte: 0, + bls_withdrawal_prefix_byte: 0x00, + eth1_address_withdrawal_prefix_byte: 0x01, /* * Time parameters @@ -533,6 +574,7 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_blobs_sidecar: 10, // 0x0a000000 /* * Fork choice @@ -587,6 +629,19 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + /* + * Capella hard fork params + */ + capella_fork_version: [0x03, 00, 00, 00], + capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16384, + + /* + * Eip4844 hard fork params + */ + eip4844_fork_version: [0x04, 0x00, 0x00, 0x00], + eip4844_fork_epoch: None, + /* * 
Network specific */ @@ -606,6 +661,11 @@ impl ChainSpec { * Application specific */ domain_application_mask: APPLICATION_DOMAIN_BUILDER, + + /* + * Capella params + */ + domain_bls_to_execution_change: 10, } } @@ -645,6 +705,13 @@ impl ChainSpec { // `Uint256::MAX` which is `2*256- 1`. .checked_add(Uint256::one()) .expect("addition does not overflow"), + // Capella + capella_fork_version: [0x03, 0x00, 0x00, 0x01], + capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16, + // Eip4844 + eip4844_fork_version: [0x04, 0x00, 0x00, 0x01], + eip4844_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -707,7 +774,8 @@ impl ChainSpec { * Initial Values */ genesis_fork_version: [0x00, 0x00, 0x00, 0x64], - bls_withdrawal_prefix_byte: 0, + bls_withdrawal_prefix_byte: 0x00, + eth1_address_withdrawal_prefix_byte: 0x01, /* * Time parameters @@ -741,6 +809,7 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_blobs_sidecar: 10, /* * Fork choice @@ -797,6 +866,19 @@ impl ChainSpec { terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, + /* + * Capella hard fork params + */ + capella_fork_version: [0x03, 0x00, 0x00, 0x64], + capella_fork_epoch: None, + max_validators_per_withdrawals_sweep: 16384, + + /* + * Eip4844 hard fork params + */ + eip4844_fork_version: [0x04, 0x00, 0x00, 0x64], + eip4844_fork_epoch: None, + /* * Network specific */ @@ -816,6 +898,11 @@ impl ChainSpec { * Application specific */ domain_application_mask: APPLICATION_DOMAIN_BUILDER, + + /* + * Capella params + */ + domain_bls_to_execution_change: 10, } } } @@ -875,6 +962,22 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub bellatrix_fork_epoch: Option>, + #[serde(default = "default_capella_fork_version")] + #[serde(with = "eth2_serde_utils::bytes_4_hex")] + capella_fork_version: [u8; 4], + #[serde(default)] + 
#[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub capella_fork_epoch: Option>, + + #[serde(default = "default_eip4844_fork_version")] + #[serde(with = "eth2_serde_utils::bytes_4_hex")] + eip4844_fork_version: [u8; 4], + #[serde(default)] + #[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub eip4844_fork_epoch: Option>, + #[serde(with = "eth2_serde_utils::quoted_u64")] seconds_per_slot: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] @@ -912,6 +1015,16 @@ fn default_bellatrix_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } +fn default_capella_fork_version() -> [u8; 4] { + // TODO: determine if the bellatrix example should be copied like this + [0xff, 0xff, 0xff, 0xff] +} + +fn default_eip4844_fork_version() -> [u8; 4] { + // This value shouldn't be used. + [0xff, 0xff, 0xff, 0xff] +} + /// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). 
/// /// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 @@ -1008,6 +1121,14 @@ impl Config { bellatrix_fork_epoch: spec .bellatrix_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), + capella_fork_version: spec.capella_fork_version, + capella_fork_epoch: spec + .capella_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), + eip4844_fork_version: spec.eip4844_fork_version, + eip4844_fork_epoch: spec + .eip4844_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), seconds_per_slot: spec.seconds_per_slot, seconds_per_eth1_block: spec.seconds_per_eth1_block, @@ -1053,6 +1174,10 @@ impl Config { altair_fork_epoch, bellatrix_fork_epoch, bellatrix_fork_version, + capella_fork_epoch, + capella_fork_version, + eip4844_fork_epoch, + eip4844_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1083,6 +1208,10 @@ impl Config { altair_fork_epoch: altair_fork_epoch.map(|q| q.value), bellatrix_fork_epoch: bellatrix_fork_epoch.map(|q| q.value), bellatrix_fork_version, + capella_fork_epoch: capella_fork_epoch.map(|q| q.value), + capella_fork_version, + eip4844_fork_epoch: eip4844_fork_epoch.map(|q| q.value), + eip4844_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1156,6 +1285,7 @@ mod tests { test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec); + test_domain(Domain::BlobsSideCar, spec.domain_blobs_sidecar, &spec); test_domain(Domain::Randao, spec.domain_randao, &spec); test_domain(Domain::Deposit, spec.domain_deposit, &spec); test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec); @@ -1174,6 +1304,14 @@ mod tests { apply_bit_mask(builder_domain_pre_mask, &spec), &spec, ); + + test_domain( + Domain::BlsToExecutionChange, + spec.domain_bls_to_execution_change, + &spec, + ); + + 
test_domain(Domain::BlobsSideCar, spec.domain_blobs_sidecar, &spec); } fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 { diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/checkpoint.rs index cad7fab754..e84798f6f7 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/checkpoint.rs @@ -8,8 +8,8 @@ use tree_hash_derive::TreeHash; /// Casper FFG checkpoint, used in attestations. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, Clone, Copy, diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index b7ec015ea3..ac93818b9c 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,5 +1,6 @@ use crate::{ - consts::altair, AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec, ForkName, + consts::altair, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, + EthSpec, ForkName, }; use maplit::hashmap; use serde_derive::{Deserialize, Serialize}; @@ -11,7 +12,7 @@ use superstruct::superstruct; /// /// Mostly useful for the API. #[superstruct( - variants(Altair, Bellatrix), + variants(Bellatrix, Capella), variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) )] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -24,9 +25,11 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, - #[superstruct(only(Bellatrix))] #[serde(flatten)] pub bellatrix_preset: BellatrixPreset, + #[superstruct(only(Capella))] + #[serde(flatten)] + pub capella_preset: CapellaPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. 
#[serde(flatten)] pub extra_fields: HashMap, @@ -37,14 +40,24 @@ impl ConfigAndPreset { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); let altair_preset = AltairPreset::from_chain_spec::(spec); + let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let extra_fields = get_extra_fields(spec); - if spec.bellatrix_fork_epoch.is_some() + if spec.capella_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Merge) + || fork_name == Some(ForkName::Capella) { - let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); + let capella_preset = CapellaPreset::from_chain_spec::(spec); + ConfigAndPreset::Capella(ConfigAndPresetCapella { + config, + base_preset, + altair_preset, + bellatrix_preset, + capella_preset, + extra_fields, + }) + } else { ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { config, base_preset, @@ -52,13 +65,6 @@ impl ConfigAndPreset { bellatrix_preset, extra_fields, }) - } else { - ConfigAndPreset::Altair(ConfigAndPresetAltair { - config, - base_preset, - altair_preset, - extra_fields, - }) } } } @@ -72,6 +78,7 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte), "domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer), "domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester), + "domain_blobs_sidecar".to_uppercase() => u32_hex(spec.domain_blobs_sidecar), "domain_randao".to_uppercase()=> u32_hex(spec.domain_randao), "domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit), "domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit), @@ -130,8 +137,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPresetBellatrix = + let from: ConfigAndPresetCapella = serde_yaml::from_reader(reader).expect("error while deserializing"); - 
assert_eq!(ConfigAndPreset::Bellatrix(from), yamlconfig); + assert_eq!(ConfigAndPreset::Capella(from), yamlconfig); } } diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index a9377bc3e0..b13e3aa9c3 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -22,3 +22,17 @@ pub mod altair { pub mod merge { pub const INTERVALS_PER_SLOT: u64 = 3; } +pub mod eip4844 { + use crate::Uint256; + + use lazy_static::lazy_static; + + lazy_static! { + pub static ref BLS_MODULUS: Uint256 = Uint256::from_dec_str( + "52435875175126190479447740508185965837690552500527637822603658699938581184513" + ) + .expect("should initialize BLS_MODULUS"); + } + pub const BLOB_TX_TYPE: u8 = 5; + pub const VERSIONED_HASH_VERSION_KZG: u8 = 1; +} diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 855e36bc90..167b0857c5 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -9,9 +9,20 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// A Validators aggregate sync committee contribution and selection proof. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct ContributionAndProof { /// The index of the validator that created the sync contribution. 
#[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs index a347cf675c..bbc3bd9fb8 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -11,9 +11,18 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32; /// A deposit to potentially become a beacon chain validator. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct Deposit { pub proof: FixedVector, diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index 6c5444e110..1969311671 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -10,9 +10,18 @@ use tree_hash_derive::TreeHash; /// The data supplied by the user to the deposit contract. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct DepositData { pub pubkey: PublicKeyBytes, diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index d1f245bc98..63073401c2 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -10,8 +10,18 @@ use tree_hash_derive::TreeHash; /// The data supplied by the user to the deposit contract. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 0fe929a1e9..3556e31a9f 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -10,9 +10,18 @@ use tree_hash_derive::TreeHash; /// a nodes local ENR. /// /// Spec v0.11 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Default, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct EnrForkId { #[serde(with = "eth2_serde_utils::bytes_4_hex")] diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index 4fd7d3373c..6b2396e112 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -9,8 +9,8 @@ use tree_hash_derive::TreeHash; /// Contains data obtained from the Eth1 chain. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, PartialEq, Clone, diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index e616976026..e45f5b392a 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -48,7 +48,9 @@ impl fmt::Display for EthSpecId { } } -pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Eq { +pub trait EthSpec: + 'static + Default + Sync + Send + Clone + Debug + PartialEq + Eq + for<'a> arbitrary::Arbitrary<'a> +{ /* * Constants */ @@ -95,6 +97,16 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type GasLimitDenominator: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MinGasLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxExtraDataBytes: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Capella + */ + type MaxBlsToExecutionChanges: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxWithdrawalsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Eip4844 + */ + type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -222,6 +234,21 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn bytes_per_logs_bloom() -> usize { Self::BytesPerLogsBloom::to_usize() } + + /// Returns the `MAX_BLS_TO_EXECUTION_CHANGES` constant for this specification. + fn max_bls_to_execution_changes() -> usize { + Self::MaxBlsToExecutionChanges::to_usize() + } + + /// Returns the `MAX_WITHDRAWALS_PER_PAYLOAD` constant for this specification. + fn max_withdrawals_per_payload() -> usize { + Self::MaxWithdrawalsPerPayload::to_usize() + } + + /// Returns the `MAX_BLOBS_PER_BLOCK` constant for this specification. 
+ fn max_blobs_per_block() -> usize { + Self::MaxBlobsPerBlock::to_usize() + } } /// Macro to inherit some type values from another EthSpec. @@ -233,8 +260,7 @@ macro_rules! params_from_eth_spec { } /// Ethereum Foundation specifications. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] pub struct MainnetEthSpec; impl EthSpec for MainnetEthSpec { @@ -262,9 +288,13 @@ impl EthSpec for MainnetEthSpec { type GasLimitDenominator = U1024; type MinGasLimit = U5000; type MaxExtraDataBytes = U32; + type MaxBlobsPerBlock = U16; // 2**4 = 16 + type FieldElementsPerBlob = U4096; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch + type MaxBlsToExecutionChanges = U16; + type MaxWithdrawalsPerPayload = U16; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -276,8 +306,7 @@ impl EthSpec for MainnetEthSpec { } /// Ethereum Foundation minimal spec, as defined in the eth2.0-specs repo. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] pub struct MinimalEthSpec; impl EthSpec for MinimalEthSpec { @@ -290,6 +319,7 @@ impl EthSpec for MinimalEthSpec { type SyncSubcommitteeSize = U8; // 32 committee size / 4 sync committee subnet count type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch + type MaxWithdrawalsPerPayload = U4; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -309,7 +339,10 @@ impl EthSpec for MinimalEthSpec { BytesPerLogsBloom, GasLimitDenominator, MinGasLimit, - MaxExtraDataBytes + MaxExtraDataBytes, + MaxBlsToExecutionChanges, + MaxBlobsPerBlock, + FieldElementsPerBlob }); fn default_spec() -> ChainSpec { @@ -322,8 +355,7 @@ impl EthSpec for MinimalEthSpec { } /// Gnosis Beacon Chain specifications. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize, arbitrary::Arbitrary)] pub struct GnosisEthSpec; impl EthSpec for GnosisEthSpec { @@ -354,6 +386,10 @@ impl EthSpec for GnosisEthSpec { type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch + type MaxBlsToExecutionChanges = U16; + type MaxWithdrawalsPerPayload = U16; + type MaxBlobsPerBlock = U16; // 2**4 = 16 + type FieldElementsPerBlob = U4096; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index 988dcece5e..363a35a86a 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -6,8 +6,18 @@ use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] +#[derive( + arbitrary::Arbitrary, + Default, + Clone, + Copy, + Serialize, + Deserialize, + Eq, + PartialEq, + Hash, + Derivative, +)] #[derivative(Debug = "transparent")] #[serde(transparent)] pub struct ExecutionBlockHash(Hash256); diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index 4baa5dd395..b19988ff7d 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -17,14 +17,16 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-use crate::{Address, EthSpec, ExecutionPayload, Hash256, Hash64, Uint256}; +use crate::{Address, EthSpec, ExecutionPayloadRef, Hash256, Hash64, Uint256}; use metastruct::metastruct; /// Execution block header as used for RLP encoding and Keccak hashing. /// /// Credit to Reth for the type definition. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[metastruct(mappings(map_execution_block_header_fields()))] +#[metastruct(mappings(map_execution_block_header_fields_except_withdrawals(exclude( + withdrawals_root +))))] pub struct ExecutionBlockHeader { pub parent_hash: Hash256, pub ommers_hash: Hash256, @@ -42,33 +44,36 @@ pub struct ExecutionBlockHeader { pub mix_hash: Hash256, pub nonce: Hash64, pub base_fee_per_gas: Uint256, + pub withdrawals_root: Option, } impl ExecutionBlockHeader { pub fn from_payload( - payload: &ExecutionPayload, + payload: ExecutionPayloadRef, rlp_empty_list_root: Hash256, rlp_transactions_root: Hash256, + rlp_withdrawals_root: Option, ) -> Self { // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is // defined in EIP-4399. 
ExecutionBlockHeader { - parent_hash: payload.parent_hash.into_root(), + parent_hash: payload.parent_hash().into_root(), ommers_hash: rlp_empty_list_root, - beneficiary: payload.fee_recipient, - state_root: payload.state_root, + beneficiary: payload.fee_recipient(), + state_root: payload.state_root(), transactions_root: rlp_transactions_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom.clone().into(), + receipts_root: payload.receipts_root(), + logs_bloom: payload.logs_bloom().clone().into(), difficulty: Uint256::zero(), - number: payload.block_number.into(), - gas_limit: payload.gas_limit.into(), - gas_used: payload.gas_used.into(), - timestamp: payload.timestamp, - extra_data: payload.extra_data.clone().into(), - mix_hash: payload.prev_randao, + number: payload.block_number().into(), + gas_limit: payload.gas_limit().into(), + gas_used: payload.gas_used().into(), + timestamp: payload.timestamp(), + extra_data: payload.extra_data().clone().into(), + mix_hash: payload.prev_randao(), nonce: Hash64::zero(), - base_fee_per_gas: payload.base_fee_per_gas, + base_fee_per_gas: payload.base_fee_per_gas(), + withdrawals_root: rlp_withdrawals_root, } } } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 412e5a8df3..6e055d0a79 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; -use ssz::Encode; +use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -12,50 +12,162 @@ pub type Transactions = VariableList< ::MaxTransactionsPerPayload, >; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; + +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + 
derive( + Default, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec") + ), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + map_into(FullPayload, BlindedPayload), + map_ref_into(ExecutionPayloadHeader) +)] #[derive( - Default, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, + Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary, )] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(bound = "T: EthSpec")] +#[serde(bound = "T: EthSpec", untagged)] +#[arbitrary(bound = "T: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +#[tree_hash(enum_behaviour = "transparent")] pub struct ExecutionPayload { + #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, + #[superstruct(getter(copy))] pub fee_recipient: Address, + #[superstruct(getter(copy))] pub state_root: Hash256, + #[superstruct(getter(copy))] pub receipts_root: Hash256, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, + #[superstruct(getter(copy))] pub prev_randao: Hash256, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub block_number: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_limit: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_used: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] + #[superstruct(getter(copy))] pub 
base_fee_per_gas: Uint256, + #[superstruct(only(Eip4844))] + #[serde(with = "eth2_serde_utils::quoted_u256")] + #[superstruct(getter(copy))] + pub excess_data_gas: Uint256, + #[superstruct(getter(copy))] pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, + #[superstruct(only(Capella, Eip4844))] + pub withdrawals: Withdrawals, +} + +impl<'a, T: EthSpec> ExecutionPayloadRef<'a, T> { + // this emulates clone on a normal reference type + pub fn clone_from_ref(&self) -> ExecutionPayload { + map_execution_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.clone().into() + }) + } } impl ExecutionPayload { - pub fn empty() -> Self { - Self::default() + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayload: {fork_name}", + ))), + ForkName::Merge => ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella), + ForkName::Eip4844 => ExecutionPayloadEip4844::from_ssz_bytes(bytes).map(Self::Eip4844), + } } #[allow(clippy::integer_arithmetic)] /// Returns the maximum size of an execution payload. - pub fn max_execution_payload_size() -> usize { + pub fn max_execution_payload_merge_size() -> usize { // Fixed part - Self::empty().as_ssz_bytes().len() + ExecutionPayloadMerge::::default().as_ssz_bytes().len() // Max size of variable length `extra_data` field + (T::max_extra_data_bytes() * ::ssz_fixed_len()) // Max size of variable length `transactions` field + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) } + + #[allow(clippy::integer_arithmetic)] + /// Returns the maximum size of an execution payload. 
+ pub fn max_execution_payload_capella_size() -> usize { + // Fixed part + ExecutionPayloadCapella::::default().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (T::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + // Max size of variable length `withdrawals` field + + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) + } + + #[allow(clippy::integer_arithmetic)] + /// Returns the maximum size of an execution payload. + pub fn max_execution_payload_eip4844_size() -> usize { + // Fixed part + ExecutionPayloadEip4844::::default().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (T::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + // Max size of variable length `withdrawals` field + + (T::max_withdrawals_per_payload() * ::ssz_fixed_len()) + } +} + +impl ForkVersionDeserialize for ExecutionPayload { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |e| { + serde::de::Error::custom(format!("ExecutionPayload failed to deserialize: {:?}", e)) + }; + + Ok(match fork_name { + ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "ExecutionPayload failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } } diff --git a/consensus/types/src/execution_payload_header.rs 
b/consensus/types/src/execution_payload_header.rs index 01780fa1c3..4dc79ddc99 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,49 +1,167 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; +use ssz::Decode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use BeaconStateError; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + derive( + Default, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec") + ), + ref_attributes(derive(PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent")), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") +)] #[derive( - Default, Debug, Clone, Serialize, Deserialize, Derivative, Encode, Decode, TreeHash, TestRandom, + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, )] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec", untagged)] +#[arbitrary(bound = "T: EthSpec")] +#[tree_hash(enum_behaviour = "transparent")] +#[ssz(enum_behaviour = "transparent")] pub struct ExecutionPayloadHeader { + #[superstruct(getter(copy))] pub parent_hash: ExecutionBlockHash, + #[superstruct(getter(copy))] pub fee_recipient: Address, + #[superstruct(getter(copy))] pub state_root: Hash256, + #[superstruct(getter(copy))] pub receipts_root: Hash256, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, + 
#[superstruct(getter(copy))] pub prev_randao: Hash256, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub block_number: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_limit: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub gas_used: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] + #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, + #[superstruct(only(Eip4844))] + #[serde(with = "eth2_serde_utils::quoted_u256")] + #[superstruct(getter(copy))] + pub excess_data_gas: Uint256, + #[superstruct(getter(copy))] pub block_hash: ExecutionBlockHash, + #[superstruct(getter(copy))] pub transactions_root: Hash256, + #[superstruct(only(Capella, Eip4844))] + #[superstruct(getter(copy))] + pub withdrawals_root: Hash256, } impl ExecutionPayloadHeader { - pub fn empty() -> Self { - Self::default() + pub fn transactions(&self) -> Option<&Transactions> { + None + } + + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayloadHeader: {fork_name}", + ))), + ForkName::Merge => ExecutionPayloadHeaderMerge::from_ssz_bytes(bytes).map(Self::Merge), + ForkName::Capella => { + ExecutionPayloadHeaderCapella::from_ssz_bytes(bytes).map(Self::Capella) + } + ForkName::Eip4844 => { + ExecutionPayloadHeaderEip4844::from_ssz_bytes(bytes).map(Self::Eip4844) + } + } } } -impl<'a, T: EthSpec> From<&'a ExecutionPayload> for ExecutionPayloadHeader { - fn from(payload: &'a ExecutionPayload) -> Self { - ExecutionPayloadHeader { +impl<'a, T: EthSpec> ExecutionPayloadHeaderRef<'a, T> { + pub fn is_default_with_zero_roots(self) -> bool { + 
map_execution_payload_header_ref!(&'a _, self, |inner, cons| { + cons(inner); + *inner == Default::default() + }) + } +} + +impl ExecutionPayloadHeaderMerge { + pub fn upgrade_to_capella(&self) -> ExecutionPayloadHeaderCapella { + ExecutionPayloadHeaderCapella { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom.clone(), + prev_randao: self.prev_randao, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: self.extra_data.clone(), + base_fee_per_gas: self.base_fee_per_gas, + block_hash: self.block_hash, + transactions_root: self.transactions_root, + withdrawals_root: Hash256::zero(), + } + } +} + +impl ExecutionPayloadHeaderCapella { + pub fn upgrade_to_eip4844(&self) -> ExecutionPayloadHeaderEip4844 { + ExecutionPayloadHeaderEip4844 { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom.clone(), + prev_randao: self.prev_randao, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: self.extra_data.clone(), + base_fee_per_gas: self.base_fee_per_gas, + // TODO: verify if this is correct + excess_data_gas: Uint256::zero(), + block_hash: self.block_hash, + transactions_root: self.transactions_root, + withdrawals_root: self.withdrawals_root, + } + } +} + +impl<'a, T: EthSpec> From<&'a ExecutionPayloadMerge> for ExecutionPayloadHeaderMerge { + fn from(payload: &'a ExecutionPayloadMerge) -> Self { + Self { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -61,3 +179,135 @@ impl<'a, T: EthSpec> From<&'a ExecutionPayload> for ExecutionPayloadHeader } } } +impl<'a, T: EthSpec> From<&'a ExecutionPayloadCapella> for 
ExecutionPayloadHeaderCapella { + fn from(payload: &'a ExecutionPayloadCapella) -> Self { + Self { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + withdrawals_root: payload.withdrawals.tree_hash_root(), + } + } +} + +impl<'a, T: EthSpec> From<&'a ExecutionPayloadEip4844> for ExecutionPayloadHeaderEip4844 { + fn from(payload: &'a ExecutionPayloadEip4844) -> Self { + Self { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + excess_data_gas: payload.excess_data_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + withdrawals_root: payload.withdrawals.tree_hash_root(), + } + } +} + +// These impls are required to work around an inelegance in `to_execution_payload_header`. +// They only clone headers so they should be relatively cheap. 
+impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderMerge { + fn from(payload: &'a Self) -> Self { + payload.clone() + } +} + +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderCapella { + fn from(payload: &'a Self) -> Self { + payload.clone() + } +} + +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderEip4844 { + fn from(payload: &'a Self) -> Self { + payload.clone() + } +} + +impl<'a, T: EthSpec> From> for ExecutionPayloadHeader { + fn from(payload: ExecutionPayloadRef<'a, T>) -> Self { + map_execution_payload_ref_into_execution_payload_header!( + &'a _, + payload, + |inner, cons| cons(inner.into()) + ) + } +} + +impl TryFrom> for ExecutionPayloadHeaderMerge { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Merge(execution_payload_header) => Ok(execution_payload_header), + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} +impl TryFrom> for ExecutionPayloadHeaderCapella { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Capella(execution_payload_header) => { + Ok(execution_payload_header) + } + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} +impl TryFrom> for ExecutionPayloadHeaderEip4844 { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Eip4844(execution_payload_header) => { + Ok(execution_payload_header) + } + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} + +impl ForkVersionDeserialize for ExecutionPayloadHeader { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + let convert_err = |e| { + serde::de::Error::custom(format!( + "ExecutionPayloadHeader failed to deserialize: {:?}", + e + )) + }; + + Ok(match fork_name { + ForkName::Merge => 
Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Eip4844 => Self::Eip4844(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } +} diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 44b8a16637..de332f0cad 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -9,8 +9,8 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( + arbitrary::Arbitrary, Debug, Clone, Copy, diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 52b9294c8c..f5221dd913 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -47,6 +47,20 @@ impl ForkContext { )); } + if spec.capella_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Capella, + ChainSpec::compute_fork_digest(spec.capella_fork_version, genesis_validators_root), + )); + } + + if spec.eip4844_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Eip4844, + ChainSpec::compute_fork_digest(spec.eip4844_fork_version, genesis_validators_root), + )); + } + let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); let digest_to_fork = fork_to_digest diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index be13f71e4d..cc79039315 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -9,9 +9,18 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Default, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct ForkData { #[serde(with = "eth2_serde_utils::bytes_4_hex")] diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index e97b08309b..89eaff7985 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -11,11 +11,19 @@ pub enum ForkName { Base, Altair, Merge, + Capella, + Eip4844, } impl ForkName { pub fn list_all() -> Vec { - vec![ForkName::Base, ForkName::Altair, ForkName::Merge] + vec![ + ForkName::Base, + ForkName::Altair, + ForkName::Merge, + ForkName::Capella, + ForkName::Eip4844, + ] } /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` @@ -26,16 +34,36 @@ impl ForkName { ForkName::Base => { spec.altair_fork_epoch = None; spec.bellatrix_fork_epoch = None; + spec.capella_fork_epoch = None; + spec.eip4844_fork_epoch = None; spec } ForkName::Altair => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = None; + spec.capella_fork_epoch = None; + spec.eip4844_fork_epoch = None; spec } ForkName::Merge => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = None; + spec.eip4844_fork_epoch = None; + spec + } + ForkName::Capella => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.eip4844_fork_epoch = None; + spec + } + ForkName::Eip4844 => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.eip4844_fork_epoch = 
Some(Epoch::new(0)); spec } } @@ -49,6 +77,8 @@ impl ForkName { ForkName::Base => None, ForkName::Altair => Some(ForkName::Base), ForkName::Merge => Some(ForkName::Altair), + ForkName::Capella => Some(ForkName::Merge), + ForkName::Eip4844 => Some(ForkName::Capella), } } @@ -59,7 +89,9 @@ impl ForkName { match self { ForkName::Base => Some(ForkName::Altair), ForkName::Altair => Some(ForkName::Merge), - ForkName::Merge => None, + ForkName::Merge => Some(ForkName::Capella), + ForkName::Capella => Some(ForkName::Eip4844), + ForkName::Eip4844 => None, } } } @@ -101,6 +133,14 @@ macro_rules! map_fork_name_with { let (value, extra_data) = $body; ($t::Merge(value), extra_data) } + ForkName::Capella => { + let (value, extra_data) = $body; + ($t::Capella(value), extra_data) + } + ForkName::Eip4844 => { + let (value, extra_data) = $body; + ($t::Eip4844(value), extra_data) + } } }; } @@ -113,6 +153,8 @@ impl FromStr for ForkName { "phase0" | "base" => ForkName::Base, "altair" => ForkName::Altair, "bellatrix" | "merge" => ForkName::Merge, + "capella" => ForkName::Capella, + "eip4844" => ForkName::Eip4844, _ => return Err(format!("unknown fork name: {}", fork_name)), }) } @@ -124,6 +166,8 @@ impl Display for ForkName { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), ForkName::Merge => "bellatrix".fmt(f), + ForkName::Capella => "capella".fmt(f), + ForkName::Eip4844 => "eip4844".fmt(f), } } } @@ -155,7 +199,7 @@ mod test { #[test] fn previous_and_next_fork_consistent() { - assert_eq!(ForkName::Merge.next_fork(), None); + assert_eq!(ForkName::Eip4844.next_fork(), None); assert_eq!(ForkName::Base.previous_fork(), None); for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() { diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs new file mode 100644 index 0000000000..07ff40b27e --- /dev/null +++ b/consensus/types/src/fork_versioned_response.rs @@ -0,0 +1,138 @@ +use crate::ForkName; 
+use serde::de::DeserializeOwned; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::value::Value; +use std::sync::Arc; + +// Deserialize is only implemented for types that implement ForkVersionDeserialize +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ExecutionOptimisticForkVersionedResponse { + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + pub execution_optimistic: Option, + pub data: T, +} + +impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticForkVersionedResponse +where + F: ForkVersionDeserialize, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + version: Option, + execution_optimistic: Option, + data: serde_json::Value, + } + + let helper = Helper::deserialize(deserializer)?; + let data = match helper.version { + Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, + None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, + }; + + Ok(ExecutionOptimisticForkVersionedResponse { + version: helper.version, + execution_optimistic: helper.execution_optimistic, + data, + }) + } +} + +pub trait ForkVersionDeserialize: Sized + DeserializeOwned { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result; +} + +// Deserialize is only implemented for types that implement ForkVersionDeserialize +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ForkVersionedResponse { + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + pub data: T, +} + +impl<'de, F> serde::Deserialize<'de> for ForkVersionedResponse +where + F: ForkVersionDeserialize, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct Helper { + version: Option, + data: serde_json::Value, + } + + let helper = Helper::deserialize(deserializer)?; + let data = match helper.version { + 
Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, + None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, + }; + + Ok(ForkVersionedResponse { + version: helper.version, + data, + }) + } +} + +impl ForkVersionDeserialize for Arc { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + Ok(Arc::new(F::deserialize_by_fork::<'de, D>( + value, fork_name, + )?)) + } +} + +#[cfg(test)] +mod fork_version_response_tests { + use crate::{ + ExecutionPayload, ExecutionPayloadMerge, ForkName, ForkVersionedResponse, MainnetEthSpec, + }; + use serde_json::json; + + #[test] + fn fork_versioned_response_deserialize_correct_fork() { + type E = MainnetEthSpec; + + let response_json = + serde_json::to_string(&json!(ForkVersionedResponse::> { + version: Some(ForkName::Merge), + data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), + })) + .unwrap(); + + let result: Result>, _> = + serde_json::from_str(&response_json); + + assert!(result.is_ok()); + } + + #[test] + fn fork_versioned_response_deserialize_incorrect_fork() { + type E = MainnetEthSpec; + + let response_json = + serde_json::to_string(&json!(ForkVersionedResponse::> { + version: Some(ForkName::Capella), + data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), + })) + .unwrap(); + + let result: Result>, _> = + serde_json::from_str(&response_json); + + assert!(result.is_err()); + } +} diff --git a/consensus/types/src/free_attestation.rs b/consensus/types/src/free_attestation.rs deleted file mode 100644 index 81a778d842..0000000000 --- a/consensus/types/src/free_attestation.rs +++ /dev/null @@ -1,14 +0,0 @@ -/// Note: this object does not actually exist in the spec. -/// -/// We use it for managing attestations that have not been aggregated. 
-use super::{AttestationData, Signature}; -use serde_derive::Serialize; - -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize)] -pub struct FreeAttestation { - pub data: AttestationData, - pub signature: Signature, - #[serde(with = "eth2_serde_utils::quoted_u64")] - pub validator_index: u64, -} diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 2b0a645cd0..6288cdbe80 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -14,7 +14,7 @@ pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. #[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(arbitrary::Arbitrary)] pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]); impl Graffiti { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index 325f5f8537..e75b64cae9 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -10,8 +10,19 @@ use tree_hash_derive::TreeHash; /// Historical block and state roots. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] +#[arbitrary(bound = "T: EthSpec")] pub struct HistoricalBatch { pub block_roots: FixedVector, pub state_roots: FixedVector, diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs new file mode 100644 index 0000000000..84d87b85fd --- /dev/null +++ b/consensus/types/src/historical_summary.rs @@ -0,0 +1,89 @@ +use crate::test_utils::TestRandom; +use crate::Unsigned; +use crate::{BeaconState, EthSpec, Hash256}; +use cached_tree_hash::Error; +use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; +use compare_fields_derive::CompareFields; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use test_random_derive::TestRandom; +use tree_hash::{mix_in_length, TreeHash, BYTES_PER_CHUNK}; +use tree_hash_derive::TreeHash; + +/// `HistoricalSummary` matches the components of the phase0 `HistoricalBatch` +/// making the two hash_tree_root-compatible. This struct is introduced into the beacon state +/// in the Capella hard fork. 
+/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#historicalsummary +#[derive( + Debug, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + CompareFields, + Clone, + Copy, + Default, + arbitrary::Arbitrary, +)] +pub struct HistoricalSummary { + block_summary_root: Hash256, + state_summary_root: Hash256, +} + +impl HistoricalSummary { + pub fn new(state: &BeaconState) -> Self { + Self { + block_summary_root: state.block_roots().tree_hash_root(), + state_summary_root: state.state_roots().tree_hash_root(), + } + } +} + +/// Wrapper type allowing the implementation of `CachedTreeHash`. +#[derive(Debug)] +pub struct HistoricalSummaryCache<'a, N: Unsigned> { + pub inner: &'a VariableList, +} + +impl<'a, N: Unsigned> HistoricalSummaryCache<'a, N> { + pub fn new(inner: &'a VariableList) -> Self { + Self { inner } + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.inner.len() + } +} + +impl<'a, N: Unsigned> CachedTreeHash for HistoricalSummaryCache<'a, N> { + fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { + TreeHashCache::new(arena, int_log(N::to_usize()), self.len()) + } + + fn recalculate_tree_hash_root( + &self, + arena: &mut CacheArena, + cache: &mut TreeHashCache, + ) -> Result { + Ok(mix_in_length( + &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?, + self.len(), + )) + } +} + +pub fn leaf_iter( + values: &[HistoricalSummary], +) -> impl Iterator + ExactSizeIterator + '_ { + values + .iter() + .map(|value| value.tree_hash_root()) + .map(Hash256::to_fixed_bytes) +} diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 32271cfa93..16ffb1ad8f 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -12,12 +12,21 @@ use tree_hash_derive::TreeHash; /// To be included in an `AttesterSlashing`. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, )] #[derivative(PartialEq, Eq)] // to satisfy Clippy's lint about `Hash` #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct IndexedAttestation { /// Lists validator registry indices, not committee indices. #[serde(with = "quoted_variable_list_u64")] diff --git a/consensus/types/src/kzg_commitment.rs b/consensus/types/src/kzg_commitment.rs new file mode 100644 index 0000000000..4612af5de1 --- /dev/null +++ b/consensus/types/src/kzg_commitment.rs @@ -0,0 +1,45 @@ +use crate::test_utils::TestRandom; +use crate::*; +use derivative::Derivative; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use std::fmt; +use std::fmt::{Display, Formatter}; +use tree_hash::{PackedEncoding, TreeHash}; + +#[derive( + Derivative, Debug, Clone, Encode, Decode, Serialize, Deserialize, arbitrary::Arbitrary, +)] +#[derivative(PartialEq, Eq, Hash)] +#[ssz(struct_behaviour = "transparent")] +pub struct KzgCommitment(#[serde(with = "BigArray")] pub [u8; 48]); + +impl Display for KzgCommitment { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) + } +} + +impl TreeHash for KzgCommitment { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; 48] as TreeHash>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; 48] as TreeHash>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl TestRandom for KzgCommitment { + fn random_for_test(rng: &mut impl rand::RngCore) -> 
Self { + KzgCommitment(<[u8; 48] as TestRandom>::random_for_test(rng)) + } +} diff --git a/consensus/types/src/kzg_proof.rs b/consensus/types/src/kzg_proof.rs new file mode 100644 index 0000000000..9c1136ce51 --- /dev/null +++ b/consensus/types/src/kzg_proof.rs @@ -0,0 +1,74 @@ +use crate::test_utils::{RngCore, TestRandom}; +use serde::{Deserialize, Serialize}; +use serde_big_array::BigArray; +use ssz_derive::{Decode, Encode}; +use std::fmt; +use tree_hash::{PackedEncoding, TreeHash}; + +const KZG_PROOF_BYTES_LEN: usize = 48; + +#[derive( + Debug, + PartialEq, + Hash, + Clone, + Copy, + Encode, + Decode, + Serialize, + Deserialize, + arbitrary::Arbitrary, +)] +#[serde(transparent)] +#[ssz(struct_behaviour = "transparent")] +pub struct KzgProof(#[serde(with = "BigArray")] pub [u8; KZG_PROOF_BYTES_LEN]); + +impl fmt::Display for KzgProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) + } +} + +impl Default for KzgProof { + fn default() -> Self { + KzgProof([0; 48]) + } +} + +impl From<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof { + fn from(bytes: [u8; KZG_PROOF_BYTES_LEN]) -> Self { + Self(bytes) + } +} + +impl Into<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof { + fn into(self) -> [u8; KZG_PROOF_BYTES_LEN] { + self.0 + } +} + +impl TreeHash for KzgProof { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl TestRandom for KzgProof { + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut bytes = [0; KZG_PROOF_BYTES_LEN]; + rng.fill_bytes(&mut bytes); + Self(bytes) + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 
87f5ebe8b3..2926a434b1 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -1,5 +1,4 @@ //! Ethereum 2.0 types - // Required for big type-level numbers #![recursion_limit = "128"] // Clippy lint set up @@ -28,6 +27,7 @@ pub mod beacon_block_body; pub mod beacon_block_header; pub mod beacon_committee; pub mod beacon_state; +pub mod bls_to_execution_change; pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; @@ -46,9 +46,10 @@ pub mod execution_payload_header; pub mod fork; pub mod fork_data; pub mod fork_name; -pub mod free_attestation; +pub mod fork_versioned_response; pub mod graffiti; pub mod historical_batch; +pub mod historical_summary; pub mod indexed_attestation; pub mod light_client_bootstrap; pub mod light_client_finality_update; @@ -63,6 +64,7 @@ pub mod shuffling_id; pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; +pub mod signed_bls_to_execution_change; pub mod signed_contribution_and_proof; pub mod signed_voluntary_exit; pub mod signing_data; @@ -91,11 +93,16 @@ pub mod sync_selection_proof; pub mod sync_subnet_id; mod tree_hash_impls; pub mod validator_registration_data; +pub mod withdrawal; pub mod slot_data; #[cfg(feature = "sqlite")] pub mod sqlite; +pub mod blobs_sidecar; +pub mod kzg_commitment; +pub mod kzg_proof; + use ethereum_types::{H160, H256}; pub use crate::aggregate_and_proof::AggregateAndProof; @@ -104,20 +111,22 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BeaconBlockRef, - BeaconBlockRefMut, BlindedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockEip4844, + BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock, }; pub use crate::beacon_block_body::{ - BeaconBlockBody, 
BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, - BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, + BeaconBlockBodyEip4844, BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; +pub use crate::blobs_sidecar::BlobsSidecar; +pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetAltair, ConfigAndPresetBellatrix, + ConfigAndPreset, ConfigAndPresetBellatrix, ConfigAndPresetCapella, }; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; @@ -129,23 +138,37 @@ pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::ExecutionBlockHeader; -pub use crate::execution_payload::{ExecutionPayload, Transaction, Transactions}; -pub use crate::execution_payload_header::ExecutionPayloadHeader; +pub use crate::execution_payload::{ + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge, + ExecutionPayloadRef, Transaction, Transactions, Withdrawals, +}; +pub use crate::execution_payload_header::{ + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, +}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::free_attestation::FreeAttestation; +pub use 
crate::fork_versioned_response::{ + ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse, +}; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; +pub use crate::kzg_commitment::KzgCommitment; +pub use crate::kzg_proof::KzgProof; pub use crate::light_client_finality_update::LightClientFinalityUpdate; pub use crate::light_client_optimistic_update::LightClientOptimisticUpdate; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; -pub use crate::payload::{BlindedPayload, BlockType, ExecPayload, FullPayload}; +pub use crate::payload::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadEip4844, + BlindedPayloadMerge, BlindedPayloadRef, BlockType, ExecPayload, FullPayload, + FullPayloadCapella, FullPayloadEip4844, FullPayloadMerge, FullPayloadRef, OwnedExecPayload, +}; pub use crate::pending_attestation::PendingAttestation; -pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; +pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset}; pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; @@ -153,10 +176,12 @@ pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, - SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, + SignedBeaconBlockEip4844, SignedBeaconBlockHash, SignedBeaconBlockMerge, + SignedBlindedBeaconBlock, }; pub use 
crate::signed_beacon_block_header::SignedBeaconBlockHeader; +pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, SigningData}; @@ -175,12 +200,17 @@ pub use crate::validator::Validator; pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; +pub use crate::withdrawal::Withdrawal; +use serde_big_array::BigArray; pub type CommitteeIndex = u64; pub type Hash256 = H256; pub type Uint256 = ethereum_types::U256; pub type Address = H160; pub type ForkVersion = [u8; 4]; +pub type BLSFieldElement = Uint256; +pub type Blob = FixedVector::FieldElementsPerBlob>; +pub type VersionedHash = Hash256; pub type Hash64 = ethereum_types::H64; pub use bls::{ diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index d2a46c04a4..1a5eed2205 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -8,9 +8,19 @@ use tree_hash::TreeHash; /// A LightClientBootstrap is the initializer we send over to lightclient nodes /// that are trying to generate their basic storage when booting up. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientBootstrap { /// Requested beacon block header. 
pub header: BeaconBlockHeader, diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index cae6266f9e..08069c9308 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -10,9 +10,19 @@ use tree_hash::TreeHash; /// A LightClientFinalityUpdate is the update lightclient request or received by a gossip that /// signal a new finalized beacon block header for the light client sync protocol. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientFinalityUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. pub attested_header: BeaconBlockHeader, diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 8dda8cd5ae..7a39bd9ac1 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -9,9 +9,19 @@ use tree_hash::TreeHash; /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the current unfinalized epoch is verified only against BLS signature. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientOptimisticUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. pub attested_header: BeaconBlockHeader, diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 7d01f39bfc..ca35f96802 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -52,9 +52,19 @@ impl From for Error { /// A LightClientUpdate is the update we request solely to either complete the bootstraping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct LightClientUpdate { /// The last `BeaconBlockHeader` from the last attested block by the sync committee. 
pub attested_header: BeaconBlockHeader, diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index a2dd494864..bd98f8da07 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -7,7 +7,7 @@ use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; #[derive(Debug, Default, Clone, Copy, PartialEq, Deserialize, Serialize, TestRandom)] #[serde(transparent)] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(arbitrary::Arbitrary)] pub struct ParticipationFlags { #[serde(with = "eth2_serde_utils::quoted_u8")] bits: u8, diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 667fff58c7..cc22bc3ab8 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -2,12 +2,15 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use ssz::{Decode, DecodeError, Encode}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::borrow::Cow; use std::convert::TryFrom; use std::fmt::Debug; use std::hash::Hash; use test_random_derive::TestRandom; -use tree_hash::{PackedEncoding, TreeHash}; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; #[derive(Debug)] pub enum BlockType { @@ -15,32 +18,17 @@ pub enum BlockType { Full, } -pub trait ExecPayload: - Debug - + Clone - + Encode - + Debug - + Decode - + TestRandom - + TreeHash - + Default - + PartialEq - + Serialize - + DeserializeOwned - + Hash - + TryFrom> - + From> - + Send - + 'static -{ +/// A trait representing behavior of an `ExecutionPayload` that either has a full list of transactions +/// or a transaction hash in it's place. +pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + Send { fn block_type() -> BlockType; /// Convert the payload into a payload header. 
fn to_execution_payload_header(&self) -> ExecutionPayloadHeader; - // We provide a subset of field accessors, for the fields used in `consensus`. - // - // More fields can be added here if you wish. + /// We provide a subset of field accessors, for the fields used in `consensus`. + /// + /// More fields can be added here if you wish. fn parent_hash(&self) -> ExecutionBlockHash; fn prev_randao(&self) -> Hash256; fn block_number(&self) -> u64; @@ -48,6 +36,143 @@ pub trait ExecPayload: fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; + fn transactions(&self) -> Option<&Transactions>; + /// fork-specific fields + fn withdrawals_root(&self) -> Result; + + /// Is this a default payload with 0x0 roots for transactions and withdrawals? + fn is_default_with_zero_roots(&self) -> bool; + + /// Is this a default payload with the hash of the empty list for transactions and withdrawals? + fn is_default_with_empty_roots(&self) -> bool; +} + +/// `ExecPayload` functionality the requires ownership. 
+pub trait OwnedExecPayload: + ExecPayload + + Default + + Serialize + + DeserializeOwned + + Encode + + Decode + + TestRandom + + for<'a> arbitrary::Arbitrary<'a> + + 'static +{ +} + +impl OwnedExecPayload for P where + P: ExecPayload + + Default + + Serialize + + DeserializeOwned + + Encode + + Decode + + TestRandom + + for<'a> arbitrary::Arbitrary<'a> + + 'static +{ +} + +pub trait AbstractExecPayload: + ExecPayload + + Sized + + From> + + TryFrom> + + TryInto + + TryInto + + TryInto +{ + type Ref<'a>: ExecPayload + + Copy + + From<&'a Self::Merge> + + From<&'a Self::Capella> + + From<&'a Self::Eip4844>; + + type Merge: OwnedExecPayload + + Into + + for<'a> From>> + + TryFrom>; + type Capella: OwnedExecPayload + + Into + + for<'a> From>> + + TryFrom>; + type Eip4844: OwnedExecPayload + + Into + + for<'a> From>> + + TryFrom>; + + fn default_at_fork(fork_name: ForkName) -> Result; +} + +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + Derivative, + arbitrary::Arbitrary, + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec"), + ssz(struct_behaviour = "transparent"), + ), + ref_attributes( + derive(Debug, Derivative, TreeHash), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + tree_hash(enum_behaviour = "transparent"), + ), + map_into(ExecutionPayload), + map_ref_into(ExecutionPayloadRef), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") +)] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] +#[tree_hash(enum_behaviour = "transparent")] +pub struct FullPayload { + 
#[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: ExecutionPayloadMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload: ExecutionPayloadCapella, + #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] + pub execution_payload: ExecutionPayloadEip4844, +} + +impl From> for ExecutionPayload { + fn from(full_payload: FullPayload) -> Self { + map_full_payload_into_execution_payload!(full_payload, move |payload, cons| { + cons(payload.execution_payload) + }) + } +} + +impl<'a, T: EthSpec> From> for ExecutionPayload { + fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { + map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| { + cons(payload); + payload.execution_payload.clone().into() + }) + } +} + +impl<'a, T: EthSpec> From> for FullPayload { + fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self { + map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| { + cons(payload); + payload.clone().into() + }) + } } impl ExecPayload for FullPayload { @@ -55,36 +180,284 @@ impl ExecPayload for FullPayload { BlockType::Full } - fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - ExecutionPayloadHeader::from(&self.execution_payload) + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_full_payload_ref!(&'a _, self.to_ref(), move |inner, cons| { + cons(inner); + let exec_payload_ref: ExecutionPayloadRef<'a, T> = From::from(&inner.execution_payload); + ExecutionPayloadHeader::from(exec_payload_ref) + }) } - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload.parent_hash + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.parent_hash + }) } - fn prev_randao(&self) -> Hash256 { - self.execution_payload.prev_randao + fn 
prev_randao<'a>(&'a self) -> Hash256 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.prev_randao + }) } - fn block_number(&self) -> u64 { - self.execution_payload.block_number + fn block_number<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.block_number + }) } - fn timestamp(&self) -> u64 { - self.execution_payload.timestamp + fn timestamp<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.timestamp + }) } - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload.block_hash + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.block_hash + }) } - fn fee_recipient(&self) -> Address { - self.execution_payload.fee_recipient + fn fee_recipient<'a>(&'a self) -> Address { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.fee_recipient + }) } - fn gas_limit(&self) -> u64 { - self.execution_payload.gas_limit + fn gas_limit<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.gas_limit + }) + } + + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + Some(&payload.execution_payload.transactions) + }) + } + + fn withdrawals_root(&self) -> Result { + match self { + FullPayload::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayload::Capella(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + FullPayload::Eip4844(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + } + } + + fn is_default_with_zero_roots<'a>(&'a 
self) -> bool { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload == <_>::default() + }) + } + + fn is_default_with_empty_roots(&self) -> bool { + // For full payloads the empty/zero distinction does not exist. + self.is_default_with_zero_roots() + } +} + +impl FullPayload { + pub fn execution_payload(self) -> ExecutionPayload { + map_full_payload_into_execution_payload!(self, |inner, cons| { + cons(inner.execution_payload) + }) + } +} + +impl<'a, T: EthSpec> FullPayloadRef<'a, T> { + pub fn execution_payload_ref(self) -> ExecutionPayloadRef<'a, T> { + map_full_payload_ref_into_execution_payload_ref!(&'a _, self, |inner, cons| { + cons(&inner.execution_payload) + }) + } +} + +impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { + fn block_type() -> BlockType { + BlockType::Full + } + + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.to_execution_payload_header() + }) + } + + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.parent_hash + }) + } + + fn prev_randao<'a>(&'a self) -> Hash256 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.prev_randao + }) + } + + fn block_number<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.block_number + }) + } + + fn timestamp<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.timestamp + }) + } + + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.block_hash + }) + } + + fn fee_recipient<'a>(&'a self) -> Address { + 
map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.fee_recipient + }) + } + + fn gas_limit<'a>(&'a self) -> u64 { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.gas_limit + }) + } + + fn transactions<'a>(&'a self) -> Option<&'a Transactions> { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + Some(&payload.execution_payload.transactions) + }) + } + + fn withdrawals_root(&self) -> Result { + match self { + FullPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Capella(inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + FullPayloadRef::Eip4844(inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } + } + } + + fn is_default_with_zero_roots<'a>(&'a self) -> bool { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload == <_>::default() + }) + } + + fn is_default_with_empty_roots(&self) -> bool { + // For full payloads the empty/zero distinction does not exist. 
+ self.is_default_with_zero_roots() + } +} + +impl AbstractExecPayload for FullPayload { + type Ref<'a> = FullPayloadRef<'a, T>; + type Merge = FullPayloadMerge; + type Capella = FullPayloadCapella; + type Eip4844 = FullPayloadEip4844; + + fn default_at_fork(fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(FullPayloadMerge::default().into()), + ForkName::Capella => Ok(FullPayloadCapella::default().into()), + ForkName::Eip4844 => Ok(FullPayloadEip4844::default().into()), + } + } +} + +impl From> for FullPayload { + fn from(execution_payload: ExecutionPayload) -> Self { + map_execution_payload_into_full_payload!(execution_payload, |inner, cons| { + cons(inner.into()) + }) + } +} + +impl TryFrom> for FullPayload { + type Error = (); + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(()) + } +} + +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes( + derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + Derivative, + arbitrary::Arbitrary + ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + serde(bound = "T: EthSpec", deny_unknown_fields), + arbitrary(bound = "T: EthSpec"), + ssz(struct_behaviour = "transparent"), + ), + ref_attributes( + derive(Debug, Derivative, TreeHash), + derivative(PartialEq, Hash(bound = "T: EthSpec")), + tree_hash(enum_behaviour = "transparent"), + ), + map_into(ExecutionPayloadHeader), + cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") +)] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] +#[tree_hash(enum_behaviour = "transparent")] +pub struct BlindedPayload { + 
#[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload_header: ExecutionPayloadHeaderMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] + pub execution_payload_header: ExecutionPayloadHeaderEip4844, +} + +impl<'a, T: EthSpec> From> for BlindedPayload { + fn from(blinded_payload_ref: BlindedPayloadRef<'a, T>) -> Self { + map_blinded_payload_ref!(&'a _, blinded_payload_ref, move |payload, cons| { + cons(payload); + payload.clone().into() + }) } } @@ -94,191 +467,495 @@ impl ExecPayload for BlindedPayload { } fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { - self.execution_payload_header.clone() + map_blinded_payload_into_execution_payload_header!(self.clone(), |inner, cons| { + cons(inner.execution_payload_header) + }) } - fn parent_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.parent_hash + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.parent_hash + }) } - fn prev_randao(&self) -> Hash256 { - self.execution_payload_header.prev_randao + fn prev_randao<'a>(&'a self) -> Hash256 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.prev_randao + }) } - fn block_number(&self) -> u64 { - self.execution_payload_header.block_number + fn block_number<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_number + }) } - fn timestamp(&self) -> u64 { - self.execution_payload_header.timestamp + fn timestamp<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + 
cons(payload); + payload.execution_payload_header.timestamp + }) } - fn block_hash(&self) -> ExecutionBlockHash { - self.execution_payload_header.block_hash + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_hash + }) } - fn fee_recipient(&self) -> Address { - self.execution_payload_header.fee_recipient + fn fee_recipient<'a>(&'a self) -> Address { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.fee_recipient + }) } - fn gas_limit(&self) -> u64 { - self.execution_payload_header.gas_limit + fn gas_limit<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.gas_limit + }) } -} -#[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(bound = "T: EthSpec")] -pub struct BlindedPayload { - pub execution_payload_header: ExecutionPayloadHeader, -} + fn transactions(&self) -> Option<&Transactions> { + None + } -// NOTE: the `Default` implementation for `BlindedPayload` needs to be different from the `Default` -// implementation for `ExecutionPayloadHeader` because payloads are checked for equality against the -// default payload in `is_merge_transition_block` to determine whether the merge has occurred. -// -// The default `BlindedPayload` is therefore the payload header that results from blinding the -// default `ExecutionPayload`, which differs from the default `ExecutionPayloadHeader` in that -// its `transactions_root` is the hash of the empty list rather than 0x0. 
-impl Default for BlindedPayload { - fn default() -> Self { - Self { - execution_payload_header: ExecutionPayloadHeader::from(&ExecutionPayload::default()), + fn withdrawals_root(&self) -> Result { + match self { + BlindedPayload::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayload::Capella(ref inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + BlindedPayload::Eip4844(ref inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } } } + + fn is_default_with_zero_roots(&self) -> bool { + self.to_ref().is_default_with_zero_roots() + } + + // For blinded payloads we must check "defaultness" against the default `ExecutionPayload` + // which has been blinded into an `ExecutionPayloadHeader`, NOT against the default + // `ExecutionPayloadHeader` which has a zeroed out `transactions_root`. The transactions root + // should be the root of the empty list. + fn is_default_with_empty_roots(&self) -> bool { + self.to_ref().is_default_with_empty_roots() + } +} + +impl<'b, T: EthSpec> ExecPayload for BlindedPayloadRef<'b, T> { + fn block_type() -> BlockType { + BlockType::Blinded + } + + fn to_execution_payload_header<'a>(&'a self) -> ExecutionPayloadHeader { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.to_execution_payload_header() + }) + } + + fn parent_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.parent_hash + }) + } + + fn prev_randao<'a>(&'a self) -> Hash256 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.prev_randao + }) + } + + fn block_number<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_number + }) + } + + fn timestamp<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + 
cons(payload); + payload.execution_payload_header.timestamp + }) + } + + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.block_hash + }) + } + + fn fee_recipient<'a>(&'a self) -> Address { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.fee_recipient + }) + } + + fn gas_limit<'a>(&'a self) -> u64 { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.gas_limit + }) + } + + fn transactions(&self) -> Option<&Transactions> { + None + } + + fn withdrawals_root(&self) -> Result { + match self { + BlindedPayloadRef::Merge(_) => Err(Error::IncorrectStateVariant), + BlindedPayloadRef::Capella(inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + BlindedPayloadRef::Eip4844(inner) => { + Ok(inner.execution_payload_header.withdrawals_root) + } + } + } + + fn is_default_with_zero_roots<'a>(&'a self) -> bool { + map_blinded_payload_ref!(&'b _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header == <_>::default() + }) + } + + fn is_default_with_empty_roots<'a>(&'a self) -> bool { + map_blinded_payload_ref!(&'b _, self, move |payload, cons| { + cons(payload); + payload.is_default_with_empty_roots() + }) + } +} + +macro_rules! 
impl_exec_payload_common { + ($wrapper_type:ident, // BlindedPayloadMerge | FullPayloadMerge + $wrapped_type:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadMerge + $wrapped_type_full:ident, // ExecutionPayloadMerge | ExecutionPayloadMerge + $wrapped_type_header:ident, // ExecutionPayloadHeaderMerge | ExecutionPayloadHeaderMerge + $wrapped_field:ident, // execution_payload_header | execution_payload + $fork_variant:ident, // Merge | Merge + $block_type_variant:ident, // Blinded | Full + $is_default_with_empty_roots:block, + $f:block, + $g:block) => { + impl ExecPayload for $wrapper_type { + fn block_type() -> BlockType { + BlockType::$block_type_variant + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + ExecutionPayloadHeader::$fork_variant($wrapped_type_header::from( + &self.$wrapped_field, + )) + } + + fn parent_hash(&self) -> ExecutionBlockHash { + self.$wrapped_field.parent_hash + } + + fn prev_randao(&self) -> Hash256 { + self.$wrapped_field.prev_randao + } + + fn block_number(&self) -> u64 { + self.$wrapped_field.block_number + } + + fn timestamp(&self) -> u64 { + self.$wrapped_field.timestamp + } + + fn block_hash(&self) -> ExecutionBlockHash { + self.$wrapped_field.block_hash + } + + fn fee_recipient(&self) -> Address { + self.$wrapped_field.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.$wrapped_field.gas_limit + } + + fn is_default_with_zero_roots(&self) -> bool { + self.$wrapped_field == $wrapped_type::default() + } + + fn is_default_with_empty_roots(&self) -> bool { + let f = $is_default_with_empty_roots; + f(self) + } + + fn transactions(&self) -> Option<&Transactions> { + let f = $f; + f(self) + } + + fn withdrawals_root(&self) -> Result { + let g = $g; + g(self) + } + } + + impl From<$wrapped_type> for $wrapper_type { + fn from($wrapped_field: $wrapped_type) -> Self { + Self { $wrapped_field } + } + } + }; +} + +macro_rules! 
impl_exec_payload_for_fork { + // BlindedPayloadMerge, FullPayloadMerge, ExecutionPayloadHeaderMerge, ExecutionPayloadMerge, Merge + ($wrapper_type_header:ident, $wrapper_type_full:ident, $wrapped_type_header:ident, $wrapped_type_full:ident, $fork_variant:ident) => { + //*************** Blinded payload implementations ******************// + + impl_exec_payload_common!( + $wrapper_type_header, // BlindedPayloadMerge + $wrapped_type_header, // ExecutionPayloadHeaderMerge + $wrapped_type_full, // ExecutionPayloadMerge + $wrapped_type_header, // ExecutionPayloadHeaderMerge + execution_payload_header, + $fork_variant, // Merge + Blinded, + { + |wrapper: &$wrapper_type_header| { + wrapper.execution_payload_header + == $wrapped_type_header::from(&$wrapped_type_full::default()) + } + }, + { |_| { None } }, + { + let c: for<'a> fn(&'a $wrapper_type_header) -> Result = + |payload: &$wrapper_type_header| { + let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; + c + } + ); + + impl TryInto<$wrapper_type_header> for BlindedPayload { + type Error = Error; + + fn try_into(self) -> Result<$wrapper_type_header, Self::Error> { + match self { + BlindedPayload::$fork_variant(payload) => Ok(payload), + _ => Err(Error::IncorrectStateVariant), + } + } + } + + // NOTE: the `Default` implementation for `BlindedPayload` needs to be different from the `Default` + // implementation for `ExecutionPayloadHeader` because payloads are checked for equality against the + // default payload in `is_merge_transition_block` to determine whether the merge has occurred. + // + // The default `BlindedPayload` is therefore the payload header that results from blinding the + // default `ExecutionPayload`, which differs from the default `ExecutionPayloadHeader` in that + // its `transactions_root` is the hash of the empty list rather than 0x0. 
+ impl Default for $wrapper_type_header { + fn default() -> Self { + Self { + execution_payload_header: $wrapped_type_header::from( + &$wrapped_type_full::default(), + ), + } + } + } + + impl TryFrom> for $wrapper_type_header { + type Error = Error; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::$fork_variant(execution_payload_header) => { + Ok(execution_payload_header.into()) + } + _ => Err(Error::PayloadConversionLogicFlaw), + } + } + } + + // BlindedPayload* from CoW reference to ExecutionPayload* (hopefully just a reference). + impl<'a, T: EthSpec> From>> for $wrapper_type_header { + fn from(execution_payload: Cow<'a, $wrapped_type_full>) -> Self { + Self { + execution_payload_header: $wrapped_type_header::from(&*execution_payload), + } + } + } + + //*************** Full payload implementations ******************// + + impl_exec_payload_common!( + $wrapper_type_full, // FullPayloadMerge + $wrapped_type_full, // ExecutionPayloadMerge + $wrapped_type_full, // ExecutionPayloadMerge + $wrapped_type_header, // ExecutionPayloadHeaderMerge + execution_payload, + $fork_variant, // Merge + Full, + { + |wrapper: &$wrapper_type_full| { + wrapper.execution_payload == $wrapped_type_full::default() + } + }, + { + let c: for<'a> fn(&'a $wrapper_type_full) -> Option<&'a Transactions> = + |payload: &$wrapper_type_full| Some(&payload.execution_payload.transactions); + c + }, + { + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + |payload: &$wrapper_type_full| { + let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; + c + } + ); + + impl Default for $wrapper_type_full { + fn default() -> Self { + Self { + execution_payload: $wrapped_type_full::default(), + } + } + } + + // FullPayload * from CoW reference to ExecutionPayload* (hopefully already owned). 
+ impl<'a, T: EthSpec> From>> for $wrapper_type_full { + fn from(execution_payload: Cow<'a, $wrapped_type_full>) -> Self { + Self { + execution_payload: $wrapped_type_full::from(execution_payload.into_owned()), + } + } + } + + impl TryFrom> for $wrapper_type_full { + type Error = Error; + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(Error::PayloadConversionLogicFlaw) + } + } + + impl TryFrom<$wrapped_type_header> for $wrapper_type_full { + type Error = Error; + fn try_from(_: $wrapped_type_header) -> Result { + Err(Error::PayloadConversionLogicFlaw) + } + } + + impl TryInto<$wrapper_type_full> for FullPayload { + type Error = Error; + + fn try_into(self) -> Result<$wrapper_type_full, Self::Error> { + match self { + FullPayload::$fork_variant(payload) => Ok(payload), + _ => Err(Error::PayloadConversionLogicFlaw), + } + } + } + }; +} + +impl_exec_payload_for_fork!( + BlindedPayloadMerge, + FullPayloadMerge, + ExecutionPayloadHeaderMerge, + ExecutionPayloadMerge, + Merge +); +impl_exec_payload_for_fork!( + BlindedPayloadCapella, + FullPayloadCapella, + ExecutionPayloadHeaderCapella, + ExecutionPayloadCapella, + Capella +); +impl_exec_payload_for_fork!( + BlindedPayloadEip4844, + FullPayloadEip4844, + ExecutionPayloadHeaderEip4844, + ExecutionPayloadEip4844, + Eip4844 +); + +impl AbstractExecPayload for BlindedPayload { + type Ref<'a> = BlindedPayloadRef<'a, T>; + type Merge = BlindedPayloadMerge; + type Capella = BlindedPayloadCapella; + type Eip4844 = BlindedPayloadEip4844; + + fn default_at_fork(fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(BlindedPayloadMerge::default().into()), + ForkName::Capella => Ok(BlindedPayloadCapella::default().into()), + ForkName::Eip4844 => Ok(BlindedPayloadEip4844::default().into()), + } + } +} + +impl From> for BlindedPayload { + fn from(payload: ExecutionPayload) -> Self { + // This implementation is a bit wasteful in 
that it discards the payload body. + // Required by the top-level constraint on AbstractExecPayload but could maybe be loosened + // in future. + map_execution_payload_into_blinded_payload!(payload, |inner, cons| cons(From::from( + Cow::Owned(inner) + ))) + } } impl From> for BlindedPayload { fn from(execution_payload_header: ExecutionPayloadHeader) -> Self { - Self { - execution_payload_header, + match execution_payload_header { + ExecutionPayloadHeader::Merge(execution_payload_header) => { + Self::Merge(BlindedPayloadMerge { + execution_payload_header, + }) + } + ExecutionPayloadHeader::Capella(execution_payload_header) => { + Self::Capella(BlindedPayloadCapella { + execution_payload_header, + }) + } + ExecutionPayloadHeader::Eip4844(execution_payload_header) => { + Self::Eip4844(BlindedPayloadEip4844 { + execution_payload_header, + }) + } } } } impl From> for ExecutionPayloadHeader { fn from(blinded: BlindedPayload) -> Self { - blinded.execution_payload_header - } -} - -impl From> for BlindedPayload { - fn from(execution_payload: ExecutionPayload) -> Self { - Self { - execution_payload_header: ExecutionPayloadHeader::from(&execution_payload), + match blinded { + BlindedPayload::Merge(blinded_payload) => { + ExecutionPayloadHeader::Merge(blinded_payload.execution_payload_header) + } + BlindedPayload::Capella(blinded_payload) => { + ExecutionPayloadHeader::Capella(blinded_payload.execution_payload_header) + } + BlindedPayload::Eip4844(blinded_payload) => { + ExecutionPayloadHeader::Eip4844(blinded_payload.execution_payload_header) + } } } } - -impl TreeHash for BlindedPayload { - fn tree_hash_type() -> tree_hash::TreeHashType { - >::tree_hash_type() - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - self.execution_payload_header.tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - >::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - self.execution_payload_header.tree_hash_root() - } -} - 
-impl Decode for BlindedPayload { - fn is_ssz_fixed_len() -> bool { - as Decode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - as Decode>::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Ok(Self { - execution_payload_header: ExecutionPayloadHeader::from_ssz_bytes(bytes)?, - }) - } -} - -impl Encode for BlindedPayload { - fn is_ssz_fixed_len() -> bool { - as Encode>::is_ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.execution_payload_header.ssz_append(buf) - } - - fn ssz_bytes_len(&self) -> usize { - self.execution_payload_header.ssz_bytes_len() - } -} - -#[derive(Default, Debug, Clone, Serialize, Deserialize, TestRandom, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] -#[serde(bound = "T: EthSpec")] -pub struct FullPayload { - pub execution_payload: ExecutionPayload, -} - -impl From> for FullPayload { - fn from(execution_payload: ExecutionPayload) -> Self { - Self { execution_payload } - } -} - -impl TryFrom> for FullPayload { - type Error = (); - - fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(()) - } -} - -impl TreeHash for FullPayload { - fn tree_hash_type() -> tree_hash::TreeHashType { - >::tree_hash_type() - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - self.execution_payload.tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - >::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - self.execution_payload.tree_hash_root() - } -} - -impl Decode for FullPayload { - fn is_ssz_fixed_len() -> bool { - as Decode>::is_ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Ok(FullPayload { - execution_payload: Decode::from_ssz_bytes(bytes)?, - }) - } -} - -impl Encode for FullPayload { - fn is_ssz_fixed_len() -> bool { - as Encode>::is_ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.execution_payload.ssz_append(buf) - } - - fn ssz_bytes_len(&self) -> usize { - 
self.execution_payload.ssz_bytes_len() - } -} diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 2a65bff66f..1b9903ebbe 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -9,7 +9,19 @@ use tree_hash_derive::TreeHash; /// An attestation that has been included in the state but not yet fully processed. /// /// Spec v0.12.1 -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] +#[arbitrary(bound = "T: EthSpec")] pub struct PendingAttestation { pub aggregation_bits: BitList, pub data: AttestationData, @@ -19,18 +31,6 @@ pub struct PendingAttestation { pub proposer_index: u64, } -#[cfg(feature = "arbitrary-fuzz")] -impl arbitrary::Arbitrary<'_> for PendingAttestation { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - Ok(Self { - aggregation_bits: >::arbitrary(u)?, - data: AttestationData::arbitrary(u)?, - inclusion_delay: u64::arbitrary(u)?, - proposer_index: u64::arbitrary(u)?, - }) - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index fc5aa87300..20c78f0515 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -184,6 +184,27 @@ impl BellatrixPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct CapellaPreset { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_bls_to_execution_changes: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_withdrawals_per_payload: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_validators_per_withdrawals_sweep: u64, +} + +impl CapellaPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + 
max_bls_to_execution_changes: T::max_bls_to_execution_changes() as u64, + max_withdrawals_per_payload: T::max_withdrawals_per_payload() as u64, + max_validators_per_withdrawals_sweep: spec.max_validators_per_withdrawals_sweep, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -219,6 +240,9 @@ mod test { let bellatrix: BellatrixPreset = preset_from_file(&preset_name, "bellatrix.yaml"); assert_eq!(bellatrix, BellatrixPreset::from_chain_spec::(&spec)); + + let capella: CapellaPreset = preset_from_file(&preset_name, "capella.yaml"); + assert_eq!(capella, CapellaPreset::from_chain_spec::(&spec)); } #[test] diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index ca048b149a..1ac2464a47 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -9,9 +9,19 @@ use tree_hash_derive::TreeHash; /// Two conflicting proposals from the same proposer (validator). /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct ProposerSlashing { pub signed_header_1: SignedBeaconBlockHeader, diff --git a/consensus/types/src/relative_epoch.rs b/consensus/types/src/relative_epoch.rs index e681ce15c2..77a46b56e8 100644 --- a/consensus/types/src/relative_epoch.rs +++ b/consensus/types/src/relative_epoch.rs @@ -14,15 +14,11 @@ impl From for Error { } } -#[cfg(feature = "arbitrary-fuzz")] -use arbitrary::Arbitrary; - /// Defines the epochs relative to some epoch. Most useful when referring to the committees prior /// to and following some epoch. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] -#[derive(Debug, PartialEq, Clone, Copy)] +#[derive(Debug, PartialEq, Clone, Copy, arbitrary::Arbitrary)] pub enum RelativeEpoch { /// The prior epoch. Previous, diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs index 0a360b0155..f8bc8ba69f 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/selection_proof.rs @@ -7,8 +7,7 @@ use ssz::Encode; use std::cmp; use std::convert::TryInto; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Debug, Clone)] +#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] pub struct SelectionProof(Signature); impl SelectionProof { diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index 0047bd3ccd..6d86c05634 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -12,9 +12,20 @@ use tree_hash_derive::TreeHash; /// gossipsub topic. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SignedAggregateAndProof { /// The `AggregateAndProof` that was signed. 
pub message: AggregateAndProof, diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 5c40c4685c..70fb28fbe7 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -8,8 +8,7 @@ use superstruct::superstruct; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Eq, Hash, Clone, Copy)] +#[derive(arbitrary::Arbitrary, PartialEq, Eq, Hash, Clone, Copy)] pub struct SignedBeaconBlockHash(Hash256); impl fmt::Debug for SignedBeaconBlockHash { @@ -38,7 +37,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. #[superstruct( - variants(Base, Altair, Merge), + variants(Base, Altair, Merge, Capella, Eip4844), variant_attributes( derive( Debug, @@ -49,35 +48,42 @@ impl From for Hash256 { Decode, TreeHash, Derivative, + arbitrary::Arbitrary ), derivative(PartialEq, Hash(bound = "E: EthSpec")), - cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), - serde(bound = "E: EthSpec, Payload: ExecPayload"), + serde(bound = "E: EthSpec, Payload: AbstractExecPayload"), + arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), ), map_into(BeaconBlock), map_ref_into(BeaconBlockRef), map_ref_mut_into(BeaconBlockRefMut) )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, +)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] -#[serde(bound = "E: EthSpec, Payload: ExecPayload")] -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] +#[arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct SignedBeaconBlock = 
FullPayload> { +pub struct SignedBeaconBlock = FullPayload> { #[superstruct(only(Base), partial_getter(rename = "message_base"))] pub message: BeaconBlockBase, #[superstruct(only(Altair), partial_getter(rename = "message_altair"))] pub message: BeaconBlockAltair, #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] pub message: BeaconBlockMerge, + #[superstruct(only(Capella), partial_getter(rename = "message_capella"))] + pub message: BeaconBlockCapella, + #[superstruct(only(Eip4844), partial_getter(rename = "message_eip4844"))] + pub message: BeaconBlockEip4844, pub signature: Signature, } pub type SignedBlindedBeaconBlock = SignedBeaconBlock>; -impl> SignedBeaconBlock { +impl> SignedBeaconBlock { /// Returns the name of the fork pertaining to `self`. /// /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork @@ -129,6 +135,12 @@ impl> SignedBeaconBlock { BeaconBlock::Merge(message) => { SignedBeaconBlock::Merge(SignedBeaconBlockMerge { message, signature }) } + BeaconBlock::Capella(message) => { + SignedBeaconBlock::Capella(SignedBeaconBlockCapella { message, signature }) + } + BeaconBlock::Eip4844(message) => { + SignedBeaconBlock::Eip4844(SignedBeaconBlockEip4844 { message, signature }) + } } } @@ -258,7 +270,7 @@ impl From>> impl SignedBeaconBlockMerge> { pub fn into_full_block( self, - execution_payload: ExecutionPayload, + execution_payload: ExecutionPayloadMerge, ) -> SignedBeaconBlockMerge> { let SignedBeaconBlockMerge { message: @@ -278,7 +290,7 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayload { .. }, + execution_payload: BlindedPayloadMerge { .. 
}, }, }, signature, @@ -299,7 +311,117 @@ impl SignedBeaconBlockMerge> { deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayload { execution_payload }, + execution_payload: FullPayloadMerge { execution_payload }, + }, + }, + signature, + } + } +} + +impl SignedBeaconBlockCapella> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayloadCapella, + ) -> SignedBeaconBlockCapella> { + let SignedBeaconBlockCapella { + message: + BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadCapella { .. }, + bls_to_execution_changes, + }, + }, + signature, + } = self; + SignedBeaconBlockCapella { + message: BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadCapella { execution_payload }, + bls_to_execution_changes, + }, + }, + signature, + } + } +} + +impl SignedBeaconBlockEip4844> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayloadEip4844, + ) -> SignedBeaconBlockEip4844> { + let SignedBeaconBlockEip4844 { + message: + BeaconBlockEip4844 { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadEip4844 { .. 
}, + bls_to_execution_changes, + blob_kzg_commitments, + }, + }, + signature, + } = self; + SignedBeaconBlockEip4844 { + message: BeaconBlockEip4844 { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyEip4844 { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadEip4844 { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, }, }, signature, @@ -312,12 +434,23 @@ impl SignedBeaconBlock> { self, execution_payload: Option>, ) -> Option>> { - let full_block = match self { - SignedBeaconBlock::Base(block) => SignedBeaconBlock::Base(block.into()), - SignedBeaconBlock::Altair(block) => SignedBeaconBlock::Altair(block.into()), - SignedBeaconBlock::Merge(block) => { - SignedBeaconBlock::Merge(block.into_full_block(execution_payload?)) + let full_block = match (self, execution_payload) { + (SignedBeaconBlock::Base(block), _) => SignedBeaconBlock::Base(block.into()), + (SignedBeaconBlock::Altair(block), _) => SignedBeaconBlock::Altair(block.into()), + (SignedBeaconBlock::Merge(block), Some(ExecutionPayload::Merge(payload))) => { + SignedBeaconBlock::Merge(block.into_full_block(payload)) } + (SignedBeaconBlock::Capella(block), Some(ExecutionPayload::Capella(payload))) => { + SignedBeaconBlock::Capella(block.into_full_block(payload)) + } + (SignedBeaconBlock::Eip4844(block), Some(ExecutionPayload::Eip4844(payload))) => { + SignedBeaconBlock::Eip4844(block.into_full_block(payload)) + } + // avoid wildcard matching forks so that compiler will + // direct us here when a new fork has been added + (SignedBeaconBlock::Merge(_), _) => return None, + (SignedBeaconBlock::Capella(_), _) => return None, + (SignedBeaconBlock::Eip4844(_), _) => return None, }; Some(full_block) } @@ -354,6 +487,24 @@ impl SignedBeaconBlock { } } +impl> ForkVersionDeserialize + for SignedBeaconBlock +{ + fn deserialize_by_fork<'de, 
D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!( + fork_name, + Self, + serde_json::from_value(value).map_err(|e| serde::de::Error::custom(format!( + "SignedBeaconBlock failed to deserialize: {:?}", + e + )))? + )) + } +} + #[cfg(test)] mod test { use super::*; diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index dc786beb6e..c265eded1d 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -10,9 +10,19 @@ use tree_hash_derive::TreeHash; /// A signed header of a `BeaconBlock`. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Eq, + Hash, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SignedBeaconBlockHeader { pub message: BeaconBlockHeader, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs new file mode 100644 index 0000000000..2b17095ae7 --- /dev/null +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -0,0 +1,33 @@ +use crate::test_utils::TestRandom; +use crate::*; +use bls::Signature; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct SignedBlsToExecutionChange { + pub message: BlsToExecutionChange, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(SignedBlsToExecutionChange); +} diff --git 
a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/signed_contribution_and_proof.rs index 245d33ff48..4cb3588433 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/signed_contribution_and_proof.rs @@ -10,9 +10,20 @@ use tree_hash_derive::TreeHash; /// A Validators signed contribution proof to publish on the `sync_committee_contribution_and_proof` /// gossipsub topic. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SignedContributionAndProof { /// The `ContributionAndProof` that was signed. pub message: ContributionAndProof, diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 69f0e6e2c9..3392826a62 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -9,9 +9,18 @@ use tree_hash_derive::TreeHash; /// An exit voluntarily submitted a validator who wishes to withdraw. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SignedVoluntaryExit { pub message: VoluntaryExit, diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/signing_data.rs index 61f7e839fa..b80d4a40d5 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/signing_data.rs @@ -7,8 +7,18 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] pub struct SigningData { pub object_root: Hash256, pub domain: Hash256, diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 277aa9deae..2716367c7e 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -24,13 +24,35 @@ use std::iter::Iterator; #[cfg(feature = "legacy-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive( + arbitrary::Arbitrary, + Clone, + Copy, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] #[serde(transparent)] pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, 
Hash, Serialize, Deserialize)] +#[derive( + arbitrary::Arbitrary, + Clone, + Copy, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] #[serde(transparent)] pub struct Epoch(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index e1de277615..fd06eb78a1 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -18,8 +18,7 @@ lazy_static! { }; } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct SubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 2292b02111..300c86fc0f 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -20,12 +20,21 @@ impl From for Error { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary, )] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SyncAggregate { pub sync_committee_bits: BitVector, pub sync_committee_signature: AggregateSignature, diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index 963b9dc604..9e72438be2 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -6,9 +6,18 @@ use ssz_derive::{Decode, Encode}; use 
test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Clone, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Hash, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct SyncAggregatorSelectionData { pub slot: Slot, diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index 598d5fc16f..43ba23f121 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -25,9 +25,20 @@ impl From for Error { } } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SyncCommittee { pub pubkeys: FixedVector, pub aggregate_pubkey: PublicKeyBytes, diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index c79ceb92fb..ef8b52becf 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -15,9 +15,20 @@ pub enum Error { } /// An aggregation of `SyncCommitteeMessage`s, used in creating a `SignedContributionAndProof`. 
-#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary, +)] #[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] pub struct SyncCommitteeContribution { pub slot: Slot, pub beacon_block_root: Hash256, diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index 21dfd9c288..5c2fb08374 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -8,8 +8,18 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// The data upon which a `SyncCommitteeContribution` is based. -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] pub struct SyncCommitteeMessage { pub slot: Slot, pub beacon_block_root: Hash256, diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs index 51395c0c13..570abace1e 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_selection_proof.rs @@ -12,8 +12,7 @@ use ssz_types::typenum::Unsigned; use std::cmp; use std::convert::TryInto; -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Debug, Clone)] +#[derive(arbitrary::Arbitrary, PartialEq, Debug, Clone)] pub struct SyncSelectionProof(Signature); impl SyncSelectionProof { diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 9babe32395..11bcf26894 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ 
b/consensus/types/src/sync_subnet_id.rs @@ -19,8 +19,7 @@ lazy_static! { }; } -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct SyncSubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); diff --git a/consensus/types/src/tree_hash_impls.rs b/consensus/types/src/tree_hash_impls.rs index ec23927d30..34043c0e83 100644 --- a/consensus/types/src/tree_hash_impls.rs +++ b/consensus/types/src/tree_hash_impls.rs @@ -17,7 +17,7 @@ impl CachedTreeHash for Validator { /// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition. /// - /// Specifically, we assume that the `pubkey` and `withdrawal_credentials` fields are constant. + /// Specifically, we assume that the `pubkey` field is constant. fn recalculate_tree_hash_root( &self, arena: &mut CacheArena, @@ -29,8 +29,8 @@ impl CachedTreeHash for Validator { .iter_mut(arena)? .enumerate() .flat_map(|(i, leaf)| { - // Fields pubkey and withdrawal_credentials are constant - if (i == 0 || i == 1) && cache.initialized { + // Pubkey field (index 0) is constant. + if i == 0 && cache.initialized { None } else if process_field_by_index(self, i, leaf, !cache.initialized) { Some(i) diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 21a6b39b6d..43b892cdf3 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,5 +1,6 @@ use crate::{ - test_utils::TestRandom, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, + PublicKeyBytes, }; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -9,8 +10,18 @@ use tree_hash_derive::TreeHash; /// Information about a `BeaconChain` validator. 
/// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, +)] pub struct Validator { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, @@ -65,6 +76,49 @@ impl Validator { // Has not yet been activated && self.activation_epoch == spec.far_future_epoch } + + /// Returns `true` if the validator has eth1 withdrawal credential. + pub fn has_eth1_withdrawal_credential(&self, spec: &ChainSpec) -> bool { + self.withdrawal_credentials + .as_bytes() + .first() + .map(|byte| *byte == spec.eth1_address_withdrawal_prefix_byte) + .unwrap_or(false) + } + + /// Get the eth1 withdrawal address if this validator has one initialized. + pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option
{ + self.has_eth1_withdrawal_credential(spec) + .then(|| { + self.withdrawal_credentials + .as_bytes() + .get(12..) + .map(Address::from_slice) + }) + .flatten() + } + + /// Changes withdrawal credentials to the provided eth1 execution address. + /// + /// WARNING: this function does NO VALIDATION - it just does it! + pub fn change_withdrawal_credentials(&mut self, execution_address: &Address, spec: &ChainSpec) { + let mut bytes = [0u8; 32]; + bytes[0] = spec.eth1_address_withdrawal_prefix_byte; + bytes[12..].copy_from_slice(execution_address.as_bytes()); + self.withdrawal_credentials = Hash256::from(bytes); + } + + /// Returns `true` if the validator is fully withdrawable at some epoch. + pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { + self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 + } + + /// Returns `true` if the validator is partially withdrawable. + pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool { + self.has_eth1_withdrawal_credential(spec) + && self.effective_balance == spec.max_effective_balance + && balance > spec.max_effective_balance + } } impl Default for Validator { diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index cc10632d07..20c84986c2 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -11,9 +11,18 @@ use tree_hash_derive::TreeHash; /// An exit voluntarily submitted a validator who wishes to withdraw. /// /// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + arbitrary::Arbitrary, + Debug, + PartialEq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. 
diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs new file mode 100644 index 0000000000..5221ff63f0 --- /dev/null +++ b/consensus/types/src/withdrawal.rs @@ -0,0 +1,37 @@ +use crate::test_utils::TestRandom; +use crate::*; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + arbitrary::Arbitrary, + Debug, + PartialEq, + Eq, + Hash, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +pub struct Withdrawal { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub index: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + pub address: Address, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub amount: u64, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(Withdrawal); +} diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 1129e710f4..feda81d030 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.62.1-bullseye AS builder +FROM rust:1.65.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . 
lighthouse ARG PORTABLE diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 9e91f425a7..7700f23d9d 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -4,7 +4,10 @@ use ssz::Encode; use std::fs::File; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; -use types::{EthSpec, ExecutionPayloadHeader}; +use types::{ + EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ForkName, +}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let eth1_block_hash = parse_required(matches, "execution-block-hash")?; @@ -17,15 +20,36 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; let gas_limit = parse_required(matches, "gas-limit")?; let file_name = matches.value_of("file").ok_or("No file supplied")?; + let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Merge); - let execution_payload_header: ExecutionPayloadHeader = ExecutionPayloadHeader { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeader::default() + let execution_payload_header: ExecutionPayloadHeader = match fork_name { + ForkName::Base | ForkName::Altair => return Err("invalid fork name".to_string()), + ForkName::Merge => ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderMerge::default() + }), + ForkName::Capella => ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderCapella::default() + }), + ForkName::Eip4844 
=> ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844 { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderEip4844::default() + }), }; + let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; let bytes = execution_payload_header.as_ssz_bytes(); file.write_all(bytes.as_slice()) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 137a4534b4..d2e852ceca 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -371,7 +371,8 @@ fn main() { .subcommand( SubCommand::with_name("create-payload-header") .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \ - Useful as input for `lcli new-testnet --execution-payload-header FILE`. ") + Useful as input for `lcli new-testnet --execution-payload-header FILE`. If `--fork` \ + is not provided, a payload header for the `Bellatrix` fork will be created.") .arg( Arg::with_name("execution-block-hash") .long("execution-block-hash") @@ -417,7 +418,15 @@ fn main() { .takes_value(true) .required(true) .help("Output file"), - ) + ).arg( + Arg::with_name("fork") + .long("fork") + .value_name("FORK") + .takes_value(true) + .default_value("bellatrix") + .help("The fork for which the execution payload header should be created.") + .possible_values(&["merge", "bellatrix", "capella", "eip4844"]) + ) ) .subcommand( SubCommand::with_name("new-testnet") @@ -732,7 +741,6 @@ fn main() { .value_name("PATH") .takes_value(true) .conflicts_with("beacon-url") - .requires("pre-state-path") .help("Path to load a SignedBeaconBlock from file as SSZ."), ) .arg( diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index b2760829cb..4d194ff10b 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -9,7 +9,9 @@ use std::io::Read; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; use types::{ - test_utils::generate_deterministic_keypairs, 
Address, Config, EthSpec, ExecutionPayloadHeader, + test_utils::generate_deterministic_keypairs, Address, Config, Epoch, EthSpec, + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ForkName, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -79,8 +81,25 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul .map_err(|e| format!("Unable to open {}: {}", filename, e))?; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - ExecutionPayloadHeader::::from_ssz_bytes(bytes.as_slice()) - .map_err(|e| format!("SSZ decode failed: {:?}", e)) + let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( + "genesis fork must be post-merge".to_string(), + )), + ForkName::Merge => { + ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Merge) + } + ForkName::Capella => { + ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Capella) + } + ForkName::Eip4844 => { + ExecutionPayloadHeaderEip4844::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Eip4844) + } + } + .map_err(|e| format!("SSZ decode failed: {:?}", e)) }) .transpose()?; @@ -88,9 +107,9 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul execution_payload_header.as_ref() { let eth1_block_hash = - parse_optional(matches, "eth1-block-hash")?.unwrap_or(payload.block_hash); + parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash()); let genesis_time = - parse_optional(matches, "genesis-time")?.unwrap_or(payload.timestamp); + parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp()); (eth1_block_hash, genesis_time) } else { let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { 
diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index e88aa24857..a9e38a8927 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "3.4.0" authors = ["Sigma Prime "] edition = "2021" autotests = false -rust-version = "1.62" +rust-version = "1.65" [features] default = ["slasher-mdbx"] diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 0f24fe9f04..c5ce8793ad 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -13,7 +13,7 @@ lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] bincode = "1.3.1" byteorder = "1.3.4" eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index d969c9727d..79664a2622 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -23,7 +23,7 @@ serde_derive = "1.0.116" serde_repr = "0.1.6" serde_yaml = "0.8.13" eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" +eth2_ssz_derive = "0.3.1" tree_hash = "0.4.1" tree_hash_derive = "0.4.0" cached_tree_hash = { path = "../../consensus/cached_tree_hash" } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index b2af490dd0..1feba41c86 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.2.0 +TESTS_TAG := v1.3.0-rc.1 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 892b9a3770..f8ddc0a9f2 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -39,8 +39,10 @@ excluded_paths = [ "tests/.*/.*/ssz_static/LightClientOptimistic", # LightClientFinalityUpdate "tests/.*/.*/ssz_static/LightClientFinalityUpdate", - # Capella tests are 
disabled for now. - "tests/.*/capella", + # LightClientHeader + "tests/.*/.*/ssz_static/LightClientHeader", + # Eip4844 tests are disabled for now. + "tests/.*/eip4844", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index e15a2e2ca3..cd980e374e 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -65,6 +65,8 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. + ForkName::Capella => ForkName::Merge, // TODO: Check this when tests are released.. + ForkName::Eip4844 => ForkName::Capella, // TODO: Check this when tests are released.. } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 0283d13da4..59a8ebd41c 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -5,6 +5,7 @@ use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; use serde_derive::Deserialize; +use state_processing::per_epoch_processing::capella::process_historical_summaries_update; use state_processing::per_epoch_processing::{ altair, base, effective_balance_updates::process_effective_balance_updates, @@ -57,6 +58,8 @@ pub struct RandaoMixesReset; #[derive(Debug)] pub struct HistoricalRootsUpdate; #[derive(Debug)] +pub struct HistoricalSummariesUpdate; +#[derive(Debug)] pub struct ParticipationRecordUpdates; #[derive(Debug)] pub struct SyncCommitteeUpdates; @@ -77,6 +80,7 @@ type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); type_name!(SlashingsReset, "slashings_reset"); type_name!(RandaoMixesReset, "randao_mixes_reset"); type_name!(HistoricalRootsUpdate, 
"historical_roots_update"); +type_name!(HistoricalSummariesUpdate, "historical_summaries_update"); type_name!(ParticipationRecordUpdates, "participation_record_updates"); type_name!(SyncCommitteeUpdates, "sync_committee_updates"); type_name!(InactivityUpdates, "inactivity_updates"); @@ -97,7 +101,10 @@ impl EpochTransition for JustificationAndFinalization { justification_and_finalization_state.apply_changes_to_state(state); Ok(()) } - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => { let justification_and_finalization_state = altair::process_justification_and_finalization( state, @@ -118,13 +125,14 @@ impl EpochTransition for RewardsAndPenalties { validator_statuses.process_attestations(state)?; base::process_rewards_and_penalties(state, &mut validator_statuses, spec) } - BeaconState::Altair(_) | BeaconState::Merge(_) => { - altair::process_rewards_and_penalties( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_rewards_and_penalties( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ), } } } @@ -147,7 +155,10 @@ impl EpochTransition for Slashings { spec, )?; } - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => { process_slashings( state, altair::ParticipationCache::new(state, spec) @@ -187,7 +198,23 @@ impl EpochTransition for RandaoMixesReset { impl EpochTransition for HistoricalRootsUpdate { fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { - process_historical_roots_update(state) + match state { + BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Merge(_) => { + process_historical_roots_update(state) + } + 
_ => Ok(()), + } + } +} + +impl EpochTransition for HistoricalSummariesUpdate { + fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { + match state { + BeaconState::Capella(_) | BeaconState::Eip4844(_) => { + process_historical_summaries_update(state) + } + _ => Ok(()), + } } } @@ -205,9 +232,10 @@ impl EpochTransition for SyncCommitteeUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) => { - altair::process_sync_committee_updates(state, spec) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_sync_committee_updates(state, spec), } } } @@ -216,7 +244,10 @@ impl EpochTransition for InactivityUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_inactivity_updates( + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_inactivity_updates( state, &altair::ParticipationCache::new(state, spec).unwrap(), spec, @@ -229,9 +260,10 @@ impl EpochTransition for ParticipationFlagUpdates { fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) => { - altair::process_participation_flag_updates(state) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => altair::process_participation_flag_updates(state), } } } @@ -275,9 +307,18 @@ impl> Case for EpochProcessing { T::name() != "sync_committee_updates" && T::name() != "inactivity_updates" && T::name() != "participation_flag_updates" + && T::name() != 
"historical_summaries_update" } // No phase0 tests for Altair and later. - ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", + ForkName::Altair | ForkName::Merge => { + T::name() != "participation_record_updates" + && T::name() != "historical_summaries_update" + } + ForkName::Capella => { + T::name() != "participation_record_updates" + && T::name() != "historical_roots_update" + } + ForkName::Eip4844 => false, // TODO: revisit when tests are out } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index ae12447abf..f79e13005a 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -3,7 +3,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; -use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; +use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; use types::{BeaconState, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] @@ -61,6 +61,8 @@ impl Case for ForkTest { ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), + ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state), + ForkName::Eip4844 => panic!("eip4844 not supported"), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 039efb3684..31165d6329 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -311,6 +311,7 @@ impl Tester { .keypairs(vec![]) .genesis_state_ephemeral_store(case.anchor_state.clone()) 
.mock_execution_layer() + .recalculate_fork_times_with_genesis(0) .mock_execution_layer_all_payloads_valid() .build(); diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index dc139ac0b9..dbf6c70b29 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; +use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; @@ -38,8 +38,9 @@ impl LoadCase for GenesisInitialization { let meta: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; let execution_payload_header: Option> = if meta.execution_payload_header.unwrap_or(false) { - Some(ssz_decode_file( + Some(ssz_decode_file_with( &path.join("execution_payload_header.ssz_snappy"), + |bytes| ExecutionPayloadHeader::from_ssz_bytes(bytes, fork_name), )?) } else { None diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index a57abc2e07..c180774bb6 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -29,7 +29,7 @@ pub struct MerkleProofValidity { impl LoadCase for MerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); - let state = ssz_decode_state(&path.join("state.ssz_snappy"), spec)?; + let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?; let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; // Metadata does not exist in these tests but it is left like this just in case. 
let meta_path = path.join("meta.yaml"); diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index aaa725f567..71954405c0 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -3,17 +3,16 @@ use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; -use crate::type_name::TypeName; use serde_derive::Deserialize; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ - altair, base, process_attester_slashings, process_deposits, process_exits, - process_proposer_slashings, + altair, base, process_attester_slashings, process_bls_to_execution_changes, + process_deposits, process_exits, process_proposer_slashings, }, - process_sync_aggregate, VerifyBlockRoot, VerifySignatures, + process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures, }, ConsensusContext, }; @@ -21,8 +20,8 @@ use std::fmt::Debug; use std::path::Path; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, - EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit, - SyncAggregate, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, + SignedVoluntaryExit, SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -36,6 +35,12 @@ struct ExecutionMetadata { execution_valid: bool, } +/// Newtype for testing withdrawals. 
+#[derive(Debug, Clone, Deserialize)] +pub struct WithdrawalsPayload { + payload: FullPayload, +} + #[derive(Debug, Clone)] pub struct Operations> { metadata: Metadata, @@ -45,10 +50,8 @@ pub struct Operations> { pub post: Option>, } -pub trait Operation: TypeName + Debug + Sync + Sized { - fn handler_name() -> String { - Self::name().to_lowercase() - } +pub trait Operation: Debug + Sync + Sized { + fn handler_name() -> String; fn filename() -> String { format!("{}.ssz_snappy", Self::handler_name()) @@ -58,7 +61,7 @@ pub trait Operation: TypeName + Debug + Sync + Sized { true } - fn decode(path: &Path, spec: &ChainSpec) -> Result; + fn decode(path: &Path, fork_name: ForkName, spec: &ChainSpec) -> Result; fn apply_to( &self, @@ -69,7 +72,11 @@ pub trait Operation: TypeName + Debug + Sync + Sized { } impl Operation for Attestation { - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn handler_name() -> String { + "attestation".into() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -88,7 +95,10 @@ impl Operation for Attestation { &mut ctxt, spec, ), - BeaconState::Altair(_) | BeaconState::Merge(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Eip4844(_) => { altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec) } } @@ -100,7 +110,7 @@ impl Operation for AttesterSlashing { "attester_slashing".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -122,7 +132,11 @@ impl Operation for AttesterSlashing { } impl Operation for Deposit { - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn handler_name() -> String { + "deposit".into() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -146,7 +160,7 @@ impl Operation for ProposerSlashing { 
"proposer_slashing".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -172,7 +186,7 @@ impl Operation for SignedVoluntaryExit { "voluntary_exit".into() } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -195,7 +209,7 @@ impl Operation for BeaconBlock { "block.ssz_snappy".into() } - fn decode(path: &Path, spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, spec: &ChainSpec) -> Result { ssz_decode_file_with(path, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) } @@ -230,7 +244,7 @@ impl Operation for SyncAggregate { fork_name != ForkName::Base } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file(path) } @@ -258,8 +272,11 @@ impl Operation for FullPayload { fork_name != ForkName::Base && fork_name != ForkName::Altair } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { - ssz_decode_file(path) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(Into::into) } fn apply_to( @@ -273,7 +290,7 @@ impl Operation for FullPayload { .as_ref() .map_or(false, |e| e.execution_valid); if valid { - process_execution_payload(state, self, spec) + process_execution_payload::>(state, self.to_ref(), spec) } else { Err(BlockProcessingError::ExecutionInvalid) } @@ -292,8 +309,11 @@ impl Operation for BlindedPayload { fork_name != ForkName::Base && fork_name != ForkName::Altair } - fn decode(path: &Path, _spec: &ChainSpec) -> Result { - ssz_decode_file::>(path).map(Into::into) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + 
ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(Into::into) } fn apply_to( @@ -307,13 +327,72 @@ impl Operation for BlindedPayload { .as_ref() .map_or(false, |e| e.execution_valid); if valid { - process_execution_payload(state, self, spec) + process_execution_payload::>(state, self.to_ref(), spec) } else { Err(BlockProcessingError::ExecutionInvalid) } } } +impl Operation for WithdrawalsPayload { + fn handler_name() -> String { + "withdrawals".into() + } + + fn filename() -> String { + "execution_payload.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + } + + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| { + ExecutionPayload::from_ssz_bytes(bytes, fork_name) + }) + .map(|payload| WithdrawalsPayload { + payload: payload.into(), + }) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _: &Operations, + ) -> Result<(), BlockProcessingError> { + process_withdrawals::<_, FullPayload<_>>(state, self.payload.to_ref(), spec) + } +} + +impl Operation for SignedBlsToExecutionChange { + fn handler_name() -> String { + "bls_to_execution_change".into() + } + + fn filename() -> String { + "address_change.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair && fork_name != ForkName::Merge + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _extra: &Operations, + ) -> Result<(), BlockProcessingError> { + process_bls_to_execution_changes(state, &[self.clone()], VerifySignatures::True, spec) + } +} + impl> LoadCase for Operations { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = 
&testing_spec::(fork_name); @@ -337,7 +416,7 @@ impl> LoadCase for Operations { // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { - match O::decode(&path.join(O::filename()), spec) { + match O::decode(&path.join(O::filename()), fork_name, spec) { Ok(op) => (Some(op), None), Err(Error::InvalidBLSInput(error)) => (None, Some(error)), Err(e) => return Err(e), @@ -380,9 +459,11 @@ impl> Case for Operations { let mut expected = self.post.clone(); // Processing requires the committee caches. - state - .build_all_committee_caches(spec) - .expect("committee caches OK"); + // NOTE: some of the withdrawals tests have 0 active validators, do not try + // to build the commitee cache in this case. + if O::handler_name() != "withdrawals" { + state.build_all_committee_caches(spec).unwrap(); + } let mut result = self .operation diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 2c9134aba5..fb7ccfea64 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -42,6 +42,17 @@ impl LoadCase for TransitionTest { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); } + ForkName::Capella => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(metadata.fork_epoch); + } + ForkName::Eip4844 => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.eip4844_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 13f70fea71..07db7cd2a1 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -24,6 +24,11 @@ pub 
trait Handler { fn run(&self) { for fork_name in ForkName::list_all() { + // FIXME(eip4844): enable eip4844 + if fork_name == ForkName::Eip4844 { + continue; + } + if self.is_enabled_for_fork(fork_name) { self.run_for_fork(fork_name) } @@ -218,6 +223,10 @@ impl SszStaticHandler { Self::for_forks(vec![ForkName::Merge]) } + pub fn capella_only() -> Self { + Self::for_forks(vec![ForkName::Capella]) + } + pub fn merge_and_later() -> Self { Self::for_forks(ForkName::list_all()[2..].to_vec()) } @@ -365,6 +374,11 @@ impl Handler for SanitySlotsHandler { fn handler_name(&self) -> String { "slots".into() } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Some sanity tests compute sync committees, which requires real crypto. + fork_name == ForkName::Base || cfg!(not(feature = "fake_crypto")) + } } #[derive(Derivative)] @@ -533,10 +547,8 @@ impl Handler for ForkChoiceHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // Merge block tests are only enabled for Bellatrix or later. - if self.handler_name == "on_merge_block" - && (fork_name == ForkName::Base || fork_name == ForkName::Altair) - { + // Merge block tests are only enabled for Bellatrix. 
+ if self.handler_name == "on_merge_block" && fork_name != ForkName::Merge { return false; } diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 5c2ca3fb55..5ab2b4b7b4 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,10 +1,10 @@ pub use case_result::CaseResult; -pub use cases::Case; +pub use cases::WithdrawalsPayload; pub use cases::{ - EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates, - JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, - RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, - SyncCommitteeUpdates, + Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, HistoricalSummariesUpdate, + InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, + ParticipationRecordUpdates, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, + SlashingsReset, SyncCommitteeUpdates, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index c075e89b3f..0239293e09 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -1,4 +1,5 @@ //! Mapping from types to canonical string identifiers used in testing. 
+use types::historical_summary::HistoricalSummary; use types::*; pub trait TypeName { @@ -45,6 +46,8 @@ type_name_generic!(BeaconBlockBody); type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyMerge, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyEip4844, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(Checkpoint); @@ -54,8 +57,14 @@ type_name!(DepositData); type_name!(DepositMessage); type_name!(Eth1Data); type_name_generic!(ExecutionPayload); +type_name_generic!(ExecutionPayloadMerge, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadEip4844, "ExecutionPayload"); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); +type_name_generic!(ExecutionPayloadHeaderMerge, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderEip4844, "ExecutionPayloadHeader"); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); @@ -76,3 +85,7 @@ type_name_generic!(SyncAggregate); type_name_generic!(SyncCommittee); type_name!(Validator); type_name!(VoluntaryExit); +type_name!(Withdrawal); +type_name!(BlsToExecutionChange, "BLSToExecutionChange"); +type_name!(SignedBlsToExecutionChange, "SignedBLSToExecutionChange"); +type_name!(HistoricalSummary); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 87a6bec71b..8a7209b89b 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -82,6 +82,18 @@ fn operations_execution_payload_blinded() { OperationsHandler::>::default().run(); } +#[test] +fn operations_withdrawals() { + OperationsHandler::>::default().run(); 
+ OperationsHandler::>::default().run(); +} + +#[test] +fn operations_bls_to_execution_change() { + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); +} + #[test] fn sanity_blocks() { SanityBlocksHandler::::default().run(); @@ -203,6 +215,7 @@ macro_rules! ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; + use types::historical_summary::HistoricalSummary; use types::*; ssz_static_test!(aggregate_and_proof, AggregateAndProof<_>); @@ -250,6 +263,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::merge_only() .run(); + SszStaticHandler::, MinimalEthSpec>::capella_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::capella_only() + .run(); } // Altair and later @@ -302,18 +319,50 @@ mod ssz_static { // Merge and later #[test] fn execution_payload() { - SszStaticHandler::, MinimalEthSpec>::merge_and_later() + SszStaticHandler::, MinimalEthSpec>::merge_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_and_later() + SszStaticHandler::, MainnetEthSpec>::merge_only() + .run(); + SszStaticHandler::, MinimalEthSpec>::capella_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::capella_only() .run(); } #[test] fn execution_payload_header() { - SszStaticHandler::, MinimalEthSpec>::merge_and_later() + SszStaticHandler::, MinimalEthSpec>::merge_only() .run(); - SszStaticHandler::, MainnetEthSpec>::merge_and_later() + SszStaticHandler::, MainnetEthSpec>::merge_only() .run(); + SszStaticHandler::, MinimalEthSpec> + ::capella_only().run(); + SszStaticHandler::, MainnetEthSpec> + ::capella_only().run(); + } + + #[test] + fn withdrawal() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn bls_to_execution_change() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn 
signed_bls_to_execution_change() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); + } + + #[test] + fn historical_summary() { + SszStaticHandler::::capella_only().run(); + SszStaticHandler::::capella_only().run(); } } @@ -381,6 +430,12 @@ fn epoch_processing_historical_roots_update() { EpochProcessingHandler::::default().run(); } +#[test] +fn epoch_processing_historical_summaries_update() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + #[test] fn epoch_processing_participation_record_updates() { EpochProcessingHandler::::default().run(); diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index ee20129f87..bb416634e5 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -15,8 +15,8 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::time::sleep; use types::{ - Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - MainnetEthSpec, PublicKeyBytes, Slot, Uint256, + Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ForkName, FullPayload, + Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(30); @@ -110,6 +110,8 @@ impl TestRig { let (runtime_shutdown, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + let mut spec = MainnetEthSpec::default_spec(); + spec.terminal_total_difficulty = Uint256::zero(); let fee_recipient = None; @@ -151,9 +153,6 @@ impl TestRig { } }; - let mut spec = MainnetEthSpec::default_spec(); - spec.terminal_total_difficulty = Uint256::zero(); - Self { runtime, ee_a, @@ -271,6 +270,8 @@ impl TestRig { }; let 
proposer_index = 0; + // To save sending proposer preparation data, just set the fee recipient + // to the fee recipient configured for EE A. let prepared = self .ee_a .execution_layer @@ -278,13 +279,8 @@ impl TestRig { Slot::new(1), // Insert proposer for the next slot head_root, proposer_index, - PayloadAttributes { - timestamp, - prev_randao, - // To save sending proposer preparation data, just set the fee recipient - // to the fee recipient configured for EE A. - suggested_fee_recipient: Address::repeat_byte(42), - }, + // TODO: think about how to test different forks + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None), ) .await; @@ -317,22 +313,30 @@ impl TestRig { slot: Slot::new(0), chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self + .ee_a + .execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let valid_payload = self .ee_a .execution_layer .get_payload::>( parent_hash, - timestamp, - prev_randao, - proposer_index, + &payload_attributes, forkchoice_update_params, builder_params, + // FIXME: think about how to test other forks + ForkName::Merge, &self.spec, ) .await .unwrap() - .execution_payload; - assert_eq!(valid_payload.transactions.len(), pending_txs.len()); + .to_payload() + .execution_payload(); + assert_eq!(valid_payload.transactions().len(), pending_txs.len()); /* * Execution Engine A: @@ -340,7 +344,7 @@ impl TestRig { * Indicate that the payload is the head of the chain, before submitting a * `notify_new_payload`. */ - let head_block_hash = valid_payload.block_hash; + let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -380,7 +384,7 @@ impl TestRig { * * Do not provide payload attributes (we'll test that later). 
*/ - let head_block_hash = valid_payload.block_hash; + let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -416,7 +420,7 @@ impl TestRig { */ let mut invalid_payload = valid_payload.clone(); - invalid_payload.prev_randao = Hash256::from_low_u64_be(42); + *invalid_payload.prev_randao_mut() = Hash256::from_low_u64_be(42); let status = self .ee_a .execution_layer @@ -431,8 +435,8 @@ impl TestRig { * Produce another payload atop the previous one. */ - let parent_hash = valid_payload.block_hash; - let timestamp = valid_payload.timestamp + 1; + let parent_hash = valid_payload.block_hash(); + let timestamp = valid_payload.timestamp() + 1; let prev_randao = Hash256::zero(); let proposer_index = 0; let builder_params = BuilderParams { @@ -440,21 +444,29 @@ impl TestRig { slot: Slot::new(0), chain_health: ChainHealth::Healthy, }; + let suggested_fee_recipient = self + .ee_a + .execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); let second_payload = self .ee_a .execution_layer .get_payload::>( parent_hash, - timestamp, - prev_randao, - proposer_index, + &payload_attributes, forkchoice_update_params, builder_params, + // FIXME: think about how to test other forks + ForkName::Merge, &self.spec, ) .await .unwrap() - .execution_payload; + .to_payload() + .execution_payload(); /* * Execution Engine A: @@ -476,15 +488,13 @@ impl TestRig { * * Indicate that the payload is the head of the chain, providing payload attributes. 
*/ - let head_block_hash = valid_payload.block_hash; + let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); - let payload_attributes = PayloadAttributes { - timestamp: second_payload.timestamp + 1, - prev_randao: Hash256::zero(), - // To save sending proposer preparation data, just set the fee recipient - // to the fee recipient configured for EE A. - suggested_fee_recipient: Address::repeat_byte(42), - }; + // TODO: think about how to handle different forks + // To save sending proposer preparation data, just set the fee recipient + // to the fee recipient configured for EE A. + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); let validator_index = 0; @@ -528,7 +538,7 @@ impl TestRig { * * Set the second payload as the head, without providing payload attributes. */ - let head_block_hash = second_payload.block_hash; + let head_block_hash = second_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -580,7 +590,7 @@ impl TestRig { * * Set the second payload as the head, without providing payload attributes. */ - let head_block_hash = second_payload.block_hash; + let head_block_hash = second_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(42); @@ -609,7 +619,8 @@ async fn check_payload_reconstruction( ) { let reconstructed = ee .execution_layer - .get_payload_by_block_hash(payload.block_hash) + // FIXME: handle other forks here? 
+ .get_payload_by_block_hash(payload.block_hash(), ForkName::Merge) .await .unwrap() .unwrap(); diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 02f4f76d51..d34cdbc9ff 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,7 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, Unsigned}; +use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, Slot, Unsigned}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. @@ -228,7 +228,7 @@ pub async fn verify_transition_block_finalized( .map_err(|e| format!("Get state root via http failed: {:?}", e))? .message() .execution_payload() - .map(|payload| payload.execution_payload.block_hash) + .map(|payload| payload.block_hash()) .map_err(|e| format!("Execution payload does not exist: {:?}", e))?; block_hashes.push(execution_block_hash); } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index bef51a694a..6fd519ebaf 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -7,7 +7,6 @@ use crate::{ }; use crate::{http_metrics::metrics, validator_store::ValidatorStore}; use environment::RuntimeContext; -use eth2::types::Graffiti; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; @@ -15,7 +14,10 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use tokio::time::sleep; -use types::{BlindedPayload, BlockType, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot}; +use types::{ + AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes, + Slot, +}; #[derive(Debug)] pub enum BlockError { @@ -295,7 +297,7 @@ impl BlockService { } /// Produce a block at the given slot for 
validator_pubkey - async fn publish_block>( + async fn publish_block>( self, slot: Slot, validator_pubkey: PublicKeyBytes, @@ -468,6 +470,7 @@ impl BlockService { "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), "slot" => signed_block.slot().as_u64(), ); + Ok(()) } } diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 5aa24a2b02..d453d7038a 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -212,9 +212,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 0cb3417fc7..2d5b9b1db3 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -57,6 +57,11 @@ lazy_static::lazy_static! { "Total count of attempted block signings", &["status"] ); + pub static ref SIGNED_BLOBS_TOTAL: Result = try_create_int_counter_vec( + "vc_signed_beacon_blobs_total", + "Total count of attempted blob signings", + &["status"] + ); pub static ref SIGNED_ATTESTATIONS_TOTAL: Result = try_create_int_counter_vec( "vc_signed_attestations_total", "Total count of attempted Attestation signings", diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index de69d99003..ae9df08096 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -34,7 +34,7 @@ pub enum Error { } /// Enumerates all messages that can be signed by a validator. 
-pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload> { +pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload = FullPayload> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), AttestationData(&'a AttestationData), @@ -49,7 +49,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload> SignableMessage<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Payload> { /// Returns the `SignedRoot` for the contained message. /// /// The actual `SignedRoot` trait is not used since it also requires a `TreeHash` impl, which is @@ -116,7 +116,7 @@ impl SigningContext { impl SigningMethod { /// Return the signature of `signable_message`, with respect to the `signing_context`. - pub async fn get_signature>( + pub async fn get_signature>( &self, signable_message: SignableMessage<'_, T, Payload>, signing_context: SigningContext, @@ -141,7 +141,7 @@ impl SigningMethod { .await } - pub async fn get_signature_from_root>( + pub async fn get_signature_from_root>( &self, signable_message: SignableMessage<'_, T, Payload>, signing_root: Hash256, diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index cf02ae0c32..512cbc7d02 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -26,6 +26,8 @@ pub enum ForkName { Phase0, Altair, Bellatrix, + Capella, + Eip4844, } #[derive(Debug, PartialEq, Serialize)] @@ -36,7 +38,7 @@ pub struct ForkInfo { #[derive(Debug, PartialEq, Serialize)] #[serde(bound = "T: EthSpec", rename_all = "snake_case")] -pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { +pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> { AggregationSlot { slot: Slot, }, @@ -72,7 +74,7 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { ValidatorRegistration(&'a ValidatorRegistrationData), } -impl<'a, 
T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, T, Payload> { pub fn beacon_block(block: &'a BeaconBlock) -> Result { match block { BeaconBlock::Base(_) => Ok(Web3SignerObject::BeaconBlock { @@ -90,6 +92,16 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { block: None, block_header: Some(block.block_header()), }), + BeaconBlock::Capella(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Capella, + block: None, + block_header: Some(block.block_header()), + }), + BeaconBlock::Eip4844(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Eip4844, + block: None, + block_header: Some(block.block_header()), + }), } } @@ -116,7 +128,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { #[derive(Debug, PartialEq, Serialize)] #[serde(bound = "T: EthSpec")] -pub struct SigningRequest<'a, T: EthSpec, Payload: ExecPayload> { +pub struct SigningRequest<'a, T: EthSpec, Payload: AbstractExecPayload> { #[serde(rename = "type")] pub message_type: MessageType, #[serde(skip_serializing_if = "Option::is_none")] diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 292b49ac3a..36a0d05734 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -18,9 +18,9 @@ use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; use types::{ - attestation::Error as AttestationError, graffiti::GraffitiString, Address, AggregateAndProof, - Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, - EthSpec, ExecPayload, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, + attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, + AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, + Domain, Epoch, 
EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, @@ -454,7 +454,7 @@ impl ValidatorStore { .unwrap_or(self.builder_proposals) } - pub async fn sign_block>( + pub async fn sign_block>( &self, validator_pubkey: PublicKeyBytes, block: BeaconBlock,