From d8f7277bebac17f6b56bee0aab4ab3f7eb98981a Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 30 Dec 2022 11:00:14 -0500 Subject: [PATCH] cleanup --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 --- .../beacon_chain/src/execution_payload.rs | 4 +- .../src/engine_api/json_structures.rs | 6 +- beacon_node/execution_layer/src/lib.rs | 31 ++-- beacon_node/http_api/src/publish_blocks.rs | 5 + .../lighthouse_network/src/rpc/protocol.rs | 2 +- .../network/src/beacon_processor/mod.rs | 40 +---- .../beacon_processor/worker/gossip_methods.rs | 18 +-- .../beacon_processor/worker/rpc_methods.rs | 149 ------------------ beacon_node/network/src/sync/manager.rs | 7 +- common/eth2/src/lib.rs | 21 --- consensus/types/src/eth_spec.rs | 5 - consensus/types/src/payload.rs | 24 ++- lcli/src/create_payload_header.rs | 32 +++- lcli/src/main.rs | 13 +- lcli/src/new_testnet.rs | 28 +++- validator_client/src/signing_method.rs | 3 - .../src/signing_method/web3signer.rs | 4 - 18 files changed, 109 insertions(+), 302 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 69889014a5..edf0e149c7 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6,7 +6,6 @@ use crate::attestation_verification::{ use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; -use crate::blob_verification::{BlobError, VerifiedBlobsSidecar}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, @@ -1818,23 +1817,6 @@ impl BeaconChain { }) } - /// Accepts some `BlobsSidecar` received over from the network and attempts to verify it, - /// returning `Ok(_)` if it is valid to be (re)broadcast on the gossip network. 
- pub fn verify_blobs_sidecar_for_gossip<'a>( - &self, - blobs_sidecar: &'a BlobsSidecar, - ) -> Result, BlobError> { - metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); - let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); - VerifiedBlobsSidecar::verify(blobs_sidecar, self).map(|v| { - if let Some(_event_handler) = self.event_handler.as_ref() { - // TODO: Handle sse events - } - metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES); - v - }) - } - /// Accepts some 'LightClientFinalityUpdate' from the network and attempts to verify it pub fn verify_finality_update_for_gossip( self: &Arc, @@ -4479,7 +4461,6 @@ impl BeaconChain { .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, bls_to_execution_changes: bls_to_execution_changes.into(), - //FIXME(sean) get blobs blob_kzg_commitments: VariableList::from(kzg_commitments), }, }), diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index d52df4853d..619b713a33 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -460,7 +460,7 @@ where if is_terminal_block_hash_set && !is_activation_epoch_reached { // Use the "empty" payload if there's a terminal block hash, but we haven't reached the // terminal block epoch yet. - return Ok(BlockProposalContents::default_at_fork(fork)); + return BlockProposalContents::default_at_fork(fork).map_err(Into::into); } let terminal_pow_block_hash = execution_layer @@ -473,7 +473,7 @@ where } else { // If the merge transition hasn't occurred yet and the EL hasn't found the terminal // block, return an "empty" payload. 
-            return Ok(BlockProposalContents::default_at_fork(fork));
+            return BlockProposalContents::default_at_fork(fork).map_err(Into::into);
        }
    } else {
        latest_execution_payload_header_block_hash
diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs
index 13948affb5..c09541f3bd 100644
--- a/beacon_node/execution_layer/src/engine_api/json_structures.rs
+++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs
@@ -350,12 +350,14 @@ impl From<Withdrawal> for JsonWithdrawal {
 impl From<JsonWithdrawal> for Withdrawal {
     fn from(jw: JsonWithdrawal) -> Self {
+        // This comparison is to avoid a scenario where the EE gives us too large a number and
+        // we panic when it attempts to cast to a `u64`.
+        let amount = std::cmp::min(jw.amount / 1000000000, Uint256::from(u64::MAX));
         Self {
             index: jw.index,
             validator_index: jw.validator_index,
             address: jw.address,
-            //FIXME(sean) if EE gives us too large a number this panics
-            amount: (jw.amount / 1000000000).as_u64(),
+            amount: amount.as_u64(),
         }
     }
 }
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs
index e22da42a72..d79ac0c364 100644
--- a/beacon_node/execution_layer/src/lib.rs
+++ b/beacon_node/execution_layer/src/lib.rs
@@ -35,7 +35,7 @@ use tokio::{
     time::sleep,
 };
 use tokio_stream::wrappers::WatchStream;
-use types::{AbstractExecPayload, Blob, ExecPayload, KzgCommitment};
+use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment};
 use types::{
     BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName,
     ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256,
@@ -95,6 +95,13 @@ pub enum Error {
     FeeRecipientUnspecified,
     MissingLatestValidHash,
     InvalidJWTSecret(String),
+    BeaconStateError(BeaconStateError),
+}
+
+impl From<BeaconStateError> for Error {
+    fn from(e: BeaconStateError) -> Self {
+        Error::BeaconStateError(e)
+    }
 }
 
 impl From<ApiError> for Error {
@@ -153,17 +160,17 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>>
BlockProposalContents Some(blobs), } } - pub fn default_at_fork(fork_name: ForkName) -> Self { - match fork_name { + pub fn default_at_fork(fork_name: ForkName) -> Result { + Ok(match fork_name { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BlockProposalContents::Payload(Payload::default_at_fork(fork_name)) + BlockProposalContents::Payload(Payload::default_at_fork(fork_name)?) } ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs { - payload: Payload::default_at_fork(fork_name), + payload: Payload::default_at_fork(fork_name)?, blobs: vec![], kzg_commitments: vec![], }, - } + }) } } @@ -803,10 +810,6 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - //FIXME(sean) the builder API needs to be updated - // NOTE the comment above was removed in the - // rebase with unstable.. I think it goes - // here now? BlockProposalContents::Payload(relay.data.message.header), )), Err(reason) if !reason.payload_invalid() => { @@ -858,19 +861,11 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - //FIXME(sean) the builder API needs to be updated - // NOTE the comment above was removed in the - // rebase with unstable.. I think it goes - // here now? BlockProposalContents::Payload(relay.data.message.header), )), // If the payload is valid then use it. The local EE failed // to produce a payload so we have no alternative. Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( - //FIXME(sean) the builder API needs to be updated - // NOTE the comment above was removed in the - // rebase with unstable.. I think it goes - // here now? 
BlockProposalContents::Payload(relay.data.message.header), )), Err(reason) => { diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 9e85a8b5c6..83ab8ceee6 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -189,6 +189,11 @@ async fn reconstruct_block( .spec .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Default payload construction error: {e:?}" + )) + })? .into() // If we already have an execution payload with this transactions root cached, use it. } else if let Some(cached_payload) = diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 8bf7283462..691b16e419 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -119,8 +119,8 @@ lazy_static! { pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The maximum bytes that can be sent across the RPC post-merge. pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M - //FIXME(sean) should these be the same? pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M + // FIXME(sean) should this be increased to account for blobs? pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 445d144ac7..158379b7e1 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -115,7 +115,8 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; /// before we start dropping them. 
const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; -//FIXME(sean) verify +/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that +/// will be stored before we start dropping them. const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024; /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but @@ -1186,7 +1187,6 @@ impl BeaconProcessor { // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, toolbox); - //FIXME(sean) } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() { self.spawn_worker(item, toolbox); // Check the aggregates, *then* the unaggregates since we assume that @@ -1675,23 +1675,9 @@ impl BeaconProcessor { /* * Verification for blobs sidecars received on gossip. */ - Work::GossipBlockAndBlobsSidecar { - message_id, - peer_id, - peer_client, - block_and_blobs, - seen_timestamp, - } => task_spawner.spawn_async(async move { - worker - .process_gossip_block_and_blobs_sidecar( - message_id, - peer_id, - peer_client, - block_and_blobs, - seen_timestamp, - ) - .await - }), + Work::GossipBlockAndBlobsSidecar { .. } => { + warn!(self.log, "Unexpected block and blobs on gossip") + } /* * Import for blocks that we received earlier than their intended slot. */ @@ -1892,19 +1878,9 @@ impl BeaconProcessor { request, ) }), - Work::BlobsByRangeRequest { - peer_id, - request_id, - request, - } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { - worker.handle_blobs_by_range_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - }), + Work::BlobsByRangeRequest { .. } => { + warn!(self.log.clone(), "Unexpected BlobsByRange Request") + } /* * Processing of lightclient bootstrap requests from other peers. 
*/ diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 14d69898f7..589d7e9b47 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -11,10 +11,7 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer, }; -use lighthouse_network::{ - Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource, - SignedBeaconBlockAndBlobsSidecar, -}; +use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; @@ -699,19 +696,6 @@ impl Worker { } } - #[allow(clippy::too_many_arguments)] - pub async fn process_gossip_block_and_blobs_sidecar( - self, - _message_id: MessageId, - _peer_id: PeerId, - _peer_client: Client, - _block_and_blob: Arc>, - _seen_timestamp: Duration, - ) { - //FIXME - unimplemented!() - } - /// Process the beacon block received from the gossip network and /// if it passes gossip propagation criteria, tell the network thread to forward it. 
/// diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index c3a452acd4..bfa0ea516f 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -4,7 +4,6 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use itertools::process_results; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MAX_REQUEST_BLOBS_SIDECARS}; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; @@ -455,152 +454,4 @@ impl Worker { "load_blocks_by_range_blocks", ); } - - /// Handle a `BlobsByRange` request from the peer. - pub fn handle_blobs_by_range_request( - self, - _executor: TaskExecutor, - _send_on_drop: SendOnDrop, - peer_id: PeerId, - _request_id: PeerRequestId, - mut req: BlobsByRangeRequest, - ) { - debug!(self.log, "Received BlobsByRange Request"; - "peer_id" => %peer_id, - "count" => req.count, - "start_slot" => req.start_slot, - ); - - // Should not send more than max request blocks - if req.count > MAX_REQUEST_BLOBS_SIDECARS { - req.count = MAX_REQUEST_BLOBS_SIDECARS; - } - - //FIXME(sean) create the blobs iter - - // let forwards_block_root_iter = match self - // .chain - // .forwards_iter_block_roots(Slot::from(req.start_slot)) - // { - // Ok(iter) => iter, - // Err(BeaconChainError::HistoricalBlockError( - // HistoricalBlockError::BlockOutOfRange { - // slot, - // oldest_block_slot, - // }, - // )) => { - // debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); - // return self.send_error_response( - // peer_id, - // RPCResponseErrorCode::ResourceUnavailable, - // "Backfilling".into(), - // request_id, - // 
); - // } - // Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), - // }; - // - // // Pick out the required blocks, ignoring skip-slots. - // let mut last_block_root = None; - // let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - // iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // // map skip slots to None - // .map(|(root, _)| { - // let result = if Some(root) == last_block_root { - // None - // } else { - // Some(root) - // }; - // last_block_root = Some(root); - // result - // }) - // .collect::>>() - // }); - // - // let block_roots = match maybe_block_roots { - // Ok(block_roots) => block_roots, - // Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), - // }; - // - // // remove all skip slots - // let block_roots = block_roots.into_iter().flatten().collect::>(); - // - // // Fetching blocks is async because it may have to hit the execution layer for payloads. 
- // executor.spawn( - // async move { - // let mut blocks_sent = 0; - // let mut send_response = true; - // - // for root in block_roots { - // match self.chain.store.get_blobs(&root) { - // Ok(Some(blob)) => { - // blocks_sent += 1; - // self.send_network_message(NetworkMessage::SendResponse { - // peer_id, - // response: Response::BlobsByRange(Some(Arc::new(VariableList::new(vec![blob.message]).unwrap()))), - // id: request_id, - // }); - // } - // Ok(None) => { - // error!( - // self.log, - // "Blob in the chain is not in the store"; - // "request_root" => ?root - // ); - // break; - // } - // Err(e) => { - // error!( - // self.log, - // "Error fetching block for peer"; - // "block_root" => ?root, - // "error" => ?e - // ); - // break; - // } - // } - // } - // - // let current_slot = self - // .chain - // .slot() - // .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); - // - // if blocks_sent < (req.count as usize) { - // debug!( - // self.log, - // "BlocksByRange Response processed"; - // "peer" => %peer_id, - // "msg" => "Failed to return all requested blocks", - // "start_slot" => req.start_slot, - // "current_slot" => current_slot, - // "requested" => req.count, - // "returned" => blocks_sent - // ); - // } else { - // debug!( - // self.log, - // "BlocksByRange Response processed"; - // "peer" => %peer_id, - // "start_slot" => req.start_slot, - // "current_slot" => current_slot, - // "requested" => req.count, - // "returned" => blocks_sent - // ); - // } - // - // if send_response { - // // send the stream terminator - // self.send_network_message(NetworkMessage::SendResponse { - // peer_id, - // response: Response::BlobsByRange(None), - // id: request_id, - // }); - // } - // - // drop(send_on_drop); - // }, - // "load_blocks_by_range_blocks", - // ); - } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a1eeda84ed..0548b0906b 100644 --- a/beacon_node/network/src/sync/manager.rs +++ 
b/beacon_node/network/src/sync/manager.rs @@ -47,7 +47,7 @@ use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; -use slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use std::boxed::Box; use std::ops::Sub; use std::sync::Arc; @@ -592,8 +592,9 @@ impl SyncManager { .block_lookups .parent_chain_processed(chain_hash, result, &mut self.network), }, - //FIXME(sean) - SyncMessage::RpcBlob { .. } => todo!(), + SyncMessage::RpcBlob { .. } => { + warn!(self.log, "Unexpected blob message received"); + } } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index fcfff7284a..752e472e24 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -628,27 +628,6 @@ impl BeaconNodeHttpClient { Ok(()) } - /// `POST beacon/blobs` - /// - /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blobs( - &self, - block: &BlobsSidecar, - ) -> Result<(), Error> { - let mut path = self.eth_path(V1)?; - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("beacon") - .push("blobs"); - - //FIXME(sean) should we re-use the proposal timeout? seems reasonable to.. - self.post_with_timeout(path, block, self.timeouts.proposal) - .await?; - - Ok(()) - } - /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 661484fde8..5ed5307ffd 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -247,11 +247,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn max_blobs_per_block() -> usize { Self::MaxBlobsPerBlock::to_usize() } - - /// FIXME: why is this called chunks_per_blob?? 
- fn chunks_per_blob() -> usize { - Self::FieldElementsPerBlob::to_usize() - } } /// Macro to inherit some type values from another EthSpec. diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 8bba00b46d..f56b88fc92 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -92,7 +92,7 @@ pub trait AbstractExecPayload: + From> + TryFrom>; - fn default_at_fork(fork_name: ForkName) -> Self; + fn default_at_fork(fork_name: ForkName) -> Result; } #[superstruct( @@ -372,13 +372,12 @@ impl AbstractExecPayload for FullPayload { type Capella = FullPayloadCapella; type Eip4844 = FullPayloadEip4844; - fn default_at_fork(fork_name: ForkName) -> Self { + fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - //FIXME(sean) error handling - ForkName::Base | ForkName::Altair => panic!(), - ForkName::Merge => FullPayloadMerge::default().into(), - ForkName::Capella => FullPayloadCapella::default().into(), - ForkName::Eip4844 => FullPayloadEip4844::default().into(), + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(FullPayloadMerge::default().into()), + ForkName::Capella => Ok(FullPayloadCapella::default().into()), + ForkName::Eip4844 => Ok(FullPayloadEip4844::default().into()), } } } @@ -882,13 +881,12 @@ impl AbstractExecPayload for BlindedPayload { type Capella = BlindedPayloadCapella; type Eip4844 = BlindedPayloadEip4844; - fn default_at_fork(fork_name: ForkName) -> Self { + fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - //FIXME(sean) error handling - ForkName::Base | ForkName::Altair => panic!(), - ForkName::Merge => BlindedPayloadMerge::default().into(), - ForkName::Capella => BlindedPayloadCapella::default().into(), - ForkName::Eip4844 => BlindedPayloadEip4844::default().into(), + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(BlindedPayloadMerge::default().into()), + 
ForkName::Capella => Ok(BlindedPayloadCapella::default().into()), + ForkName::Eip4844 => Ok(BlindedPayloadEip4844::default().into()), } } } diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index ebda936165..7700f23d9d 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -4,7 +4,10 @@ use ssz::Encode; use std::fs::File; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; -use types::{EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge}; +use types::{ + EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ForkName, +}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let eth1_block_hash = parse_required(matches, "execution-block-hash")?; @@ -17,17 +20,36 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; let gas_limit = parse_required(matches, "gas-limit")?; let file_name = matches.value_of("file").ok_or("No file supplied")?; + let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Merge); - //FIXME(sean) - let execution_payload_header: ExecutionPayloadHeader = - ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + let execution_payload_header: ExecutionPayloadHeader = match fork_name { + ForkName::Base | ForkName::Altair => return Err("invalid fork name".to_string()), + ForkName::Merge => ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { gas_limit, base_fee_per_gas, timestamp: genesis_time, block_hash: eth1_block_hash, prev_randao: eth1_block_hash.into_root(), ..ExecutionPayloadHeaderMerge::default() - }); + }), + ForkName::Capella => ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderCapella::default() + }), 
+ ForkName::Eip4844 => ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844 { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderEip4844::default() + }), + }; + let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; let bytes = execution_payload_header.as_ssz_bytes(); file.write_all(bytes.as_slice()) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index de6039f35a..238c7e9f16 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -371,7 +371,8 @@ fn main() { .subcommand( SubCommand::with_name("create-payload-header") .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \ - Useful as input for `lcli new-testnet --execution-payload-header FILE`. ") + Useful as input for `lcli new-testnet --execution-payload-header FILE`. If `--fork` \ + is not provided, a payload header for the `Bellatrix` fork will be created.") .arg( Arg::with_name("execution-block-hash") .long("execution-block-hash") @@ -417,7 +418,15 @@ fn main() { .takes_value(true) .required(true) .help("Output file"), - ) + ).arg( + Arg::with_name("fork") + .long("fork") + .value_name("FORK") + .takes_value(true) + .default_value("bellatrix") + .help("The fork for which the execution payload header should be created.") + .possible_values(&["merge", "bellatrix", "capella", "eip4844"]) + ) ) .subcommand( SubCommand::with_name("new-testnet") diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 650addc18c..4d194ff10b 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -9,8 +9,9 @@ use std::io::Read; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; use types::{ - test_utils::generate_deterministic_keypairs, Address, Config, EthSpec, ExecutionPayloadHeader, - ExecutionPayloadHeaderMerge, + test_utils::generate_deterministic_keypairs, Address, Config, Epoch, EthSpec, + 
ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844, + ExecutionPayloadHeaderMerge, ForkName, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -80,10 +81,25 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul .map_err(|e| format!("Unable to open {}: {}", filename, e))?; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - //FIXME(sean) - ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Merge) - .map_err(|e| format!("SSZ decode failed: {:?}", e)) + let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( + "genesis fork must be post-merge".to_string(), + )), + ForkName::Merge => { + ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Merge) + } + ForkName::Capella => { + ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Capella) + } + ForkName::Eip4844 => { + ExecutionPayloadHeaderEip4844::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Eip4844) + } + } + .map_err(|e| format!("SSZ decode failed: {:?}", e)) }) .transpose()?; diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 2ebca2dfb7..ae9df08096 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -37,7 +37,6 @@ pub enum Error { pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload = FullPayload> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), - BlobsSidecar(&'a BlobsSidecar), AttestationData(&'a AttestationData), SignedAggregateAndProof(&'a AggregateAndProof), SelectionProof(Slot), @@ -59,7 +58,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Pay match self { SignableMessage::RandaoReveal(epoch) 
=> epoch.signing_root(domain), SignableMessage::BeaconBlock(b) => b.signing_root(domain), - SignableMessage::BlobsSidecar(b) => b.signing_root(domain), SignableMessage::AttestationData(a) => a.signing_root(domain), SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain), SignableMessage::SelectionProof(slot) => slot.signing_root(domain), @@ -182,7 +180,6 @@ impl SigningMethod { Web3SignerObject::RandaoReveal { epoch } } SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?, - SignableMessage::BlobsSidecar(blob) => Web3SignerObject::BlobsSidecar(blob), SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a), SignableMessage::SignedAggregateAndProof(a) => { Web3SignerObject::AggregateAndProof(a) diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 5daa42fa3a..512cbc7d02 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -11,7 +11,6 @@ pub enum MessageType { AggregateAndProof, Attestation, BlockV2, - BlobsSidecar, Deposit, RandaoReveal, VoluntaryExit, @@ -52,8 +51,6 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> { #[serde(skip_serializing_if = "Option::is_none")] block_header: Option, }, - //FIXME(sean) just guessing here - BlobsSidecar(&'a BlobsSidecar), #[allow(dead_code)] Deposit { pubkey: PublicKeyBytes, @@ -114,7 +111,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, T, Pa Web3SignerObject::AggregateAndProof(_) => MessageType::AggregateAndProof, Web3SignerObject::Attestation(_) => MessageType::Attestation, Web3SignerObject::BeaconBlock { .. } => MessageType::BlockV2, - Web3SignerObject::BlobsSidecar(_) => MessageType::BlobsSidecar, Web3SignerObject::Deposit { .. } => MessageType::Deposit, Web3SignerObject::RandaoReveal { .. } => MessageType::RandaoReveal, Web3SignerObject::VoluntaryExit(_) => MessageType::VoluntaryExit,