Merge branch 'deneb-free-blobs' of https://github.com/sigp/lighthouse into too-many-blob-branches

This commit is contained in:
realbigsean
2023-03-17 09:38:14 -04:00
10 changed files with 301 additions and 204 deletions

View File

@@ -258,7 +258,7 @@ impl<T: BeaconChainTypes> Processor<T> {
);
if let RequestId::Sync(id) = request_id {
self.send_to_sync(SyncMessage::RpcBlobs {
self.send_to_sync(SyncMessage::RpcBlob {
peer_id,
request_id: id,
blob_sidecar,
@@ -330,7 +330,7 @@ impl<T: BeaconChainTypes> Processor<T> {
"Received BlobsByRoot Response";
"peer" => %peer_id,
);
self.send_to_sync(SyncMessage::RpcBlobs {
self.send_to_sync(SyncMessage::RpcBlob {
request_id,
peer_id,
blob_sidecar,

View File

@@ -35,7 +35,7 @@
use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart};
use super::block_lookups::BlockLookups;
use super::network_context::{BlockOrBlobs, SyncNetworkContext};
use super::network_context::{BlockOrBlob, SyncNetworkContext};
use super::peer_sync_info::{remote_sync_type, PeerSyncType};
use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
@@ -86,6 +86,10 @@ pub enum RequestId {
RangeBlobs { id: Id },
}
// TODO(diva) I'm updating functions one at a time, but this should be revisited because I think
// some code paths that are split for blobs and blocks can be made just one after sync as a whole
// is updated.
#[derive(Debug)]
/// A message that can be sent to the sync manager thread.
pub enum SyncMessage<T: EthSpec> {
@@ -101,7 +105,7 @@ pub enum SyncMessage<T: EthSpec> {
},
/// A blob has been received from the RPC.
RpcBlobs {
RpcBlob {
request_id: RequestId,
peer_id: PeerId,
blob_sidecar: Option<Arc<BlobSidecar<T>>>,
@@ -554,7 +558,12 @@ impl<T: BeaconChainTypes> SyncManager<T> {
beacon_block,
seen_timestamp,
} => {
self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp);
self.rpc_block_or_blob_received(
request_id,
peer_id,
beacon_block.into(),
seen_timestamp,
);
}
SyncMessage::UnknownBlock(peer_id, block, block_root) => {
// If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore
@@ -638,12 +647,17 @@ impl<T: BeaconChainTypes> SyncManager<T> {
.block_lookups
.parent_chain_processed(chain_hash, result, &mut self.network),
},
SyncMessage::RpcBlobs {
SyncMessage::RpcBlob {
request_id,
peer_id,
blob_sidecar,
seen_timestamp,
} => self.rpc_blobs_received(request_id, peer_id, blob_sidecar, seen_timestamp),
} => self.rpc_block_or_blob_received(
request_id,
peer_id,
blob_sidecar.into(),
seen_timestamp,
),
}
}
@@ -702,30 +716,50 @@ impl<T: BeaconChainTypes> SyncManager<T> {
}
}
fn rpc_block_received(
fn rpc_block_or_blob_received(
&mut self,
request_id: RequestId,
peer_id: PeerId,
beacon_block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
block_or_blob: BlockOrBlob<T::EthSpec>,
seen_timestamp: Duration,
) {
match request_id {
RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response(
id,
peer_id,
beacon_block.map(|block| block.into()),
seen_timestamp,
&mut self.network,
),
RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response(
id,
peer_id,
beacon_block.map(|block| block.into()),
seen_timestamp,
&mut self.network,
),
RequestId::SingleBlock { id } => {
// TODO(diva) adjust when dealing with by root requests. This code is here to
// satisfy dead code analysis
match block_or_blob {
BlockOrBlob::Block(maybe_block) => {
self.block_lookups.single_block_lookup_response(
id,
peer_id,
maybe_block.map(BlockWrapper::Block),
seen_timestamp,
&mut self.network,
)
}
BlockOrBlob::Sidecar(_) => unimplemented!("Mismatch between BlockWrapper and what the network receives needs to be handled first."),
}
}
RequestId::ParentLookup { id } => {
// TODO(diva) adjust when dealing with by root requests. This code is here to
// satisfy dead code analysis
match block_or_blob {
BlockOrBlob::Block(maybe_block) => self.block_lookups.parent_lookup_response(
id,
peer_id,
maybe_block.map(BlockWrapper::Block),
seen_timestamp,
&mut self.network,
),
BlockOrBlob::Sidecar(_) => unimplemented!("Mismatch between BlockWrapper and what the network receives needs to be handled first."),
}
}
RequestId::BackFillBlocks { id } => {
let is_stream_terminator = beacon_block.is_none();
let maybe_block = match block_or_blob {
BlockOrBlob::Block(maybe_block) => maybe_block,
BlockOrBlob::Sidecar(_) => todo!("I think this is unreachable"),
};
let is_stream_terminator = maybe_block.is_none();
if let Some(batch_id) = self
.network
.backfill_sync_only_blocks_response(id, is_stream_terminator)
@@ -735,7 +769,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
batch_id,
&peer_id,
id,
beacon_block.map(|block| block.into()),
maybe_block.map(|block| block.into()),
) {
Ok(ProcessResult::SyncCompleted) => self.update_sync_state(),
Ok(ProcessResult::Successful) => {}
@@ -748,7 +782,11 @@ impl<T: BeaconChainTypes> SyncManager<T> {
}
}
RequestId::RangeBlocks { id } => {
let is_stream_terminator = beacon_block.is_none();
let maybe_block = match block_or_blob {
BlockOrBlob::Block(maybe_block) => maybe_block,
BlockOrBlob::Sidecar(_) => todo!("I think this should be unreachable, since this is a range only-blocks request, and the network should not accept this chunk at all. Needs better handling"),
};
let is_stream_terminator = maybe_block.is_none();
if let Some((chain_id, batch_id)) = self
.network
.range_sync_block_response(id, is_stream_terminator)
@@ -759,28 +797,28 @@ impl<T: BeaconChainTypes> SyncManager<T> {
chain_id,
batch_id,
id,
beacon_block.map(|block| block.into()),
maybe_block.map(|block| block.into()),
);
self.update_sync_state();
}
}
RequestId::BackFillBlobs { id } => {
self.blobs_backfill_response(id, peer_id, beacon_block.into())
self.backfill_block_and_blobs_response(id, peer_id, block_or_blob)
}
RequestId::RangeBlobs { id } => {
self.blobs_range_response(id, peer_id, beacon_block.into())
self.range_block_and_blobs_response(id, peer_id, block_or_blob)
}
}
}
/// Handles receiving a response for a range sync request that should have both blocks and
/// blobs.
fn blobs_range_response(
fn range_block_and_blobs_response(
&mut self,
id: Id,
peer_id: PeerId,
block_or_blob: BlockOrBlobs<T::EthSpec>,
block_or_blob: BlockOrBlob<T::EthSpec>,
) {
if let Some((chain_id, resp)) = self
.network
@@ -822,11 +860,11 @@ impl<T: BeaconChainTypes> SyncManager<T> {
/// Handles receiving a response for a Backfill sync request that should have both blocks and
/// blobs.
fn blobs_backfill_response(
fn backfill_block_and_blobs_response(
&mut self,
id: Id,
peer_id: PeerId,
block_or_blob: BlockOrBlobs<T::EthSpec>,
block_or_blob: BlockOrBlob<T::EthSpec>,
) {
if let Some(resp) = self
.network
@@ -871,32 +909,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
}
}
}
fn rpc_blobs_received(
&mut self,
request_id: RequestId,
_peer_id: PeerId,
_maybe_blob: Option<Arc<BlobSidecar<<T>::EthSpec>>>,
_seen_timestamp: Duration,
) {
match request_id {
RequestId::SingleBlock { .. } | RequestId::ParentLookup { .. } => {
unreachable!("There is no such thing as a singular 'by root' blob request that is not accompanied by the block")
}
RequestId::BackFillBlocks { .. } => {
unreachable!("An only blocks request does not receive sidecars")
}
RequestId::BackFillBlobs { .. } => {
unimplemented!("Adjust backfill sync");
}
RequestId::RangeBlocks { .. } => {
unreachable!("Only-blocks range requests don't receive sidecars")
}
RequestId::RangeBlobs { id: _ } => {
unimplemented!("Adjust range");
}
}
}
}
impl<IgnoredOkVal, T: EthSpec> From<Result<IgnoredOkVal, BlockError<T>>> for BlockProcessResult<T> {

View File

@@ -75,20 +75,20 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
}
/// Small enumeration to make dealing with block and blob requests easier.
pub enum BlockOrBlobs<T: EthSpec> {
pub enum BlockOrBlob<T: EthSpec> {
Block(Option<Arc<SignedBeaconBlock<T>>>),
Blobs(Option<Arc<BlobSidecar<T>>>),
Sidecar(Option<Arc<BlobSidecar<T>>>),
}
impl<T: EthSpec> From<Option<Arc<SignedBeaconBlock<T>>>> for BlockOrBlobs<T> {
impl<T: EthSpec> From<Option<Arc<SignedBeaconBlock<T>>>> for BlockOrBlob<T> {
fn from(block: Option<Arc<SignedBeaconBlock<T>>>) -> Self {
BlockOrBlobs::Block(block)
BlockOrBlob::Block(block)
}
}
impl<T: EthSpec> From<Option<Arc<BlobSidecar<T>>>> for BlockOrBlobs<T> {
impl<T: EthSpec> From<Option<Arc<BlobSidecar<T>>>> for BlockOrBlob<T> {
fn from(blob: Option<Arc<BlobSidecar<T>>>) -> Self {
BlockOrBlobs::Blobs(blob)
BlockOrBlob::Sidecar(blob)
}
}
@@ -311,15 +311,15 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
pub fn range_sync_block_and_blob_response(
&mut self,
request_id: Id,
block_or_blob: BlockOrBlobs<T::EthSpec>,
block_or_blob: BlockOrBlob<T::EthSpec>,
) -> Option<(ChainId, BlocksAndBlobsByRangeResponse<T::EthSpec>)> {
match self.range_blocks_and_blobs_requests.entry(request_id) {
Entry::Occupied(mut entry) => {
let req = entry.get_mut();
let info = &mut req.block_blob_info;
match block_or_blob {
BlockOrBlobs::Block(maybe_block) => info.add_block_response(maybe_block),
BlockOrBlobs::Blobs(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
BlockOrBlob::Sidecar(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
}
if info.is_finished() {
// If the request is finished, dequeue everything
@@ -402,14 +402,14 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
pub fn backfill_sync_block_and_blob_response(
&mut self,
request_id: Id,
block_or_blob: BlockOrBlobs<T::EthSpec>,
block_or_blob: BlockOrBlob<T::EthSpec>,
) -> Option<BlocksAndBlobsByRangeResponse<T::EthSpec>> {
match self.backfill_blocks_and_blobs_requests.entry(request_id) {
Entry::Occupied(mut entry) => {
let (_, info) = entry.get_mut();
match block_or_blob {
BlockOrBlobs::Block(maybe_block) => info.add_block_response(maybe_block),
BlockOrBlobs::Blobs(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
BlockOrBlob::Sidecar(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
}
if info.is_finished() {
// If the request is finished, dequeue everything

View File

@@ -1326,7 +1326,7 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> Into<BeaconBlock<T, Payload>>
pub type BlockContentsTuple<T, Payload> = (
SignedBeaconBlock<T, Payload>,
Option<VariableList<SignedBlobSidecar<T>, <T as EthSpec>::MaxBlobsPerBlock>>,
Option<SignedBlobSidecarList<T>>,
);
/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobSidecars`].
@@ -1374,9 +1374,25 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> From<SignedBeaconBlock<T, Payl
}
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> From<BlockContentsTuple<T, Payload>>
for SignedBlockContents<T, Payload>
{
fn from(block_contents_tuple: BlockContentsTuple<T, Payload>) -> Self {
match block_contents_tuple {
(signed_block, None) => SignedBlockContents::Block(signed_block),
(signed_block, Some(signed_blob_sidecars)) => {
SignedBlockContents::BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars {
signed_block,
signed_blob_sidecars,
})
}
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, Encode)]
#[serde(bound = "T: EthSpec")]
pub struct SignedBeaconBlockAndBlobSidecars<T: EthSpec, Payload: AbstractExecPayload<T>> {
pub signed_block: SignedBeaconBlock<T, Payload>,
pub signed_blob_sidecars: VariableList<SignedBlobSidecar<T>, <T as EthSpec>::MaxBlobsPerBlock>,
pub signed_blob_sidecars: SignedBlobSidecarList<T>,
}

View File

@@ -14,7 +14,7 @@ pub enum Domain {
BlsToExecutionChange,
BeaconProposer,
BeaconAttester,
BlobsSideCar,
BlobSidecar,
Randao,
Deposit,
VoluntaryExit,
@@ -100,7 +100,7 @@ pub struct ChainSpec {
*/
pub(crate) domain_beacon_proposer: u32,
pub(crate) domain_beacon_attester: u32,
pub(crate) domain_blobs_sidecar: u32,
pub(crate) domain_blob_sidecar: u32,
pub(crate) domain_randao: u32,
pub(crate) domain_deposit: u32,
pub(crate) domain_voluntary_exit: u32,
@@ -366,7 +366,7 @@ impl ChainSpec {
match domain {
Domain::BeaconProposer => self.domain_beacon_proposer,
Domain::BeaconAttester => self.domain_beacon_attester,
Domain::BlobsSideCar => self.domain_blobs_sidecar,
Domain::BlobSidecar => self.domain_blob_sidecar,
Domain::Randao => self.domain_randao,
Domain::Deposit => self.domain_deposit,
Domain::VoluntaryExit => self.domain_voluntary_exit,
@@ -574,7 +574,7 @@ impl ChainSpec {
domain_voluntary_exit: 4,
domain_selection_proof: 5,
domain_aggregate_and_proof: 6,
domain_blobs_sidecar: 10, // 0x0a000000
domain_blob_sidecar: 11, // 0x0B000000
/*
* Fork choice
@@ -809,7 +809,7 @@ impl ChainSpec {
domain_voluntary_exit: 4,
domain_selection_proof: 5,
domain_aggregate_and_proof: 6,
domain_blobs_sidecar: 10,
domain_blob_sidecar: 11,
/*
* Fork choice
@@ -1285,7 +1285,7 @@ mod tests {
test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec);
test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec);
test_domain(Domain::BlobsSideCar, spec.domain_blobs_sidecar, &spec);
test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec);
test_domain(Domain::Randao, spec.domain_randao, &spec);
test_domain(Domain::Deposit, spec.domain_deposit, &spec);
test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec);
@@ -1311,7 +1311,7 @@ mod tests {
&spec,
);
test_domain(Domain::BlobsSideCar, spec.domain_blobs_sidecar, &spec);
test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec);
}
fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 {

View File

@@ -78,7 +78,7 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap<String, Value> {
"bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte),
"domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer),
"domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester),
"domain_blobs_sidecar".to_uppercase() => u32_hex(spec.domain_blobs_sidecar),
"domain_blob_sidecar".to_uppercase() => u32_hex(spec.domain_blob_sidecar),
"domain_randao".to_uppercase()=> u32_hex(spec.domain_randao),
"domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit),
"domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit),

View File

@@ -1,10 +1,8 @@
use crate::{
test_utils::TestRandom, BlobSidecar, ChainSpec, EthSpec, Fork, Hash256, PublicKey, Signature,
SignedRoot,
};
use crate::{test_utils::TestRandom, BlobSidecar, EthSpec, Signature};
use derivative::Derivative;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use test_random_derive::TestRandom;
use tree_hash_derive::TreeHash;
@@ -29,18 +27,5 @@ pub struct SignedBlobSidecar<T: EthSpec> {
pub signature: Signature,
}
impl<T: EthSpec> SignedRoot for SignedBlobSidecar<T> {}
impl<T: EthSpec> SignedBlobSidecar<T> {
pub fn verify_signature(
&self,
_object_root_opt: Option<Hash256>,
_pubkey: &PublicKey,
_fork: &Fork,
_genesis_validators_root: Hash256,
_spec: &ChainSpec,
) -> bool {
// TODO (pawan): fill up logic
unimplemented!()
}
}
pub type SignedBlobSidecarList<T> =
VariableList<SignedBlobSidecar<T>, <T as EthSpec>::MaxBlobsPerBlock>;

View File

@@ -6,9 +6,11 @@ use crate::{
OfflineOnFailure,
};
use crate::{http_metrics::metrics, validator_store::ValidatorStore};
use bls::SignatureBytes;
use environment::RuntimeContext;
use eth2::types::SignedBlockContents;
use slog::{crit, debug, error, info, trace, warn};
use eth2::types::{BlockContents, SignedBlockContents};
use eth2::BeaconNodeHttpClient;
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use std::ops::Deref;
use std::sync::Arc;
@@ -16,8 +18,8 @@ use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::sleep;
use types::{
AbstractExecPayload, BeaconBlock, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti,
PublicKeyBytes, Slot,
AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes,
Slot,
};
#[derive(Debug)]
@@ -342,80 +344,46 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
"slot" => slot.as_u64(),
);
// Request block from first responsive beacon node.
let block = self
let block_contents = self
.beacon_nodes
.first_success(
RequireSynced::No,
OfflineOnFailure::Yes,
|beacon_node| async move {
let block: BeaconBlock<E, Payload> = match Payload::block_type() {
BlockType::Full => {
let _get_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BEACON_BLOCK_HTTP_GET],
);
beacon_node
.get_validator_blocks::<E, Payload>(
slot,
randao_reveal_ref,
graffiti.as_ref(),
)
.await
.map_err(|e| {
BlockError::Recoverable(format!(
"Error from beacon node when producing block: {:?}",
e
))
})?
.data
.into()
}
BlockType::Blinded => {
let _get_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BLINDED_BEACON_BLOCK_HTTP_GET],
);
beacon_node
.get_validator_blinded_blocks::<E, Payload>(
slot,
randao_reveal_ref,
graffiti.as_ref(),
)
.await
.map_err(|e| {
BlockError::Recoverable(format!(
"Error from beacon node when producing block: {:?}",
e
))
})?
.data
}
};
info!(
move |beacon_node| {
Self::get_validator_block(
beacon_node,
slot,
randao_reveal_ref,
graffiti,
proposer_index,
log,
"Received unsigned block";
"slot" => slot.as_u64(),
);
if proposer_index != Some(block.proposer_index()) {
return Err(BlockError::Recoverable(
"Proposer index does not match block proposer. Beacon chain re-orged"
.to_string(),
));
}
Ok::<_, BlockError>(block)
)
},
)
.await?;
let (block, maybe_blob_sidecars) = block_contents.deconstruct();
let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES);
let signed_block_contents: SignedBlockContents<E, Payload> = self_ref
let signed_block = self_ref
.validator_store
.sign_block::<Payload>(*validator_pubkey_ref, block, current_slot)
.await
.map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?
.into();
.map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?;
let maybe_signed_blobs = match maybe_blob_sidecars {
Some(blob_sidecars) => Some(
self_ref
.validator_store
.sign_blobs(*validator_pubkey_ref, blob_sidecars)
.await
.map_err(|e| {
BlockError::Recoverable(format!("Unable to sign blob: {:?}", e))
})?,
),
None => None,
};
let signing_time_ms =
Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis();
@@ -426,46 +394,19 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
"signing_time_ms" => signing_time_ms,
);
let signed_block_contents = SignedBlockContents::from((signed_block, maybe_signed_blobs));
// Publish block with first available beacon node.
self.beacon_nodes
.first_success(
RequireSynced::No,
OfflineOnFailure::Yes,
|beacon_node| async {
match Payload::block_type() {
BlockType::Full => {
let _post_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BEACON_BLOCK_HTTP_POST],
);
beacon_node
.post_beacon_blocks(&signed_block_contents)
.await
.map_err(|e| {
BlockError::Irrecoverable(format!(
"Error from beacon node when publishing block: {:?}",
e
))
})?
}
BlockType::Blinded => {
let _post_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BLINDED_BEACON_BLOCK_HTTP_POST],
);
beacon_node
// TODO: need to be adjusted for blobs
.post_beacon_blinded_blocks(signed_block_contents.signed_block())
.await
.map_err(|e| {
BlockError::Irrecoverable(format!(
"Error from beacon node when publishing block: {:?}",
e
))
})?
}
}
Ok::<_, BlockError>(())
Self::publish_signed_block_contents::<Payload>(
&signed_block_contents,
beacon_node,
)
.await
},
)
.await?;
@@ -482,4 +423,106 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
Ok(())
}
async fn publish_signed_block_contents<Payload: AbstractExecPayload<E>>(
signed_block_contents: &SignedBlockContents<E, Payload>,
beacon_node: &BeaconNodeHttpClient,
) -> Result<(), BlockError> {
match Payload::block_type() {
BlockType::Full => {
let _post_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BEACON_BLOCK_HTTP_POST],
);
beacon_node
.post_beacon_blocks(signed_block_contents)
.await
.map_err(|e| {
BlockError::Irrecoverable(format!(
"Error from beacon node when publishing block: {:?}",
e
))
})?
}
BlockType::Blinded => {
let _post_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BLINDED_BEACON_BLOCK_HTTP_POST],
);
todo!("need to be adjusted for blobs");
// beacon_node
// .post_beacon_blinded_blocks(signed_block_contents.signed_block())
// .await
// .map_err(|e| {
// BlockError::Irrecoverable(format!(
// "Error from beacon node when publishing block: {:?}",
// e
// ))
// })?
}
}
Ok::<_, BlockError>(())
}
async fn get_validator_block<Payload: AbstractExecPayload<E>>(
beacon_node: &BeaconNodeHttpClient,
slot: Slot,
randao_reveal_ref: &SignatureBytes,
graffiti: Option<Graffiti>,
proposer_index: Option<u64>,
log: &Logger,
) -> Result<BlockContents<E, Payload>, BlockError> {
let block_contents: BlockContents<E, Payload> = match Payload::block_type() {
BlockType::Full => {
let _get_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BEACON_BLOCK_HTTP_GET],
);
beacon_node
.get_validator_blocks::<E, Payload>(slot, randao_reveal_ref, graffiti.as_ref())
.await
.map_err(|e| {
BlockError::Recoverable(format!(
"Error from beacon node when producing block: {:?}",
e
))
})?
.data
}
BlockType::Blinded => {
let _get_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BLINDED_BEACON_BLOCK_HTTP_GET],
);
todo!("implement blinded flow for blobs");
// beacon_node
// .get_validator_blinded_blocks::<E, Payload>(
// slot,
// randao_reveal_ref,
// graffiti.as_ref(),
// )
// .await
// .map_err(|e| {
// BlockError::Recoverable(format!(
// "Error from beacon node when producing block: {:?}",
// e
// ))
// })?
// .data
}
};
info!(
log,
"Received unsigned block";
"slot" => slot.as_u64(),
);
if proposer_index != Some(block_contents.block().proposer_index()) {
return Err(BlockError::Recoverable(
"Proposer index does not match block proposer. Beacon chain re-orged".to_string(),
));
}
Ok::<_, BlockError>(block_contents)
}
}

View File

@@ -37,6 +37,7 @@ pub enum Error {
pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload<T>> {
RandaoReveal(Epoch),
BeaconBlock(&'a BeaconBlock<T, Payload>),
BlobSidecar(&'a BlobSidecar<T>),
AttestationData(&'a AttestationData),
SignedAggregateAndProof(&'a AggregateAndProof<T>),
SelectionProof(Slot),
@@ -58,6 +59,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> SignableMessage<'a, T, Pay
match self {
SignableMessage::RandaoReveal(epoch) => epoch.signing_root(domain),
SignableMessage::BeaconBlock(b) => b.signing_root(domain),
SignableMessage::BlobSidecar(b) => b.signing_root(domain),
SignableMessage::AttestationData(a) => a.signing_root(domain),
SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain),
SignableMessage::SelectionProof(slot) => slot.signing_root(domain),
@@ -180,6 +182,10 @@ impl SigningMethod {
Web3SignerObject::RandaoReveal { epoch }
}
SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?,
SignableMessage::BlobSidecar(_) => {
// https://github.com/ConsenSys/web3signer/issues/726
unimplemented!("Web3Signer blob signing not implemented.")
}
SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a),
SignableMessage::SignedAggregateAndProof(a) => {
Web3SignerObject::AggregateAndProof(a)

View File

@@ -6,6 +6,7 @@ use crate::{
Config,
};
use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString};
use eth2::types::VariableList;
use parking_lot::{Mutex, RwLock};
use slashing_protection::{
interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase,
@@ -19,11 +20,12 @@ use std::sync::Arc;
use task_executor::TaskExecutor;
use types::{
attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address,
AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof,
Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof,
Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot,
SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution,
SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData,
AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, BlobSidecarList, ChainSpec,
ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes,
SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecar,
SignedBlobSidecarList, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData,
Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage,
SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData,
};
use validator_dir::ValidatorDir;
@@ -531,6 +533,39 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
}
}
pub async fn sign_blobs(
&self,
validator_pubkey: PublicKeyBytes,
blob_sidecars: BlobSidecarList<E>,
) -> Result<SignedBlobSidecarList<E>, Error> {
let mut signed_blob_sidecars = Vec::new();
for blob_sidecar in blob_sidecars.into_iter() {
let slot = blob_sidecar.slot;
let signing_epoch = slot.epoch(E::slots_per_epoch());
let signing_context = self.signing_context(Domain::BlobSidecar, signing_epoch);
let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?;
let signature = signing_method
.get_signature::<E, BlindedPayload<E>>(
SignableMessage::BlobSidecar(&blob_sidecar),
signing_context,
&self.spec,
&self.task_executor,
)
.await?;
metrics::inc_counter_vec(&metrics::SIGNED_BLOBS_TOTAL, &[metrics::SUCCESS]);
signed_blob_sidecars.push(SignedBlobSidecar {
message: blob_sidecar,
signature,
});
}
Ok(VariableList::from(signed_blob_sidecars))
}
pub async fn sign_attestation(
&self,
validator_pubkey: PublicKeyBytes,