Merge remote-tracking branch 'origin/deneb-free-blobs' into tree-states-deneb

This commit is contained in:
Michael Sproul
2023-10-06 11:11:36 +11:00
85 changed files with 3731 additions and 2675 deletions

View File

@@ -3,6 +3,8 @@ use crate::{
BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError,
VerifyBlockRoot,
};
use itertools::Itertools;
use std::iter::Peekable;
use std::marker::PhantomData;
use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot};
@@ -26,7 +28,7 @@ pub struct BlockReplayer<
'a,
Spec: EthSpec,
Error = BlockReplayError,
StateRootIter = StateRootIterDefault<Error>,
StateRootIter: Iterator<Item = Result<(Hash256, Slot), Error>> = StateRootIterDefault<Error>,
> {
state: BeaconState<Spec>,
spec: &'a ChainSpec,
@@ -36,7 +38,7 @@ pub struct BlockReplayer<
post_block_hook: Option<PostBlockHook<'a, Spec, Error>>,
pre_slot_hook: Option<PreSlotHook<'a, Spec, Error>>,
post_slot_hook: Option<PostSlotHook<'a, Spec, Error>>,
state_root_iter: Option<StateRootIter>,
pub(crate) state_root_iter: Option<Peekable<StateRootIter>>,
state_root_miss: bool,
_phantom: PhantomData<Error>,
}
@@ -137,7 +139,7 @@ where
/// `self.state.slot` to the `target_slot` supplied to `apply_blocks` (inclusive of both
/// endpoints).
pub fn state_root_iter(mut self, iter: StateRootIter) -> Self {
self.state_root_iter = Some(iter);
self.state_root_iter = Some(iter.peekable());
self
}
@@ -186,7 +188,7 @@ where
// If a state root iterator is configured, use it to find the root.
if let Some(ref mut state_root_iter) = self.state_root_iter {
let opt_root = state_root_iter
.take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot))
.peeking_take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot))
.find(|res| res.as_ref().map_or(true, |(_, s)| *s == slot))
.transpose()?;

View File

@@ -1,11 +1,11 @@
#![cfg(all(test, not(feature = "fake_crypto")))]
#![cfg(all(test, not(feature = "fake_crypto"), not(debug_assertions)))]
use crate::per_block_processing::errors::{
AttestationInvalid, AttesterSlashingInvalid, BlockOperationError, BlockProcessingError,
DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex,
ProposerSlashingInvalid,
};
use crate::{per_block_processing, StateProcessingStrategy};
use crate::{per_block_processing, BlockReplayError, BlockReplayer, StateProcessingStrategy};
use crate::{
per_block_processing::{process_operations, verify_exit::verify_exit},
BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures,
@@ -1035,3 +1035,51 @@ async fn fork_spanning_exit() {
)
.expect_err("phase0 exit does not verify against bellatrix state");
}
/// Check that the block replayer does not consume state roots unnecessarily.
///
/// Replays a single block on top of its parent state while supplying a state
/// root iterator that deliberately omits the root for the replayed slot and
/// instead contains only a dummy entry for the *following* slot. A peeking
/// replayer must leave that entry untouched; a consuming one would drain it.
#[tokio::test]
async fn block_replayer_peeking_state_roots() {
let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
// Head block/state are the replay target; its parent is the starting point.
let target_state = harness.get_current_state();
let target_block_root = harness.head_block_root();
let target_block = harness
.chain
.get_blinded_block(&target_block_root)
.unwrap()
.unwrap();
let parent_block_root = target_block.parent_root();
let parent_block = harness
.chain
.get_blinded_block(&parent_block_root)
.unwrap()
.unwrap();
// Load the parent's post-state; replay will advance it to the target slot.
let parent_state = harness
.chain
.get_state(&parent_block.state_root(), Some(parent_block.slot()))
.unwrap()
.unwrap();
// Omit the state root for `target_state` but provide a dummy state root at the *next* slot.
// If the block replayer is peeking at the state roots rather than consuming them, then the
// dummy state should still be there after block replay completes.
let dummy_state_root = Hash256::repeat_byte(0xff);
let dummy_slot = target_state.slot() + 1;
let state_root_iter = vec![Ok::<_, BlockReplayError>((dummy_state_root, dummy_slot))];
let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec)
.state_root_iter(state_root_iter.into_iter())
.no_signature_verification()
.apply_blocks(vec![target_block], None)
.unwrap();
// The dummy (state_root, slot) pair must still be the next item — i.e. the
// replayer peeked past the target slot without consuming this entry.
assert_eq!(
block_replayer
.state_root_iter
.unwrap()
.next()
.unwrap()
.unwrap(),
(dummy_state_root, dummy_slot)
);
}

View File

@@ -3,7 +3,7 @@ use crate::*;
use derivative::Derivative;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use ssz_types::{FixedVector, VariableList};
use std::marker::PhantomData;
use superstruct::superstruct;
use test_random_derive::TestRandom;
@@ -11,6 +11,8 @@ use tree_hash_derive::TreeHash;
pub type KzgCommitments<T> =
VariableList<KzgCommitment, <T as EthSpec>::MaxBlobCommitmentsPerBlock>;
pub type KzgCommitmentOpts<T> =
FixedVector<Option<KzgCommitment>, <T as EthSpec>::MaxBlobsPerBlock>;
/// The body of a `BeaconChain` block, containing operations.
///

View File

@@ -23,6 +23,19 @@ pub struct BlobIdentifier {
pub index: u64,
}
impl BlobIdentifier {
    /// Return an identifier for every blob a block at `block_root` could
    /// carry: one `BlobIdentifier` per index in `0..E::max_blobs_per_block()`.
    pub fn get_all_blob_ids<E: EthSpec>(block_root: Hash256) -> Vec<BlobIdentifier> {
        (0..E::max_blobs_per_block())
            .map(|index| BlobIdentifier {
                block_root,
                index: index as u64,
            })
            .collect()
    }
}
impl PartialOrd for BlobIdentifier {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.index.partial_cmp(&other.index)

View File

@@ -1,8 +1,8 @@
use crate::beacon_block_body::KzgCommitments;
use crate::{
BlobRootsList, ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb,
ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ForkName, ForkVersionDeserialize,
KzgProofs, SignedRoot, Uint256,
ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName,
ForkVersionDeserialize, KzgProofs, SignedRoot, Uint256,
};
use bls::PublicKeyBytes;
use bls::Signature;
@@ -26,7 +26,8 @@ pub struct BlindedBlobsBundle<E: EthSpec> {
derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone),
serde(bound = "E: EthSpec", deny_unknown_fields)
),
map_ref_into(ExecutionPayloadHeaderRef)
map_ref_into(ExecutionPayloadHeaderRef),
map_ref_mut_into(ExecutionPayloadHeaderRefMut)
)]
#[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)]
#[serde(bound = "E: EthSpec", deny_unknown_fields, untagged)]
@@ -59,6 +60,14 @@ impl<'a, E: EthSpec> BuilderBidRef<'a, E> {
}
}
impl<'a, E: EthSpec> BuilderBidRefMut<'a, E> {
/// Return a mutable reference to this bid's execution payload header,
/// preserving the fork variant (as an `ExecutionPayloadHeaderRefMut`).
pub fn header_mut(self) -> ExecutionPayloadHeaderRefMut<'a, E> {
// Macro generated by `superstruct` from the `map_ref_mut_into` attribute:
// maps each `BuilderBidRefMut` variant to the matching
// `ExecutionPayloadHeaderRefMut` variant via the `cons` constructor.
map_builder_bid_ref_mut_into_execution_payload_header_ref_mut!(&'a _, self, |bid, cons| {
cons(&mut bid.header)
})
}
}
impl<E: EthSpec> SignedRoot for BuilderBid<E> {}
/// Validator registration, for use in interacting with servers implementing the builder API.

View File

@@ -1,4 +1,3 @@
use crate::blob_sidecar::BlobIdentifier;
use crate::*;
use bls::Signature;
use derivative::Derivative;
@@ -257,30 +256,6 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload>
.map(|c| c.len())
.unwrap_or(0)
}
/// Return blob identifiers for every blob this block is expected to have.
///
/// Equivalent to `get_filtered_blob_ids` with a filter accepting all indices.
/// When `block_root` is `None`, the block's canonical root is computed.
pub fn get_expected_blob_ids(&self, block_root: Option<Hash256>) -> Vec<BlobIdentifier> {
self.get_filtered_blob_ids(block_root, |_, _| true)
}
/// If the filter returns `true` the id for the corresponding index and root will be included.
pub fn get_filtered_blob_ids(
    &self,
    block_root: Option<Hash256>,
    filter: impl Fn(usize, Hash256) -> bool,
) -> Vec<BlobIdentifier> {
    // Compute the canonical root only when the caller did not supply one.
    let root = block_root.unwrap_or_else(|| self.canonical_root());
    // One candidate id per expected blob; keep only those the filter accepts.
    (0..self.num_expected_blobs())
        .filter(|&index| filter(index, root))
        .map(|index| BlobIdentifier {
            block_root: root,
            index: index as u64,
        })
        .collect()
}
}
// We can convert pre-Bellatrix blocks without payloads into blocks with payloads.

View File

@@ -21,3 +21,17 @@ pub struct ValidatorRegistrationData {
}
impl SignedRoot for ValidatorRegistrationData {}
impl SignedValidatorRegistrationData {
    /// Verify this registration's signature against its own public key using
    /// the builder domain from `spec`.
    ///
    /// Returns `false` both when the signature does not verify and when the
    /// compressed public key cannot be decompressed.
    pub fn verify_signature(&self, spec: &ChainSpec) -> bool {
        self.message
            .pubkey
            .decompress()
            .map_or(false, |pubkey| {
                let domain = spec.get_builder_domain();
                let signing_root = self.message.signing_root(domain);
                self.signature.verify(&pubkey, signing_root)
            })
    }
}