mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-16 20:39:10 +00:00
Rename random to prev_randao (#3040)
## Issue Addressed As discussed on last night's consensus call, the testnets next week will target the [Kiln Spec v2](https://hackmd.io/@n0ble/kiln-spec). Presently, we support Kiln V1. V2 is backwards compatible, except for renaming `random` to `prev_randao` in: - https://github.com/ethereum/execution-apis/pull/180 - https://github.com/ethereum/consensus-specs/pull/2835 With this PR we'll no longer be compatible with the existing Kintsugi and Kiln testnets; however, we'll be ready for the testnets next week. I raised this breaking change in the call last night; we are all keen to move forward and break things. We now target the [`merge-kiln-v2`](https://github.com/MariusVanDerWijden/go-ethereum/tree/merge-kiln-v2) branch for interop with Geth. This required adding the `--http.authport` flag to the tester to avoid a port conflict at startup. ### Changes to exec integration tests There's some change in the `merge-kiln-v2` version of Geth that means it can't compile on a vanilla GitHub runner. Bumping the `go` version on the runner solved this issue. Whilst addressing this, I refactored the `testing/execution_integration` crate to be a *binary* rather than a *library* with tests. This means that we don't need to run the `build.rs` and build Geth whenever someone runs `make lint` or `make test-release`. This is nice for everyday users, but it's also nice for CI so that we can have a specific runner for these tests and we don't need to ensure *all* runners support everything required to build all execution clients. ## More Info - [x] ~~EF tests are failing since the rename has broken some tests that reference the old field name. I have been told there will be new tests released in the coming days (25/02/22 or 26/02/22).~~
This commit is contained in:
@@ -53,7 +53,7 @@ use eth2::types::{
|
||||
EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty,
|
||||
};
|
||||
use execution_layer::{ExecutionLayer, PayloadStatus};
|
||||
use fork_choice::{AttestationFromBlock, ForkChoice};
|
||||
use fork_choice::{AttestationFromBlock, ForkChoice, InvalidationOperation};
|
||||
use futures::channel::mpsc::Sender;
|
||||
use itertools::process_results;
|
||||
use itertools::Itertools;
|
||||
@@ -3180,49 +3180,29 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
/// This method must be called whenever an execution engine indicates that a payload is
|
||||
/// invalid.
|
||||
///
|
||||
/// If the `latest_root` is known to fork-choice it will be invalidated. If it is not known, an
|
||||
/// error will be returned.
|
||||
/// Fork choice will be run after the invalidation. The client may be shut down if the `op`
|
||||
/// results in the justified checkpoint being invalidated.
|
||||
///
|
||||
/// If `latest_valid_hash` is `None` or references a block unknown to fork choice, no other
|
||||
/// blocks will be invalidated. If `latest_valid_hash` is a block known to fork choice, all
|
||||
/// blocks between the `latest_root` and the `latest_valid_hash` will be invalidated (which may
|
||||
/// cause further, second-order invalidations).
|
||||
///
|
||||
/// ## Notes
|
||||
///
|
||||
/// Use these rules to set `latest_root`:
|
||||
///
|
||||
/// - When `forkchoiceUpdated` indicates an invalid block, set `latest_root` to be the
|
||||
/// block root that was the head of the chain when `forkchoiceUpdated` was called.
|
||||
/// - When `executePayload` returns an invalid block *during* block import, set
|
||||
/// `latest_root` to be the parent of the beacon block containing the invalid
|
||||
/// payload (because the block containing the payload is not present in fork choice).
|
||||
/// - When `executePayload` returns an invalid block *after* block import, set
|
||||
/// `latest_root` to be root of the beacon block containing the invalid payload.
|
||||
/// See the documentation of `InvalidationOperation` for information about defining `op`.
|
||||
pub fn process_invalid_execution_payload(
|
||||
&self,
|
||||
latest_root: Hash256,
|
||||
latest_valid_hash: Option<ExecutionBlockHash>,
|
||||
op: &InvalidationOperation,
|
||||
) -> Result<(), Error> {
|
||||
debug!(
|
||||
self.log,
|
||||
"Invalid execution payload in block";
|
||||
"latest_valid_hash" => ?latest_valid_hash,
|
||||
"latest_root" => ?latest_root,
|
||||
"latest_valid_ancestor" => ?op.latest_valid_ancestor(),
|
||||
"block_root" => ?op.block_root(),
|
||||
);
|
||||
|
||||
// Update fork choice.
|
||||
if let Err(e) = self
|
||||
.fork_choice
|
||||
.write()
|
||||
.on_invalid_execution_payload(latest_root, latest_valid_hash)
|
||||
{
|
||||
if let Err(e) = self.fork_choice.write().on_invalid_execution_payload(op) {
|
||||
crit!(
|
||||
self.log,
|
||||
"Failed to process invalid payload";
|
||||
"error" => ?e,
|
||||
"latest_valid_hash" => ?latest_valid_hash,
|
||||
"latest_root" => ?latest_root,
|
||||
"latest_valid_ancestor" => ?op.latest_valid_ancestor(),
|
||||
"block_root" => ?op.block_root(),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -3763,8 +3743,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
// The execution engine has stated that all blocks between the
|
||||
// `head_execution_block_hash` and `latest_valid_hash` are invalid.
|
||||
self.process_invalid_execution_payload(
|
||||
head_block_root,
|
||||
Some(*latest_valid_hash),
|
||||
&InvalidationOperation::InvalidateMany {
|
||||
head_block_root,
|
||||
always_invalidate_head: true,
|
||||
latest_valid_ancestor: *latest_valid_hash,
|
||||
},
|
||||
)?;
|
||||
|
||||
Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status })
|
||||
@@ -3781,7 +3764,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
//
|
||||
// Using a `None` latest valid ancestor will result in only the head block
|
||||
// being invalidated (no ancestors).
|
||||
self.process_invalid_execution_payload(head_block_root, None)?;
|
||||
self.process_invalid_execution_payload(
|
||||
&InvalidationOperation::InvalidateOne {
|
||||
block_root: head_block_root,
|
||||
},
|
||||
)?;
|
||||
|
||||
Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status })
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ use crate::{
|
||||
ExecutionPayloadError,
|
||||
};
|
||||
use execution_layer::PayloadStatus;
|
||||
use fork_choice::PayloadVerificationStatus;
|
||||
use fork_choice::{InvalidationOperation, PayloadVerificationStatus};
|
||||
use proto_array::{Block as ProtoBlock, ExecutionStatus};
|
||||
use slog::debug;
|
||||
use slot_clock::SlotClock;
|
||||
@@ -68,7 +68,13 @@ pub fn notify_new_payload<T: BeaconChainTypes>(
|
||||
// This block has not yet been applied to fork choice, so the latest block that was
|
||||
// imported to fork choice was the parent.
|
||||
let latest_root = block.parent_root();
|
||||
chain.process_invalid_execution_payload(latest_root, Some(latest_valid_hash))?;
|
||||
chain.process_invalid_execution_payload(
|
||||
&InvalidationOperation::InvalidateMany {
|
||||
head_block_root: latest_root,
|
||||
always_invalidate_head: false,
|
||||
latest_valid_ancestor: latest_valid_hash,
|
||||
},
|
||||
)?;
|
||||
|
||||
Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into())
|
||||
}
|
||||
@@ -145,11 +151,19 @@ pub fn validate_merge_block<T: BeaconChainTypes>(
|
||||
.slot_clock
|
||||
.now()
|
||||
.ok_or(BeaconChainError::UnableToReadSlot)?;
|
||||
// Check the optimistic sync conditions. Note that because this is the merge block,
|
||||
// the justified checkpoint can't have execution enabled so we only need to check the
|
||||
// current slot is at least SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY ahead of the block
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.1.9/sync/optimistic.md#when-to-optimistically-import-blocks
|
||||
if block.slot() + chain.spec.safe_slots_to_import_optimistically <= current_slot {
|
||||
|
||||
// Ensure the block is a candidate for optimistic import.
|
||||
if chain
|
||||
.fork_choice
|
||||
.read()
|
||||
.is_optimistic_candidate_block(
|
||||
current_slot,
|
||||
block.slot(),
|
||||
&block.parent_root(),
|
||||
&chain.spec,
|
||||
)
|
||||
.map_err(BeaconChainError::from)?
|
||||
{
|
||||
debug!(
|
||||
chain.log,
|
||||
"Optimistically accepting terminal block";
|
||||
|
||||
@@ -231,8 +231,10 @@ fn valid_invalid_syncing() {
|
||||
/// `latest_valid_hash`.
|
||||
#[test]
|
||||
fn invalid_payload_invalidates_parent() {
|
||||
let mut rig = InvalidPayloadRig::new();
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
rig.move_to_first_justification(Payload::Syncing);
|
||||
|
||||
let roots = vec![
|
||||
rig.import_block(Payload::Syncing),
|
||||
@@ -258,6 +260,7 @@ fn invalid_payload_invalidates_parent() {
|
||||
fn justified_checkpoint_becomes_invalid() {
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
rig.move_to_first_justification(Payload::Syncing);
|
||||
|
||||
let justified_checkpoint = rig.head_info().current_justified_checkpoint;
|
||||
@@ -305,7 +308,9 @@ fn pre_finalized_latest_valid_hash() {
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
|
||||
let mut blocks = vec![];
|
||||
blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
|
||||
blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing));
|
||||
|
||||
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
|
||||
|
||||
@@ -330,7 +335,11 @@ fn pre_finalized_latest_valid_hash() {
|
||||
for i in E::slots_per_epoch() * finalized_epoch..num_blocks {
|
||||
let slot = Slot::new(i);
|
||||
let root = rig.block_root_at_slot(slot).unwrap();
|
||||
assert!(rig.execution_status(root).is_not_verified());
|
||||
if slot == 1 {
|
||||
assert!(rig.execution_status(root).is_valid());
|
||||
} else {
|
||||
assert!(rig.execution_status(root).is_not_verified());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -344,7 +353,10 @@ fn latest_valid_hash_will_validate() {
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
let blocks = rig.build_blocks(4, Payload::Syncing);
|
||||
|
||||
let mut blocks = vec![];
|
||||
blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
|
||||
blocks.extend(rig.build_blocks(4, Payload::Syncing));
|
||||
|
||||
let latest_valid_root = rig
|
||||
.block_root_at_slot(Slot::new(LATEST_VALID_SLOT))
|
||||
@@ -357,7 +369,7 @@ fn latest_valid_hash_will_validate() {
|
||||
|
||||
assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT);
|
||||
|
||||
for slot in 0..=4 {
|
||||
for slot in 0..=5 {
|
||||
let slot = Slot::new(slot);
|
||||
let root = if slot > 0 {
|
||||
// If not the genesis slot, check the blocks we just produced.
|
||||
@@ -386,7 +398,9 @@ fn latest_valid_hash_is_junk() {
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
|
||||
let mut blocks = vec![];
|
||||
blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
|
||||
blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing));
|
||||
|
||||
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
|
||||
|
||||
@@ -408,7 +422,11 @@ fn latest_valid_hash_is_junk() {
|
||||
for i in E::slots_per_epoch() * finalized_epoch..num_blocks {
|
||||
let slot = Slot::new(i);
|
||||
let root = rig.block_root_at_slot(slot).unwrap();
|
||||
assert!(rig.execution_status(root).is_not_verified());
|
||||
if slot == 1 {
|
||||
assert!(rig.execution_status(root).is_valid());
|
||||
} else {
|
||||
assert!(rig.execution_status(root).is_not_verified());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -421,6 +439,7 @@ fn invalidates_all_descendants() {
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
|
||||
|
||||
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
|
||||
@@ -493,6 +512,7 @@ fn switches_heads() {
|
||||
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
|
||||
|
||||
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
|
||||
@@ -571,8 +591,9 @@ fn invalid_during_processing() {
|
||||
|
||||
#[test]
|
||||
fn invalid_after_optimistic_sync() {
|
||||
let mut rig = InvalidPayloadRig::new();
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid); // Import a valid transition block.
|
||||
|
||||
let mut roots = vec![
|
||||
rig.import_block(Payload::Syncing),
|
||||
|
||||
Reference in New Issue
Block a user