mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-17 04:48:21 +00:00
v1.1.6 Fork Choice changes (#2822)
## Issue Addressed Resolves: https://github.com/sigp/lighthouse/issues/2741 Includes: https://github.com/sigp/lighthouse/pull/2853 so that we can get ssz static tests passing here on v1.1.6. If we want to merge that first, we can make this diff slightly smaller ## Proposed Changes - Changes the `justified_epoch` and `finalized_epoch` in the `ProtoArrayNode` each to an `Option<Checkpoint>`. The `Option` is necessary only for the migration, so not ideal. But does allow us to add a default logic to `None` on these fields during the database migration. - Adds a database migration from a legacy fork choice struct to the new one, search for all necessary block roots in fork choice by iterating through blocks in the db. - updates related to https://github.com/ethereum/consensus-specs/pull/2727 - We will have to update the persisted forkchoice to make sure the justified checkpoint stored is correct according to the updated fork choice logic. This boils down to setting the forkchoice store's justified checkpoint to the justified checkpoint of the block that advanced the finalized checkpoint to the current one. - AFAICT there's no migration steps necessary for the update to allow applying attestations from prior blocks, but would appreciate confirmation on that - I updated the consensus spec tests to v1.1.6 here, but they will fail until we also implement the proposer score boost updates. I confirmed that the previously failing scenario `new_finalized_slot_is_justified_checkpoint_ancestor` will now pass after the boost updates, but haven't confirmed _all_ tests will pass because I just quickly stubbed out the proposer boost test scenario formatting. 
- This PR now also includes proposer boosting https://github.com/ethereum/consensus-specs/pull/2730 ## Additional Info I realized checking justified and finalized roots in fork choice makes it more likely that we trigger this bug: https://github.com/ethereum/consensus-specs/pull/2727 It's possible the combination of justified checkpoint and finalized checkpoint in the forkchoice store is different from in any block in fork choice. So when trying to startup our store's justified checkpoint seems invalid to the rest of fork choice (but it should be valid). When this happens we get an `InvalidBestNode` error and fail to start up. So I'm including that bugfix in this branch. Todo: - [x] Fix fork choice tests - [x] Self review - [x] Add fix for https://github.com/ethereum/consensus-specs/pull/2727 - [x] Rebase onto Kintusgi - [x] Fix `num_active_validators` calculation as @michaelsproul pointed out - [x] Clean up db migrations Co-authored-by: realbigsean <seananderson33@gmail.com>
This commit is contained in:
@@ -51,7 +51,7 @@ use eth2::types::{
|
||||
EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty,
|
||||
};
|
||||
use execution_layer::ExecutionLayer;
|
||||
use fork_choice::ForkChoice;
|
||||
use fork_choice::{AttestationFromBlock, ForkChoice};
|
||||
use futures::channel::mpsc::Sender;
|
||||
use itertools::process_results;
|
||||
use itertools::Itertools;
|
||||
@@ -1700,7 +1700,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
self.fork_choice
|
||||
.write()
|
||||
.on_attestation(self.slot()?, verified.indexed_attestation())
|
||||
.on_attestation(
|
||||
self.slot()?,
|
||||
verified.indexed_attestation(),
|
||||
AttestationFromBlock::False,
|
||||
)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
@@ -2443,11 +2447,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
{
|
||||
let _fork_choice_block_timer =
|
||||
metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
|
||||
let block_delay = self
|
||||
.slot_clock
|
||||
.seconds_from_current_slot_start(self.spec.seconds_per_slot)
|
||||
.ok_or(Error::UnableToComputeTimeAtSlot)?;
|
||||
|
||||
fork_choice
|
||||
.on_block(
|
||||
current_slot,
|
||||
&block,
|
||||
block_root,
|
||||
block_delay,
|
||||
&state,
|
||||
payload_verification_status,
|
||||
&self.spec,
|
||||
@@ -2472,7 +2482,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
let indexed_attestation = get_indexed_attestation(committee.committee, attestation)
|
||||
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
|
||||
|
||||
match fork_choice.on_attestation(current_slot, &indexed_attestation) {
|
||||
match fork_choice.on_attestation(
|
||||
current_slot,
|
||||
&indexed_attestation,
|
||||
AttestationFromBlock::True,
|
||||
) {
|
||||
Ok(()) => Ok(()),
|
||||
// Ignore invalid attestations whilst importing attestations from a block. The
|
||||
// block might be very old and therefore the attestations useless to fork choice.
|
||||
@@ -3009,7 +3023,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
fn fork_choice_internal(&self) -> Result<(), Error> {
|
||||
// Determine the root of the block that is the head of the chain.
|
||||
let beacon_block_root = self.fork_choice.write().get_head(self.slot()?)?;
|
||||
let beacon_block_root = self
|
||||
.fork_choice
|
||||
.write()
|
||||
.get_head(self.slot()?, &self.spec)?;
|
||||
|
||||
let current_head = self.head_info()?;
|
||||
let old_finalized_checkpoint = current_head.finalized_checkpoint;
|
||||
|
||||
@@ -1,15 +1,17 @@
|
||||
//! Defines the `BeaconForkChoiceStore` which provides the persistent storage for the `ForkChoice`
|
||||
//! struct.
|
||||
//!
|
||||
//! Additionally, the private `BalancesCache` struct is defined; a cache designed to avoid database
|
||||
//! Additionally, the `BalancesCache` struct is defined; a cache designed to avoid database
|
||||
//! reads when fork choice requires the validator balances of the justified state.
|
||||
|
||||
use crate::{metrics, BeaconSnapshot};
|
||||
use derivative::Derivative;
|
||||
use fork_choice::ForkChoiceStore;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use store::{Error as StoreError, HotColdDB, ItemStore};
|
||||
use superstruct::superstruct;
|
||||
use types::{BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, Slot};
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -68,7 +70,7 @@ struct CacheItem {
|
||||
///
|
||||
/// It is effectively a mapping of `epoch_boundary_block_root -> state.balances`.
|
||||
#[derive(PartialEq, Clone, Default, Debug, Encode, Decode)]
|
||||
struct BalancesCache {
|
||||
pub struct BalancesCache {
|
||||
items: Vec<CacheItem>,
|
||||
}
|
||||
|
||||
@@ -154,8 +156,10 @@ impl BalancesCache {
|
||||
|
||||
/// Implements `fork_choice::ForkChoiceStore` in order to provide a persistent backing to the
|
||||
/// `fork_choice::ForkChoice` struct.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Derivative)]
|
||||
#[derivative(PartialEq(bound = "E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>"))]
|
||||
pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
|
||||
#[derivative(PartialEq = "ignore")]
|
||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||
balances_cache: BalancesCache,
|
||||
time: Slot,
|
||||
@@ -163,26 +167,10 @@ pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<
|
||||
justified_checkpoint: Checkpoint,
|
||||
justified_balances: Vec<u64>,
|
||||
best_justified_checkpoint: Checkpoint,
|
||||
proposer_boost_root: Hash256,
|
||||
_phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<E, Hot, Cold> PartialEq for BeaconForkChoiceStore<E, Hot, Cold>
|
||||
where
|
||||
E: EthSpec,
|
||||
Hot: ItemStore<E>,
|
||||
Cold: ItemStore<E>,
|
||||
{
|
||||
/// This implementation ignores the `store` and `slot_clock`.
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.balances_cache == other.balances_cache
|
||||
&& self.time == other.time
|
||||
&& self.finalized_checkpoint == other.finalized_checkpoint
|
||||
&& self.justified_checkpoint == other.justified_checkpoint
|
||||
&& self.justified_balances == other.justified_balances
|
||||
&& self.best_justified_checkpoint == other.best_justified_checkpoint
|
||||
}
|
||||
}
|
||||
|
||||
impl<E, Hot, Cold> BeaconForkChoiceStore<E, Hot, Cold>
|
||||
where
|
||||
E: EthSpec,
|
||||
@@ -225,6 +213,7 @@ where
|
||||
justified_balances: anchor_state.balances().clone().into(),
|
||||
finalized_checkpoint,
|
||||
best_justified_checkpoint: justified_checkpoint,
|
||||
proposer_boost_root: Hash256::zero(),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
@@ -239,6 +228,7 @@ where
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
justified_balances: self.justified_balances.clone(),
|
||||
best_justified_checkpoint: self.best_justified_checkpoint,
|
||||
proposer_boost_root: self.proposer_boost_root,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -255,6 +245,7 @@ where
|
||||
justified_checkpoint: persisted.justified_checkpoint,
|
||||
justified_balances: persisted.justified_balances,
|
||||
best_justified_checkpoint: persisted.best_justified_checkpoint,
|
||||
proposer_boost_root: persisted.proposer_boost_root,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
@@ -301,6 +292,10 @@ where
|
||||
&self.finalized_checkpoint
|
||||
}
|
||||
|
||||
fn proposer_boost_root(&self) -> Hash256 {
|
||||
self.proposer_boost_root
|
||||
}
|
||||
|
||||
fn set_finalized_checkpoint(&mut self, checkpoint: Checkpoint) {
|
||||
self.finalized_checkpoint = checkpoint
|
||||
}
|
||||
@@ -336,15 +331,23 @@ where
|
||||
fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint) {
|
||||
self.best_justified_checkpoint = checkpoint
|
||||
}
|
||||
|
||||
fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256) {
|
||||
self.proposer_boost_root = proposer_boost_root;
|
||||
}
|
||||
}
|
||||
|
||||
/// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
|
||||
#[derive(Encode, Decode)]
|
||||
#[superstruct(variants(V1, V7), variant_attributes(derive(Encode, Decode)), no_enum)]
|
||||
pub struct PersistedForkChoiceStore {
|
||||
balances_cache: BalancesCache,
|
||||
time: Slot,
|
||||
finalized_checkpoint: Checkpoint,
|
||||
justified_checkpoint: Checkpoint,
|
||||
justified_balances: Vec<u64>,
|
||||
best_justified_checkpoint: Checkpoint,
|
||||
pub balances_cache: BalancesCache,
|
||||
pub time: Slot,
|
||||
pub finalized_checkpoint: Checkpoint,
|
||||
pub justified_checkpoint: Checkpoint,
|
||||
pub justified_balances: Vec<u64>,
|
||||
pub best_justified_checkpoint: Checkpoint,
|
||||
#[superstruct(only(V7))]
|
||||
pub proposer_boost_root: Hash256,
|
||||
}
|
||||
|
||||
pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV7;
|
||||
|
||||
@@ -591,7 +591,7 @@ where
|
||||
};
|
||||
|
||||
let initial_head_block_root = fork_choice
|
||||
.get_head(current_slot)
|
||||
.get_head(current_slot, &self.spec)
|
||||
.map_err(|e| format!("Unable to get fork choice head: {:?}", e))?;
|
||||
|
||||
// Try to decode the head block according to the current fork, if that fails, try
|
||||
|
||||
@@ -5,6 +5,7 @@ use slog::{info, warn, Logger};
|
||||
use state_processing::state_advance::complete_state_advance;
|
||||
use state_processing::{per_block_processing, per_block_processing::BlockSignatureStrategy};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore};
|
||||
use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot};
|
||||
|
||||
@@ -176,6 +177,8 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
|
||||
block.slot(),
|
||||
&block,
|
||||
block.canonical_root(),
|
||||
// Reward proposer boost. We are reinforcing the canonical chain.
|
||||
Duration::from_secs(0),
|
||||
&state,
|
||||
payload_verification_status,
|
||||
spec,
|
||||
|
||||
@@ -1,25 +1,38 @@
|
||||
use crate::beacon_fork_choice_store::PersistedForkChoiceStore as ForkChoiceStore;
|
||||
use fork_choice::PersistedForkChoice as ForkChoice;
|
||||
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7};
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use store::{DBColumn, Error, StoreItem};
|
||||
use superstruct::superstruct;
|
||||
|
||||
#[derive(Encode, Decode)]
|
||||
// If adding a new version you should update this type alias and fix the breakages.
|
||||
pub type PersistedForkChoice = PersistedForkChoiceV7;
|
||||
|
||||
#[superstruct(variants(V1, V7), variant_attributes(derive(Encode, Decode)), no_enum)]
|
||||
pub struct PersistedForkChoice {
|
||||
pub fork_choice: ForkChoice,
|
||||
pub fork_choice_store: ForkChoiceStore,
|
||||
pub fork_choice: fork_choice::PersistedForkChoice,
|
||||
#[superstruct(only(V1))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV1,
|
||||
#[superstruct(only(V7))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV7,
|
||||
}
|
||||
|
||||
impl StoreItem for PersistedForkChoice {
|
||||
fn db_column() -> DBColumn {
|
||||
DBColumn::ForkChoice
|
||||
}
|
||||
macro_rules! impl_store_item {
|
||||
($type:ty) => {
|
||||
impl StoreItem for $type {
|
||||
fn db_column() -> DBColumn {
|
||||
DBColumn::ForkChoice
|
||||
}
|
||||
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
self.as_ssz_bytes()
|
||||
}
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
self.as_ssz_bytes()
|
||||
}
|
||||
|
||||
fn from_store_bytes(bytes: &[u8]) -> std::result::Result<Self, Error> {
|
||||
Self::from_ssz_bytes(bytes).map_err(Into::into)
|
||||
}
|
||||
fn from_store_bytes(bytes: &[u8]) -> std::result::Result<Self, Error> {
|
||||
Self::from_ssz_bytes(bytes).map_err(Into::into)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_store_item!(PersistedForkChoiceV1);
|
||||
impl_store_item!(PersistedForkChoiceV7);
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
//! Utilities for managing database schema changes.
|
||||
mod migration_schema_v6;
|
||||
mod migration_schema_v7;
|
||||
mod types;
|
||||
|
||||
use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY};
|
||||
use crate::persisted_fork_choice::PersistedForkChoice;
|
||||
use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7};
|
||||
use crate::store::{get_key_for_col, KeyValueStoreOp};
|
||||
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
|
||||
use operation_pool::{PersistedOperationPool, PersistedOperationPoolBase};
|
||||
use proto_array::ProtoArrayForkChoice;
|
||||
use slog::{warn, Logger};
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use std::fs;
|
||||
@@ -22,6 +27,7 @@ pub fn migrate_schema<T: BeaconChainTypes>(
|
||||
datadir: &Path,
|
||||
from: SchemaVersion,
|
||||
to: SchemaVersion,
|
||||
log: Logger,
|
||||
) -> Result<(), StoreError> {
|
||||
match (from, to) {
|
||||
// Migrating from the current schema version to iself is always OK, a no-op.
|
||||
@@ -29,8 +35,8 @@ pub fn migrate_schema<T: BeaconChainTypes>(
|
||||
// Migrate across multiple versions by recursively migrating one step at a time.
|
||||
(_, _) if from.as_u64() + 1 < to.as_u64() => {
|
||||
let next = SchemaVersion(from.as_u64() + 1);
|
||||
migrate_schema::<T>(db.clone(), datadir, from, next)?;
|
||||
migrate_schema::<T>(db, datadir, next, to)
|
||||
migrate_schema::<T>(db.clone(), datadir, from, next, log.clone())?;
|
||||
migrate_schema::<T>(db, datadir, next, to, log)
|
||||
}
|
||||
// Migration from v0.3.0 to v0.3.x, adding the temporary states column.
|
||||
// Nothing actually needs to be done, but once a DB uses v2 it shouldn't go back.
|
||||
@@ -95,25 +101,77 @@ pub fn migrate_schema<T: BeaconChainTypes>(
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Migration for adding `is_merge_complete` field to the fork choice store.
|
||||
// Migration for adding `execution_status` field to the fork choice store.
|
||||
(SchemaVersion(5), SchemaVersion(6)) => {
|
||||
let fork_choice_opt = db
|
||||
.get_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY)?
|
||||
.map(|mut persisted_fork_choice| {
|
||||
let fork_choice = ProtoArrayForkChoice::from_bytes_legacy(
|
||||
&persisted_fork_choice.fork_choice.proto_array_bytes,
|
||||
)?;
|
||||
persisted_fork_choice.fork_choice.proto_array_bytes = fork_choice.as_bytes();
|
||||
Ok::<_, String>(persisted_fork_choice)
|
||||
})
|
||||
.transpose()
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
if let Some(fork_choice) = fork_choice_opt {
|
||||
// Store the converted fork choice store under the same key.
|
||||
db.put_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY, &fork_choice)?;
|
||||
// Database operations to be done atomically
|
||||
let mut ops = vec![];
|
||||
|
||||
// The top-level `PersistedForkChoice` struct is still V1 but will have its internal
|
||||
// bytes for the fork choice updated to V6.
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(mut persisted_fork_choice) = fork_choice_opt {
|
||||
migration_schema_v6::update_execution_statuses::<T>(&mut persisted_fork_choice)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
|
||||
let column = PersistedForkChoiceV1::db_column().into();
|
||||
let key = FORK_CHOICE_DB_KEY.as_bytes();
|
||||
let db_key = get_key_for_col(column, key);
|
||||
let op =
|
||||
KeyValueStoreOp::PutKeyValue(db_key, persisted_fork_choice.as_store_bytes());
|
||||
ops.push(op);
|
||||
}
|
||||
|
||||
db.store_schema_version(to)?;
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// 1. Add `proposer_boost_root`.
|
||||
// 2. Update `justified_epoch` to `justified_checkpoint` and `finalized_epoch` to
|
||||
// `finalized_checkpoint`.
|
||||
// 3. This migration also includes a potential update to the justified
|
||||
// checkpoint in case the fork choice store's justified checkpoint and finalized checkpoint
|
||||
// combination does not actually exist for any blocks in fork choice. This was possible in
|
||||
// the consensus spec prior to v1.1.6.
|
||||
//
|
||||
// Relevant issues:
|
||||
//
|
||||
// https://github.com/sigp/lighthouse/issues/2741
|
||||
// https://github.com/ethereum/consensus-specs/pull/2727
|
||||
// https://github.com/ethereum/consensus-specs/pull/2730
|
||||
(SchemaVersion(6), SchemaVersion(7)) => {
|
||||
// Database operations to be done atomically
|
||||
let mut ops = vec![];
|
||||
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(persisted_fork_choice_v1) = fork_choice_opt {
|
||||
// This migrates the `PersistedForkChoiceStore`, adding the `proposer_boost_root` field.
|
||||
let mut persisted_fork_choice_v7 = persisted_fork_choice_v1.into();
|
||||
|
||||
let result = migration_schema_v7::update_fork_choice::<T>(
|
||||
&mut persisted_fork_choice_v7,
|
||||
db.clone(),
|
||||
);
|
||||
|
||||
// Fall back to re-initializing fork choice from an anchor state if necessary.
|
||||
if let Err(e) = result {
|
||||
warn!(log, "Unable to migrate to database schema 7, re-initializing fork choice"; "error" => ?e);
|
||||
migration_schema_v7::update_with_reinitialized_fork_choice::<T>(
|
||||
&mut persisted_fork_choice_v7,
|
||||
db.clone(),
|
||||
)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
}
|
||||
|
||||
// Store the converted fork choice store under the same key.
|
||||
let column = PersistedForkChoiceV7::db_column().into();
|
||||
let key = FORK_CHOICE_DB_KEY.as_bytes();
|
||||
let db_key = get_key_for_col(column, key);
|
||||
let op =
|
||||
KeyValueStoreOp::PutKeyValue(db_key, persisted_fork_choice_v7.as_store_bytes());
|
||||
ops.push(op);
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
74
beacon_node/beacon_chain/src/schema_change/README.md
Normal file
74
beacon_node/beacon_chain/src/schema_change/README.md
Normal file
@@ -0,0 +1,74 @@
|
||||
Database Schema Migrations
|
||||
====
|
||||
|
||||
This document is an attempt to record some best practices and design conventions for applying
|
||||
database schema migrations within Lighthouse.
|
||||
|
||||
## General Structure
|
||||
|
||||
If you make a breaking change to an on-disk data structure you need to increment the
|
||||
`SCHEMA_VERSION` in `beacon_node/store/src/metadata.rs` and add a migration from the previous
|
||||
version to the new version.
|
||||
|
||||
The entry-point for database migrations is in `schema_change.rs`, _not_ `migrate.rs` (which deals
|
||||
with finalization). Supporting code for a specific migration may be added in
|
||||
`schema_change/migration_schema_vX.rs`, where `X` is the version being migrated _to_.
|
||||
|
||||
## Combining Schema Changes
|
||||
|
||||
Schema changes may be combined if they are part of the same pull request to
|
||||
`unstable`. Once a schema version is defined in `unstable` we should not apply changes to it
|
||||
without incrementing the version. This prevents conflicts between versions that appear to be the
|
||||
same. This allows us to deploy `unstable` to nodes without having to worry about needing to resync
|
||||
because of a sneaky schema change.
|
||||
|
||||
Changing the on-disk structure for a version _before_ it is merged to `unstable` is OK. You will
|
||||
just have to handle manually resyncing any test nodes (use checkpoint sync).
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
Prefer to name versions of structs by _the version at which the change was introduced_. For example
|
||||
if you add a field to `Foo` in v9, call the previous version `FooV1` (assuming this is `Foo`'s first
|
||||
migration) and write a schema change that migrates from `FooV1` to `FooV9`.
|
||||
|
||||
Prefer to use explicit version names in `schema_change.rs` and the `schema_change` module. To
|
||||
interface with the outside either:
|
||||
|
||||
1. Define a type alias to the latest version, e.g. `pub type Foo = FooV9`, or
|
||||
2. Define a mapping from the latest version to the version used elsewhere, e.g.
|
||||
```rust
|
||||
impl From<FooV9> for Foo {}
|
||||
```
|
||||
|
||||
Avoid names like:
|
||||
|
||||
* `LegacyFoo`
|
||||
* `OldFoo`
|
||||
* `FooWithoutX`
|
||||
|
||||
## First-version vs Last-version
|
||||
|
||||
Previously the schema migration code would name types by the _last_ version at which they were
|
||||
valid. For example if `Foo` changed in `V9` then we would name the two variants `FooV8` and `FooV9`.
|
||||
The problem with this scheme is that if `Foo` changes again in the future at say v12 then `FooV9` would
|
||||
need to be renamed to `FooV11`, which is annoying. Using the _first_ valid version as described
|
||||
above does not have this issue.
|
||||
|
||||
## Using SuperStruct
|
||||
|
||||
If possible, consider using [`superstruct`](https://crates.io/crates/superstruct) to handle data
|
||||
structure changes between versions.
|
||||
|
||||
* Use `superstruct(no_enum)` to avoid generating an unnecessary top-level enum.
|
||||
|
||||
## Example
|
||||
|
||||
A field is added to `Foo` in v9, and there are two variants: `FooV1` and `FooV9`. There is a
|
||||
migration from `FooV1` to `FooV9`. `Foo` is aliased to `FooV9`.
|
||||
|
||||
Some time later another field is added to `Foo` in v12. A new `FooV12` is created, along with a
|
||||
migration from `FooV9` to `FooV12`. The primary `Foo` type gets re-aliased to `FooV12`. The previous
|
||||
migration from V1 to V9 shouldn't break because the schema migration refers to `FooV9` explicitly
|
||||
rather than `Foo`. Due to the re-aliasing (or re-mapping) the compiler will check every usage
|
||||
of `Foo` to make sure that it still makes sense with `FooV12`.
|
||||
|
||||
@@ -0,0 +1,28 @@
|
||||
///! These functions and structs are only relevant to the database migration from schema 5 to 6.
|
||||
use crate::persisted_fork_choice::PersistedForkChoiceV1;
|
||||
use crate::schema_change::types::{SszContainerV1, SszContainerV6};
|
||||
use crate::BeaconChainTypes;
|
||||
use ssz::four_byte_option_impl;
|
||||
use ssz::{Decode, Encode};
|
||||
|
||||
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
|
||||
// selector.
|
||||
four_byte_option_impl!(four_byte_option_usize, usize);
|
||||
|
||||
pub(crate) fn update_execution_statuses<T: BeaconChainTypes>(
|
||||
persisted_fork_choice: &mut PersistedForkChoiceV1,
|
||||
) -> Result<(), String> {
|
||||
let ssz_container_v1 =
|
||||
SszContainerV1::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes)
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
let ssz_container_v6: SszContainerV6 = ssz_container_v1.into();
|
||||
|
||||
persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v6.as_ssz_bytes();
|
||||
Ok(())
|
||||
}
|
||||
@@ -0,0 +1,327 @@
|
||||
///! These functions and structs are only relevant to the database migration from schema 6 to 7.
|
||||
use crate::beacon_chain::BeaconChainTypes;
|
||||
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7};
|
||||
use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7};
|
||||
use crate::schema_change::types::{ProtoNodeV6, SszContainerV6, SszContainerV7};
|
||||
use crate::types::{Checkpoint, Epoch, Hash256};
|
||||
use crate::types::{EthSpec, Slot};
|
||||
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
|
||||
use fork_choice::ForkChoice;
|
||||
use proto_array::{core::ProtoNode, core::SszContainer, ProtoArrayForkChoice};
|
||||
use ssz::four_byte_option_impl;
|
||||
use ssz::{Decode, Encode};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use store::hot_cold_store::HotColdDB;
|
||||
use store::iter::BlockRootsIterator;
|
||||
use store::Error as StoreError;
|
||||
|
||||
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
|
||||
// selector.
|
||||
four_byte_option_impl!(four_byte_option_usize, usize);
|
||||
|
||||
/// This method is used to re-initialize fork choice from the finalized state in case we hit an
|
||||
/// error during this migration.
|
||||
pub(crate) fn update_with_reinitialized_fork_choice<T: BeaconChainTypes>(
|
||||
persisted_fork_choice: &mut PersistedForkChoiceV7,
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
) -> Result<(), String> {
|
||||
let anchor_block_root = persisted_fork_choice
|
||||
.fork_choice_store
|
||||
.finalized_checkpoint
|
||||
.root;
|
||||
let anchor_block = db
|
||||
.get_block(&anchor_block_root)
|
||||
.map_err(|e| format!("{:?}", e))?
|
||||
.ok_or_else(|| "Missing anchor beacon block".to_string())?;
|
||||
let anchor_state = db
|
||||
.get_state(&anchor_block.state_root(), Some(anchor_block.slot()))
|
||||
.map_err(|e| format!("{:?}", e))?
|
||||
.ok_or_else(|| "Missing anchor beacon state".to_string())?;
|
||||
let snapshot = BeaconSnapshot {
|
||||
beacon_block: anchor_block,
|
||||
beacon_block_root: anchor_block_root,
|
||||
beacon_state: anchor_state,
|
||||
};
|
||||
let store = BeaconForkChoiceStore::get_forkchoice_store(db, &snapshot);
|
||||
let fork_choice = ForkChoice::from_anchor(
|
||||
store,
|
||||
anchor_block_root,
|
||||
&snapshot.beacon_block,
|
||||
&snapshot.beacon_state,
|
||||
)
|
||||
.map_err(|e| format!("{:?}", e))?;
|
||||
persisted_fork_choice.fork_choice = fork_choice.to_persisted();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn update_fork_choice<T: BeaconChainTypes>(
|
||||
persisted_fork_choice: &mut PersistedForkChoiceV7,
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
) -> Result<(), StoreError> {
|
||||
// `PersistedForkChoice` stores the `ProtoArray` as a `Vec<u8>`. Deserialize these
|
||||
// bytes assuming the legacy struct, and transform them to the new struct before
|
||||
// re-serializing.
|
||||
let ssz_container_v6 =
|
||||
SszContainerV6::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes)
|
||||
.map_err(|e| {
|
||||
StoreError::SchemaMigrationError(format!(
|
||||
"Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
// Clone the V6 proto nodes in order to maintain information about `node.justified_epoch`
|
||||
// and `node.finalized_epoch`.
|
||||
let nodes_v6 = ssz_container_v6.nodes.clone();
|
||||
|
||||
let justified_checkpoint = persisted_fork_choice.fork_choice_store.justified_checkpoint;
|
||||
let finalized_checkpoint = persisted_fork_choice.fork_choice_store.finalized_checkpoint;
|
||||
|
||||
// These transformations instantiate `node.justified_checkpoint` and `node.finalized_checkpoint`
|
||||
// to `None`.
|
||||
let ssz_container_v7: SszContainerV7 =
|
||||
ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint);
|
||||
let ssz_container: SszContainer = ssz_container_v7.into();
|
||||
let mut fork_choice: ProtoArrayForkChoice = ssz_container.into();
|
||||
|
||||
update_checkpoints::<T>(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
|
||||
// Update the justified checkpoint in the store in case we have a discrepancy
|
||||
// between the store and the proto array nodes.
|
||||
update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
struct HeadInfo {
|
||||
index: usize,
|
||||
root: Hash256,
|
||||
slot: Slot,
|
||||
}
|
||||
|
||||
fn update_checkpoints<T: BeaconChainTypes>(
|
||||
finalized_root: Hash256,
|
||||
nodes_v6: &[ProtoNodeV6],
|
||||
fork_choice: &mut ProtoArrayForkChoice,
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
) -> Result<(), String> {
|
||||
let heads = find_finalized_descendant_heads(finalized_root, fork_choice);
|
||||
|
||||
// For each head, first gather all epochs we will need to find justified or finalized roots for.
|
||||
for head in heads {
|
||||
// `relevant_epochs` are epochs for which we will need to find the root at the start slot.
|
||||
// We don't need to worry about whether the are finalized or justified epochs.
|
||||
let mut relevant_epochs = HashSet::new();
|
||||
let relevant_epoch_finder = |index, _: &mut ProtoNode| {
|
||||
let (justified_epoch, finalized_epoch) = nodes_v6
|
||||
.get(index)
|
||||
.map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch))
|
||||
.ok_or_else(|| "Index not found in legacy proto nodes".to_string())?;
|
||||
relevant_epochs.insert(justified_epoch);
|
||||
relevant_epochs.insert(finalized_epoch);
|
||||
Ok(())
|
||||
};
|
||||
|
||||
apply_to_chain_of_ancestors(
|
||||
finalized_root,
|
||||
head.index,
|
||||
fork_choice,
|
||||
relevant_epoch_finder,
|
||||
)?;
|
||||
|
||||
// find the block roots associated with each relevant epoch.
|
||||
let roots_by_epoch =
|
||||
map_relevant_epochs_to_roots::<T>(head.root, head.slot, relevant_epochs, db.clone())?;
|
||||
|
||||
// Apply this mutator to the chain of descendants from this head, adding justified
|
||||
// and finalized checkpoints for each.
|
||||
let node_mutator = |index, node: &mut ProtoNode| {
|
||||
let (justified_epoch, finalized_epoch) = nodes_v6
|
||||
.get(index)
|
||||
.map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch))
|
||||
.ok_or_else(|| "Index not found in legacy proto nodes".to_string())?;
|
||||
|
||||
// Update the checkpoints only if they haven't already been populated.
|
||||
if node.justified_checkpoint.is_none() {
|
||||
let justified_checkpoint =
|
||||
roots_by_epoch
|
||||
.get(&justified_epoch)
|
||||
.map(|&root| Checkpoint {
|
||||
epoch: justified_epoch,
|
||||
root,
|
||||
});
|
||||
node.justified_checkpoint = justified_checkpoint;
|
||||
}
|
||||
if node.finalized_checkpoint.is_none() {
|
||||
let finalized_checkpoint =
|
||||
roots_by_epoch
|
||||
.get(&finalized_epoch)
|
||||
.map(|&root| Checkpoint {
|
||||
epoch: finalized_epoch,
|
||||
root,
|
||||
});
|
||||
node.finalized_checkpoint = finalized_checkpoint;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
};
|
||||
|
||||
apply_to_chain_of_ancestors(finalized_root, head.index, fork_choice, node_mutator)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Coverts the given `HashSet<Epoch>` to a `Vec<Epoch>` then reverse sorts by `Epoch`. Next, a
|
||||
/// single `BlockRootsIterator` is created which is used to iterate backwards from the given
|
||||
/// `head_root` and `head_slot`, finding the block root at the start slot of each epoch.
|
||||
fn map_relevant_epochs_to_roots<T: BeaconChainTypes>(
|
||||
head_root: Hash256,
|
||||
head_slot: Slot,
|
||||
epochs: HashSet<Epoch>,
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
) -> Result<HashMap<Epoch, Hash256>, String> {
|
||||
// Convert the `HashSet` to a `Vec` and reverse sort the epochs.
|
||||
let mut relevant_epochs = epochs.into_iter().collect::<Vec<_>>();
|
||||
relevant_epochs.sort_unstable_by(|a, b| b.cmp(a));
|
||||
|
||||
// Iterate backwards from the given `head_root` and `head_slot` and find the block root at each epoch.
|
||||
let mut iter = std::iter::once(Ok((head_root, head_slot)))
|
||||
.chain(BlockRootsIterator::from_block(db, head_root).map_err(|e| format!("{:?}", e))?);
|
||||
let mut roots_by_epoch = HashMap::new();
|
||||
for epoch in relevant_epochs {
|
||||
let start_slot = epoch.start_slot(T::EthSpec::slots_per_epoch());
|
||||
|
||||
let root = iter
|
||||
.find_map(|next| match next {
|
||||
Ok((root, slot)) => (slot == start_slot).then(|| Ok(root)),
|
||||
Err(e) => Some(Err(format!("{:?}", e))),
|
||||
})
|
||||
.transpose()?
|
||||
.ok_or_else(|| "Justified root not found".to_string())?;
|
||||
roots_by_epoch.insert(epoch, root);
|
||||
}
|
||||
Ok(roots_by_epoch)
|
||||
}
|
||||
|
||||
/// Applies a mutator to every node in a chain, starting from the node at the given
|
||||
/// `head_index` and iterating through ancestors until the `finalized_root` is reached.
|
||||
fn apply_to_chain_of_ancestors<F>(
|
||||
finalized_root: Hash256,
|
||||
head_index: usize,
|
||||
fork_choice: &mut ProtoArrayForkChoice,
|
||||
mut node_mutator: F,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
F: FnMut(usize, &mut ProtoNode) -> Result<(), String>,
|
||||
{
|
||||
let head = fork_choice
|
||||
.core_proto_array_mut()
|
||||
.nodes
|
||||
.get_mut(head_index)
|
||||
.ok_or_else(|| "Head index not found in proto nodes".to_string())?;
|
||||
|
||||
node_mutator(head_index, head)?;
|
||||
|
||||
let mut parent_index_opt = head.parent;
|
||||
let mut parent_opt =
|
||||
parent_index_opt.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index));
|
||||
|
||||
// Iterate backwards through all parents until there is no reference to a parent or we reach
|
||||
// the `finalized_root` node.
|
||||
while let (Some(parent), Some(parent_index)) = (parent_opt, parent_index_opt) {
|
||||
node_mutator(parent_index, parent)?;
|
||||
|
||||
// Break out of this while loop *after* the `node_mutator` has been applied to the finalized
|
||||
// node.
|
||||
if parent.root == finalized_root {
|
||||
break;
|
||||
}
|
||||
|
||||
// Update parent values
|
||||
parent_index_opt = parent.parent;
|
||||
parent_opt = parent_index_opt
|
||||
.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Finds all heads by finding all nodes in the proto array that are not referenced as parents. Then
|
||||
/// checks that these nodes are descendants of the finalized root in order to determine if they are
|
||||
/// relevant.
|
||||
fn find_finalized_descendant_heads(
|
||||
finalized_root: Hash256,
|
||||
fork_choice: &ProtoArrayForkChoice,
|
||||
) -> Vec<HeadInfo> {
|
||||
let nodes_referenced_as_parents: HashSet<usize> = fork_choice
|
||||
.core_proto_array()
|
||||
.nodes
|
||||
.iter()
|
||||
.filter_map(|node| node.parent)
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
fork_choice
|
||||
.core_proto_array()
|
||||
.nodes
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(index, node)| {
|
||||
(!nodes_referenced_as_parents.contains(&index)
|
||||
&& fork_choice.is_descendant(finalized_root, node.root))
|
||||
.then(|| HeadInfo {
|
||||
index,
|
||||
root: node.root,
|
||||
slot: node.slot,
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
fn update_store_justified_checkpoint(
|
||||
persisted_fork_choice: &mut PersistedForkChoiceV7,
|
||||
fork_choice: &mut ProtoArrayForkChoice,
|
||||
) -> Result<(), String> {
|
||||
let justified_checkpoint = fork_choice
|
||||
.core_proto_array()
|
||||
.nodes
|
||||
.iter()
|
||||
.filter_map(|node| {
|
||||
(node.finalized_checkpoint
|
||||
== Some(persisted_fork_choice.fork_choice_store.finalized_checkpoint))
|
||||
.then(|| node.justified_checkpoint)
|
||||
.flatten()
|
||||
})
|
||||
.max_by_key(|justified_checkpoint| justified_checkpoint.epoch)
|
||||
.ok_or("Proto node with current finalized checkpoint not found")?;
|
||||
|
||||
fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint;
|
||||
persisted_fork_choice.fork_choice.proto_array_bytes = fork_choice.as_bytes();
|
||||
persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Add a zero `proposer_boost_root` when migrating from V1-6 to V7.
|
||||
impl From<PersistedForkChoiceStoreV1> for PersistedForkChoiceStoreV7 {
|
||||
fn from(other: PersistedForkChoiceStoreV1) -> Self {
|
||||
Self {
|
||||
balances_cache: other.balances_cache,
|
||||
time: other.time,
|
||||
finalized_checkpoint: other.finalized_checkpoint,
|
||||
justified_checkpoint: other.justified_checkpoint,
|
||||
justified_balances: other.justified_balances,
|
||||
best_justified_checkpoint: other.best_justified_checkpoint,
|
||||
proposer_boost_root: Hash256::zero(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A V1 persisted fork choice carries its bytes over unchanged; only the embedded store needs
// converting (which adds the V7 `proposer_boost_root`).
impl From<PersistedForkChoiceV1> for PersistedForkChoiceV7 {
    fn from(v1: PersistedForkChoiceV1) -> Self {
        Self {
            fork_choice_store: v1.fork_choice_store.into(),
            fork_choice: v1.fork_choice,
        }
    }
}
|
||||
192
beacon_node/beacon_chain/src/schema_change/types.rs
Normal file
192
beacon_node/beacon_chain/src/schema_change/types.rs
Normal file
@@ -0,0 +1,192 @@
|
||||
use crate::types::{AttestationShufflingId, Checkpoint, Epoch, Hash256, Slot};
|
||||
use proto_array::core::{ProposerBoost, ProtoNode, SszContainer, VoteTracker};
|
||||
use proto_array::ExecutionStatus;
|
||||
use ssz::four_byte_option_impl;
|
||||
use ssz::Encode;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use superstruct::superstruct;
|
||||
|
||||
// Define "legacy" implementations of `Option<usize>` and `Option<Checkpoint>` which use four
// bytes for encoding the union selector, matching the encoding the old schema used on disk.
four_byte_option_impl!(four_byte_option_usize, usize);
four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint);
|
||||
|
||||
/// On-disk layouts of a proto-array node across schema versions, used only for migrations.
///
/// `V1`/`V6` store bare `justified_epoch`/`finalized_epoch` values; `V7` replaces them with
/// optional full checkpoints. NOTE: field order is SSZ-significant — do not reorder.
#[superstruct(
    variants(V1, V6, V7),
    variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)),
    no_enum
)]
pub struct ProtoNode {
    pub slot: Slot,
    pub state_root: Hash256,
    pub target_root: Hash256,
    pub current_epoch_shuffling_id: AttestationShufflingId,
    pub next_epoch_shuffling_id: AttestationShufflingId,
    pub root: Hash256,
    // Encoded with the legacy four-byte union selector for on-disk compatibility.
    #[ssz(with = "four_byte_option_usize")]
    pub parent: Option<usize>,
    #[superstruct(only(V1, V6))]
    pub justified_epoch: Epoch,
    #[superstruct(only(V1, V6))]
    pub finalized_epoch: Epoch,
    // `Option` so the migration can detect nodes whose checkpoints are not yet populated.
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V7))]
    pub justified_checkpoint: Option<Checkpoint>,
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V7))]
    pub finalized_checkpoint: Option<Checkpoint>,
    pub weight: u64,
    #[ssz(with = "four_byte_option_usize")]
    pub best_child: Option<usize>,
    #[ssz(with = "four_byte_option_usize")]
    pub best_descendant: Option<usize>,
    // Execution status was introduced in V6 alongside the merge fork changes.
    #[superstruct(only(V6, V7))]
    pub execution_status: ExecutionStatus,
}
|
||||
|
||||
impl Into<ProtoNodeV6> for ProtoNodeV1 {
|
||||
fn into(self) -> ProtoNodeV6 {
|
||||
ProtoNodeV6 {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_epoch: self.justified_epoch,
|
||||
finalized_epoch: self.finalized_epoch,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
// We set the following execution value as if the block is a pre-merge-fork block. This
|
||||
// is safe as long as we never import a merge block with the old version of proto-array.
|
||||
// This will be safe since we can't actually process merge blocks until we've made this
|
||||
// change to fork choice.
|
||||
execution_status: ExecutionStatus::irrelevant(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<ProtoNodeV7> for ProtoNodeV6 {
|
||||
fn into(self) -> ProtoNodeV7 {
|
||||
ProtoNodeV7 {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_checkpoint: None,
|
||||
finalized_checkpoint: None,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
execution_status: self.execution_status,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<ProtoNode> for ProtoNodeV7 {
|
||||
fn into(self) -> ProtoNode {
|
||||
ProtoNode {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
execution_status: self.execution_status,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// On-disk layouts of the proto-array SSZ container across schema versions.
///
/// `V1`/`V6` store bare justified/finalized epochs; `V7` stores full checkpoints and adds
/// proposer-boost state. NOTE: field order is SSZ-significant — do not reorder.
#[superstruct(
    variants(V1, V6, V7),
    variant_attributes(derive(Encode, Decode)),
    no_enum
)]
#[derive(Encode, Decode)]
pub struct SszContainer {
    pub votes: Vec<VoteTracker>,
    pub balances: Vec<u64>,
    pub prune_threshold: usize,
    #[superstruct(only(V1, V6))]
    pub justified_epoch: Epoch,
    #[superstruct(only(V1, V6))]
    pub finalized_epoch: Epoch,
    #[superstruct(only(V7))]
    pub justified_checkpoint: Checkpoint,
    #[superstruct(only(V7))]
    pub finalized_checkpoint: Checkpoint,
    // Each variant carries the matching node layout for its schema version.
    #[superstruct(only(V1))]
    pub nodes: Vec<ProtoNodeV1>,
    #[superstruct(only(V6))]
    pub nodes: Vec<ProtoNodeV6>,
    #[superstruct(only(V7))]
    pub nodes: Vec<ProtoNodeV7>,
    pub indices: Vec<(Hash256, usize)>,
    // Proposer boosting is new in V7.
    #[superstruct(only(V7))]
    pub previous_proposer_boost: ProposerBoost,
}
|
||||
|
||||
impl Into<SszContainerV6> for SszContainerV1 {
|
||||
fn into(self) -> SszContainerV6 {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainerV6 {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_epoch: self.justified_epoch,
|
||||
finalized_epoch: self.finalized_epoch,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SszContainerV6 {
|
||||
pub(crate) fn into_ssz_container_v7(
|
||||
self,
|
||||
justified_checkpoint: Checkpoint,
|
||||
finalized_checkpoint: Checkpoint,
|
||||
) -> SszContainerV7 {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainerV7 {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_checkpoint,
|
||||
finalized_checkpoint,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
previous_proposer_boost: ProposerBoost::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<SszContainer> for SszContainerV7 {
|
||||
fn into(self) -> SszContainer {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainer {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
previous_proposer_boost: self.previous_proposer_boost,
|
||||
}
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user