diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 60487f9c46..95fde28f5b 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -231,35 +231,6 @@ where } } - /// Restore `Self` from a previously-generated `PersistedForkChoiceStore`. - /// - /// DEPRECATED. Can be deleted once migrations no longer require it. - pub fn from_persisted_v17( - persisted: PersistedForkChoiceStoreV17, - justified_state_root: Hash256, - unrealized_justified_state_root: Hash256, - store: Arc>, - ) -> Result { - let justified_balances = - JustifiedBalances::from_effective_balances(persisted.justified_balances)?; - - Ok(Self { - store, - balances_cache: <_>::default(), - time: persisted.time, - finalized_checkpoint: persisted.finalized_checkpoint, - justified_checkpoint: persisted.justified_checkpoint, - justified_balances, - justified_state_root, - unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, - unrealized_justified_state_root, - unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, - proposer_boost_root: persisted.proposer_boost_root, - equivocating_indices: persisted.equivocating_indices, - _phantom: PhantomData, - }) - } - /// Restore `Self` from a previously-generated `PersistedForkChoiceStore`. pub fn from_persisted( persisted: PersistedForkChoiceStore, @@ -411,45 +382,15 @@ where pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV28; /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. -#[superstruct( - variants(V17, V28), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V28), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoiceStore { - /// The balances cache was removed from disk storage in schema V28. - #[superstruct(only(V17))] - pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, - /// The justified balances were removed from disk storage in schema V28. - #[superstruct(only(V17))] - pub justified_balances: Vec, - /// The justified state root is stored so that it can be used to load the justified balances. - #[superstruct(only(V28))] pub justified_state_root: Hash256, pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V28))] pub unrealized_justified_state_root: Hash256, pub unrealized_finalized_checkpoint: Checkpoint, pub proposer_boost_root: Hash256, pub equivocating_indices: BTreeSet, } - -// Convert V28 to V17 by adding balances and removing justified state roots. 
-impl From<(PersistedForkChoiceStoreV28, JustifiedBalances)> for PersistedForkChoiceStoreV17 { - fn from((v28, balances): (PersistedForkChoiceStoreV28, JustifiedBalances)) -> Self { - Self { - balances_cache: Default::default(), - time: v28.time, - finalized_checkpoint: v28.finalized_checkpoint, - justified_checkpoint: v28.justified_checkpoint, - justified_balances: balances.effective_balances, - unrealized_justified_checkpoint: v28.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: v28.unrealized_finalized_checkpoint, - proposer_boost_root: v28.proposer_boost_root, - equivocating_indices: v28.equivocating_indices, - } - } -} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index cf427d1a40..d71aec6987 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -75,7 +75,7 @@ pub use self::errors::{BeaconChainError, BlockProductionError}; pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{ - BeaconForkChoiceStore, Error as ForkChoiceStoreError, PersistedForkChoiceStoreV17, + BeaconForkChoiceStore, Error as ForkChoiceStoreError, PersistedForkChoiceStore, PersistedForkChoiceStoreV28, }; pub use block_verification::{ diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index d8fcc0901b..6229544e81 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,52 +1,19 @@ -use crate::{ - beacon_fork_choice_store::{PersistedForkChoiceStoreV17, PersistedForkChoiceStoreV28}, - metrics, -}; +use crate::{beacon_fork_choice_store::PersistedForkChoiceStoreV28, metrics}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use store::{DBColumn, Error, KeyValueStoreOp, StoreConfig, StoreItem}; +use store::{DBColumn, Error, KeyValueStoreOp, StoreConfig}; use superstruct::superstruct; use types::Hash256; // If adding a new version you should update this type alias and fix the breakages. pub type PersistedForkChoice = PersistedForkChoiceV28; -#[superstruct( - variants(V17, V28), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V28), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoice { - #[superstruct(only(V17))] - pub fork_choice_v17: fork_choice::PersistedForkChoiceV17, - #[superstruct(only(V28))] pub fork_choice: fork_choice::PersistedForkChoiceV28, - #[superstruct(only(V17))] - pub fork_choice_store_v17: PersistedForkChoiceStoreV17, - #[superstruct(only(V28))] pub fork_choice_store: PersistedForkChoiceStoreV28, } -macro_rules! impl_store_item { - ($type:ty) => { - impl StoreItem for $type { - fn db_column() -> DBColumn { - DBColumn::ForkChoice - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> std::result::Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } - } - }; -} - -impl_store_item!(PersistedForkChoiceV17); - impl PersistedForkChoiceV28 { pub fn from_bytes(bytes: &[u8], store_config: &StoreConfig) -> Result { let decompressed_bytes = store_config diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index ddc5978339..ed82143c38 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,11 +1,4 @@ //! 
Utilities for managing database schema changes. -mod migration_schema_v23; -mod migration_schema_v24; -mod migration_schema_v25; -mod migration_schema_v26; -mod migration_schema_v27; -mod migration_schema_v28; - use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; use store::Error as StoreError; @@ -13,81 +6,17 @@ use store::hot_cold_store::{HotColdDB, HotColdDBError}; use store::metadata::{CURRENT_SCHEMA_VERSION, SchemaVersion}; /// Migrate the database from one schema version to another, applying all requisite mutations. +/// +/// All migrations for schema versions up to and including v28 have been removed. Nodes on live +/// networks are already running v28, so only the current version check remains. pub fn migrate_schema<T: BeaconChainTypes>( - db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>, + _db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>, from: SchemaVersion, to: SchemaVersion, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to itself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), - // Upgrade across multiple versions by recursively migrating one step at a time. - (_, _) if from.as_u64() + 1 < to.as_u64() => { - let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::<T>(db.clone(), from, next)?; - migrate_schema::<T>(db, next, to) - } - // Downgrade across multiple versions by recursively migrating one step at a time. - (_, _) if to.as_u64() + 1 < from.as_u64() => { - let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::<T>(db.clone(), from, next)?; - migrate_schema::<T>(db, next, to) - } - - // - // Migrations from before SchemaVersion(22) are deprecated. - // - (SchemaVersion(22), SchemaVersion(23)) => { - let ops = migration_schema_v23::upgrade_to_v23::<T>(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(23), SchemaVersion(22)) => { - let ops = migration_schema_v23::downgrade_from_v23::<T>(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(23), SchemaVersion(24)) => { - let ops = migration_schema_v24::upgrade_to_v24::<T>(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(24), SchemaVersion(23)) => { - let ops = migration_schema_v24::downgrade_from_v24::<T>(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(24), SchemaVersion(25)) => { - let ops = migration_schema_v25::upgrade_to_v25()?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(25), SchemaVersion(24)) => { - let ops = migration_schema_v25::downgrade_from_v25()?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(25), SchemaVersion(26)) => { - let ops = migration_schema_v26::upgrade_to_v26::<T>(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(26), SchemaVersion(25)) => { - let ops = migration_schema_v26::downgrade_from_v26::<T>(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(26), SchemaVersion(27)) => { - // This migration updates the blobs db. The schema version - // is bumped inside upgrade_to_v27. - migration_schema_v27::upgrade_to_v27::<T>(db.clone()) - } - (SchemaVersion(27), SchemaVersion(26)) => { - // Downgrading is essentially a no-op and is only possible - // if peer das isn't scheduled. 
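For reference, the dispatch logic deleted here stepped through schema versions one at a time, recursing on intermediate versions in both directions. A minimal self-contained sketch of that shape (all types here are simplified stand-ins, not Lighthouse's real ones):

```rust
// Sketch of the one-step-at-a-time migration dispatch that this diff removes.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct SchemaVersion(u64);

const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(28);

fn migrate_one_step(from: SchemaVersion, to: SchemaVersion) -> Result<(), String> {
    // A real implementation would apply the DB mutations for this single step.
    println!("migrating {from:?} -> {to:?}");
    Ok(())
}

fn migrate_schema(from: SchemaVersion, to: SchemaVersion) -> Result<(), String> {
    match (from, to) {
        // No-op when already at the current version.
        (f, t) if f == t && t == CURRENT_SCHEMA_VERSION => Ok(()),
        // Upgrade across multiple versions by recursing one step at a time.
        (f, t) if f.0 + 1 < t.0 => {
            let next = SchemaVersion(f.0 + 1);
            migrate_schema(f, next)?;
            migrate_schema(next, t)
        }
        // Downgrade across multiple versions, also one step at a time.
        (f, t) if t.0 + 1 < f.0 => {
            let next = SchemaVersion(f.0 - 1);
            migrate_schema(f, next)?;
            migrate_schema(next, t)
        }
        // Adjacent versions: apply a single migration.
        (f, t) if f.0 + 1 == t.0 || f.0 == t.0 + 1 => migrate_one_step(f, t),
        (f, t) => Err(format!("unsupported migration {f:?} -> {t:?}")),
    }
}

fn main() {
    // Expands to 25->26, 26->27, 27->28.
    migrate_schema(SchemaVersion(25), SchemaVersion(28)).unwrap();
}
```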
- migration_schema_v27::downgrade_from_v27::(db.clone())?; - db.store_schema_version_atomically(to, vec![]) - } - (SchemaVersion(27), SchemaVersion(28)) => { - let ops = migration_schema_v28::upgrade_to_v28::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(28), SchemaVersion(27)) => { - let ops = migration_schema_v28::downgrade_from_v28::(db.clone())?; - db.store_schema_version_atomically(to, ops) - } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs deleted file mode 100644 index e238e1efb6..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs +++ /dev/null @@ -1,180 +0,0 @@ -use crate::BeaconForkChoiceStore; -use crate::beacon_chain::BeaconChainTypes; -use crate::persisted_fork_choice::PersistedForkChoiceV17; -use crate::schema_change::StoreError; -use crate::test_utils::{BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY, PersistedBeaconChain}; -use fork_choice::{ForkChoice, ResetPayloadStatuses}; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::sync::Arc; -use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; -use tracing::{debug, info}; -use types::{Hash256, Slot}; - -/// Dummy value to use for the canonical head block root, see below. -pub const DUMMY_CANONICAL_HEAD_BLOCK_ROOT: Hash256 = Hash256::repeat_byte(0xff); - -pub fn upgrade_to_v23( - db: Arc>, -) -> Result, Error> { - info!("Upgrading DB schema from v22 to v23"); - - // 1) Set the head-tracker to empty - let Some(persisted_beacon_chain_v22) = - db.get_item::(&BEACON_CHAIN_DB_KEY)? - else { - return Err(Error::MigrationError( - "No persisted beacon chain found in DB. Datadir could be incorrect or DB could be corrupt".to_string() - )); - }; - - let persisted_beacon_chain = PersistedBeaconChain { - genesis_block_root: persisted_beacon_chain_v22.genesis_block_root, - }; - - let mut ops = vec![persisted_beacon_chain.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; - - // 2) Wipe out all state temporary flags. While un-used in V23, if there's a rollback we could - // end-up with an inconsistent DB. - for state_root_result in db - .hot_db - .iter_column_keys::(DBColumn::BeaconStateTemporary) - { - let state_root = state_root_result?; - debug!( - ?state_root, - "Deleting temporary state on v23 schema migration" - ); - ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateTemporary, - state_root.as_slice().to_vec(), - )); - - // We also delete the temporary states themselves. Although there are known issue with - // temporary states and this could lead to DB corruption, we will only corrupt the DB in - // cases where the DB would be corrupted by restarting on v7.0.x. We consider these DBs - // "too far gone". Deleting here has the advantage of not generating warnings about - // disjoint state DAGs in the v24 upgrade, or the first pruning after migration. - ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconState, - state_root.as_slice().to_vec(), - )); - ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateSummary, - state_root.as_slice().to_vec(), - )); - } - - Ok(ops) -} - -pub fn downgrade_from_v23( - db: Arc>, -) -> Result, Error> { - let Some(persisted_beacon_chain) = db.get_item::(&BEACON_CHAIN_DB_KEY)? - else { - // The `PersistedBeaconChain` must exist if fork choice exists. 
- return Err(Error::MigrationError( - "No persisted beacon chain found in DB. Datadir could be incorrect or DB could be corrupt".to_string(), - )); - }; - - // Recreate head-tracker from fork choice. - let Some(persisted_fork_choice) = db.get_item::<PersistedForkChoiceV17>(&FORK_CHOICE_DB_KEY)? - else { - // Fork choice should exist if the database exists. - return Err(Error::MigrationError( - "No fork choice found in DB".to_string(), - )); - }; - - // We use dummy roots for the justified states because we can source the balances from the v17 - // persisted fork choice. The justified state root isn't required to look up the justified state's - // balances (as it would be in V28). This fork choice object with corrupt state roots SHOULD NOT - // be written to disk. - let dummy_justified_state_root = Hash256::repeat_byte(0x66); - let dummy_unrealized_justified_state_root = Hash256::repeat_byte(0x77); - - let fc_store = BeaconForkChoiceStore::from_persisted_v17( - persisted_fork_choice.fork_choice_store_v17, - dummy_justified_state_root, - dummy_unrealized_justified_state_root, - db.clone(), - ) - .map_err(|e| { - Error::MigrationError(format!( - "Error loading fork choice store from persisted: {e:?}" - )) - })?; - - // Doesn't matter what policy we use for invalid payloads, as our head calculation just - // considers descent from finalization. - let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let fork_choice = ForkChoice::from_persisted( - persisted_fork_choice.fork_choice_v17.try_into()?, - reset_payload_statuses, - fc_store, - &db.spec, - ) - .map_err(|e| { - Error::MigrationError(format!("Error loading fork choice from persisted: {e:?}")) - })?; - - let heads = fork_choice - .proto_array() - .heads_descended_from_finalization::<T::EthSpec>(fork_choice.finalized_checkpoint()); - - let head_roots = heads.iter().map(|node| node.root).collect(); - let head_slots = heads.iter().map(|node| node.slot).collect(); - - let persisted_beacon_chain_v22 = PersistedBeaconChainV22 { - _canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT, - genesis_block_root: persisted_beacon_chain.genesis_block_root, - ssz_head_tracker: SszHeadTracker { - roots: head_roots, - slots: head_slots, - }, - }; - - let ops = vec![persisted_beacon_chain_v22.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; - - Ok(ops) -} - -/// Helper struct that is used to encode/decode the state of the `HeadTracker` as SSZ bytes. -/// -/// This is used when persisting the state of the `BeaconChain` to disk. 
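The downgrade above rebuilds the head tracker from fork choice. The underlying idea: a head is any block that no other block references as a parent. A toy sketch of that computation (u64 roots standing in for Hash256, a plain map standing in for proto-array):

```rust
// Recovering chain heads from block parent links alone.
use std::collections::{HashMap, HashSet};

type Root = u64; // stand-in for Hash256

/// Heads are exactly the blocks that no other block lists as its parent.
fn heads(parents: &HashMap<Root, Root>) -> Vec<Root> {
    let referenced: HashSet<Root> = parents.values().copied().collect();
    parents
        .keys()
        .filter(|root| !referenced.contains(root))
        .copied()
        .collect()
}

fn main() {
    // 1 <- 2 <- 3 and a fork 2 <- 4: heads are 3 and 4.
    let parents = HashMap::from([(2, 1), (3, 2), (4, 2)]);
    let mut h = heads(&parents);
    h.sort_unstable();
    assert_eq!(h, vec![3, 4]);
}
```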
-#[derive(Encode, Decode, Clone)] -pub struct SszHeadTracker { - roots: Vec, - slots: Vec, -} - -#[derive(Clone, Encode, Decode)] -pub struct PersistedBeaconChainV22 { - /// This value is ignored to resolve the issue described here: - /// - /// https://github.com/sigp/lighthouse/pull/1639 - /// - /// Its removal is tracked here: - /// - /// https://github.com/sigp/lighthouse/issues/1784 - pub _canonical_head_block_root: Hash256, - pub genesis_block_root: Hash256, - /// DEPRECATED - pub ssz_head_tracker: SszHeadTracker, -} - -impl StoreItem for PersistedBeaconChainV22 { - fn db_column() -> DBColumn { - DBColumn::BeaconChain - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs deleted file mode 100644 index c8dfe1ac9b..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs +++ /dev/null @@ -1,607 +0,0 @@ -use crate::{ - beacon_chain::BeaconChainTypes, - summaries_dag::{DAGStateSummary, DAGStateSummaryV22, StateSummariesDAG}, -}; -use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::Encode; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; -use store::{ - DBColumn, Error, HotColdDB, HotStateSummary, KeyValueStore, KeyValueStoreOp, StoreItem, - hdiff::StorageStrategy, - hot_cold_store::{HotStateSummaryV22, OptionalDiffBaseState}, -}; -use tracing::{debug, info, warn}; -use types::{ - BeaconState, CACHED_EPOCHS, ChainSpec, Checkpoint, CommitteeCache, EthSpec, Hash256, Slot, - execution::StatePayloadStatus, -}; - -/// We stopped using the pruning checkpoint in schema v23 but never explicitly deleted it. -/// -/// We delete it as part of the v24 migration. -pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); - -pub fn store_full_state_v22( - state_root: &Hash256, - state: &BeaconState, - ops: &mut Vec, -) -> Result<(), Error> { - let bytes = StorageContainer::new(state).as_ssz_bytes(); - ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconState, - state_root.as_slice().to_vec(), - bytes, - )); - Ok(()) -} - -/// Fetch a V22 state from the database either as a full state or using block replay. -pub fn get_state_v22( - db: &Arc>, - state_root: &Hash256, - spec: &ChainSpec, -) -> Result>, Error> { - let Some(summary) = db.get_item::(state_root)? else { - return Ok(None); - }; - let Some(base_state) = - get_full_state_v22(&db.hot_db, &summary.epoch_boundary_state_root, spec)? - else { - return Ok(None); - }; - // Loading hot states via block replay doesn't care about the schema version, so we can use - // the DB's current method for this. - let update_cache = false; - db.load_hot_state_using_replay( - base_state, - summary.slot, - summary.latest_block_root, - StatePayloadStatus::Pending, - update_cache, - ) - .map(Some) -} - -pub fn get_full_state_v22, E: EthSpec>( - db: &KV, - state_root: &Hash256, - spec: &ChainSpec, -) -> Result>, Error> { - match db.get_bytes(DBColumn::BeaconState, state_root.as_slice())? { - Some(bytes) => { - let container = StorageContainer::from_ssz_bytes(&bytes, spec)?; - Ok(Some(container.try_into()?)) - } - None => Ok(None), - } -} - -/// A container for storing `BeaconState` components. -/// -/// DEPRECATED. 
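The deleted types lean on Lighthouse's `StoreItem` pattern: a type declares its DB column and its byte codec, and the store handles the rest. A self-contained analogue (column set and codec reduced to the bare minimum; not the real trait):

```rust
// Minimal analogue of the StoreItem column-plus-codec pattern.

#[derive(Debug, Clone, Copy, PartialEq)]
enum DBColumn {
    BeaconChain,
}

trait StoreItem: Sized {
    fn db_column() -> DBColumn;
    fn as_store_bytes(&self) -> Vec<u8>;
    fn from_store_bytes(bytes: &[u8]) -> Result<Self, String>;
}

struct GenesisRoot([u8; 32]);

impl StoreItem for GenesisRoot {
    fn db_column() -> DBColumn {
        DBColumn::BeaconChain
    }
    fn as_store_bytes(&self) -> Vec<u8> {
        self.0.to_vec()
    }
    fn from_store_bytes(bytes: &[u8]) -> Result<Self, String> {
        let arr: [u8; 32] = bytes.try_into().map_err(|_| "bad length".to_string())?;
        Ok(GenesisRoot(arr))
    }
}

fn main() {
    // Round-trip through the byte codec.
    let item = GenesisRoot([0xab; 32]);
    let back = GenesisRoot::from_store_bytes(&item.as_store_bytes()).unwrap();
    assert_eq!(item.0, back.0);
    assert_eq!(GenesisRoot::db_column(), DBColumn::BeaconChain);
}
```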
-#[derive(Encode)] -pub struct StorageContainer { - state: BeaconState, - committee_caches: Vec>, -} - -impl StorageContainer { - /// Create a new instance for storing a `BeaconState`. - pub fn new(state: &BeaconState) -> Self { - Self { - state: state.clone(), - committee_caches: state.committee_caches().to_vec(), - } - } - - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { - // We need to use the slot-switching `from_ssz_bytes` of `BeaconState`, which doesn't - // compose with the other SSZ utils, so we duplicate some parts of `ssz_derive` here. - let mut builder = ssz::SszDecoderBuilder::new(bytes); - - builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; - - let mut decoder = builder.build()?; - - let state = decoder.decode_next_with(|bytes| BeaconState::from_ssz_bytes(bytes, spec))?; - let committee_caches = decoder.decode_next()?; - - Ok(Self { - state, - committee_caches, - }) - } -} - -impl TryInto> for StorageContainer { - type Error = Error; - - fn try_into(mut self) -> Result, Error> { - let mut state = self.state; - - for i in (0..CACHED_EPOCHS).rev() { - if i >= self.committee_caches.len() { - return Err(Error::SszDecodeError(DecodeError::BytesInvalid( - "Insufficient committees for BeaconState".to_string(), - ))); - }; - - state.committee_caches_mut()[i] = self.committee_caches.remove(i); - } - - Ok(state) - } -} - -/// The checkpoint used for pruning the database. -/// -/// Updated whenever pruning is successful. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct PruningCheckpoint { - pub checkpoint: Checkpoint, -} - -impl StoreItem for PruningCheckpoint { - fn db_column() -> DBColumn { - DBColumn::BeaconMeta - } - - fn as_store_bytes(&self) -> Vec { - self.checkpoint.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(PruningCheckpoint { - checkpoint: Checkpoint::from_ssz_bytes(bytes)?, - }) - } -} - -pub fn upgrade_to_v24( - db: Arc>, -) -> Result, Error> { - let mut migrate_ops = vec![]; - let split = db.get_split_info(); - let hot_hdiff_start_slot = split.slot; - - // Delete the `PruningCheckpoint` (no longer used). - migrate_ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconMeta, - PRUNING_CHECKPOINT_KEY.as_slice().to_vec(), - )); - - // Sanity check to make sure the HDiff grid is aligned with the epoch start - if hot_hdiff_start_slot % T::EthSpec::slots_per_epoch() != 0 { - return Err(Error::MigrationError(format!( - "hot_hdiff_start_slot is not first slot in epoch {hot_hdiff_start_slot}" - ))); - } - - // After V24 hot tree states, the in-memory `anchor_info.anchor_slot` is the start slot of the - // hot HDiff grid. Before the migration, it's set to the slot of the anchor state in the DB: - // - the genesis state on a genesis sync, or - // - the checkpoint state on a checkpoint sync. - // - // If the node has been running for a while the `anchor_slot` might be less than the finalized - // checkpoint. This upgrade constructs a grid only with unfinalized states, rooted in the - // current finalized state. So we set the `anchor_slot` to `split.slot` to root the grid in the - // current finalized state. 
Each migration sets the split to - // ``` - // Split { slot: finalized_state.slot(), state_root: finalized_state_root } - // ``` - { - let anchor_info = db.get_anchor_info(); - - // If the node is already an archive node, we can set the anchor slot to 0 and copy - // snapshots and diffs from the freezer DB to the hot DB in order to establish an initial - // hot grid that is aligned/"perfect" (no `start_slot`/`anchor_slot` to worry about). - // - // This only works if all of the following are true: - // - // - We have the previous snapshot for the split state stored in the freezer DB, i.e. - // if `previous_snapshot_slot >= state_upper_limit`. - // - The split state itself will be stored as a diff or snapshot in the new grid. We choose - // not to support a split state that requires block replay, because computing its previous - // state root from the DAG is not straight-forward. - let dummy_start_slot = Slot::new(0); - let closest_layer_points = db - .hierarchy - .closest_layer_points(split.slot, dummy_start_slot); - - let previous_snapshot_slot = - closest_layer_points - .iter() - .copied() - .min() - .ok_or(Error::MigrationError( - "closest_layer_points must not be empty".to_string(), - ))?; - - if previous_snapshot_slot >= anchor_info.state_upper_limit - && db - .hierarchy - .storage_strategy(split.slot, dummy_start_slot) - .is_ok_and(|strategy| !strategy.is_replay_from()) - { - info!( - %previous_snapshot_slot, - split_slot = %split.slot, - "Aligning hot diff grid to freezer" - ); - - // Set anchor slot to 0 in case it was set to something else by a previous checkpoint - // sync. - let mut new_anchor_info = anchor_info.clone(); - new_anchor_info.anchor_slot = Slot::new(0); - - // Update the anchor on disk atomically if migration is successful - migrate_ops.push(db.compare_and_set_anchor_info(anchor_info, new_anchor_info)?); - - // Copy each of the freezer layers to the hot DB in slot ascending order. - for layer_slot in closest_layer_points.into_iter().rev() { - // Do not try to load the split state itself from the freezer, it won't be there. - // It will be migrated in the main loop below. - if layer_slot == split.slot { - continue; - } - - let mut freezer_state = db.load_cold_state_by_slot(layer_slot)?; - - let state_root = freezer_state.canonical_root()?; - - let mut state_ops = vec![]; - db.store_hot_state(&state_root, &freezer_state, &mut state_ops)?; - db.hot_db.do_atomically(state_ops)?; - } - } else { - // Otherwise for non-archive nodes, set the anchor slot for the hot grid to the current - // split slot (the oldest slot available). - let mut new_anchor_info = anchor_info.clone(); - new_anchor_info.anchor_slot = hot_hdiff_start_slot; - - // Update the anchor in disk atomically if migration is successful - migrate_ops.push(db.compare_and_set_anchor_info(anchor_info, new_anchor_info)?); - } - } - - let state_summaries_dag = new_dag::(&db)?; - - // We compute the state summaries DAG outside of a DB migration. Therefore if the DB is properly - // prunned, it should have a single root equal to the split. 
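The grid alignment above relies on `closest_layer_points`. Assuming, purely for illustration, diff layers spaced at powers of two (the real spacing comes from Lighthouse's hierarchy config), the closest point of each layer at or below a slot is just a rounding-down:

```rust
// Illustrative closest-layer-point computation under a powers-of-two
// spacing assumption; not Lighthouse's actual hierarchy logic.

fn closest_layer_points(slot: u64, exponents: &[u32]) -> Vec<u64> {
    exponents
        .iter()
        .map(|e| {
            // Largest multiple of 2^e at or below `slot`.
            let spacing = 1u64 << e;
            (slot / spacing) * spacing
        })
        .collect()
}

fn main() {
    // Layers every 2^5, 2^9 and 2^13 slots.
    let points = closest_layer_points(10_000, &[5, 9, 13]);
    assert_eq!(points, vec![9_984, 9_728, 8_192]);
}
```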
- let state_summaries_dag_roots = state_summaries_dag.tree_roots(); - if state_summaries_dag_roots.len() == 1 { - let (root_summary_state_root, root_summary) = - state_summaries_dag_roots.first().expect("len == 1"); - if *root_summary_state_root != split.state_root { - warn!( - ?root_summary_state_root, - ?root_summary, - ?split, - "State summaries DAG root is not the split" - ); - } - } else { - warn!( - location = "migration", - state_summaries_dag_roots = ?state_summaries_dag_roots, - "State summaries DAG found more than one root" - ); - } - - // Sort summaries by slot so we have their ancestor diffs already stored when we store them. - // If the summaries are sorted topologically we can insert them into the DB as if they were - // new states, re-using existing code. As states are likely to be sequential the diff cache - // should kick in making the migration more efficient. If we just iterate the column of - // summaries we may get distant states on each iteration. - let summaries_by_slot = state_summaries_dag.summaries_by_slot_ascending(); - debug!( - summaries_count = state_summaries_dag.summaries_count(), - slots_count = summaries_by_slot.len(), - min_slot = ?summaries_by_slot.first_key_value().map(|(slot, _)| slot), - max_slot = ?summaries_by_slot.last_key_value().map(|(slot, _)| slot), - ?state_summaries_dag_roots, - %hot_hdiff_start_slot, - split_state_root = ?split.state_root, - "Starting hot states migration" - ); - - // Upgrade all hot DB state summaries to the new type: - // - Set all summaries of boundary states to `Snapshot` type - // - Set all others to `Replay` pointing to `epoch_boundary_state_root` - - let mut diffs_written = 0; - let mut summaries_written = 0; - let mut last_log_time = Instant::now(); - - for (slot, old_hot_state_summaries) in summaries_by_slot { - for (state_root, old_summary) in old_hot_state_summaries { - if slot < hot_hdiff_start_slot { - // To reach here, there must be some pruning issue with the DB where we still have - // hot states below the split slot. These states can't be migrated as we can't compute - // a storage strategy for them. After this if-else block, the summary and state are - // scheduled for deletion. - debug!( - %slot, - ?state_root, - "Ignoring state summary prior to split slot" - ); - } else { - // 1. Store snapshot or diff at this slot (if required). - let storage_strategy = db.hot_storage_strategy(slot)?; - debug!( - %slot, - ?state_root, - ?storage_strategy, - "Migrating state summary" - ); - - match storage_strategy { - StorageStrategy::DiffFrom(_) | StorageStrategy::Snapshot => { - // Load the state and re-store it as a snapshot or diff. - let state = get_state_v22::<T>(&db, &state_root, &db.spec)? - .ok_or(Error::MissingState(state_root))?; - - // Store immediately so that future diffs can load and diff from it. - let mut ops = vec![]; - // We must commit the hot state summary immediately, otherwise we can't diff - // against it and future writes will fail. That's why we write the new hot - // summaries in a different column to have both new and old data present at - // once. Otherwise if the process crashes during the migration the database will - // be broken. 
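The match above branches on three storage strategies. A toy model with invented slot spacing (the real rules come from the hierarchy config) shows the shape of that decision:

```rust
// Illustrative-only model of the snapshot / diff / replay decision.

#[derive(Debug, PartialEq)]
enum StorageStrategy {
    Snapshot,
    DiffFrom(u64),
    ReplayFrom(u64),
}

/// Toy policy: snapshot every 2048 slots, diff at every 256-slot point
/// (based on the previous 2048-slot point), replay everything else from
/// the previous 256-slot point.
fn storage_strategy(slot: u64) -> StorageStrategy {
    if slot % 2048 == 0 {
        StorageStrategy::Snapshot
    } else if slot % 256 == 0 {
        StorageStrategy::DiffFrom(slot / 2048 * 2048)
    } else {
        StorageStrategy::ReplayFrom(slot / 256 * 256)
    }
}

fn main() {
    assert_eq!(storage_strategy(4096), StorageStrategy::Snapshot);
    assert_eq!(storage_strategy(4352), StorageStrategy::DiffFrom(4096));
    assert_eq!(storage_strategy(4353), StorageStrategy::ReplayFrom(4352));
}
```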
- db.store_hot_state_summary(&state_root, &state, &mut ops)?; - db.store_hot_state_diffs(&state_root, &state, &mut ops)?; - db.hot_db.do_atomically(ops)?; - diffs_written += 1; - } - StorageStrategy::ReplayFrom(diff_base_slot) => { - // Optimization: instead of having to load the state of each summary we load x32 - // less states by manually computing the HotStateSummary roots using the - // computed state dag. - // - // No need to store diffs for states that will be reconstructed by replaying - // blocks. - // - // 2. Convert the summary to the new format. - if state_root == split.state_root { - return Err(Error::MigrationError( - "unreachable: split state should be stored as a snapshot or diff" - .to_string(), - )); - } - let previous_state_root = state_summaries_dag - .previous_state_root(state_root) - .map_err(|e| { - Error::MigrationError(format!( - "error computing previous_state_root {e:?}" - )) - })?; - - let diff_base_state = OptionalDiffBaseState::new( - diff_base_slot, - state_summaries_dag - .ancestor_state_root_at_slot(state_root, diff_base_slot) - .map_err(|e| { - Error::MigrationError(format!( - "error computing ancestor_state_root_at_slot \ - ({state_root:?}, {diff_base_slot}): {e:?}" - )) - })?, - ); - - let new_summary = HotStateSummary { - slot, - latest_block_root: old_summary.latest_block_root, - latest_block_slot: old_summary.latest_block_slot, - previous_state_root, - diff_base_state, - }; - let op = new_summary.as_kv_store_op(state_root); - // It's not necessary to immediately commit the summaries of states that are - // ReplayFrom. However we do so for simplicity. - db.hot_db.do_atomically(vec![op])?; - } - } - } - - // 3. Stage old data for deletion. - if slot % T::EthSpec::slots_per_epoch() == 0 { - migrate_ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconState, - state_root.as_slice().to_vec(), - )); - } - - // Delete previous summaries - migrate_ops.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateSummary, - state_root.as_slice().to_vec(), - )); - - summaries_written += 1; - if last_log_time.elapsed() > Duration::from_secs(5) { - last_log_time = Instant::now(); - info!( - diffs_written, - summaries_written, - summaries_count = state_summaries_dag.summaries_count(), - "Hot states migration in progress" - ); - } - } - } - - info!( - diffs_written, - summaries_written, - summaries_count = state_summaries_dag.summaries_count(), - "Hot states migration complete" - ); - - Ok(migrate_ops) -} - -pub fn downgrade_from_v24( - db: Arc>, -) -> Result, Error> { - let state_summaries = db - .load_hot_state_summaries()? - .into_iter() - .map(|(state_root, summary)| (state_root, summary.into())) - .collect::>(); - - info!( - summaries_count = state_summaries.len(), - "DB downgrade of v24 state summaries started" - ); - - let state_summaries_dag = StateSummariesDAG::new(state_summaries) - .map_err(|e| Error::MigrationError(format!("Error on new StateSumariesDAG {e:?}")))?; - - let mut migrate_ops = vec![]; - let mut states_written = 0; - let mut summaries_written = 0; - let mut summaries_skipped = 0; - let mut last_log_time = Instant::now(); - - // Rebuild the PruningCheckpoint from the split. - let split = db.get_split_info(); - let pruning_checkpoint = PruningCheckpoint { - checkpoint: Checkpoint { - epoch: split.slot.epoch(T::EthSpec::slots_per_epoch()), - root: split.block_root, - }, - }; - migrate_ops.push(pruning_checkpoint.as_kv_store_op(PRUNING_CHECKPOINT_KEY)); - - // Convert state summaries back to the old format. 
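The V22 summary reconstruction that follows needs the state root at the enclosing epoch's first slot. The slot arithmetic is simple; a sketch using mainnet's 32 slots per epoch:

```rust
// Epoch-boundary slot arithmetic used when rebuilding V22 summaries.

const SLOTS_PER_EPOCH: u64 = 32;

/// First slot of the epoch containing `slot`.
fn epoch_boundary_slot(slot: u64) -> u64 {
    slot - slot % SLOTS_PER_EPOCH
}

fn main() {
    assert_eq!(epoch_boundary_slot(0), 0);
    assert_eq!(epoch_boundary_slot(31), 0);
    assert_eq!(epoch_boundary_slot(32), 32);
    assert_eq!(epoch_boundary_slot(100), 96);
}
```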
- for (state_root, summary) in state_summaries_dag - .summaries_by_slot_ascending() - .into_iter() - .flat_map(|(_, summaries)| summaries) - { - // No need to migrate any states prior to the split. The v22 schema does not need them, and - // they would generate warnings about a disjoint DAG when re-upgrading to V24. - if summary.slot < split.slot { - debug!( - slot = %summary.slot, - ?state_root, - "Skipping migration of pre-split state" - ); - summaries_skipped += 1; - continue; - } - - // If boundary state: persist. - // Do not cache these states as they are unlikely to be relevant later. - let update_cache = false; - if summary.slot % T::EthSpec::slots_per_epoch() == 0 { - let (state, _) = db - .load_hot_state(&state_root, update_cache)? - .ok_or(Error::MissingState(state_root))?; - - // Immediately commit the state, so we don't OOM. It's stored in a different - // column so if the migration crashes we'll just store extra harmless junk in the DB. - let mut state_write_ops = vec![]; - store_full_state_v22(&state_root, &state, &mut state_write_ops)?; - db.hot_db.do_atomically(state_write_ops)?; - states_written += 1; - } - - // Persist old summary. - let epoch_boundary_state_slot = summary.slot - summary.slot % T::EthSpec::slots_per_epoch(); - let old_summary = HotStateSummaryV22 { - slot: summary.slot, - latest_block_root: summary.latest_block_root, - epoch_boundary_state_root: state_summaries_dag - .ancestor_state_root_at_slot(state_root, epoch_boundary_state_slot) - .map_err(|e| { - Error::MigrationError(format!( - "error computing ancestor_state_root_at_slot({state_root:?}, {epoch_boundary_state_slot}) {e:?}" - )) - })?, - }; - migrate_ops.push(KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconStateSummary, - state_root.as_slice().to_vec(), - old_summary.as_ssz_bytes(), - )); - summaries_written += 1; - - if last_log_time.elapsed() > Duration::from_secs(5) { - last_log_time = Instant::now(); - info!( - states_written, - summaries_written, - summaries_count = state_summaries_dag.summaries_count(), - "DB downgrade of v24 state summaries in progress" - ); - } - } - - // Delete all V24 schema data. We do this outside the loop over summaries to ensure we cover - // every piece of data and to simplify logic around skipping certain summaries that do not get - // migrated. - for db_column in [ - DBColumn::BeaconStateHotSummary, - DBColumn::BeaconStateHotDiff, - DBColumn::BeaconStateHotSnapshot, - ] { - for key in db.hot_db.iter_column_keys::(db_column) { - let state_root = key?; - migrate_ops.push(KeyValueStoreOp::DeleteKey( - db_column, - state_root.as_slice().to_vec(), - )); - } - } - - info!( - states_written, - summaries_written, - summaries_skipped, - summaries_count = state_summaries_dag.summaries_count(), - "DB downgrade of v24 state summaries completed" - ); - - Ok(migrate_ops) -} - -fn new_dag( - db: &HotColdDB, -) -> Result { - // Collect all sumaries for unfinalized states - let state_summaries_v22 = db - .hot_db - // Collect summaries from the legacy V22 column BeaconStateSummary - .iter_column::(DBColumn::BeaconStateSummary) - .map(|res| { - let (key, value) = res?; - let state_root: Hash256 = key; - let summary = HotStateSummaryV22::from_ssz_bytes(&value)?; - let block_root = summary.latest_block_root; - // Read blocks to get the block slot and parent root. In Holesky forced finalization it - // took 5100 ms to read 15072 state summaries, so it's not really necessary to - // de-duplicate block reads. - let block = db - .get_blinded_block(&block_root)? 
- .ok_or(Error::MissingBlock(block_root))?; - - Ok(( - state_root, - DAGStateSummaryV22 { - slot: summary.slot, - latest_block_root: summary.latest_block_root, - block_slot: block.slot(), - block_parent_root: block.parent_root(), - }, - )) - }) - .collect::, Error>>()?; - - StateSummariesDAG::new_from_v22(state_summaries_v22) - .map_err(|e| Error::MigrationError(format!("error computing states summaries dag {e:?}"))) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs deleted file mode 100644 index 44e8894d6f..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs +++ /dev/null @@ -1,20 +0,0 @@ -use store::{DBColumn, Error, KeyValueStoreOp}; -use tracing::info; -use types::Hash256; - -pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::ZERO; - -/// Delete the on-disk eth1 data. -pub fn upgrade_to_v25() -> Result, Error> { - info!("Deleting eth1 data from disk for v25 DB upgrade"); - Ok(vec![KeyValueStoreOp::DeleteKey( - DBColumn::Eth1Cache, - ETH1_CACHE_DB_KEY.as_slice().to_vec(), - )]) -} - -/// No-op: we don't need to recreate on-disk eth1 data, as previous versions gracefully handle -/// data missing from disk. -pub fn downgrade_from_v25() -> Result, Error> { - Ok(vec![]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs deleted file mode 100644 index 38714ea060..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs +++ /dev/null @@ -1,91 +0,0 @@ -use crate::BeaconChainTypes; -use crate::custody_context::CustodyContextSsz; -use crate::persisted_custody::{CUSTODY_DB_KEY, PersistedCustody}; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::sync::Arc; -use store::{DBColumn, Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use tracing::info; - -#[derive(Debug, Encode, Decode, Clone)] -pub(crate) struct CustodyContextSszV24 { - pub(crate) validator_custody_at_head: u64, - pub(crate) persisted_is_supernode: bool, -} - -pub(crate) struct PersistedCustodyV24(CustodyContextSszV24); - -impl StoreItem for PersistedCustodyV24 { - fn db_column() -> DBColumn { - DBColumn::CustodyContext - } - - fn as_store_bytes(&self) -> Vec { - self.0.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - let custody_context = CustodyContextSszV24::from_ssz_bytes(bytes)?; - Ok(PersistedCustodyV24(custody_context)) - } -} - -/// Upgrade the `CustodyContext` entry to v26. 
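The v26 upgrade below is a plain struct-version bump: carry the old fields across and default the new one to empty. A self-contained sketch of that conversion (field types simplified):

```rust
// Version-bump conversion: old fields copied, new field defaulted.

struct CustodyContextV24 {
    validator_custody_at_head: u64,
    persisted_is_supernode: bool,
}

struct CustodyContextV26 {
    validator_custody_at_head: u64,
    persisted_is_supernode: bool,
    epoch_validator_custody_requirements: Vec<(u64, u64)>,
}

impl From<CustodyContextV24> for CustodyContextV26 {
    fn from(v24: CustodyContextV24) -> Self {
        Self {
            validator_custody_at_head: v24.validator_custody_at_head,
            persisted_is_supernode: v24.persisted_is_supernode,
            // No historical data exists pre-v26, so start with none.
            epoch_validator_custody_requirements: vec![],
        }
    }
}

fn main() {
    let v26: CustodyContextV26 = CustodyContextV24 {
        validator_custody_at_head: 8,
        persisted_is_supernode: false,
    }
    .into();
    assert!(v26.epoch_validator_custody_requirements.is_empty());
}
```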
-pub fn upgrade_to_v26( - db: Arc>, -) -> Result, Error> { - let ops = if db.spec.is_peer_das_scheduled() { - match db.get_item::(&CUSTODY_DB_KEY) { - Ok(Some(PersistedCustodyV24(ssz_v24))) => { - info!("Migrating `CustodyContext` to v26 schema"); - let custody_context_v2 = CustodyContextSsz { - validator_custody_at_head: ssz_v24.validator_custody_at_head, - persisted_is_supernode: ssz_v24.persisted_is_supernode, - epoch_validator_custody_requirements: vec![], - }; - vec![KeyValueStoreOp::PutKeyValue( - DBColumn::CustodyContext, - CUSTODY_DB_KEY.as_slice().to_vec(), - PersistedCustody(custody_context_v2).as_store_bytes(), - )] - } - _ => { - vec![] - } - } - } else { - // Delete it from db if PeerDAS hasn't been scheduled - vec![KeyValueStoreOp::DeleteKey( - DBColumn::CustodyContext, - CUSTODY_DB_KEY.as_slice().to_vec(), - )] - }; - - Ok(ops) -} - -pub fn downgrade_from_v26( - db: Arc>, -) -> Result, Error> { - let res = db.get_item::(&CUSTODY_DB_KEY); - let ops = match res { - Ok(Some(PersistedCustody(ssz_v26))) => { - info!("Migrating `CustodyContext` back from v26 schema"); - let custody_context_v24 = CustodyContextSszV24 { - validator_custody_at_head: ssz_v26.validator_custody_at_head, - persisted_is_supernode: ssz_v26.persisted_is_supernode, - }; - vec![KeyValueStoreOp::PutKeyValue( - DBColumn::CustodyContext, - CUSTODY_DB_KEY.as_slice().to_vec(), - PersistedCustodyV24(custody_context_v24).as_store_bytes(), - )] - } - _ => { - // no op if it's not on the db, as previous versions gracefully handle data missing from disk. - vec![] - } - }; - - Ok(ops) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs deleted file mode 100644 index fbe865ee27..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs +++ /dev/null @@ -1,26 +0,0 @@ -use crate::BeaconChainTypes; -use std::sync::Arc; -use store::{Error, HotColdDB, metadata::SchemaVersion}; - -/// Add `DataColumnCustodyInfo` entry to v27. -pub fn upgrade_to_v27( - db: Arc>, -) -> Result<(), Error> { - if db.spec.is_peer_das_scheduled() { - db.put_data_column_custody_info(None)?; - db.store_schema_version_atomically(SchemaVersion(27), vec![])?; - } - - Ok(()) -} - -pub fn downgrade_from_v27( - db: Arc>, -) -> Result<(), Error> { - if db.spec.is_peer_das_scheduled() { - return Err(Error::MigrationError( - "Cannot downgrade from v27 if peerDAS is scheduled".to_string(), - )); - } - Ok(()) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs deleted file mode 100644 index 5885eaabc0..0000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v28.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::{ - BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, PersistedForkChoiceStoreV17, - beacon_chain::FORK_CHOICE_DB_KEY, - persisted_fork_choice::{PersistedForkChoiceV17, PersistedForkChoiceV28}, - summaries_dag::{DAGStateSummary, StateSummariesDAG}, -}; -use fork_choice::{ForkChoice, ForkChoiceStore, ResetPayloadStatuses}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use tracing::{info, warn}; -use types::{EthSpec, Hash256}; - -/// Upgrade `PersistedForkChoice` from V17 to V28. -pub fn upgrade_to_v28( - db: Arc>, -) -> Result, Error> { - let Some(persisted_fork_choice_v17) = - db.get_item::(&FORK_CHOICE_DB_KEY)? 
- else { - warn!("No fork choice found to upgrade to v28"); - return Ok(vec![]); - }; - - // Load state DAG in order to compute justified checkpoint roots. - let state_summaries_dag = { - let state_summaries = db - .load_hot_state_summaries()? - .into_iter() - .map(|(state_root, summary)| (state_root, summary.into())) - .collect::>(); - - StateSummariesDAG::new(state_summaries).map_err(|e| { - Error::MigrationError(format!("Error loading state summaries DAG: {e:?}")) - })? - }; - - // Determine the justified state roots. - let justified_checkpoint = persisted_fork_choice_v17 - .fork_choice_store_v17 - .justified_checkpoint; - let justified_block_root = justified_checkpoint.root; - let justified_slot = justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let justified_state_root = state_summaries_dag - .state_root_at_slot(justified_block_root, justified_slot) - .ok_or_else(|| { - Error::MigrationError(format!( - "Missing state root for justified slot {justified_slot} with latest_block_root \ - {justified_block_root:?}" - )) - })?; - - let unrealized_justified_checkpoint = persisted_fork_choice_v17 - .fork_choice_store_v17 - .unrealized_justified_checkpoint; - let unrealized_justified_block_root = unrealized_justified_checkpoint.root; - let unrealized_justified_slot = unrealized_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let unrealized_justified_state_root = state_summaries_dag - .state_root_at_slot(unrealized_justified_block_root, unrealized_justified_slot) - .ok_or_else(|| { - Error::MigrationError(format!( - "Missing state root for unrealized justified slot {unrealized_justified_slot} \ - with latest_block_root {unrealized_justified_block_root:?}" - )) - })?; - - let fc_store = BeaconForkChoiceStore::from_persisted_v17( - persisted_fork_choice_v17.fork_choice_store_v17, - justified_state_root, - unrealized_justified_state_root, - db.clone(), - ) - .map_err(|e| { - Error::MigrationError(format!( - "Error loading fork choice store from persisted: {e:?}" - )) - })?; - - info!( - ?justified_state_root, - %justified_slot, - "Added justified state root to fork choice" - ); - - // Construct top-level ForkChoice struct using the patched fork choice store, and the converted - // proto array. - let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let fork_choice = ForkChoice::from_persisted( - persisted_fork_choice_v17.fork_choice_v17.try_into()?, - reset_payload_statuses, - fc_store, - db.get_chain_spec(), - ) - .map_err(|e| Error::MigrationError(format!("Unable to build ForkChoice: {e:?}")))?; - - let ops = vec![BeaconChain::::persist_fork_choice_in_batch_standalone( - &fork_choice, - db.get_config(), - )?]; - - info!("Upgraded fork choice for DB schema v28"); - - Ok(ops) -} - -pub fn downgrade_from_v28( - db: Arc>, -) -> Result, Error> { - let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; - let Some(fork_choice) = - BeaconChain::::load_fork_choice(db.clone(), reset_payload_statuses, db.get_chain_spec()) - .map_err(|e| Error::MigrationError(format!("Unable to load fork choice: {e:?}")))? - else { - warn!("No fork choice to downgrade"); - return Ok(vec![]); - }; - - // Recreate V28 persisted fork choice, then convert each field back to its V17 version. 
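The justified-root resolution above converts a checkpoint epoch to its start slot and queries the summaries DAG for the matching state root. A toy version with the DAG reduced to a flat map:

```rust
// Checkpoint -> state root resolution, with the DAG as a lookup table.
use std::collections::HashMap;

const SLOTS_PER_EPOCH: u64 = 32;

struct Checkpoint {
    epoch: u64,
    root: u64, // stand-in for the block root Hash256
}

fn justified_state_root(
    checkpoint: &Checkpoint,
    // (block_root, slot) -> state_root; a stand-in for the state DAG query.
    state_roots: &HashMap<(u64, u64), u64>,
) -> Result<u64, String> {
    // Epoch N starts at slot N * SLOTS_PER_EPOCH.
    let slot = checkpoint.epoch * SLOTS_PER_EPOCH;
    state_roots
        .get(&(checkpoint.root, slot))
        .copied()
        .ok_or_else(|| format!("missing state root at slot {slot}"))
}

fn main() {
    let state_roots = HashMap::from([((0xb1, 64), 0x51)]);
    let cp = Checkpoint { epoch: 2, root: 0xb1 };
    assert_eq!(justified_state_root(&cp, &state_roots).unwrap(), 0x51);
}
```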
- let persisted_fork_choice = PersistedForkChoiceV28 { - fork_choice: fork_choice.to_persisted(), - fork_choice_store: fork_choice.fc_store().to_persisted(), - }; - - let justified_balances = fork_choice.fc_store().justified_balances(); - - // 1. Create `proto_array::PersistedForkChoiceV17`. - let fork_choice_v17: fork_choice::PersistedForkChoiceV17 = ( - persisted_fork_choice.fork_choice, - justified_balances.clone(), - ) - .into(); - - let fork_choice_store_v17: PersistedForkChoiceStoreV17 = ( - persisted_fork_choice.fork_choice_store, - justified_balances.clone(), - ) - .into(); - - let persisted_fork_choice_v17 = PersistedForkChoiceV17 { - fork_choice_v17, - fork_choice_store_v17, - }; - - let ops = vec![persisted_fork_choice_v17.as_kv_store_op(FORK_CHOICE_DB_KEY)]; - - info!("Downgraded fork choice for DB schema v28"); - - Ok(ops) -} diff --git a/beacon_node/beacon_chain/src/summaries_dag.rs b/beacon_node/beacon_chain/src/summaries_dag.rs index 4ddcdaab5a..50fc0b3820 100644 --- a/beacon_node/beacon_chain/src/summaries_dag.rs +++ b/beacon_node/beacon_chain/src/summaries_dag.rs @@ -14,14 +14,6 @@ pub struct DAGStateSummary { pub previous_state_root: Hash256, } -#[derive(Debug, Clone, Copy)] -pub struct DAGStateSummaryV22 { - pub slot: Slot, - pub latest_block_root: Hash256, - pub block_slot: Slot, - pub block_parent_root: Hash256, -} - pub struct StateSummariesDAG { // state_root -> state_summary state_summaries_by_state_root: HashMap, @@ -40,10 +32,6 @@ pub enum Error { new_state_summary: (Slot, Hash256), }, MissingStateSummary(Hash256), - MissingStateSummaryByBlockRoot { - state_root: Hash256, - latest_block_root: Hash256, - }, MissingChildStateRoot(Hash256), RequestedSlotAboveSummary { starting_state_root: Hash256, @@ -109,89 +97,6 @@ impl StateSummariesDAG { }) } - /// Computes a DAG from a sequence of state summaries, including their parent block - /// relationships. - /// - /// - Expects summaries to be contiguous per slot: there must exist a summary at every slot - /// of each tree branch - /// - Maybe include multiple disjoint trees. The root of each tree will have a ZERO parent state - /// root, which will error later when calling `previous_state_root`. 
- pub fn new_from_v22( - state_summaries_v22: Vec<(Hash256, DAGStateSummaryV22)>, - ) -> Result<Self, Error> { - // Group them by latest block root, and sorted state slot - let mut state_summaries_by_block_root = HashMap::<_, BTreeMap<_, _>>::new(); - for (state_root, summary) in state_summaries_v22.iter() { - let summaries = state_summaries_by_block_root - .entry(summary.latest_block_root) - .or_default(); - - // Sanity check to ensure no duplicate summaries for the tuple (block_root, state_slot) - match summaries.entry(summary.slot) { - Entry::Vacant(entry) => { - entry.insert((state_root, summary)); - } - Entry::Occupied(existing) => { - return Err(Error::DuplicateStateSummary { - block_root: summary.latest_block_root, - existing_state_summary: (summary.slot, *state_root).into(), - new_state_summary: (*existing.key(), *existing.get().0), - }); - } - } - } - - let state_summaries = state_summaries_v22 - .iter() - .map(|(state_root, summary)| { - let previous_state_root = if summary.slot == 0 { - Hash256::ZERO - } else { - let previous_slot = summary.slot - 1; - - // Check the set of states in the same state's block root - let same_block_root_summaries = state_summaries_by_block_root - .get(&summary.latest_block_root) - // Should never error: we construct the HashMap here and must have at least - // one entry per block root - .ok_or(Error::MissingStateSummaryByBlockRoot { - state_root: *state_root, - latest_block_root: summary.latest_block_root, - })?; - if let Some((state_root, _)) = same_block_root_summaries.get(&previous_slot) { - // Skipped slot: block root at previous slot is the same as latest block root. - **state_root - } else { - // Common case: not a skipped slot. - // - // If we can't find a state summary for the parent block and previous slot, - // then there is some amount of disjointedness in the DAG. We set the parent - // state root to 0x0 in this case, and will prune any dangling states. - let parent_block_root = summary.block_parent_root; - state_summaries_by_block_root - .get(&parent_block_root) - .and_then(|parent_block_summaries| { - parent_block_summaries.get(&previous_slot) - }) - .map_or(Hash256::ZERO, |(parent_state_root, _)| **parent_state_root) - } - }; - - Ok(( - *state_root, - DAGStateSummary { - slot: summary.slot, - latest_block_root: summary.latest_block_root, - latest_block_slot: summary.block_slot, - previous_state_root, - }, - )) - }) - .collect::<Result<Vec<_>, _>>()?; - - Self::new(state_summaries) - } - - // Returns all non-unique latest block roots of a given set of states pub fn blocks_of_states<'a, I: Iterator<Item = &'a Hash256>>( &self, @@ -379,106 +284,3 @@ impl From<HotStateSummary> for DAGStateSummary { } } } - -#[cfg(test)] -mod tests { - use super::{DAGStateSummaryV22, Error, StateSummariesDAG}; - use bls::FixedBytesExtended; - use types::{Hash256, Slot}; - - fn root(n: u64) -> Hash256 { - Hash256::from_low_u64_le(n) - } - - #[test] - fn new_from_v22_empty() { - StateSummariesDAG::new_from_v22(vec![]).unwrap(); - } - - fn assert_previous_state_root_is_zero(dag: &StateSummariesDAG, root: Hash256) { - assert!(matches!( - dag.previous_state_root(root).unwrap_err(), - Error::RootUnknownPreviousStateRoot { .. 
} - )); - } - - #[test] - fn new_from_v22_one_state() { - let root_a = root(0xa); - let root_1 = root(1); - let root_2 = root(2); - let summary_1 = DAGStateSummaryV22 { - slot: Slot::new(1), - latest_block_root: root_1, - block_parent_root: root_2, - block_slot: Slot::new(1), - }; - - let dag = StateSummariesDAG::new_from_v22(vec![(root_a, summary_1)]).unwrap(); - - // The parent of the root summary is ZERO - assert_previous_state_root_is_zero(&dag, root_a); - } - - #[test] - fn new_from_v22_multiple_states() { - let dag = StateSummariesDAG::new_from_v22(vec![ - ( - root(0xa), - DAGStateSummaryV22 { - slot: Slot::new(3), - latest_block_root: root(3), - block_parent_root: root(1), - block_slot: Slot::new(3), - }, - ), - ( - root(0xb), - DAGStateSummaryV22 { - slot: Slot::new(4), - latest_block_root: root(4), - block_parent_root: root(3), - block_slot: Slot::new(4), - }, - ), - // fork 1 - ( - root(0xc), - DAGStateSummaryV22 { - slot: Slot::new(5), - latest_block_root: root(5), - block_parent_root: root(4), - block_slot: Slot::new(5), - }, - ), - // fork 2 - // skipped slot - ( - root(0xd), - DAGStateSummaryV22 { - slot: Slot::new(5), - latest_block_root: root(4), - block_parent_root: root(3), - block_slot: Slot::new(4), - }, - ), - // normal slot - ( - root(0xe), - DAGStateSummaryV22 { - slot: Slot::new(6), - latest_block_root: root(6), - block_parent_root: root(4), - block_slot: Slot::new(6), - }, - ), - ]) - .unwrap(); - - // The parent of the root summary is ZERO - assert_previous_state_root_is_zero(&dag, root(0xa)); - assert_eq!(dag.previous_state_root(root(0xc)).unwrap(), root(0xb)); - assert_eq!(dag.previous_state_root(root(0xd)).unwrap(), root(0xb)); - assert_eq!(dag.previous_state_root(root(0xe)).unwrap(), root(0xd)); - } -} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0e187a8f4b..2b4152b550 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3995,11 +3995,7 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo ) .await; - let min_version = if spec.is_fulu_scheduled() { - SchemaVersion(27) - } else { - SchemaVersion(22) - }; + let min_version = CURRENT_SCHEMA_VERSION; // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 6b8c615631..6e01648263 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -186,10 +186,8 @@ impl LevelDB { ) }; - for (start_key, end_key) in [ - endpoints(DBColumn::BeaconState), - endpoints(DBColumn::BeaconStateSummary), - ] { + { + let (start_key, end_key) = endpoints(DBColumn::BeaconStateHotSummary); self.db.compact(&start_key, &end_key); } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8ef91b3c74..78dd69e55a 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -3270,12 +3270,10 @@ impl, Cold: ItemStore> HotColdDB Some(mut split) => { debug!(?split, "Loaded split partial"); // Load the hot state summary to get the block root. 
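The hunk that follows removes the V22 fallback from the block-root lookup. Modeling the two summary stores as plain `Option`s shows both the old and new shapes:

```rust
// Old vs. new block-root lookup, with summary stores as Options.

struct Summaries {
    modern: Option<u64>,     // latest_block_root from the current summary format
    legacy_v22: Option<u64>, // latest_block_root from the V22 format
}

/// The old "any version" behaviour: modern first, then the V22 fallback.
fn block_root_any_version(s: &Summaries) -> Option<u64> {
    s.modern.or(s.legacy_v22)
}

/// The new behaviour: the V22 fallback is gone.
fn block_root(s: &Summaries) -> Option<u64> {
    s.modern
}

fn main() {
    let only_legacy = Summaries { modern: None, legacy_v22: Some(7) };
    assert_eq!(block_root_any_version(&only_legacy), Some(7));
    assert_eq!(block_root(&only_legacy), None);
}
```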
- let latest_block_root = self - .load_block_root_from_summary_any_version(&split.state_root) - .ok_or(HotColdDBError::MissingSplitState( - split.state_root, - split.slot, - ))?; + let latest_block_root = + self.load_block_root_from_summary(&split.state_root).ok_or( + HotColdDBError::MissingSplitState(split.state_root, split.slot), + )?; split.block_root = latest_block_root; Ok(Some(split)) } @@ -3306,29 +3304,11 @@ .map_err(|e| Error::LoadHotStateSummary(*state_root, e.into())) } - /// Load a hot state's summary in V22 format, given its root. - pub fn load_hot_state_summary_v22( - &self, - state_root: &Hash256, - ) -> Result<Option<HotStateSummaryV22>, Error> { - self.hot_db - .get(state_root) - .map_err(|e| Error::LoadHotStateSummary(*state_root, e.into())) - } - - /// Load the latest block root for a hot state summary either in modern form, or V22 form. - /// - /// This function is required to open a V22 database for migration to V24, or vice versa. - pub fn load_block_root_from_summary_any_version( - &self, - state_root: &Hash256, - ) -> Option<Hash256> { + /// Load the latest block root for a hot state summary. + pub fn load_block_root_from_summary(&self, state_root: &Hash256) -> Option<Hash256> { if let Ok(Some(summary)) = self.load_hot_state_summary(state_root) { return Some(summary.latest_block_root); } - if let Ok(Some(summary)) = self.load_hot_state_summary_v22(state_root) { - return Some(summary.latest_block_root); - } None } @@ -4287,30 +4267,6 @@ impl HotStateSummary { } } -/// Legacy hot state summary used in schema V22 and before. -/// -/// This can be deleted when we remove V22 support. -#[derive(Debug, Clone, Copy, Encode, Decode)] -pub struct HotStateSummaryV22 { - pub slot: Slot, - pub latest_block_root: Hash256, - pub epoch_boundary_state_root: Hash256, -} - -impl StoreItem for HotStateSummaryV22 { - fn db_column() -> DBColumn { - DBColumn::BeaconStateSummary - } - - fn as_store_bytes(&self) -> Vec<u8> { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> { - Ok(Self::from_ssz_bytes(bytes)?) - } -} - /// Struct for summarising a state in the freezer database. #[derive(Debug, Clone, Copy, Default, Encode, Decode)] pub(crate) struct ColdStateSummary { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index bfa1200602..bd8caa3ad5 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -77,11 +77,7 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { fn compact(&self) -> Result<(), Error> { // Compact state and block related columns as they are likely to have the most churn, // i.e. entries being created and deleted. 
- for column in [ - DBColumn::BeaconState, - DBColumn::BeaconStateHotSummary, - DBColumn::BeaconBlock, - ] { + for column in [DBColumn::BeaconStateHotSummary, DBColumn::BeaconBlock] { self.compact_column(column)?; } Ok(()) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 9744b9fa08..74b287975e 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -6,7 +6,6 @@ use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; -use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, @@ -1529,47 +1528,17 @@ where /// /// This is used when persisting the state of the fork choice to disk. #[superstruct( - variants(V17, V28), + variants(V28), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] pub struct PersistedForkChoice { - #[superstruct(only(V17))] - pub proto_array_bytes: Vec, - #[superstruct(only(V28))] pub proto_array: proto_array::core::SszContainerV28, pub queued_attestations: Vec, } pub type PersistedForkChoice = PersistedForkChoiceV28; -impl TryFrom for PersistedForkChoiceV28 { - type Error = ssz::DecodeError; - - fn try_from(v17: PersistedForkChoiceV17) -> Result { - let container_v17 = - proto_array::core::SszContainerV17::from_ssz_bytes(&v17.proto_array_bytes)?; - let container_v28 = container_v17.into(); - - Ok(Self { - proto_array: container_v28, - queued_attestations: v17.queued_attestations, - }) - } -} - -impl From<(PersistedForkChoiceV28, JustifiedBalances)> for PersistedForkChoiceV17 { - fn from((v28, balances): (PersistedForkChoiceV28, JustifiedBalances)) -> Self { - let container_v17 = proto_array::core::SszContainerV17::from((v28.proto_array, balances)); - let proto_array_bytes = container_v17.as_ssz_bytes(); - - Self { - proto_array_bytes, - queued_attestations: v28.queued_attestations, - } - } -} - #[cfg(test)] mod tests { use types::MainnetEthSpec; diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index afe06dee1b..8cf2936db4 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -5,7 +5,7 @@ mod metrics; pub use crate::fork_choice::{ AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - PersistedForkChoiceV17, PersistedForkChoiceV28, QueuedAttestation, ResetPayloadStatuses, + PersistedForkChoiceV28, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{ diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 964e836d91..04e57d791b 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -16,5 +16,5 @@ pub use error::Error; pub mod core { pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; pub use super::proto_array_fork_choice::VoteTracker; - pub use super::ssz_container::{SszContainer, SszContainerV17, SszContainerV28}; + pub use super::ssz_container::{SszContainer, SszContainerV28}; } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 1e01b74c8c..42696256f7 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -17,14 
+17,12 @@ four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); pub type SszContainer = SszContainerV28; #[superstruct( - variants(V17, V28), + variants(V28), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] pub struct SszContainer { pub votes: Vec<VoteTracker>, - #[superstruct(only(V17))] - pub balances: Vec<u64>, pub prune_threshold: usize, // Deprecated, remove in a future schema migration justified_checkpoint: Checkpoint, @@ -73,34 +71,3 @@ impl TryFrom<(SszContainer, JustifiedBalances)> for ProtoArrayForkChoice { }) } } - -// Convert V17 to V28 by dropping balances. -impl From<SszContainerV17> for SszContainerV28 { - fn from(v17: SszContainerV17) -> Self { - Self { - votes: v17.votes, - prune_threshold: v17.prune_threshold, - justified_checkpoint: v17.justified_checkpoint, - finalized_checkpoint: v17.finalized_checkpoint, - nodes: v17.nodes, - indices: v17.indices, - previous_proposer_boost: v17.previous_proposer_boost, - } - } -} - -// Convert V28 to V17 by re-adding balances. -impl From<(SszContainerV28, JustifiedBalances)> for SszContainerV17 { - fn from((v28, balances): (SszContainerV28, JustifiedBalances)) -> Self { - Self { - votes: v28.votes, - balances: balances.effective_balances.clone(), - prune_threshold: v28.prune_threshold, - justified_checkpoint: v28.justified_checkpoint, - finalized_checkpoint: v28.finalized_checkpoint, - nodes: v28.nodes, - indices: v28.indices, - previous_proposer_boost: v28.previous_proposer_boost, - } - } -}
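The conversions deleted above are deliberately lossy in one direction and parameterised in the other: V17 to V28 drops `balances`, and V28 to V17 re-attaches balances sourced from the justified state. A reduced sketch of that round trip (container fields cut down to the essentials):

```rust
// Lossy drop of balances one way; externally supplied balances the other.

#[derive(Clone, Debug, PartialEq)]
struct ContainerV17 {
    votes: Vec<u64>,
    balances: Vec<u64>,
}

#[derive(Clone, Debug, PartialEq)]
struct ContainerV28 {
    votes: Vec<u64>,
}

impl From<ContainerV17> for ContainerV28 {
    fn from(v17: ContainerV17) -> Self {
        // Balances are intentionally dropped: the new schema loads them
        // from the justified state instead of persisting them.
        Self { votes: v17.votes }
    }
}

impl From<(ContainerV28, Vec<u64>)> for ContainerV17 {
    fn from((v28, balances): (ContainerV28, Vec<u64>)) -> Self {
        Self { votes: v28.votes, balances }
    }
}

fn main() {
    let v17 = ContainerV17 { votes: vec![1, 2], balances: vec![32, 32] };
    let v28: ContainerV28 = v17.clone().into();
    // Round trip succeeds only because the balances are supplied again.
    let back: ContainerV17 = (v28, vec![32, 32]).into();
    assert_eq!(back, v17);
}
```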