Store pubkey cache decompressed on disk (#5897)

* Support uncompressed keys in crypto/bls

* Use uncompressed keys in cache

* Implement DB upgrade

* Implement downgrade

* More logging on v20 upgrade

* Revert "More logging on v20 upgrade"

This reverts commit cc5789b9d3.

* Merge remote-tracking branch 'origin/unstable' into uncompressed-pubkeys

* Add a little more logging

* Merge remote-tracking branch 'origin/unstable' into uncompressed-pubkeys
Michael Sproul, 2024-07-04 14:27:41 +10:00, committed by GitHub
parent d9ad1f5bfa, commit d84e3e391e
10 changed files with 199 additions and 25 deletions
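
Why store keys decompressed: a compressed BLS12-381 public key is 48 bytes, but loading it requires recovering the y-coordinate via a field square root, and that decompression dominates the time to build the validator pubkey cache at startup. The uncompressed form is twice the size (96 bytes, `PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN`) yet deserializes with only cheap byte-to-field checks. A minimal sketch of the trade-off, written against the `blst` crate directly rather than Lighthouse's `crypto/bls` wrappers (whose `serialize_uncompressed`/`deserialize_uncompressed` methods appear in the diffs below):

use blst::min_pk::{PublicKey, SecretKey};

fn main() {
    // Fixed seed, for illustration only.
    let sk = SecretKey::key_gen(&[7u8; 32], &[]).unwrap();
    let pk = sk.sk_to_pk();

    let compressed: [u8; 48] = pk.compress();    // x-coordinate plus flag bits
    let uncompressed: [u8; 96] = pk.serialize(); // full affine (x, y) point

    // Loading compressed bytes must solve for y (a square root in Fp);
    // loading uncompressed bytes skips that work entirely.
    let slow = PublicKey::uncompress(&compressed).unwrap();
    let fast = PublicKey::deserialize(&uncompressed).unwrap();
    assert_eq!(slow.serialize(), fast.serialize());
}

The cost is that the on-disk PubkeyCache column doubles in size, which the change accepts in exchange for the faster startup.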

beacon_node/beacon_chain/src/schema_change.rs

@@ -3,6 +3,7 @@ mod migration_schema_v17;
 mod migration_schema_v18;
 mod migration_schema_v19;
 mod migration_schema_v20;
+mod migration_schema_v21;
 
 use crate::beacon_chain::BeaconChainTypes;
 use crate::types::ChainSpec;
@@ -87,6 +88,14 @@ pub fn migrate_schema<T: BeaconChainTypes>(
             let ops = migration_schema_v20::downgrade_from_v20::<T>(db.clone(), log)?;
             db.store_schema_version_atomically(to, ops)
         }
+        (SchemaVersion(20), SchemaVersion(21)) => {
+            let ops = migration_schema_v21::upgrade_to_v21::<T>(db.clone(), log)?;
+            db.store_schema_version_atomically(to, ops)
+        }
+        (SchemaVersion(21), SchemaVersion(20)) => {
+            let ops = migration_schema_v21::downgrade_from_v21::<T>(db.clone(), log)?;
+            db.store_schema_version_atomically(to, ops)
+        }
         // Anything else is an error.
         (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
             target_version: to,
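
Each adjacent `(from, to)` pair dispatches to a function that returns a batch of key-value operations, and `store_schema_version_atomically` commits that batch together with the new version number, so a crash mid-migration cannot leave half-migrated data tagged with the new schema version. A toy sketch of the pattern with simplified stand-in types (nothing here is Lighthouse's actual API):

use std::collections::BTreeMap;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct SchemaVersion(u64);

enum Op {
    Put(Vec<u8>, Vec<u8>),
}

struct Db {
    data: BTreeMap<Vec<u8>, Vec<u8>>,
    version: SchemaVersion,
}

impl Db {
    // Stand-in for `store_schema_version_atomically`: the data ops and the
    // version bump land together (think: one LevelDB write batch).
    fn store_schema_version_atomically(&mut self, to: SchemaVersion, ops: Vec<Op>) {
        for Op::Put(key, value) in ops {
            self.data.insert(key, value);
        }
        self.version = to;
    }
}

fn main() {
    let mut db = Db { data: BTreeMap::new(), version: SchemaVersion(20) };
    let ops = vec![Op::Put(b"pubkey/0".to_vec(), vec![0u8; 96])];
    db.store_schema_version_atomically(SchemaVersion(21), ops);
    assert_eq!(db.version, SchemaVersion(21));
}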

beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs

@@ -11,6 +11,8 @@ pub fn upgrade_to_v20<T: BeaconChainTypes>(
     db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
+    info!(log, "Upgrading from v19 to v20");
+
     // Load a V15 op pool and transform it to V20.
     let Some(PersistedOperationPoolV15::<T::EthSpec> {
         attestations_v15,
@@ -52,6 +54,8 @@ pub fn downgrade_from_v20<T: BeaconChainTypes>(
     db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
+    info!(log, "Downgrading from v20 to v19");
+
     // Load a V20 op pool and transform it to V15.
     let Some(PersistedOperationPoolV20::<T::EthSpec> {
         attestations,
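
An aside for readers unfamiliar with `slog`, which all of these migrations log through: the message string comes first, and key-value pairs after the `;` become structured fields on the log record. A self-contained sketch with a no-op drain standing in for Lighthouse's real logger:

use slog::{info, o, Discard, Logger};

fn main() {
    // A real node wires a terminal or file drain here; Discard drops output.
    let log = Logger::root(Discard, o!());
    info!(log, "Upgrading from v19 to v20");
    info!(log, "Public key decompression in progress"; "keys_decompressed" => 200_000);
}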

beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs (new file)

@@ -0,0 +1,83 @@
+use crate::beacon_chain::BeaconChainTypes;
+use crate::validator_pubkey_cache::DatabasePubkey;
+use slog::{info, Logger};
+use ssz::{Decode, Encode};
+use std::sync::Arc;
+use store::{
+    get_key_for_col, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem,
+};
+use types::{Hash256, PublicKey};
+
+const LOG_EVERY: usize = 200_000;
+
+pub fn upgrade_to_v21<T: BeaconChainTypes>(
+    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    log: Logger,
+) -> Result<Vec<KeyValueStoreOp>, Error> {
+    info!(log, "Upgrading from v20 to v21");
+
+    let mut ops = vec![];
+
+    // Iterate through all pubkeys and decompress them.
+    for (i, res) in db
+        .hot_db
+        .iter_column::<Hash256>(DBColumn::PubkeyCache)
+        .enumerate()
+    {
+        let (key, value) = res?;
+        let pubkey = PublicKey::from_ssz_bytes(&value)?;
+        let decompressed = DatabasePubkey::from_pubkey(&pubkey);
+        ops.push(decompressed.as_kv_store_op(key));
+
+        if i > 0 && i % LOG_EVERY == 0 {
+            info!(
+                log,
+                "Public key decompression in progress";
+                "keys_decompressed" => i
+            );
+        }
+    }
+    info!(log, "Public key decompression complete");
+
+    Ok(ops)
+}
+
+pub fn downgrade_from_v21<T: BeaconChainTypes>(
+    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    log: Logger,
+) -> Result<Vec<KeyValueStoreOp>, Error> {
+    info!(log, "Downgrading from v21 to v20");
+
+    let mut ops = vec![];
+
+    // Iterate through all pubkeys and recompress them.
+    for (i, res) in db
+        .hot_db
+        .iter_column::<Hash256>(DBColumn::PubkeyCache)
+        .enumerate()
+    {
+        let (key, value) = res?;
+        let decompressed = DatabasePubkey::from_ssz_bytes(&value)?;
+        let (_, pubkey_bytes) = decompressed.as_pubkey().map_err(|e| Error::DBError {
+            message: format!("{e:?}"),
+        })?;
+
+        let db_key = get_key_for_col(DBColumn::PubkeyCache.into(), key.as_bytes());
+        ops.push(KeyValueStoreOp::PutKeyValue(
+            db_key,
+            pubkey_bytes.as_ssz_bytes(),
+        ));
+
+        if i > 0 && i % LOG_EVERY == 0 {
+            info!(
+                log,
+                "Public key compression in progress";
+                "keys_compressed" => i
+            );
+        }
+    }
+    info!(log, "Public key compression complete");
+
+    Ok(ops)
+}
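
For the downgrade to be safe it must exactly invert the upgrade: decompressing a key and later recompressing it has to reproduce the original 48 bytes, otherwise a v20 -> v21 -> v20 round trip would corrupt the cache. Point compression is canonical, so the invariant holds; a sketch of it, again via `blst` directly rather than the Lighthouse wrappers used in the file above:

use blst::min_pk::{PublicKey, SecretKey};

fn main() {
    let sk = SecretKey::key_gen(&[42u8; 32], &[]).unwrap();
    let compressed = sk.sk_to_pk().compress();

    // v20 -> v21: decompress, store the 96-byte serialization.
    let uncompressed = PublicKey::uncompress(&compressed).unwrap().serialize();
    // v21 -> v20: reload the 96-byte form, recompress to 48 bytes.
    let recompressed = PublicKey::deserialize(&uncompressed).unwrap().compress();

    assert_eq!(compressed, recompressed);
}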

beacon_node/beacon_chain/src/validator_pubkey_cache.rs

@@ -1,6 +1,9 @@
 use crate::errors::BeaconChainError;
 use crate::{BeaconChainTypes, BeaconStore};
+use bls::PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN;
+use smallvec::SmallVec;
 use ssz::{Decode, Encode};
+use ssz_derive::{Decode, Encode};
 use std::collections::HashMap;
 use std::marker::PhantomData;
 use store::{DBColumn, Error as StoreError, StoreItem, StoreOp};
@@ -49,14 +52,13 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
         let mut pubkey_bytes = vec![];
 
         for validator_index in 0.. {
-            if let Some(DatabasePubkey(pubkey)) =
+            if let Some(db_pubkey) =
                 store.get_item(&DatabasePubkey::key_for_index(validator_index))?
             {
-                pubkeys.push((&pubkey).try_into().map_err(|e| {
-                    BeaconChainError::ValidatorPubkeyCacheError(format!("{:?}", e))
-                })?);
-                pubkey_bytes.push(pubkey);
-                indices.insert(pubkey, validator_index);
+                let (pk, pk_bytes) = DatabasePubkey::as_pubkey(&db_pubkey)?;
+                pubkeys.push(pk);
+                indices.insert(pk_bytes, validator_index);
+                pubkey_bytes.push(pk_bytes);
             } else {
                 break;
             }
@@ -104,29 +106,29 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
         self.indices.reserve(validator_keys.len());
 
         let mut store_ops = Vec::with_capacity(validator_keys.len());
-        for pubkey in validator_keys {
+        for pubkey_bytes in validator_keys {
             let i = self.pubkeys.len();
 
-            if self.indices.contains_key(&pubkey) {
+            if self.indices.contains_key(&pubkey_bytes) {
                 return Err(BeaconChainError::DuplicateValidatorPublicKey);
             }
 
+            let pubkey = (&pubkey_bytes)
+                .try_into()
+                .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?;
+
             // Stage the new validator key for writing to disk.
             // It will be committed atomically when the block that introduced it is written to disk.
             // Notably it is NOT written while the write lock on the cache is held.
             // See: https://github.com/sigp/lighthouse/issues/2327
             store_ops.push(StoreOp::KeyValueOp(
-                DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i)),
+                DatabasePubkey::from_pubkey(&pubkey)
+                    .as_kv_store_op(DatabasePubkey::key_for_index(i)),
             ));
 
-            self.pubkeys.push(
-                (&pubkey)
-                    .try_into()
-                    .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?,
-            );
-            self.pubkey_bytes.push(pubkey);
-            self.indices.insert(pubkey, i);
+            self.pubkeys.push(pubkey);
+            self.pubkey_bytes.push(pubkey_bytes);
+            self.indices.insert(pubkey_bytes, i);
         }
 
         Ok(store_ops)
@@ -166,7 +168,10 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
 /// Wrapper for a public key stored in the database.
 ///
 /// Keyed by the validator index as `Hash256::from_low_u64_be(index)`.
-struct DatabasePubkey(PublicKeyBytes);
+#[derive(Encode, Decode)]
+pub struct DatabasePubkey {
+    pubkey: SmallVec<[u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN]>,
+}
 
 impl StoreItem for DatabasePubkey {
     fn db_column() -> DBColumn {
@@ -174,11 +179,11 @@ impl StoreItem for DatabasePubkey {
     }
 
     fn as_store_bytes(&self) -> Vec<u8> {
-        self.0.as_ssz_bytes()
+        self.as_ssz_bytes()
     }
 
     fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
-        Ok(Self(PublicKeyBytes::from_ssz_bytes(bytes)?))
+        Ok(Self::from_ssz_bytes(bytes)?)
     }
 }
@@ -186,6 +191,19 @@ impl DatabasePubkey {
     fn key_for_index(index: usize) -> Hash256 {
         Hash256::from_low_u64_be(index as u64)
     }
+
+    pub fn from_pubkey(pubkey: &PublicKey) -> Self {
+        Self {
+            pubkey: pubkey.serialize_uncompressed().into(),
+        }
+    }
+
+    pub fn as_pubkey(&self) -> Result<(PublicKey, PublicKeyBytes), BeaconChainError> {
+        let pubkey = PublicKey::deserialize_uncompressed(&self.pubkey)
+            .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?;
+        let pubkey_bytes = pubkey.compress();
+        Ok((pubkey, pubkey_bytes))
+    }
 }
 
 #[cfg(test)]
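
A note on the keying scheme used by both the cache and the migrations: `key_for_index` packs the validator index big-endian into the low 8 bytes of a 32-byte key, so byte-wise key order (what `iter_column` walks) matches numeric index order, and the cache loader can simply probe indices `0..` until the first missing key. A dependency-free sketch of the same layout (`key_for_index` below is a stand-in for `Hash256::from_low_u64_be`):

fn key_for_index(index: u64) -> [u8; 32] {
    let mut key = [0u8; 32];
    key[24..].copy_from_slice(&index.to_be_bytes());
    key
}

fn main() {
    assert_eq!(key_for_index(1)[31], 1);
    // Lexicographic byte order equals numeric order under this layout.
    assert!(key_for_index(2) > key_for_index(1));
    assert!(key_for_index(256) > key_for_index(255));
}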