mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-20 21:34:46 +00:00
Compute columns in post-PeerDAS checkpoint sync (#6760)
Addresses #6026. Post-PeerDAS, the DB expects to have data columns for the finalized block. Instead of forcing the user to submit the columns, this PR computes them from the blobs that we can already fetch from the checkpointz server or with the existing CLI options. Note 1: (EDIT) the pruning concern has been addressed. Note 2: I have not tested this feature. Note 3: @michaelsproul — an alternative I recall is to not require the blobs/columns at this point and instead expect backfill to populate the finalized block.
This commit is contained in:
@@ -863,6 +863,24 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
));
|
||||
}
|
||||
|
||||
pub fn put_data_columns(
|
||||
&self,
|
||||
block_root: &Hash256,
|
||||
data_columns: DataColumnSidecarList<E>,
|
||||
) -> Result<(), Error> {
|
||||
for data_column in data_columns {
|
||||
self.blobs_db.put_bytes(
|
||||
DBColumn::BeaconDataColumn,
|
||||
&get_data_column_key(block_root, &data_column.index),
|
||||
&data_column.as_ssz_bytes(),
|
||||
)?;
|
||||
self.block_cache
|
||||
.lock()
|
||||
.put_data_column(*block_root, data_column);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn data_columns_as_kv_store_ops(
|
||||
&self,
|
||||
block_root: &Hash256,
|
||||
|
||||
Reference in New Issue
Block a user