Backfill blob storage fix (#5119)

* store blobs in the correct db in backfill

* add database migration

* add migration file

* remove log info suggesting Deneb isn't scheduled

* add batching in blob migration
This commit is contained in:
realbigsean
2024-01-23 17:35:02 -05:00
committed by GitHub
parent 5851027bfd
commit 1cebf41452
5 changed files with 83 additions and 5 deletions

View File

@@ -101,8 +101,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
ChunkWriter::<BlockRoots, _, _>::new(&self.store.cold_db, prev_block_slot.as_usize())?;
let mut new_oldest_blob_slot = blob_info.oldest_blob_slot;
let mut blob_batch = Vec::with_capacity(n_blobs_lists_to_import);
let mut cold_batch = Vec::with_capacity(blocks_to_import.len());
let mut hot_batch = Vec::with_capacity(blocks_to_import.len() + n_blobs_lists_to_import);
let mut hot_batch = Vec::with_capacity(blocks_to_import.len());
let mut signed_blocks = Vec::with_capacity(blocks_to_import.len());
for available_block in blocks_to_import.into_iter().rev() {
@@ -124,7 +125,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
if let Some(blobs) = maybe_blobs {
new_oldest_blob_slot = Some(block.slot());
self.store
.blobs_as_kv_store_ops(&block_root, blobs, &mut hot_batch);
.blobs_as_kv_store_ops(&block_root, blobs, &mut blob_batch);
}
// Store block roots, including at all skip slots in the freezer DB.
@@ -199,6 +200,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Write the I/O batches to disk, writing the blocks themselves first, as it's better
// for the hot DB to contain extra blocks than for the cold DB to point to blocks that
// do not exist.
self.store.blobs_db.do_atomically(blob_batch)?;
self.store.hot_db.do_atomically(hot_batch)?;
self.store.cold_db.do_atomically(cold_batch)?;