Mirror of https://github.com/sigp/lighthouse.git, synced 2026-03-03 00:31:50 +00:00
Fix block backfill with genesis skip slots (#4820)
## Issue Addressed

Closes #4817.

## Proposed Changes

- Fill in the linear block roots array between 0 and the slot of the first block (e.g. slots 0 and 1 on Holesky).
- Backport the `--freezer`, `--skip` and `--limit` options for `lighthouse db inspect` from tree-states. This allows us to easily view the database corruption of 4817 using `lighthouse db inspect --network holesky --freezer --column bbr --output values --limit 2`.
- Backport the `iter_column_from` change and `MemoryStore` overhaul from tree-states. These are required to enable `lighthouse db inspect`.
- Rework `freezer_upper_limit` to allow state lookups for slots below the `state_lower_limit`. Currently state lookups will fail until state reconstruction completes entirely.

There is a new regression test for the main bug, but no test for the `freezer_upper_limit` fix because we don't currently support running state reconstruction partially (see #3026). This will be fixed once we merge `tree-states`! In lieu of an automated test, I've tested manually on a Holesky node while it was reconstructing.

## Additional Info

Users who backfilled Holesky to slot 0 (e.g. using `--reconstruct-historic-states`) need to either:

- Re-sync from genesis.
- Re-sync using checkpoint sync and the changes from this PR.

Due to the recency of the Holesky genesis, writing a custom pass to fix up broken databases (which would require its own thorough testing) was deemed unnecessary. This is the primary reason for this PR being marked `backwards-incompat`. This will create a few conflicts with Deneb, which I've already resolved on `tree-states-deneb` and will be happy to backport to Deneb once this PR is merged to unstable.
This commit is contained in:
@@ -60,6 +60,24 @@ pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> {
                .default_value("sizes")
                .possible_values(InspectTarget::VARIANTS),
        )
        .arg(
            Arg::with_name("skip")
                .long("skip")
                .value_name("N")
                .help("Skip over the first N keys"),
        )
        .arg(
            Arg::with_name("limit")
                .long("limit")
                .value_name("N")
                .help("Output at most N keys"),
        )
        .arg(
            Arg::with_name("freezer")
                .long("freezer")
                .help("Inspect the freezer DB rather than the hot DB")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("output-dir")
                .long("output-dir")
@@ -208,6 +226,9 @@ pub enum InspectTarget {
pub struct InspectConfig {
    column: DBColumn,
    target: InspectTarget,
    skip: Option<usize>,
    limit: Option<usize>,
    freezer: bool,
    /// Configures where the inspect output should be stored.
    output_dir: PathBuf,
}
@@ -215,11 +236,18 @@ pub struct InspectConfig {
fn parse_inspect_config(cli_args: &ArgMatches) -> Result<InspectConfig, String> {
    let column = clap_utils::parse_required(cli_args, "column")?;
    let target = clap_utils::parse_required(cli_args, "output")?;
    let skip = clap_utils::parse_optional(cli_args, "skip")?;
    let limit = clap_utils::parse_optional(cli_args, "limit")?;
    let freezer = cli_args.is_present("freezer");

    let output_dir: PathBuf =
        clap_utils::parse_optional(cli_args, "output-dir")?.unwrap_or_else(PathBuf::new);
    Ok(InspectConfig {
        column,
        target,
        skip,
        limit,
        freezer,
        output_dir,
    })
}
@@ -247,6 +275,17 @@ pub fn inspect_db<E: EthSpec>(
        .map_err(|e| format!("{:?}", e))?;

    let mut total = 0;
    let mut num_keys = 0;

    let sub_db = if inspect_config.freezer {
        &db.cold_db
    } else {
        &db.hot_db
    };

    let skip = inspect_config.skip.unwrap_or(0);
    let limit = inspect_config.limit.unwrap_or(usize::MAX);

    let base_path = &inspect_config.output_dir;

    if let InspectTarget::Values = inspect_config.target {
@@ -254,20 +293,24 @@ pub fn inspect_db<E: EthSpec>(
            .map_err(|e| format!("Unable to create import directory: {:?}", e))?;
    }

    for res in db.hot_db.iter_column(inspect_config.column) {
    for res in sub_db
        .iter_column::<Vec<u8>>(inspect_config.column)
        .skip(skip)
        .take(limit)
    {
        let (key, value) = res.map_err(|e| format!("{:?}", e))?;

        match inspect_config.target {
            InspectTarget::ValueSizes => {
                println!("{:?}: {} bytes", key, value.len());
                total += value.len();
            }
            InspectTarget::ValueTotal => {
                total += value.len();
                println!("{}: {} bytes", hex::encode(&key), value.len());
            }
            InspectTarget::ValueTotal => (),
            InspectTarget::Values => {
                let file_path =
                    base_path.join(format!("{}_{}.ssz", inspect_config.column.as_str(), key));
                let file_path = base_path.join(format!(
                    "{}_{}.ssz",
                    inspect_config.column.as_str(),
                    hex::encode(&key)
                ));

                let write_result = fs::OpenOptions::new()
                    .create(true)
@@ -283,17 +326,14 @@ pub fn inspect_db<E: EthSpec>(
                } else {
                    println!("Successfully saved values to file: {:?}", file_path);
                }

                total += value.len();
            }
        }
        total += value.len();
        num_keys += 1;
    }

    match inspect_config.target {
        InspectTarget::ValueSizes | InspectTarget::ValueTotal | InspectTarget::Values => {
            println!("Total: {} bytes", total);
        }
    }
    println!("Num keys: {}", num_keys);
    println!("Total: {} bytes", total);

    Ok(())
}
Reference in New Issue
Block a user